// src/librustc_codegen_llvm/mir/place.rs
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use llvm::{self, LLVMConstInBoundsGEP};
use rustc::ty::{self, Ty};
use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, Size, VariantIdx};
use rustc::mir;
use rustc::mir::tcx::PlaceTy;
use base;
use builder::Builder;
use common::{CodegenCx, C_undef, C_usize, C_u8, C_u32, C_uint, C_null, C_uint_big};
use consts;
use type_of::LayoutLlvmExt;
use type_::Type;
use value::Value;
use glue;
use mir::constant::const_alloc_to_llvm;

use super::{FunctionCx, LocalRef};
use super::operand::{OperandRef, OperandValue};

#[derive(Copy, Clone, Debug)]
pub struct PlaceRef<'ll, 'tcx> {
    /// Pointer to the contents of the place.
    pub llval: &'ll Value,

    /// This place's extra data if it is unsized, or `None` if it is sized.
    pub llextra: Option<&'ll Value>,

    /// Monomorphized type of this place, including variant information.
    pub layout: TyLayout<'tcx>,

    /// What alignment we know for this place.
    pub align: Align,
}
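
// Illustrative note (not part of the original source): for a sized local such
// as `let x: u32`, a `PlaceRef` holds the `alloca`'d pointer in `llval`,
// `llextra: None`, the monomorphized `u32` layout, and the alloca's alignment;
// for an unsized place such as a `[u8]` behind a fat pointer, `llextra`
// carries the slice length.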

impl PlaceRef<'ll, 'tcx> {
    pub fn new_sized(
        llval: &'ll Value,
        layout: TyLayout<'tcx>,
        align: Align,
    ) -> PlaceRef<'ll, 'tcx> {
        assert!(!layout.is_unsized());
        PlaceRef {
            llval,
            llextra: None,
            layout,
            align
        }
    }

    pub fn from_const_alloc(
        bx: &Builder<'a, 'll, 'tcx>,
        layout: TyLayout<'tcx>,
        alloc: &mir::interpret::Allocation,
        offset: Size,
    ) -> PlaceRef<'ll, 'tcx> {
        let init = const_alloc_to_llvm(bx.cx, alloc);
        let base_addr = consts::addr_of(bx.cx, init, layout.align, None);

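        // Address the allocation as raw bytes: cast the base pointer to
        // `*i8`, offset it by `offset` bytes with an inbounds GEP, then cast
        // the result to a pointer to the place's LLVM type below.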
        let llval = unsafe { LLVMConstInBoundsGEP(
            consts::bitcast(base_addr, Type::i8p(bx.cx)),
            &C_usize(bx.cx, offset.bytes()),
            1,
        )};
        let llval = consts::bitcast(llval, layout.llvm_type(bx.cx).ptr_to());
        PlaceRef::new_sized(llval, layout, alloc.align)
    }

    pub fn alloca(bx: &Builder<'a, 'll, 'tcx>, layout: TyLayout<'tcx>, name: &str)
                  -> PlaceRef<'ll, 'tcx> {
        debug!("alloca({:?}: {:?})", name, layout);
        assert!(!layout.is_unsized(), "tried to statically allocate unsized place");
        let tmp = bx.alloca(layout.llvm_type(bx.cx), name, layout.align);
        Self::new_sized(tmp, layout, layout.align)
    }

    /// Returns a place for an indirect reference to an unsized place.
    pub fn alloca_unsized_indirect(bx: &Builder<'a, 'll, 'tcx>, layout: TyLayout<'tcx>, name: &str)
                  -> PlaceRef<'ll, 'tcx> {
        debug!("alloca_unsized_indirect({:?}: {:?})", name, layout);
        assert!(layout.is_unsized(), "tried to allocate indirect place for sized values");
        let ptr_ty = bx.cx.tcx.mk_mut_ptr(layout.ty);
        let ptr_layout = bx.cx.layout_of(ptr_ty);
        Self::alloca(bx, ptr_layout, name)
    }

    pub fn len(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Value {
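        // For unsized arrays (`[T]`), the layout records a count of 0 and the
        // real length travels in `llextra`; for sized arrays the count is a
        // compile-time constant.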
        if let layout::FieldPlacement::Array { count, .. } = self.layout.fields {
            if self.layout.is_unsized() {
                assert_eq!(count, 0);
                self.llextra.unwrap()
            } else {
                C_usize(cx, count)
            }
        } else {
            bug!("unexpected layout `{:#?}` in PlaceRef::len", self.layout)
        }
    }

    pub fn load(&self, bx: &Builder<'a, 'll, 'tcx>) -> OperandRef<'ll, 'tcx> {
        debug!("PlaceRef::load: {:?}", self);

        assert_eq!(self.llextra.is_some(), self.layout.is_unsized());

        if self.layout.is_zst() {
            return OperandRef::new_zst(bx.cx, self.layout);
        }

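        // Attach value-range metadata to scalar loads so LLVM can exploit the
        // layout's known valid range. Illustrative examples (assumed common
        // layouts, not spelled out in this file): `bool` is an integer scalar
        // with valid range 0..=1, so its load gets `!range` metadata of
        // [0, 2); a reference whose valid range excludes 0 gets `!nonnull`.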
        let scalar_load_metadata = |load, scalar: &layout::Scalar| {
            let vr = scalar.valid_range.clone();
            match scalar.value {
                layout::Int(..) => {
                    let range = scalar.valid_range_exclusive(bx.cx);
                    if range.start != range.end {
                        bx.range_metadata(load, range);
                    }
                }
                layout::Pointer if vr.start() < vr.end() && !vr.contains(&0) => {
                    bx.nonnull_metadata(load);
                }
                _ => {}
            }
        };

        let val = if let Some(llextra) = self.llextra {
            OperandValue::Ref(self.llval, Some(llextra), self.align)
        } else if self.layout.is_llvm_immediate() {
            let mut const_llval = None;
            unsafe {
                if let Some(global) = llvm::LLVMIsAGlobalVariable(self.llval) {
                    if llvm::LLVMIsGlobalConstant(global) == llvm::True {
                        const_llval = llvm::LLVMGetInitializer(global);
                    }
                }
            }
            let llval = const_llval.unwrap_or_else(|| {
                let load = bx.load(self.llval, self.align);
                if let layout::Abi::Scalar(ref scalar) = self.layout.abi {
                    scalar_load_metadata(load, scalar);
                }
                load
            });
            OperandValue::Immediate(base::to_immediate(bx, llval, self.layout))
        } else if let layout::Abi::ScalarPair(ref a, ref b) = self.layout.abi {
            let load = |i, scalar: &layout::Scalar| {
                let llptr = bx.struct_gep(self.llval, i as u64);
                let load = bx.load(llptr, self.align);
                scalar_load_metadata(load, scalar);
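                // `bool` is stored in memory as `i8` but used as an `i1`
                // immediate, so truncate it after loading.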
                if scalar.is_bool() {
                    bx.trunc(load, Type::i1(bx.cx))
                } else {
                    load
                }
            };
            OperandValue::Pair(load(0, a), load(1, b))
        } else {
            OperandValue::Ref(self.llval, None, self.align)
        };

        OperandRef { val, layout: self.layout }
    }

    /// Access a field, at a point when the value's case is known.
    pub fn project_field(self, bx: &Builder<'a, 'll, 'tcx>, ix: usize) -> PlaceRef<'ll, 'tcx> {
        let cx = bx.cx;
        let field = self.layout.field(cx, ix);
        let offset = self.layout.fields.offset(ix);
        let effective_field_align = self.align.restrict_for_offset(offset);

        let simple = || {
            // Unions and newtypes only use an offset of 0.
            let llval = if offset.bytes() == 0 {
                self.llval
            } else if let layout::Abi::ScalarPair(ref a, ref b) = self.layout.abi {
                // Offsets have to match either the first or the second field.
                assert_eq!(offset, a.value.size(cx).abi_align(b.value.align(cx)));
                bx.struct_gep(self.llval, 1)
            } else {
                bx.struct_gep(self.llval, self.layout.llvm_field_index(ix))
            };
            PlaceRef {
                // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
                llval: bx.pointercast(llval, field.llvm_type(cx).ptr_to()),
                llextra: if cx.type_has_metadata(field.ty) {
                    self.llextra
                } else {
                    None
                },
                layout: field,
                align: effective_field_align,
            }
        };

        // Simple cases, which don't need DST adjustment:
        //   * no metadata available - just log the case
        //   * known alignment - sized types, [T], str or a foreign type
        //   * packed struct - there is no alignment padding
        match field.ty.sty {
            _ if self.llextra.is_none() => {
                debug!("unsized field `{}` of `{:?}` has no metadata for adjustment",
                    ix, self.llval);
                return simple();
            }
            _ if !field.is_unsized() => return simple(),
            ty::Slice(..) | ty::Str | ty::Foreign(..) => return simple(),
            ty::Adt(def, _) => {
                if def.repr.packed() {
                    // FIXME(eddyb) generalize the adjustment when we
                    // start supporting packing to larger alignments.
                    assert_eq!(self.layout.align.abi(), 1);
                    return simple();
                }
            }
            _ => {}
        }

        // We need to compute the pointer manually now.
        // We do this by casting to a `*i8`, then offsetting it by the
        // appropriate amount. We do this instead of, say, simply adjusting the
        // pointer from the result of a GEP, because the field may have an
        // arbitrary alignment in the LLVM representation anyway.
        //
        // To demonstrate:
        //   struct Foo<T: ?Sized> {
        //      x: u16,
        //      y: T
        //   }
        //
        // The type Foo<Foo<Trait>> is represented in LLVM as { u16, { u16, u8 } },
        // meaning that the `y` field has 16-bit alignment.

        let meta = self.llextra;

        let unaligned_offset = C_usize(cx, offset.bytes());

        // Get the alignment of the field.
        let (_, unsized_align) = glue::size_and_align_of_dst(bx, field.ty, meta);

        // Bump the unaligned offset up to the appropriate alignment using the
        // following expression:
        //
        //   (unaligned offset + (align - 1)) & -align
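        //
        // Worked example (illustrative numbers, not from the source): with an
        // unaligned offset of 2 and an alignment of 4, `(2 + 3) & -4` masks
        // `5` down to `4`, i.e. the offset is rounded up to the next multiple
        // of 4.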

        // Calculate the offset.
        let align_sub_1 = bx.sub(unsized_align, C_usize(cx, 1u64));
        let offset = bx.and(bx.add(unaligned_offset, align_sub_1),
                            bx.neg(unsized_align));

        debug!("struct_field_ptr: DST field offset: {:?}", offset);

        // Cast and adjust the pointer.
        let byte_ptr = bx.pointercast(self.llval, Type::i8p(cx));
        let byte_ptr = bx.gep(byte_ptr, &[offset]);

        // Finally, cast back to the expected type.
        let ll_fty = field.llvm_type(cx);
        debug!("struct_field_ptr: Field type is {:?}", ll_fty);

        PlaceRef {
            llval: bx.pointercast(byte_ptr, ll_fty.ptr_to()),
            llextra: self.llextra,
            layout: field,
            align: effective_field_align,
        }
    }

    /// Obtain the actual discriminant of a value.
    pub fn codegen_get_discr(self, bx: &Builder<'a, 'll, 'tcx>, cast_to: Ty<'tcx>) -> &'ll Value {
        let cast_to = bx.cx.layout_of(cast_to).immediate_llvm_type(bx.cx);
        if self.layout.abi.is_uninhabited() {
            return C_undef(cast_to);
        }
        match self.layout.variants {
            layout::Variants::Single { index } => {
                let discr_val = self.layout.ty.ty_adt_def().map_or(
                    index.as_u32() as u128,
                    |def| def.discriminant_for_variant(bx.cx.tcx, index).val);
                return C_uint_big(cast_to, discr_val);
            }
            layout::Variants::Tagged { .. } |
            layout::Variants::NicheFilling { .. } => {},
        }

        let discr = self.project_field(bx, 0);
        let lldiscr = discr.load(bx).immediate();
        match self.layout.variants {
            layout::Variants::Single { .. } => bug!(),
            layout::Variants::Tagged { ref tag, .. } => {
                let signed = match tag.value {
                    // We use `i1` for bytes that are always `0` or `1`,
                    // e.g. `#[repr(i8)] enum E { A, B }`, but we can't
                    // let LLVM interpret the `i1` as signed, because
                    // then `i1 1` (i.e. `E::B`) is effectively `i8 -1`.
                    layout::Int(_, signed) => !tag.is_bool() && signed,
                    _ => false
                };
                bx.intcast(lldiscr, cast_to, signed)
            }
            layout::Variants::NicheFilling {
                dataful_variant,
                ref niche_variants,
                niche_start,
                ..
            } => {
                let niche_llty = discr.layout.immediate_llvm_type(bx.cx);
                if niche_variants.start() == niche_variants.end() {
                    // FIXME(eddyb) Check the actual primitive type here.
                    let niche_llval = if niche_start == 0 {
                        // HACK(eddyb) Using `C_null` as it works on all types.
                        C_null(niche_llty)
                    } else {
                        C_uint_big(niche_llty, niche_start)
                    };
                    bx.select(bx.icmp(llvm::IntEQ, lldiscr, niche_llval),
                        C_uint(cast_to, niche_variants.start().as_u32() as u64),
                        C_uint(cast_to, dataful_variant.as_u32() as u64))
                } else {
                    // Rebase from niche values to discriminant values.
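                    // Illustrative example (assumed numbers, not from the
                    // source): with `niche_variants = 1..=2` and
                    // `niche_start = 3`, `delta` is 2, so a loaded niche
                    // value of 3 rebases to variant index 1 and 4 rebases to
                    // 2; rebased values above `niche_variants.end()` select
                    // the dataful variant.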
                    let delta = niche_start.wrapping_sub(niche_variants.start().as_u32() as u128);
                    let lldiscr = bx.sub(lldiscr, C_uint_big(niche_llty, delta));
                    let lldiscr_max = C_uint(niche_llty, niche_variants.end().as_u32() as u64);
                    bx.select(bx.icmp(llvm::IntULE, lldiscr, lldiscr_max),
                        bx.intcast(lldiscr, cast_to, false),
                        C_uint(cast_to, dataful_variant.as_u32() as u64))
                }
            }
        }
    }

    /// Set the discriminant for a new value of the given case of the given
    /// representation.
    pub fn codegen_set_discr(&self, bx: &Builder<'a, 'll, 'tcx>, variant_index: VariantIdx) {
        if self.layout.for_variant(bx.cx, variant_index).abi.is_uninhabited() {
            return;
        }
        match self.layout.variants {
            layout::Variants::Single { index } => {
                assert_eq!(index, variant_index);
            }
            layout::Variants::Tagged { .. } => {
                let ptr = self.project_field(bx, 0);
                let to = self.layout.ty.ty_adt_def().unwrap()
                    .discriminant_for_variant(bx.tcx(), variant_index)
                    .val;
                bx.store(
                    C_uint_big(ptr.layout.llvm_type(bx.cx), to),
                    ptr.llval,
                    ptr.align);
            }
            layout::Variants::NicheFilling {
                dataful_variant,
                ref niche_variants,
                niche_start,
                ..
            } => {
                if variant_index != dataful_variant {
                    if bx.sess().target.target.arch == "arm" ||
                       bx.sess().target.target.arch == "aarch64" {
                        // Issue #34427: as a workaround for an LLVM bug on
                        // ARM, use a memset of 0 before assigning the niche
                        // value.
                        let llptr = bx.pointercast(self.llval, Type::i8(bx.cx).ptr_to());
                        let fill_byte = C_u8(bx.cx, 0);
                        let (size, align) = self.layout.size_and_align();
                        let size = C_usize(bx.cx, size.bytes());
                        let align = C_u32(bx.cx, align.abi() as u32);
                        base::call_memset(bx, llptr, fill_byte, size, align, false);
                    }

                    let niche = self.project_field(bx, 0);
                    let niche_llty = niche.layout.immediate_llvm_type(bx.cx);
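                    // Inverse of the rebasing in `codegen_get_discr`: map the
                    // variant's position within `niche_variants` back to its
                    // raw niche value.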
                    let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
                    let niche_value = (niche_value as u128)
                        .wrapping_add(niche_start);
                    // FIXME(eddyb) Check the actual primitive type here.
                    let niche_llval = if niche_value == 0 {
                        // HACK(eddyb) Using `C_null` as it works on all types.
                        C_null(niche_llty)
                    } else {
                        C_uint_big(niche_llty, niche_value)
                    };
                    OperandValue::Immediate(niche_llval).store(bx, niche);
                }
            }
        }
    }

    pub fn project_index(&self, bx: &Builder<'a, 'll, 'tcx>, llindex: &'ll Value)
                         -> PlaceRef<'ll, 'tcx> {
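        // The place is a thin pointer to an array-like aggregate, so the GEP
        // needs a leading 0 to step through the pointer before indexing the
        // elements themselves.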
        PlaceRef {
            llval: bx.inbounds_gep(self.llval, &[C_usize(bx.cx, 0), llindex]),
            llextra: None,
            layout: self.layout.field(bx.cx, 0),
            align: self.align
        }
    }

    pub fn project_downcast(&self, bx: &Builder<'a, 'll, 'tcx>, variant_index: VariantIdx)
                            -> PlaceRef<'ll, 'tcx> {
        let mut downcast = *self;
        downcast.layout = self.layout.for_variant(bx.cx, variant_index);

        // Cast to the appropriate variant struct type.
        let variant_ty = downcast.layout.llvm_type(bx.cx);
        downcast.llval = bx.pointercast(downcast.llval, variant_ty.ptr_to());

        downcast
    }

    pub fn storage_live(&self, bx: &Builder<'a, 'll, 'tcx>) {
        bx.lifetime_start(self.llval, self.layout.size);
    }

    pub fn storage_dead(&self, bx: &Builder<'a, 'll, 'tcx>) {
        bx.lifetime_end(self.llval, self.layout.size);
    }
}

impl FunctionCx<'a, 'll, 'tcx> {
    pub fn codegen_place(&mut self,
                        bx: &Builder<'a, 'll, 'tcx>,
                        place: &mir::Place<'tcx>)
                        -> PlaceRef<'ll, 'tcx> {
        debug!("codegen_place(place={:?})", place);

        let cx = bx.cx;
        let tcx = cx.tcx;

        if let mir::Place::Local(index) = *place {
            match self.locals[index] {
                LocalRef::Place(place) => {
                    return place;
                }
                LocalRef::UnsizedPlace(place) => {
                    return place.load(bx).deref(&cx);
                }
                LocalRef::Operand(..) => {
                    bug!("using operand local {:?} as place", place);
                }
            }
        }

        let result = match *place {
            mir::Place::Local(_) => bug!(), // handled above
            mir::Place::Promoted(box (index, ty)) => {
                let param_env = ty::ParamEnv::reveal_all();
                let cid = mir::interpret::GlobalId {
                    instance: self.instance,
                    promoted: Some(index),
                };
                let layout = cx.layout_of(self.monomorphize(&ty));
                match bx.tcx().const_eval(param_env.and(cid)) {
                    Ok(val) => match val.val {
                        mir::interpret::ConstValue::ByRef(_, alloc, offset) => {
                            PlaceRef::from_const_alloc(bx, layout, alloc, offset)
                        }
                        _ => bug!("promoteds should have an allocation: {:?}", val),
                    },
                    Err(_) => {
                        // This is unreachable as long as runtime and
                        // compile-time agree on values. With floats that
                        // won't always be true, so we generate an abort.
                        let fnname = bx.cx.get_intrinsic(&("llvm.trap"));
                        bx.call(fnname, &[], None);
                        let llval = C_undef(layout.llvm_type(bx.cx).ptr_to());
                        PlaceRef::new_sized(llval, layout, layout.align)
                    }
                }
            }
            mir::Place::Static(box mir::Static { def_id, ty }) => {
                let layout = cx.layout_of(self.monomorphize(&ty));
                PlaceRef::new_sized(consts::get_static(cx, def_id), layout, layout.align)
            },
            mir::Place::Projection(box mir::Projection {
                ref base,
                elem: mir::ProjectionElem::Deref
            }) => {
                // Load the pointer from its location.
                self.codegen_consume(bx, base).deref(bx.cx)
            }
            mir::Place::Projection(ref projection) => {
                let cg_base = self.codegen_place(bx, &projection.base);

                match projection.elem {
                    mir::ProjectionElem::Deref => bug!(),
                    mir::ProjectionElem::Field(ref field, _) => {
                        cg_base.project_field(bx, field.index())
                    }
                    mir::ProjectionElem::Index(index) => {
                        let index = &mir::Operand::Copy(mir::Place::Local(index));
                        let index = self.codegen_operand(bx, index);
                        let llindex = index.immediate();
                        cg_base.project_index(bx, llindex)
                    }
                    mir::ProjectionElem::ConstantIndex { offset,
                                                         from_end: false,
                                                         min_length: _ } => {
                        let lloffset = C_usize(bx.cx, offset as u64);
                        cg_base.project_index(bx, lloffset)
                    }
                    mir::ProjectionElem::ConstantIndex { offset,
                                                         from_end: true,
                                                         min_length: _ } => {
                        let lloffset = C_usize(bx.cx, offset as u64);
                        let lllen = cg_base.len(bx.cx);
                        let llindex = bx.sub(lllen, lloffset);
                        cg_base.project_index(bx, llindex)
                    }
                    mir::ProjectionElem::Subslice { from, to } => {
                        let mut subslice = cg_base.project_index(bx,
                            C_usize(bx.cx, from as u64));
                        let projected_ty = PlaceTy::Ty { ty: cg_base.layout.ty }
                            .projection_ty(tcx, &projection.elem)
                            .to_ty(bx.tcx());
                        subslice.layout = bx.cx.layout_of(self.monomorphize(&projected_ty));

                        if subslice.layout.is_unsized() {
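                            // The subslice drops `from` elements at the front
                            // and `to` at the back, so the new length is the
                            // old `llextra` minus `from + to`.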
                            subslice.llextra = Some(bx.sub(cg_base.llextra.unwrap(),
                                C_usize(bx.cx, (from as u64) + (to as u64))));
                        }

                        // Cast the place pointer type to the new
                        // array or slice type (`*[%_; new_len]`).
                        subslice.llval = bx.pointercast(subslice.llval,
                            subslice.layout.llvm_type(bx.cx).ptr_to());

                        subslice
                    }
                    mir::ProjectionElem::Downcast(_, v) => {
                        cg_base.project_downcast(bx, v)
                    }
                }
            }
        };
        debug!("codegen_place(place={:?}) => {:?}", place, result);
        result
    }

    pub fn monomorphized_place_ty(&self, place: &mir::Place<'tcx>) -> Ty<'tcx> {
        let tcx = self.cx.tcx;
        let place_ty = place.ty(self.mir, tcx);
        self.monomorphize(&place_ty.to_ty(tcx))
    }
}