// src/librustc_codegen_ssa/mir/place.rs

use rustc::ty::{self, Ty};
use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, VariantIdx, HasTyCtxt};
use rustc::mir;
use rustc::mir::tcx::PlaceTy;
use crate::MemFlags;
use crate::common::IntPredicate;
use crate::glue;

use crate::traits::*;

use super::{FunctionCx, LocalRef};
use super::operand::OperandValue;

#[derive(Copy, Clone, Debug)]
pub struct PlaceRef<'tcx, V> {
    /// Pointer to the contents of the place.
    pub llval: V,

    /// This place's extra data if it is unsized, or `None` if it is sized.
    pub llextra: Option<V>,

    /// Monomorphized type of this place, including variant information.
    pub layout: TyLayout<'tcx>,

    /// What alignment we know for this place.
    pub align: Align,
}

impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
    pub fn new_sized(
        llval: V,
        layout: TyLayout<'tcx>,
        align: Align,
    ) -> PlaceRef<'tcx, V> {
        assert!(!layout.is_unsized());
        PlaceRef {
            llval,
            llextra: None,
            layout,
            align
        }
    }

    fn new_thin_place<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        llval: V,
        layout: TyLayout<'tcx>,
        align: Align,
    ) -> PlaceRef<'tcx, V> {
        assert!(!bx.cx().type_has_metadata(layout.ty));
        PlaceRef {
            llval,
            llextra: None,
            layout,
            align
        }
    }

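    /// Allocates a stack slot for a sized `layout` and wraps it in a `PlaceRef`.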
    pub fn alloca<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        layout: TyLayout<'tcx>,
        name: &str
    ) -> Self {
        debug!("alloca({:?}: {:?})", name, layout);
        assert!(!layout.is_unsized(), "tried to statically allocate unsized place");
        let tmp = bx.alloca(bx.cx().backend_type(layout), name, layout.align.abi);
        Self::new_sized(tmp, layout, layout.align.abi)
    }

    /// Returns a place for an indirect reference to an unsized place.
    pub fn alloca_unsized_indirect<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        layout: TyLayout<'tcx>,
        name: &str,
    ) -> Self {
        debug!("alloca_unsized_indirect({:?}: {:?})", name, layout);
        assert!(layout.is_unsized(), "tried to allocate indirect place for sized values");
        let ptr_ty = bx.cx().tcx().mk_mut_ptr(layout.ty);
        let ptr_layout = bx.cx().layout_of(ptr_ty);
        Self::alloca(bx, ptr_layout, name)
    }

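    /// Returns the length of this array or slice place: the constant element
    /// count for sized arrays, or the length carried in `llextra` for slices.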
    pub fn len<Cx: CodegenMethods<'tcx, Value = V>>(
        &self,
        cx: &Cx
    ) -> V {
        if let layout::FieldPlacement::Array { count, .. } = self.layout.fields {
            if self.layout.is_unsized() {
                assert_eq!(count, 0);
                self.llextra.unwrap()
            } else {
                cx.const_usize(count)
            }
        } else {
            bug!("unexpected layout `{:#?}` in PlaceRef::len", self.layout)
        }
    }
}

impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
    /// Accesses a field, at a point when the value's variant is known.
    pub fn project_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        bx: &mut Bx,
        ix: usize,
    ) -> Self {
        let field = self.layout.field(bx.cx(), ix);
        let offset = self.layout.fields.offset(ix);
        let effective_field_align = self.align.restrict_for_offset(offset);

        let mut simple = || {
            // Unions and newtypes only use an offset of 0.
            let llval = if offset.bytes() == 0 {
                self.llval
            } else if let layout::Abi::ScalarPair(ref a, ref b) = self.layout.abi {
                // Offsets have to match either the first or the second field.
                assert_eq!(offset, a.value.size(bx.cx()).align_to(b.value.align(bx.cx()).abi));
                bx.struct_gep(self.llval, 1)
            } else {
                bx.struct_gep(self.llval, bx.cx().backend_field_index(self.layout, ix))
            };
            PlaceRef {
                // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
                llval: bx.pointercast(llval, bx.cx().type_ptr_to(bx.cx().backend_type(field))),
                llextra: if bx.cx().type_has_metadata(field.ty) {
                    self.llextra
                } else {
                    None
                },
                layout: field,
                align: effective_field_align,
            }
        };

        // Simple cases, which don't need DST adjustment:
        //   * no metadata available - just log the case
        //   * known alignment - sized types, `[T]`, `str`, or a foreign type
        //   * packed struct - there is no alignment padding
        match field.ty.sty {
            _ if self.llextra.is_none() => {
                debug!("unsized field `{}` of `{:?}` has no metadata for adjustment",
                    ix, self.llval);
                return simple();
            }
            _ if !field.is_unsized() => return simple(),
            ty::Slice(..) | ty::Str | ty::Foreign(..) => return simple(),
            ty::Adt(def, _) => {
                if def.repr.packed() {
                    // FIXME(eddyb) generalize the adjustment when we
                    // start supporting packing to larger alignments.
                    assert_eq!(self.layout.align.abi.bytes(), 1);
                    return simple();
                }
            }
            _ => {}
        }

        // We need to get the pointer manually now.
        // We do this by casting to a *i8, then offsetting it by the appropriate amount.
        // We do this instead of, say, simply adjusting the pointer from the result of a GEP
        // because the field may have an arbitrary alignment in the LLVM representation
        // anyway.
        //
        // To demonstrate:
        //
        //     struct Foo<T: ?Sized> {
        //         x: u16,
        //         y: T
        //     }
        //
        // The type Foo<Foo<Trait>> is represented in LLVM as { u16, { u16, u8 }}, meaning that
        // the `y` field has 16-bit alignment.

        let meta = self.llextra;

        let unaligned_offset = bx.cx().const_usize(offset.bytes());

        // Get the alignment of the field.
        let (_, unsized_align) = glue::size_and_align_of_dst(bx, field.ty, meta);

        // Bump the unaligned offset up to the appropriate alignment using the
        // following expression:
        //
        //     (unaligned offset + (align - 1)) & -align
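        //
        // For example, with an alignment of 4, an unaligned offset of 5 becomes
        // (5 + 3) & -4 = 8, while an already-aligned offset of 8 stays
        // (8 + 3) & -4 = 8 (here -4 is the two's complement mask ...11111100).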

        // Calculate offset.
        let align_sub_1 = bx.sub(unsized_align, bx.cx().const_usize(1u64));
        let and_lhs = bx.add(unaligned_offset, align_sub_1);
        let and_rhs = bx.neg(unsized_align);
        let offset = bx.and(and_lhs, and_rhs);

        debug!("struct_field_ptr: DST field offset: {:?}", offset);

        // Cast and adjust the pointer.
        let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p());
        let byte_ptr = bx.gep(byte_ptr, &[offset]);

        // Finally, cast back to the expected type.
        let ll_fty = bx.cx().backend_type(field);
        debug!("struct_field_ptr: Field type is {:?}", ll_fty);

        PlaceRef {
            llval: bx.pointercast(byte_ptr, bx.cx().type_ptr_to(ll_fty)),
            llextra: self.llextra,
            layout: field,
            align: effective_field_align,
        }
    }

    /// Obtain the actual discriminant of a value.
    pub fn codegen_get_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        bx: &mut Bx,
        cast_to: Ty<'tcx>
    ) -> V {
        let cast_to = bx.cx().immediate_backend_type(bx.cx().layout_of(cast_to));
        if self.layout.abi.is_uninhabited() {
            return bx.cx().const_undef(cast_to);
        }
        match self.layout.variants {
            layout::Variants::Single { index } => {
                let discr_val = self.layout.ty.ty_adt_def().map_or(
                    index.as_u32() as u128,
                    |def| def.discriminant_for_variant(bx.cx().tcx(), index).val);
                return bx.cx().const_uint_big(cast_to, discr_val);
            }
            layout::Variants::Tagged { .. } |
            layout::Variants::NicheFilling { .. } => {},
        }

        let discr = self.project_field(bx, 0);
        let lldiscr = bx.load_operand(discr).immediate();
        match self.layout.variants {
            layout::Variants::Single { .. } => bug!(),
            layout::Variants::Tagged { ref tag, .. } => {
                let signed = match tag.value {
                    // We use `i1` for bytes that are always `0` or `1`,
                    // e.g., `#[repr(i8)] enum E { A, B }`, but we can't
                    // let LLVM interpret the `i1` as signed, because
                    // then `i1 1` (i.e., `E::B`) is effectively `i8 -1`.
                    layout::Int(_, signed) => !tag.is_bool() && signed,
                    _ => false
                };
                bx.intcast(lldiscr, cast_to, signed)
            }
            layout::Variants::NicheFilling {
                dataful_variant,
                ref niche_variants,
                niche_start,
                ..
            } => {
                let niche_llty = bx.cx().immediate_backend_type(discr.layout);
                if niche_variants.start() == niche_variants.end() {
                    // FIXME(eddyb) Check the actual primitive type here.
                    let niche_llval = if niche_start == 0 {
                        // HACK(eddyb) Using `c_null` as it works on all types.
                        bx.cx().const_null(niche_llty)
                    } else {
                        bx.cx().const_uint_big(niche_llty, niche_start)
                    };
                    let select_arg = bx.icmp(IntPredicate::IntEQ, lldiscr, niche_llval);
                    bx.select(select_arg,
                        bx.cx().const_uint(cast_to, niche_variants.start().as_u32() as u64),
                        bx.cx().const_uint(cast_to, dataful_variant.as_u32() as u64))
                } else {
                    // Rebase from niche values to discriminant values.
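                    //
                    // As an illustrative example (actual layouts are chosen by
                    // the compiler): for `enum E { A(bool), B, C }`, the `bool`
                    // payload leaves spare values `2..=255`, so `B` and `C` can
                    // be encoded as tags 2 and 3 (`niche_start = 2`,
                    // `niche_variants = 1..=2`). Subtracting the `delta` of
                    // `2 - 1 = 1` maps tag 2 back to variant index 1 and tag 3
                    // to variant index 2; anything outside `0..=2` after the
                    // subtraction must be the dataful variant `A`.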
                    let delta = niche_start.wrapping_sub(niche_variants.start().as_u32() as u128);
                    let lldiscr = bx.sub(lldiscr, bx.cx().const_uint_big(niche_llty, delta));
                    let lldiscr_max =
                        bx.cx().const_uint(niche_llty, niche_variants.end().as_u32() as u64);
                    let select_arg = bx.icmp(IntPredicate::IntULE, lldiscr, lldiscr_max);
                    let cast = bx.intcast(lldiscr, cast_to, false);
                    bx.select(select_arg,
                        cast,
                        bx.cx().const_uint(cast_to, dataful_variant.as_u32() as u64))
                }
            }
        }
    }

    /// Sets the discriminant for a new value of the given variant of the
    /// given representation.
    pub fn codegen_set_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &mut Bx,
        variant_index: VariantIdx
    ) {
        if self.layout.for_variant(bx.cx(), variant_index).abi.is_uninhabited() {
            return;
        }
        match self.layout.variants {
            layout::Variants::Single { index } => {
                assert_eq!(index, variant_index);
            }
            layout::Variants::Tagged { .. } => {
                let ptr = self.project_field(bx, 0);
                let to = self.layout.ty.ty_adt_def().unwrap()
                    .discriminant_for_variant(bx.tcx(), variant_index)
                    .val;
                bx.store(
                    bx.cx().const_uint_big(bx.cx().backend_type(ptr.layout), to),
                    ptr.llval,
                    ptr.align);
            }
            layout::Variants::NicheFilling {
                dataful_variant,
                ref niche_variants,
                niche_start,
                ..
            } => {
                if variant_index != dataful_variant {
                    if bx.cx().sess().target.target.arch == "arm" ||
                       bx.cx().sess().target.target.arch == "aarch64" {
                        // Issue #34427: as a workaround for an LLVM bug on ARM,
                        // use a memset of 0 before assigning the niche value.
                        let fill_byte = bx.cx().const_u8(0);
                        let size = bx.cx().const_usize(self.layout.size.bytes());
                        bx.memset(self.llval, fill_byte, size, self.align, MemFlags::empty());
                    }

                    let niche = self.project_field(bx, 0);
                    let niche_llty = bx.cx().immediate_backend_type(niche.layout);
                    let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
                    let niche_value = (niche_value as u128)
                        .wrapping_add(niche_start);
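                    // This is the inverse of the rebasing in `codegen_get_discr`:
                    // continuing the illustrative `enum E { A(bool), B, C }`
                    // layout above (`niche_start = 2`, `niche_variants = 1..=2`),
                    // storing variant `C` (index 2) writes the tag
                    // (2 - 1) + 2 = 3.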
                    // FIXME(eddyb) Check the actual primitive type here.
                    let niche_llval = if niche_value == 0 {
                        // HACK(eddyb) Using `c_null` as it works on all types.
                        bx.cx().const_null(niche_llty)
                    } else {
                        bx.cx().const_uint_big(niche_llty, niche_value)
                    };
                    OperandValue::Immediate(niche_llval).store(bx, niche);
                }
            }
        }
    }

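    /// Projects to the array or slice element at `llindex`.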
    pub fn project_index<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &mut Bx,
        llindex: V
    ) -> Self {
        // Statically compute the offset if we can, otherwise just use the element size,
        // as this will yield the lowest alignment.
        let layout = self.layout.field(bx, 0);
        let offset = if bx.is_const_integral(llindex) {
            layout.size.checked_mul(bx.const_to_uint(llindex), bx).unwrap_or(layout.size)
        } else {
            layout.size
        };
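        // For a dynamic index, the element offset is some multiple of the
        // element size, and any power-of-two alignment that divides the element
        // size also divides every multiple of it, so restricting by
        // `layout.size` below is valid for every element.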

        PlaceRef {
            llval: bx.inbounds_gep(self.llval, &[bx.cx().const_usize(0), llindex]),
            llextra: None,
            layout,
            align: self.align.restrict_for_offset(offset),
        }
    }

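    /// Projects to the given enum variant, adjusting the layout and casting the
    /// pointer to the variant's struct type.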
    pub fn project_downcast<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &mut Bx,
        variant_index: VariantIdx
    ) -> Self {
        let mut downcast = *self;
        downcast.layout = self.layout.for_variant(bx.cx(), variant_index);

        // Cast to the appropriate variant struct type.
        let variant_ty = bx.cx().backend_type(downcast.layout);
        downcast.llval = bx.pointercast(downcast.llval, bx.cx().type_ptr_to(variant_ty));

        downcast
    }

    pub fn storage_live<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
        bx.lifetime_start(self.llval, self.layout.size);
    }

    pub fn storage_dead<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
        bx.lifetime_end(self.llval, self.layout.size);
    }
}

impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
    pub fn codegen_place(
        &mut self,
        bx: &mut Bx,
        place: &mir::Place<'tcx>
    ) -> PlaceRef<'tcx, Bx::Value> {
        debug!("codegen_place(place={:?})", place);

        let cx = self.cx;
        let tcx = self.cx.tcx();

        if let mir::Place::Base(mir::PlaceBase::Local(index)) = *place {
            match self.locals[index] {
                LocalRef::Place(place) => {
                    return place;
                }
                LocalRef::UnsizedPlace(place) => {
                    return bx.load_operand(place).deref(cx);
                }
                LocalRef::Operand(..) => {
                    bug!("using operand local {:?} as place", place);
                }
            }
        }

        let result = match *place {
            mir::Place::Base(mir::PlaceBase::Local(_)) => bug!(), // handled above
            mir::Place::Base(mir::PlaceBase::Promoted(box (index, ty))) => {
                let param_env = ty::ParamEnv::reveal_all();
                let cid = mir::interpret::GlobalId {
                    instance: self.instance,
                    promoted: Some(index),
                };
                let layout = cx.layout_of(self.monomorphize(&ty));
                match bx.tcx().const_eval(param_env.and(cid)) {
                    Ok(val) => match val.val {
                        mir::interpret::ConstValue::ByRef(ptr, alloc) => {
                            bx.cx().from_const_alloc(layout, alloc, ptr.offset)
                        }
                        _ => bug!("promoteds should have an allocation: {:?}", val),
                    },
                    Err(_) => {
                        // This is unreachable as long as runtime and compile-time
                        // agree on values. With floats that won't always be true,
                        // so we generate an abort.
                        bx.abort();
                        let llval = bx.cx().const_undef(
                            bx.cx().type_ptr_to(bx.cx().backend_type(layout))
                        );
                        PlaceRef::new_sized(llval, layout, layout.align.abi)
                    }
                }
            }
            mir::Place::Base(mir::PlaceBase::Static(box mir::Static { def_id, ty })) => {
                // NB: The layout of a static may be unsized, as is the case when
                // working with a static that is an `extern type`.
                let layout = cx.layout_of(self.monomorphize(&ty));
                PlaceRef::new_thin_place(bx, bx.get_static(def_id), layout, layout.align.abi)
            },
            mir::Place::Projection(box mir::Projection {
                ref base,
                elem: mir::ProjectionElem::Deref
            }) => {
                // Load the pointer from its location.
                self.codegen_consume(bx, base).deref(bx.cx())
            }
            mir::Place::Projection(ref projection) => {
                let cg_base = self.codegen_place(bx, &projection.base);

                match projection.elem {
                    mir::ProjectionElem::Deref => bug!(),
                    mir::ProjectionElem::Field(ref field, _) => {
                        cg_base.project_field(bx, field.index())
                    }
                    mir::ProjectionElem::Index(index) => {
                        let index = &mir::Operand::Copy(
                            mir::Place::Base(mir::PlaceBase::Local(index))
                        );
                        let index = self.codegen_operand(bx, index);
                        let llindex = index.immediate();
                        cg_base.project_index(bx, llindex)
                    }
                    mir::ProjectionElem::ConstantIndex {
                        offset,
                        from_end: false,
                        min_length: _,
                    } => {
                        let lloffset = bx.cx().const_usize(offset as u64);
                        cg_base.project_index(bx, lloffset)
                    }
                    mir::ProjectionElem::ConstantIndex {
                        offset,
                        from_end: true,
                        min_length: _,
                    } => {
                        let lloffset = bx.cx().const_usize(offset as u64);
                        let lllen = cg_base.len(bx.cx());
                        let llindex = bx.sub(lllen, lloffset);
                        cg_base.project_index(bx, llindex)
                    }
                    mir::ProjectionElem::Subslice { from, to } => {
                        let mut subslice = cg_base.project_index(bx,
                            bx.cx().const_usize(from as u64));
                        let projected_ty = PlaceTy::Ty { ty: cg_base.layout.ty }
                            .projection_ty(tcx, &projection.elem).to_ty(tcx);
                        subslice.layout = bx.cx().layout_of(self.monomorphize(&projected_ty));

                        if subslice.layout.is_unsized() {
                            subslice.llextra = Some(bx.sub(cg_base.llextra.unwrap(),
                                bx.cx().const_usize((from as u64) + (to as u64))));
                        }

                        // Cast the place pointer type to the new
                        // array or slice type (`*[%_; new_len]`).
                        subslice.llval = bx.pointercast(subslice.llval,
                            bx.cx().type_ptr_to(bx.cx().backend_type(subslice.layout)));

                        subslice
                    }
                    mir::ProjectionElem::Downcast(_, v) => {
                        cg_base.project_downcast(bx, v)
                    }
                }
            }
        };
        debug!("codegen_place(place={:?}) => {:?}", place, result);
        result
    }

    pub fn monomorphized_place_ty(&self, place: &mir::Place<'tcx>) -> Ty<'tcx> {
        let tcx = self.cx.tcx();
        let place_ty = place.ty(self.mir, tcx);
        self.monomorphize(&place_ty.to_ty(tcx))
    }
}