src/librustc_codegen_ssa/mir/place.rs
1 use super::operand::OperandValue;
2 use super::{FunctionCx, LocalRef};
3
4 use crate::common::IntPredicate;
5 use crate::glue;
6 use crate::traits::*;
7 use crate::MemFlags;
8
9 use rustc::mir;
10 use rustc::mir::tcx::PlaceTy;
11 use rustc::ty::layout::{self, Align, HasTyCtxt, LayoutOf, TyLayout, VariantIdx};
12 use rustc::ty::{self, Ty};
13
14 #[derive(Copy, Clone, Debug)]
15 pub struct PlaceRef<'tcx, V> {
16     /// A pointer to the contents of the place.
17     pub llval: V,
18
19     /// This place's extra data if it is unsized, or `None` if it has none.
20     pub llextra: Option<V>,
21
22     /// The monomorphized type of this place, including variant information.
23     pub layout: TyLayout<'tcx>,
24
25     /// The alignment we know for this place.
26     pub align: Align,
27 }
28
29 impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
30     pub fn new_sized(llval: V, layout: TyLayout<'tcx>) -> PlaceRef<'tcx, V> {
31         assert!(!layout.is_unsized());
32         PlaceRef { llval, llextra: None, layout, align: layout.align.abi }
33     }
34
35     pub fn new_sized_aligned(llval: V, layout: TyLayout<'tcx>, align: Align) -> PlaceRef<'tcx, V> {
36         assert!(!layout.is_unsized());
37         PlaceRef { llval, llextra: None, layout, align }
38     }
39
40     fn new_thin_place<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
41         bx: &mut Bx,
42         llval: V,
43         layout: TyLayout<'tcx>,
44     ) -> PlaceRef<'tcx, V> {
45         assert!(!bx.cx().type_has_metadata(layout.ty));
46         PlaceRef { llval, llextra: None, layout, align: layout.align.abi }
47     }
48
49     // FIXME(eddyb) pass something else for the name so no work is done
50     // unless LLVM IR names are turned on (e.g. for `--emit=llvm-ir`).
51     pub fn alloca<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
52         bx: &mut Bx,
53         layout: TyLayout<'tcx>,
54     ) -> Self {
55         assert!(!layout.is_unsized(), "tried to statically allocate unsized place");
56         let tmp = bx.alloca(bx.cx().backend_type(layout), layout.align.abi);
57         Self::new_sized(tmp, layout)
58     }
59
60     /// Returns a place for an indirect reference to an unsized place.
61     // FIXME(eddyb) pass something else for the name so no work is done
62     // unless LLVM IR names are turned on (e.g. for `--emit=llvm-ir`).
63     pub fn alloca_unsized_indirect<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
64         bx: &mut Bx,
65         layout: TyLayout<'tcx>,
66     ) -> Self {
67         assert!(layout.is_unsized(), "tried to allocate indirect place for sized values");
68         let ptr_ty = bx.cx().tcx().mk_mut_ptr(layout.ty);
69         let ptr_layout = bx.cx().layout_of(ptr_ty);
70         Self::alloca(bx, ptr_layout)
71     }
72
73     pub fn len<Cx: ConstMethods<'tcx, Value = V>>(&self, cx: &Cx) -> V {
74         if let layout::FieldPlacement::Array { count, .. } = self.layout.fields {
75             if self.layout.is_unsized() {
76                 assert_eq!(count, 0);
77                 self.llextra.unwrap()
78             } else {
79                 cx.const_usize(count)
80             }
81         } else {
82             bug!("unexpected layout `{:#?}` in PlaceRef::len", self.layout)
83         }
84     }
85 }
86
87 impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
88     /// Access a field, at a point when the value's case is known.
89     pub fn project_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
90         self,
91         bx: &mut Bx,
92         ix: usize,
93     ) -> Self {
94         let field = self.layout.field(bx.cx(), ix);
95         let offset = self.layout.fields.offset(ix);
96         let effective_field_align = self.align.restrict_for_offset(offset);
97
98         let mut simple = || {
99             // Unions and newtypes only use an offset of 0.
100             let llval = if offset.bytes() == 0 {
101                 self.llval
102             } else if let layout::Abi::ScalarPair(ref a, ref b) = self.layout.abi {
103                 // Offsets have to match either first or second field.
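                // (Illustrative example: for a `ScalarPair` of `(u8, u32)`, the
                // first scalar's size of 1 byte aligned up to the second scalar's
                // 4-byte alignment gives 4, which is the only non-zero offset
                // this branch accepts.)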
104                 assert_eq!(offset, a.value.size(bx.cx()).align_to(b.value.align(bx.cx()).abi));
105                 bx.struct_gep(self.llval, 1)
106             } else {
107                 bx.struct_gep(self.llval, bx.cx().backend_field_index(self.layout, ix))
108             };
109             PlaceRef {
110                 // HACK(eddyb): have to bitcast pointers until LLVM removes pointee types.
111                 llval: bx.pointercast(llval, bx.cx().type_ptr_to(bx.cx().backend_type(field))),
112                 llextra: if bx.cx().type_has_metadata(field.ty) { self.llextra } else { None },
113                 layout: field,
114                 align: effective_field_align,
115             }
116         };
117
118         // Simple cases, which don't need DST adjustment:
119         //   * no metadata available - just log the case
120         //   * known alignment - sized types, `[T]`, `str` or a foreign type
121         //   * packed struct - there is no alignment padding
122         match field.ty.kind {
123             _ if self.llextra.is_none() => {
124                 debug!(
125                     "unsized field `{}` of `{:?}` has no metadata for adjustment",
126                     ix, self.llval
127                 );
128                 return simple();
129             }
130             _ if !field.is_unsized() => return simple(),
131             ty::Slice(..) | ty::Str | ty::Foreign(..) => return simple(),
132             ty::Adt(def, _) => {
133                 if def.repr.packed() {
134                     // FIXME(eddyb) generalize the adjustment when we
135                     // start supporting packing to larger alignments.
136                     assert_eq!(self.layout.align.abi.bytes(), 1);
137                     return simple();
138                 }
139             }
140             _ => {}
141         }
142
143         // We need to get the pointer manually now.
144         // We do this by casting to a `*i8`, then offsetting it by the appropriate amount.
145         // We do this instead of, say, simply adjusting the pointer from the result of a GEP
146         // because the field may have an arbitrary alignment in the LLVM representation
147         // anyway.
148         //
149         // To demonstrate:
150         //
151         //     struct Foo<T: ?Sized> {
152         //         x: u16,
153         //         y: T
154         //     }
155         //
156         // The type `Foo<Foo<Trait>>` is represented in LLVM as `{ u16, { u16, u8 }}`, meaning that
157         // the `y` field has 16-bit alignment.
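        //
        // (Illustrative: if the erased tail type turns out to have 4-byte
        // alignment at run time, the real offset of `y` is 4 rather than the
        // statically known 2, which is why the offset is recomputed below.)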
158
159         let meta = self.llextra;
160
161         let unaligned_offset = bx.cx().const_usize(offset.bytes());
162
163         // Get the alignment of the field
164         let (_, unsized_align) = glue::size_and_align_of_dst(bx, field.ty, meta);
165
166         // Bump the unaligned offset up to the appropriate alignment using the
167         // following expression:
168         //
169         //     (unaligned offset + (align - 1)) & -align
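        //
        // (Worked example with illustrative values: an unaligned offset of 2 and
        // a dynamic alignment of 4 gives (2 + 3) & -4 = 4, i.e. the offset is
        // rounded up to the next multiple of the field's run-time alignment.)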
170
171         // Calculate offset.
172         let align_sub_1 = bx.sub(unsized_align, bx.cx().const_usize(1u64));
173         let and_lhs = bx.add(unaligned_offset, align_sub_1);
174         let and_rhs = bx.neg(unsized_align);
175         let offset = bx.and(and_lhs, and_rhs);
176
177         debug!("struct_field_ptr: DST field offset: {:?}", offset);
178
179         // Cast and adjust pointer.
180         let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p());
181         let byte_ptr = bx.gep(byte_ptr, &[offset]);
182
183         // Finally, cast back to the type expected.
184         let ll_fty = bx.cx().backend_type(field);
185         debug!("struct_field_ptr: Field type is {:?}", ll_fty);
186
187         PlaceRef {
188             llval: bx.pointercast(byte_ptr, bx.cx().type_ptr_to(ll_fty)),
189             llextra: self.llextra,
190             layout: field,
191             align: effective_field_align,
192         }
193     }
194
195     /// Obtain the actual discriminant of a value.
196     pub fn codegen_get_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
197         self,
198         bx: &mut Bx,
199         cast_to: Ty<'tcx>,
200     ) -> V {
201         let cast_to = bx.cx().immediate_backend_type(bx.cx().layout_of(cast_to));
202         if self.layout.abi.is_uninhabited() {
203             return bx.cx().const_undef(cast_to);
204         }
205         let (discr_scalar, discr_kind, discr_index) = match self.layout.variants {
206             layout::Variants::Single { index } => {
207                 let discr_val = self
208                     .layout
209                     .ty
210                     .discriminant_for_variant(bx.cx().tcx(), index)
211                     .map_or(index.as_u32() as u128, |discr| discr.val);
212                 return bx.cx().const_uint_big(cast_to, discr_val);
213             }
214             layout::Variants::Multiple { ref discr, ref discr_kind, discr_index, .. } => {
215                 (discr, discr_kind, discr_index)
216             }
217         };
218
219         // Read the tag/niche-encoded discriminant from memory.
220         let encoded_discr = self.project_field(bx, discr_index);
221         let encoded_discr = bx.load_operand(encoded_discr);
222
223         // Decode the discriminant (specifically if it's niche-encoded).
224         match *discr_kind {
225             layout::DiscriminantKind::Tag => {
226                 let signed = match discr_scalar.value {
227                     // We use `i1` for bytes that are always `0` or `1`,
228                     // e.g., `#[repr(i8)] enum E { A, B }`, but we can't
229                     // let LLVM interpret the `i1` as signed, because
230                     // then `i1 1` (i.e., `E::B`) is effectively `i8 -1`.
231                     layout::Int(_, signed) => !discr_scalar.is_bool() && signed,
232                     _ => false,
233                 };
234                 bx.intcast(encoded_discr.immediate(), cast_to, signed)
235             }
236             layout::DiscriminantKind::Niche {
237                 dataful_variant,
238                 ref niche_variants,
239                 niche_start,
240             } => {
241                 // Rebase from niche values to discriminants, and check
242                 // whether the result is in range for the niche variants.
243                 let niche_llty = bx.cx().immediate_backend_type(encoded_discr.layout);
244                 let encoded_discr = encoded_discr.immediate();
245
246                 // We first compute the "relative discriminant" (wrt `niche_variants`),
247                 // that is, if `n = niche_variants.end() - niche_variants.start()`,
248                 // we remap `niche_start..=niche_start + n` (which may wrap around)
249                 // to (non-wrap-around) `0..=n`, to be able to check whether the
250                 // discriminant corresponds to a niche variant with one comparison.
251                 // We also can't go directly to the (variant index) discriminant
252                 // and check that it is in the range `niche_variants`, because
253                 // that might not fit in the same type, on top of needing an extra
254                 // comparison (see also the comment on `let niche_discr`).
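                // (Illustrative example: with `niche_variants = 3..=7` and
                // `niche_start = 251`, an encoded value of 253 yields a relative
                // discriminant of 253 - 251 = 2, which is <= 7 - 3 = 4, so it is
                // a niche variant and decodes to variant index 3 + 2 = 5.)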
255                 let relative_discr = if niche_start == 0 {
256                     // Avoid subtracting `0`, which wouldn't work for pointers.
257                     // FIXME(eddyb) check the actual primitive type here.
258                     encoded_discr
259                 } else {
260                     bx.sub(encoded_discr, bx.cx().const_uint_big(niche_llty, niche_start))
261                 };
262                 let relative_max = niche_variants.end().as_u32() - niche_variants.start().as_u32();
263                 let is_niche = {
264                     let relative_max = if relative_max == 0 {
265                         // Avoid calling `const_uint`, which wouldn't work for pointers.
266                         // FIXME(eddyb) check the actual primitive type here.
267                         bx.cx().const_null(niche_llty)
268                     } else {
269                         bx.cx().const_uint(niche_llty, relative_max as u64)
270                     };
271                     bx.icmp(IntPredicate::IntULE, relative_discr, relative_max)
272                 };
273
274                 // NOTE(eddyb) this addition needs to be performed on the final
275                 // type, in case the niche itself can't represent all variant
276                 // indices (e.g. `u8` niche with more than `256` variants,
277                 // but enough uninhabited variants so that the remaining variants
278                 // fit in the niche).
279                 // In other words, `niche_variants.end - niche_variants.start`
280                 // is representable in the niche, but `niche_variants.end`
281                 // might not be, in extreme cases.
282                 let niche_discr = {
283                     let relative_discr = if relative_max == 0 {
284                         // HACK(eddyb) since we have only one niche, we know which
285                         // one it is, and we can avoid having a dynamic value here.
286                         bx.cx().const_uint(cast_to, 0)
287                     } else {
288                         bx.intcast(relative_discr, cast_to, false)
289                     };
290                     bx.add(
291                         relative_discr,
292                         bx.cx().const_uint(cast_to, niche_variants.start().as_u32() as u64),
293                     )
294                 };
295
296                 bx.select(
297                     is_niche,
298                     niche_discr,
299                     bx.cx().const_uint(cast_to, dataful_variant.as_u32() as u64),
300                 )
301             }
302         }
303     }
304
305     /// Sets the discriminant for a new value of the given case of the given
306     /// representation.
307     pub fn codegen_set_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
308         &self,
309         bx: &mut Bx,
310         variant_index: VariantIdx,
311     ) {
312         if self.layout.for_variant(bx.cx(), variant_index).abi.is_uninhabited() {
313             // We play it safe by using a well-defined `abort`, but we could go for immediate UB
314             // if that turns out to be helpful.
315             bx.abort();
316             return;
317         }
318         match self.layout.variants {
319             layout::Variants::Single { index } => {
320                 assert_eq!(index, variant_index);
321             }
322             layout::Variants::Multiple {
323                 discr_kind: layout::DiscriminantKind::Tag,
324                 discr_index,
325                 ..
326             } => {
327                 let ptr = self.project_field(bx, discr_index);
328                 let to =
329                     self.layout.ty.discriminant_for_variant(bx.tcx(), variant_index).unwrap().val;
330                 bx.store(
331                     bx.cx().const_uint_big(bx.cx().backend_type(ptr.layout), to),
332                     ptr.llval,
333                     ptr.align,
334                 );
335             }
336             layout::Variants::Multiple {
337                 discr_kind:
338                     layout::DiscriminantKind::Niche { dataful_variant, ref niche_variants, niche_start },
339                 discr_index,
340                 ..
341             } => {
342                 if variant_index != dataful_variant {
343                     if bx.cx().sess().target.target.arch == "arm"
344                         || bx.cx().sess().target.target.arch == "aarch64"
345                     {
346                         // FIXME(#34427): as workaround for LLVM bug on ARM,
347                         // use memset of 0 before assigning niche value.
348                         let fill_byte = bx.cx().const_u8(0);
349                         let size = bx.cx().const_usize(self.layout.size.bytes());
350                         bx.memset(self.llval, fill_byte, size, self.align, MemFlags::empty());
351                     }
352
353                     let niche = self.project_field(bx, discr_index);
354                     let niche_llty = bx.cx().immediate_backend_type(niche.layout);
355                     let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
356                     let niche_value = (niche_value as u128).wrapping_add(niche_start);
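                    // (Illustrative example: with `niche_variants = 3..=7` and
                    // `niche_start = 251`, setting variant 5 stores
                    // (5 - 3) + 251 = 253, the inverse of the decoding done in
                    // `codegen_get_discr` above.)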
357                     // FIXME(eddyb): check the actual primitive type here.
358                     let niche_llval = if niche_value == 0 {
359                         // HACK(eddyb): using `c_null` as it works on all types.
360                         bx.cx().const_null(niche_llty)
361                     } else {
362                         bx.cx().const_uint_big(niche_llty, niche_value)
363                     };
364                     OperandValue::Immediate(niche_llval).store(bx, niche);
365                 }
366             }
367         }
368     }
369
370     pub fn project_index<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
371         &self,
372         bx: &mut Bx,
373         llindex: V,
374     ) -> Self {
375         // Statically compute the offset if we can, otherwise just use the element size,
376         // as this will yield the lowest alignment.
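        // (Illustrative example: indexing a place of type `[u32]` known to be
        // 16-byte aligned with the constant index 3 gives an offset of 12 bytes,
        // so the element's known alignment is restricted to 4 below.)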
377         let layout = self.layout.field(bx, 0);
378         let offset = if let Some(llindex) = bx.const_to_opt_uint(llindex) {
379             layout.size.checked_mul(llindex, bx).unwrap_or(layout.size)
380         } else {
381             layout.size
382         };
383
384         PlaceRef {
385             llval: bx.inbounds_gep(self.llval, &[bx.cx().const_usize(0), llindex]),
386             llextra: None,
387             layout,
388             align: self.align.restrict_for_offset(offset),
389         }
390     }
391
392     pub fn project_downcast<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
393         &self,
394         bx: &mut Bx,
395         variant_index: VariantIdx,
396     ) -> Self {
397         let mut downcast = *self;
398         downcast.layout = self.layout.for_variant(bx.cx(), variant_index);
399
400         // Cast to the appropriate variant struct type.
401         let variant_ty = bx.cx().backend_type(downcast.layout);
402         downcast.llval = bx.pointercast(downcast.llval, bx.cx().type_ptr_to(variant_ty));
403
404         downcast
405     }
406
407     pub fn storage_live<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
408         bx.lifetime_start(self.llval, self.layout.size);
409     }
410
411     pub fn storage_dead<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
412         bx.lifetime_end(self.llval, self.layout.size);
413     }
414 }
415
416 impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
417     pub fn codegen_place(
418         &mut self,
419         bx: &mut Bx,
420         place_ref: &mir::PlaceRef<'_, 'tcx>,
421     ) -> PlaceRef<'tcx, Bx::Value> {
422         debug!("codegen_place(place_ref={:?})", place_ref);
423         let cx = self.cx;
424         let tcx = self.cx.tcx();
425
426         let result = match place_ref {
427             mir::PlaceRef { base: mir::PlaceBase::Local(index), projection: [] } => {
428                 match self.locals[*index] {
429                     LocalRef::Place(place) => {
430                         return place;
431                     }
432                     LocalRef::UnsizedPlace(place) => {
433                         return bx.load_operand(place).deref(cx);
434                     }
435                     LocalRef::Operand(..) => {
436                         bug!("using operand local {:?} as place", place_ref);
437                     }
438                 }
439             }
440             mir::PlaceRef {
441                 base:
442                     mir::PlaceBase::Static(box mir::Static {
443                         ty,
444                         kind: mir::StaticKind::Static,
445                         def_id,
446                     }),
447                 projection: [],
448             } => {
449                 // NB: The layout of a static may be unsized, as is the case when the
450                 // static's type is an extern type.
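                // (Illustrative sketch of such a case, assuming the unstable
                // `extern_types` feature: `extern "C" { type Opaque; static FOO: Opaque; }`
                // gives `FOO` an opaque, unsized layout.)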
451                 let layout = cx.layout_of(self.monomorphize(&ty));
452                 let static_ = bx.get_static(*def_id);
453                 PlaceRef::new_thin_place(bx, static_, layout)
454             }
455             mir::PlaceRef { base, projection: [proj_base @ .., mir::ProjectionElem::Deref] } => {
456                 // Load the pointer from its location.
457                 self.codegen_consume(bx, &mir::PlaceRef { base, projection: proj_base })
458                     .deref(bx.cx())
459             }
460             mir::PlaceRef { base, projection: [proj_base @ .., elem] } => {
461                 // FIXME turn this recursion into iteration
462                 let cg_base =
463                     self.codegen_place(bx, &mir::PlaceRef { base, projection: proj_base });
464
465                 match elem {
466                     mir::ProjectionElem::Deref => bug!(),
467                     mir::ProjectionElem::Field(ref field, _) => {
468                         cg_base.project_field(bx, field.index())
469                     }
470                     mir::ProjectionElem::Index(index) => {
471                         let index = &mir::Operand::Copy(mir::Place::from(*index));
472                         let index = self.codegen_operand(bx, index);
473                         let llindex = index.immediate();
474                         cg_base.project_index(bx, llindex)
475                     }
476                     mir::ProjectionElem::ConstantIndex {
477                         offset,
478                         from_end: false,
479                         min_length: _,
480                     } => {
481                         let lloffset = bx.cx().const_usize(*offset as u64);
482                         cg_base.project_index(bx, lloffset)
483                     }
484                     mir::ProjectionElem::ConstantIndex {
485                         offset,
486                         from_end: true,
487                         min_length: _,
488                     } => {
489                         let lloffset = bx.cx().const_usize(*offset as u64);
490                         let lllen = cg_base.len(bx.cx());
491                         let llindex = bx.sub(lllen, lloffset);
492                         cg_base.project_index(bx, llindex)
493                     }
494                     mir::ProjectionElem::Subslice { from, to, from_end } => {
495                         let mut subslice =
496                             cg_base.project_index(bx, bx.cx().const_usize(*from as u64));
497                         let projected_ty =
498                             PlaceTy::from_ty(cg_base.layout.ty).projection_ty(tcx, elem).ty;
499                         subslice.layout = bx.cx().layout_of(self.monomorphize(&projected_ty));
500
501                         if subslice.layout.is_unsized() {
502                             assert!(from_end, "slice subslices should be `from_end`");
503                             subslice.llextra = Some(bx.sub(
504                                 cg_base.llextra.unwrap(),
505                                 bx.cx().const_usize((*from as u64) + (*to as u64)),
506                             ));
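                            // (Illustrative example: subslicing a slice of length
                            // 10 with `from = 2`, `to = 3`, `from_end = true`, as
                            // in `&x[2..x.len() - 3]`, gives a new length of
                            // 10 - (2 + 3) = 5.)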
507                         }
508
509                         // Cast the place pointer type to the new
510                         // array or slice type (`*[%_; new_len]`).
511                         subslice.llval = bx.pointercast(
512                             subslice.llval,
513                             bx.cx().type_ptr_to(bx.cx().backend_type(subslice.layout)),
514                         );
515
516                         subslice
517                     }
518                     mir::ProjectionElem::Downcast(_, v) => cg_base.project_downcast(bx, *v),
519                 }
520             }
521         };
522         debug!("codegen_place(place={:?}) => {:?}", place_ref, result);
523         result
524     }
525
526     pub fn monomorphized_place_ty(&self, place_ref: &mir::PlaceRef<'_, 'tcx>) -> Ty<'tcx> {
527         let tcx = self.cx.tcx();
528         let place_ty = mir::Place::ty_from(place_ref.base, place_ref.projection, *self.mir, tcx);
529         self.monomorphize(&place_ty.ty)
530     }
531 }