src/librustc_codegen_ssa/mir/place.rs
use super::{FunctionCx, LocalRef};
use super::operand::OperandValue;

use crate::MemFlags;
use crate::common::IntPredicate;
use crate::glue;
use crate::traits::*;

use rustc::ty::{self, Instance, Ty};
use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, VariantIdx, HasTyCtxt};
use rustc::mir;
use rustc::mir::tcx::PlaceTy;

#[derive(Copy, Clone, Debug)]
pub struct PlaceRef<'tcx, V> {
    /// A pointer to the contents of the place.
    pub llval: V,

    /// This place's extra data if it is unsized, or `None` if null.
    pub llextra: Option<V>,

    /// The monomorphized type of this place, including variant information.
    pub layout: TyLayout<'tcx>,

    /// The alignment we know for this place.
    pub align: Align,
}

impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
    pub fn new_sized(
        llval: V,
        layout: TyLayout<'tcx>,
    ) -> PlaceRef<'tcx, V> {
        assert!(!layout.is_unsized());
        PlaceRef {
            llval,
            llextra: None,
            layout,
            align: layout.align.abi
        }
    }

    pub fn new_sized_aligned(
        llval: V,
        layout: TyLayout<'tcx>,
        align: Align,
    ) -> PlaceRef<'tcx, V> {
        assert!(!layout.is_unsized());
        PlaceRef {
            llval,
            llextra: None,
            layout,
            align
        }
    }

    fn new_thin_place<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        llval: V,
        layout: TyLayout<'tcx>,
    ) -> PlaceRef<'tcx, V> {
        assert!(!bx.cx().type_has_metadata(layout.ty));
        PlaceRef {
            llval,
            llextra: None,
            layout,
            align: layout.align.abi
        }
    }

    // FIXME(eddyb) pass something else for the name so no work is done
    // unless LLVM IR names are turned on (e.g. for `--emit=llvm-ir`).
    pub fn alloca<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        layout: TyLayout<'tcx>,
    ) -> Self {
        assert!(!layout.is_unsized(), "tried to statically allocate unsized place");
        let tmp = bx.alloca(bx.cx().backend_type(layout), layout.align.abi);
        Self::new_sized(tmp, layout)
    }

    /// Returns a place for an indirect reference to an unsized place.
    // FIXME(eddyb) pass something else for the name so no work is done
    // unless LLVM IR names are turned on (e.g. for `--emit=llvm-ir`).
    pub fn alloca_unsized_indirect<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        layout: TyLayout<'tcx>,
    ) -> Self {
        assert!(layout.is_unsized(), "tried to allocate indirect place for sized values");
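        // The stack slot allocated here holds a (possibly fat) pointer to the unsized
        // data; e.g. for a `str` local this is a `*mut str`, i.e. a data pointer plus
        // a length.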
        let ptr_ty = bx.cx().tcx().mk_mut_ptr(layout.ty);
        let ptr_layout = bx.cx().layout_of(ptr_ty);
        Self::alloca(bx, ptr_layout)
    }

    pub fn len<Cx: ConstMethods<'tcx, Value = V>>(
        &self,
        cx: &Cx
    ) -> V {
        if let layout::FieldPlacement::Array { count, .. } = self.layout.fields {
            if self.layout.is_unsized() {
                assert_eq!(count, 0);
                self.llextra.unwrap()
            } else {
                cx.const_usize(count)
            }
        } else {
            bug!("unexpected layout `{:#?}` in PlaceRef::len", self.layout)
        }
    }
}

impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
    /// Access a field, at a point when the value's case is known.
    pub fn project_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self, bx: &mut Bx,
        ix: usize,
    ) -> Self {
        let field = self.layout.field(bx.cx(), ix);
        let offset = self.layout.fields.offset(ix);
        let effective_field_align = self.align.restrict_for_offset(offset);

        let mut simple = || {
            // Unions and newtypes only use an offset of 0.
            let llval = if offset.bytes() == 0 {
                self.llval
            } else if let layout::Abi::ScalarPair(ref a, ref b) = self.layout.abi {
                // Offsets have to match either first or second field.
                assert_eq!(offset, a.value.size(bx.cx()).align_to(b.value.align(bx.cx()).abi));
                bx.struct_gep(self.llval, 1)
            } else {
                bx.struct_gep(self.llval, bx.cx().backend_field_index(self.layout, ix))
            };
            PlaceRef {
                // HACK(eddyb): have to bitcast pointers until LLVM removes pointee types.
                llval: bx.pointercast(llval, bx.cx().type_ptr_to(bx.cx().backend_type(field))),
                llextra: if bx.cx().type_has_metadata(field.ty) {
                    self.llextra
                } else {
                    None
                },
                layout: field,
                align: effective_field_align,
            }
        };

        // Simple cases, which don't need DST adjustment:
        //   * no metadata available - just log the case
        //   * known alignment - sized types, `[T]`, `str` or a foreign type
        //   * packed struct - there is no alignment padding
        match field.ty.kind {
            _ if self.llextra.is_none() => {
                debug!("unsized field `{}`, of `{:?}` has no metadata for adjustment",
                    ix, self.llval);
                return simple();
            }
            _ if !field.is_unsized() => return simple(),
            ty::Slice(..) | ty::Str | ty::Foreign(..) => return simple(),
            ty::Adt(def, _) => {
                if def.repr.packed() {
                    // FIXME(eddyb) generalize the adjustment when we
                    // start supporting packing to larger alignments.
                    assert_eq!(self.layout.align.abi.bytes(), 1);
                    return simple();
                }
            }
            _ => {}
        }

        // We need to get the pointer manually now.
        // We do this by casting to a `*i8`, then offsetting it by the appropriate amount.
        // We do this instead of, say, simply adjusting the pointer from the result of a GEP
        // because the field may have an arbitrary alignment in the LLVM representation
        // anyway.
        //
        // To demonstrate:
        //
        //     struct Foo<T: ?Sized> {
        //         x: u16,
        //         y: T
        //     }
        //
        // The type `Foo<Foo<Trait>>` is represented in LLVM as `{ u16, { u16, u8 }}`, meaning that
        // the `y` field has 16-bit alignment.

        let meta = self.llextra;

        let unaligned_offset = bx.cx().const_usize(offset.bytes());

        // Get the alignment of the field
        let (_, unsized_align) = glue::size_and_align_of_dst(bx, field.ty, meta);

        // Bump the unaligned offset up to the appropriate alignment using the
        // following expression:
        //
        //     (unaligned offset + (align - 1)) & -align
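        //
        // For example, with an unaligned offset of 2 and a dynamic alignment of 8:
        //
        //     (2 + (8 - 1)) & -8  ==  9 & 0b...11111000  ==  8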

        // Calculate offset.
        let align_sub_1 = bx.sub(unsized_align, bx.cx().const_usize(1u64));
        let and_lhs = bx.add(unaligned_offset, align_sub_1);
        let and_rhs = bx.neg(unsized_align);
        let offset = bx.and(and_lhs, and_rhs);

        debug!("struct_field_ptr: DST field offset: {:?}", offset);

        // Cast and adjust pointer.
        let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p());
        let byte_ptr = bx.gep(byte_ptr, &[offset]);

        // Finally, cast back to the type expected.
        let ll_fty = bx.cx().backend_type(field);
        debug!("struct_field_ptr: Field type is {:?}", ll_fty);

        PlaceRef {
            llval: bx.pointercast(byte_ptr, bx.cx().type_ptr_to(ll_fty)),
            llextra: self.llextra,
            layout: field,
            align: effective_field_align,
        }
    }

    /// Obtain the actual discriminant of a value.
    pub fn codegen_get_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        bx: &mut Bx,
        cast_to: Ty<'tcx>
    ) -> V {
        let cast_to = bx.cx().immediate_backend_type(bx.cx().layout_of(cast_to));
        if self.layout.abi.is_uninhabited() {
            return bx.cx().const_undef(cast_to);
        }
        let (discr_scalar, discr_kind, discr_index) = match self.layout.variants {
            layout::Variants::Single { index } => {
                let discr_val = self.layout.ty.discriminant_for_variant(bx.cx().tcx(), index)
                    .map_or(index.as_u32() as u128, |discr| discr.val);
                return bx.cx().const_uint_big(cast_to, discr_val);
            }
            layout::Variants::Multiple { ref discr, ref discr_kind, discr_index, .. } => {
                (discr, discr_kind, discr_index)
            }
        };

        // Read the tag/niche-encoded discriminant from memory.
        let encoded_discr = self.project_field(bx, discr_index);
        let encoded_discr = bx.load_operand(encoded_discr);

        // Decode the discriminant (specifically if it's niche-encoded).
        match *discr_kind {
            layout::DiscriminantKind::Tag => {
                let signed = match discr_scalar.value {
                    // We use `i1` for bytes that are always `0` or `1`,
                    // e.g., `#[repr(i8)] enum E { A, B }`, but we can't
                    // let LLVM interpret the `i1` as signed, because
                    // then `i1 1` (i.e., `E::B`) is effectively `i8 -1`.
                    layout::Int(_, signed) => !discr_scalar.is_bool() && signed,
                    _ => false
                };
                bx.intcast(encoded_discr.immediate(), cast_to, signed)
            }
            layout::DiscriminantKind::Niche {
                dataful_variant,
                ref niche_variants,
                niche_start,
            } => {
                // Rebase from niche values to discriminants, and check
                // whether the result is in range for the niche variants.
                let niche_llty = bx.cx().immediate_backend_type(encoded_discr.layout);
                let encoded_discr = encoded_discr.immediate();

                // We first compute the "relative discriminant" (wrt `niche_variants`),
                // that is, if `n = niche_variants.end() - niche_variants.start()`,
                // we remap `niche_start..=niche_start + n` (which may wrap around)
                // to (non-wrap-around) `0..=n`, to be able to check whether the
                // discriminant corresponds to a niche variant with one comparison.
                // We also can't go directly to the (variant index) discriminant
                // and check that it is in the range `niche_variants`, because
                // that might not fit in the same type, on top of needing an extra
                // comparison (see also the comment on `let niche_discr`).
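                //
                // For example, with `niche_variants = 3..=5` and `niche_start = 10`,
                // the encoded values `10..=12` are remapped to `0..=2`; any relative
                // value outside `0..=2` then denotes the dataful variant.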
                let relative_discr = if niche_start == 0 {
                    // Avoid subtracting `0`, which wouldn't work for pointers.
                    // FIXME(eddyb) check the actual primitive type here.
                    encoded_discr
                } else {
                    bx.sub(encoded_discr, bx.cx().const_uint_big(niche_llty, niche_start))
                };
                let relative_max = niche_variants.end().as_u32() - niche_variants.start().as_u32();
                let is_niche = {
                    let relative_max = if relative_max == 0 {
                        // Avoid calling `const_uint`, which wouldn't work for pointers.
                        // FIXME(eddyb) check the actual primitive type here.
                        bx.cx().const_null(niche_llty)
                    } else {
                        bx.cx().const_uint(niche_llty, relative_max as u64)
                    };
                    bx.icmp(IntPredicate::IntULE, relative_discr, relative_max)
                };

                // NOTE(eddyb) this addition needs to be performed on the final
                // type, in case the niche itself can't represent all variant
                // indices (e.g. `u8` niche with more than `256` variants,
                // but enough uninhabited variants so that the remaining variants
                // fit in the niche).
                // In other words, `niche_variants.end - niche_variants.start`
                // is representable in the niche, but `niche_variants.end`
                // might not be, in extreme cases.
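                // For instance, a `u8` niche can hold `relative_discr` as long as
                // `relative_max <= 255`, even if `niche_variants.end()` itself exceeds
                // `255` as a variant index; adding `niche_variants.start()` back is
                // therefore done in the wider `cast_to` type.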
                let niche_discr = {
                    let relative_discr = if relative_max == 0 {
                        // HACK(eddyb) since we have only one niche, we know which
                        // one it is, and we can avoid having a dynamic value here.
                        bx.cx().const_uint(cast_to, 0)
                    } else {
                        bx.intcast(relative_discr, cast_to, false)
                    };
                    bx.add(
                        relative_discr,
                        bx.cx().const_uint(cast_to, niche_variants.start().as_u32() as u64),
                    )
                };

                bx.select(
                    is_niche,
                    niche_discr,
                    bx.cx().const_uint(cast_to, dataful_variant.as_u32() as u64),
                )
            }
        }
    }

    /// Sets the discriminant for a new value of the given case of the given
    /// representation.
    pub fn codegen_set_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &mut Bx,
        variant_index: VariantIdx
    ) {
        if self.layout.for_variant(bx.cx(), variant_index).abi.is_uninhabited() {
            // We play it safe by using a well-defined `abort`, but we could go for immediate UB
            // if that turns out to be helpful.
            bx.abort();
            return;
        }
        match self.layout.variants {
            layout::Variants::Single { index } => {
                assert_eq!(index, variant_index);
            }
            layout::Variants::Multiple {
                discr_kind: layout::DiscriminantKind::Tag,
                discr_index,
                ..
            } => {
                let ptr = self.project_field(bx, discr_index);
                let to =
                    self.layout.ty.discriminant_for_variant(bx.tcx(), variant_index).unwrap().val;
                bx.store(
                    bx.cx().const_uint_big(bx.cx().backend_type(ptr.layout), to),
                    ptr.llval,
                    ptr.align);
            }
            layout::Variants::Multiple {
                discr_kind: layout::DiscriminantKind::Niche {
                    dataful_variant,
                    ref niche_variants,
                    niche_start,
                },
                discr_index,
                ..
            } => {
                if variant_index != dataful_variant {
                    if bx.cx().sess().target.target.arch == "arm" ||
                       bx.cx().sess().target.target.arch == "aarch64" {
                        // FIXME(#34427): as workaround for LLVM bug on ARM,
                        // use memset of 0 before assigning niche value.
                        let fill_byte = bx.cx().const_u8(0);
                        let size = bx.cx().const_usize(self.layout.size.bytes());
                        bx.memset(self.llval, fill_byte, size, self.align, MemFlags::empty());
                    }

                    let niche = self.project_field(bx, discr_index);
                    let niche_llty = bx.cx().immediate_backend_type(niche.layout);
                    let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
                    let niche_value = (niche_value as u128)
                        .wrapping_add(niche_start);
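                    // For example, with `niche_variants = 3..=5` and `niche_start = 10`,
                    // variant `4` is stored as `(4 - 3) + 10 = 11` (the inverse of the
                    // decoding done in `codegen_get_discr`).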
                    // FIXME(eddyb): check the actual primitive type here.
                    let niche_llval = if niche_value == 0 {
                        // HACK(eddyb): using `c_null` as it works on all types.
                        bx.cx().const_null(niche_llty)
                    } else {
                        bx.cx().const_uint_big(niche_llty, niche_value)
                    };
                    OperandValue::Immediate(niche_llval).store(bx, niche);
                }
            }
        }
    }

    pub fn project_index<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &mut Bx,
        llindex: V
    ) -> Self {
        // Statically compute the offset if we can, otherwise just use the element size,
        // as this will yield the lowest alignment.
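        // For instance, an element size of 4 at constant index 3 gives a known offset
        // of 12 bytes, restricting e.g. a 16-byte-aligned base to 4-byte alignment;
        // with a dynamic index we conservatively fall back to the element size itself.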
        let layout = self.layout.field(bx, 0);
        let offset = if let Some(llindex) = bx.const_to_opt_uint(llindex) {
            layout.size.checked_mul(llindex, bx).unwrap_or(layout.size)
        } else {
            layout.size
        };

        PlaceRef {
            llval: bx.inbounds_gep(self.llval, &[bx.cx().const_usize(0), llindex]),
            llextra: None,
            layout,
            align: self.align.restrict_for_offset(offset),
        }
    }

    pub fn project_downcast<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &mut Bx,
        variant_index: VariantIdx
    ) -> Self {
        let mut downcast = *self;
        downcast.layout = self.layout.for_variant(bx.cx(), variant_index);

        // Cast to the appropriate variant struct type.
        let variant_ty = bx.cx().backend_type(downcast.layout);
        downcast.llval = bx.pointercast(downcast.llval, bx.cx().type_ptr_to(variant_ty));

        downcast
    }

    pub fn storage_live<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
        bx.lifetime_start(self.llval, self.layout.size);
    }

    pub fn storage_dead<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
        bx.lifetime_end(self.llval, self.layout.size);
    }
}

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
    pub fn codegen_place(
        &mut self,
        bx: &mut Bx,
        place_ref: &mir::PlaceRef<'_, 'tcx>
    ) -> PlaceRef<'tcx, Bx::Value> {
        debug!("codegen_place(place_ref={:?})", place_ref);
        let cx = self.cx;
        let tcx = self.cx.tcx();

        let result = match place_ref {
            mir::PlaceRef {
                base: mir::PlaceBase::Local(index),
                projection: [],
            } => {
                match self.locals[*index] {
                    LocalRef::Place(place) => {
                        return place;
                    }
                    LocalRef::UnsizedPlace(place) => {
                        return bx.load_operand(place).deref(cx);
                    }
                    LocalRef::Operand(..) => {
                        bug!("using operand local {:?} as place", place_ref);
                    }
                }
            }
            mir::PlaceRef {
                base: mir::PlaceBase::Static(box mir::Static {
                    ty,
                    kind: mir::StaticKind::Promoted(promoted, substs),
                    def_id,
                }),
                projection: [],
            } => {
                let param_env = ty::ParamEnv::reveal_all();
                let instance = Instance::new(*def_id, self.monomorphize(substs));
                let cid = mir::interpret::GlobalId {
                    instance: instance,
                    promoted: Some(*promoted),
                };
                let layout = cx.layout_of(self.monomorphize(&ty));
                match bx.tcx().const_eval(param_env.and(cid)) {
                    Ok(val) => match val.val {
                        ty::ConstKind::Value(mir::interpret::ConstValue::ByRef {
                            alloc, offset
                        }) => {
                            bx.cx().from_const_alloc(layout, alloc, offset)
                        }
                        _ => bug!("promoteds should have an allocation: {:?}", val),
                    },
                    Err(_) => {
                        // This is unreachable as long as runtime
                        // and compile-time agree perfectly.
                        // With floats that won't always be true,
                        // so we generate a (safe) abort.
                        bx.abort();
                        // We still have to return a place but it doesn't matter,
                        // this code is unreachable.
                        let llval = bx.cx().const_undef(
                            bx.cx().type_ptr_to(bx.cx().backend_type(layout))
                        );
                        PlaceRef::new_sized(llval, layout)
                    }
                }
            }
            mir::PlaceRef {
                base: mir::PlaceBase::Static(box mir::Static {
                    ty,
                    kind: mir::StaticKind::Static,
                    def_id,
                }),
                projection: [],
            } => {
                // NB: The layout of a static may be unsized as is the case when working
                // with a static that is an extern_type.
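                // (e.g. a foreign static declared in an `extern` block whose type is an
                // `extern type`, which has no statically known size or alignment).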
                let layout = cx.layout_of(self.monomorphize(&ty));
                let static_ = bx.get_static(*def_id);
                PlaceRef::new_thin_place(bx, static_, layout)
            },
            mir::PlaceRef {
                base,
                projection: [proj_base @ .., mir::ProjectionElem::Deref],
            } => {
                // Load the pointer from its location.
                self.codegen_consume(bx, &mir::PlaceRef {
                    base,
                    projection: proj_base,
                }).deref(bx.cx())
            }
            mir::PlaceRef {
                base,
                projection: [proj_base @ .., elem],
            } => {
                // FIXME turn this recursion into iteration
                let cg_base = self.codegen_place(bx, &mir::PlaceRef {
                    base,
                    projection: proj_base,
                });

                match elem {
                    mir::ProjectionElem::Deref => bug!(),
                    mir::ProjectionElem::Field(ref field, _) => {
                        cg_base.project_field(bx, field.index())
                    }
                    mir::ProjectionElem::Index(index) => {
                        let index = &mir::Operand::Copy(
                            mir::Place::from(*index)
                        );
                        let index = self.codegen_operand(bx, index);
                        let llindex = index.immediate();
                        cg_base.project_index(bx, llindex)
                    }
                    mir::ProjectionElem::ConstantIndex { offset,
                                                         from_end: false,
                                                         min_length: _ } => {
                        let lloffset = bx.cx().const_usize(*offset as u64);
                        cg_base.project_index(bx, lloffset)
                    }
                    mir::ProjectionElem::ConstantIndex { offset,
                                                         from_end: true,
                                                         min_length: _ } => {
                        let lloffset = bx.cx().const_usize(*offset as u64);
                        let lllen = cg_base.len(bx.cx());
                        let llindex = bx.sub(lllen, lloffset);
                        cg_base.project_index(bx, llindex)
                    }
                    mir::ProjectionElem::Subslice { from, to, from_end } => {
                        let mut subslice = cg_base.project_index(bx,
                            bx.cx().const_usize(*from as u64));
                        let projected_ty = PlaceTy::from_ty(cg_base.layout.ty)
                            .projection_ty(tcx, elem).ty;
                        subslice.layout = bx.cx().layout_of(self.monomorphize(&projected_ty));

                        if subslice.layout.is_unsized() {
                            assert!(from_end, "slice subslices should be `from_end`");
                            subslice.llextra = Some(bx.sub(cg_base.llextra.unwrap(),
                                bx.cx().const_usize((*from as u64) + (*to as u64))));
                        }

                        // Cast the place pointer type to the new
                        // array or slice type (`*[%_; new_len]`).
                        subslice.llval = bx.pointercast(subslice.llval,
                            bx.cx().type_ptr_to(bx.cx().backend_type(subslice.layout)));

                        subslice
                    }
                    mir::ProjectionElem::Downcast(_, v) => {
                        cg_base.project_downcast(bx, *v)
                    }
                }
            }
        };
        debug!("codegen_place(place={:?}) => {:?}", place_ref, result);
        result
    }

    pub fn monomorphized_place_ty(&self, place_ref: &mir::PlaceRef<'_, 'tcx>) -> Ty<'tcx> {
        let tcx = self.cx.tcx();
        let place_ty = mir::Place::ty_from(
            place_ref.base,
            place_ref.projection,
            *self.mir,
            tcx,
        );
        self.monomorphize(&place_ty.ty)
    }
}