// src/librustc_codegen_ssa/mir/place.rs

use rustc::ty::{self, Instance, Ty};
use rustc::ty::subst::Subst;
use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, VariantIdx, HasTyCtxt};
use rustc::mir;
use rustc::mir::tcx::PlaceTy;
use crate::MemFlags;
use crate::common::IntPredicate;
use crate::glue;

use crate::traits::*;

use super::{FunctionCx, LocalRef};
use super::operand::OperandValue;

#[derive(Copy, Clone, Debug)]
pub struct PlaceRef<'tcx, V> {
    /// Pointer to the contents of the place.
    pub llval: V,

    /// This place's extra data if it is unsized, or `None` if sized.
    pub llextra: Option<V>,

    /// Monomorphized type of this place, including variant information.
    pub layout: TyLayout<'tcx>,

    /// What alignment we know for this place.
    pub align: Align,
}

impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
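    /// Creates a `PlaceRef` pointing at `llval` for a sized value; no extra
    /// metadata is needed, so `llextra` is `None`.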
    pub fn new_sized(
        llval: V,
        layout: TyLayout<'tcx>,
        align: Align,
    ) -> PlaceRef<'tcx, V> {
        assert!(!layout.is_unsized());
        PlaceRef {
            llval,
            llextra: None,
            layout,
            align
        }
    }

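    /// Creates a `PlaceRef` for a value addressed by a pointer that carries no
    /// metadata (a "thin" pointer), e.g. a sized type or an extern type.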
    fn new_thin_place<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        llval: V,
        layout: TyLayout<'tcx>,
        align: Align,
    ) -> PlaceRef<'tcx, V> {
        assert!(!bx.cx().type_has_metadata(layout.ty));
        PlaceRef {
            llval,
            llextra: None,
            layout,
            align
        }
    }

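    /// Allocates a stack slot for a sized value and returns a `PlaceRef` to it,
    /// using the layout's ABI alignment.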
    pub fn alloca<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        layout: TyLayout<'tcx>,
        name: &str
    ) -> Self {
        debug!("alloca({:?}: {:?})", name, layout);
        assert!(!layout.is_unsized(), "tried to statically allocate unsized place");
        let tmp = bx.alloca(bx.cx().backend_type(layout), name, layout.align.abi);
        Self::new_sized(tmp, layout, layout.align.abi)
    }

    /// Returns a place for an indirect reference to an unsized place.
    pub fn alloca_unsized_indirect<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        layout: TyLayout<'tcx>,
        name: &str,
    ) -> Self {
        debug!("alloca_unsized_indirect({:?}: {:?})", name, layout);
        assert!(layout.is_unsized(), "tried to allocate indirect place for sized values");
        let ptr_ty = bx.cx().tcx().mk_mut_ptr(layout.ty);
        let ptr_layout = bx.cx().layout_of(ptr_ty);
        Self::alloca(bx, ptr_layout, name)
    }

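    /// Returns the number of elements in this place, which must have array
    /// field placement: the constant length for sized arrays, or the length
    /// carried in `llextra` for slices.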
    pub fn len<Cx: ConstMethods<'tcx, Value = V>>(
        &self,
        cx: &Cx
    ) -> V {
        if let layout::FieldPlacement::Array { count, .. } = self.layout.fields {
            if self.layout.is_unsized() {
                assert_eq!(count, 0);
                self.llextra.unwrap()
            } else {
                cx.const_usize(count)
            }
        } else {
            bug!("unexpected layout `{:#?}` in PlaceRef::len", self.layout)
        }
    }
}

impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
    /// Access a field, at a point when the value's case is known.
    pub fn project_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self, bx: &mut Bx,
        ix: usize,
    ) -> Self {
        let field = self.layout.field(bx.cx(), ix);
        let offset = self.layout.fields.offset(ix);
        let effective_field_align = self.align.restrict_for_offset(offset);

        let mut simple = || {
            // Unions and newtypes only use an offset of 0.
            let llval = if offset.bytes() == 0 {
                self.llval
            } else if let layout::Abi::ScalarPair(ref a, ref b) = self.layout.abi {
                // Offsets have to match either first or second field.
                assert_eq!(offset, a.value.size(bx.cx()).align_to(b.value.align(bx.cx()).abi));
                bx.struct_gep(self.llval, 1)
            } else {
                bx.struct_gep(self.llval, bx.cx().backend_field_index(self.layout, ix))
            };
            PlaceRef {
                // HACK(eddyb): have to bitcast pointers until LLVM removes pointee types.
                llval: bx.pointercast(llval, bx.cx().type_ptr_to(bx.cx().backend_type(field))),
                llextra: if bx.cx().type_has_metadata(field.ty) {
                    self.llextra
                } else {
                    None
                },
                layout: field,
                align: effective_field_align,
            }
        };

        // Simple cases, which don't need DST adjustment:
        //   * no metadata available - just log the case
        //   * known alignment - sized types, `[T]`, `str` or a foreign type
        //   * packed struct - there is no alignment padding
        match field.ty.sty {
            _ if self.llextra.is_none() => {
                debug!("unsized field `{}` of `{:?}` has no metadata for adjustment",
                    ix, self.llval);
                return simple();
            }
            _ if !field.is_unsized() => return simple(),
            ty::Slice(..) | ty::Str | ty::Foreign(..) => return simple(),
            ty::Adt(def, _) => {
                if def.repr.packed() {
                    // FIXME(eddyb) generalize the adjustment when we
                    // start supporting packing to larger alignments.
                    assert_eq!(self.layout.align.abi.bytes(), 1);
                    return simple();
                }
            }
            _ => {}
        }

        // We need to get the pointer manually now.
        // We do this by casting to a `*i8`, then offsetting it by the appropriate amount.
        // We do this instead of, say, simply adjusting the pointer from the result of a GEP
        // because the field may have an arbitrary alignment in the LLVM representation
        // anyway.
        //
        // To demonstrate:
        //
        //     struct Foo<T: ?Sized> {
        //         x: u16,
        //         y: T
        //     }
        //
        // The type `Foo<Foo<Trait>>` is represented in LLVM as `{ u16, { u16, u8 }}`, meaning that
        // the `y` field has 16-bit alignment.

        let meta = self.llextra;

        let unaligned_offset = bx.cx().const_usize(offset.bytes());

        // Get the alignment of the field
        let (_, unsized_align) = glue::size_and_align_of_dst(bx, field.ty, meta);

        // Bump the unaligned offset up to the appropriate alignment using the
        // following expression:
        //
        //     (unaligned offset + (align - 1)) & -align
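        //
        // For example, an unaligned offset of 6 with an alignment of 4 becomes
        // (6 + 3) & -4 = 8.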

        // Calculate offset.
        let align_sub_1 = bx.sub(unsized_align, bx.cx().const_usize(1u64));
        let and_lhs = bx.add(unaligned_offset, align_sub_1);
        let and_rhs = bx.neg(unsized_align);
        let offset = bx.and(and_lhs, and_rhs);

        debug!("struct_field_ptr: DST field offset: {:?}", offset);

        // Cast and adjust pointer.
        let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p());
        let byte_ptr = bx.gep(byte_ptr, &[offset]);

        // Finally, cast back to the type expected.
        let ll_fty = bx.cx().backend_type(field);
        debug!("struct_field_ptr: Field type is {:?}", ll_fty);

        PlaceRef {
            llval: bx.pointercast(byte_ptr, bx.cx().type_ptr_to(ll_fty)),
            llextra: self.llextra,
            layout: field,
            align: effective_field_align,
        }
    }

    /// Obtain the actual discriminant of a value.
    pub fn codegen_get_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        bx: &mut Bx,
        cast_to: Ty<'tcx>
    ) -> V {
        let cast_to = bx.cx().immediate_backend_type(bx.cx().layout_of(cast_to));
        if self.layout.abi.is_uninhabited() {
            return bx.cx().const_undef(cast_to);
        }
        let (discr_scalar, discr_kind, discr_index) = match self.layout.variants {
            layout::Variants::Single { index } => {
                let discr_val = self.layout.ty.discriminant_for_variant(bx.cx().tcx(), index)
                    .map_or(index.as_u32() as u128, |discr| discr.val);
                return bx.cx().const_uint_big(cast_to, discr_val);
            }
            layout::Variants::Multiple { ref discr, ref discr_kind, discr_index, .. } => {
                (discr, discr_kind, discr_index)
            }
        };

        // Read the tag/niche-encoded discriminant from memory.
        let encoded_discr = self.project_field(bx, discr_index);
        let encoded_discr = bx.load_operand(encoded_discr);

        // Decode the discriminant (specifically if it's niche-encoded).
        match *discr_kind {
            layout::DiscriminantKind::Tag => {
                let signed = match discr_scalar.value {
                    // We use `i1` for bytes that are always `0` or `1`,
                    // e.g., `#[repr(i8)] enum E { A, B }`, but we can't
                    // let LLVM interpret the `i1` as signed, because
                    // then `i1 1` (i.e., `E::B`) is effectively `i8 -1`.
                    layout::Int(_, signed) => !discr_scalar.is_bool() && signed,
                    _ => false
                };
                bx.intcast(encoded_discr.immediate(), cast_to, signed)
            }
            layout::DiscriminantKind::Niche {
                dataful_variant,
                ref niche_variants,
                niche_start,
            } => {
                // Rebase from niche values to discriminants, and check
                // whether the result is in range for the niche variants.
                let niche_llty = bx.cx().immediate_backend_type(encoded_discr.layout);
                let encoded_discr = encoded_discr.immediate();

                // We first compute the "relative discriminant" (wrt `niche_variants`),
                // that is, if `n = niche_variants.end() - niche_variants.start()`,
                // we remap `niche_start..=niche_start + n` (which may wrap around)
                // to (non-wrap-around) `0..=n`, to be able to check whether the
                // discriminant corresponds to a niche variant with one comparison.
                // We also can't go directly to the (variant index) discriminant
                // and check that it is in the range `niche_variants`, because
                // that might not fit in the same type, on top of needing an extra
                // comparison (see also the comment on `let niche_discr`).
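                //
                // For example, with a `u8` niche where `niche_start = 254` and
                // `niche_variants = 3..=5`, the encoded values 254, 255 and 0
                // map to relative discriminants 0, 1 and 2 (the subtraction
                // wraps), which then select variant indices 3, 4 and 5.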
                let relative_discr = if niche_start == 0 {
                    // Avoid subtracting `0`, which wouldn't work for pointers.
                    // FIXME(eddyb) check the actual primitive type here.
                    encoded_discr
                } else {
                    bx.sub(encoded_discr, bx.cx().const_uint_big(niche_llty, niche_start))
                };
                let relative_max = niche_variants.end().as_u32() - niche_variants.start().as_u32();
                let is_niche = {
                    let relative_max = if relative_max == 0 {
                        // Avoid calling `const_uint`, which wouldn't work for pointers.
                        // FIXME(eddyb) check the actual primitive type here.
                        bx.cx().const_null(niche_llty)
                    } else {
                        bx.cx().const_uint(niche_llty, relative_max as u64)
                    };
                    bx.icmp(IntPredicate::IntULE, relative_discr, relative_max)
                };

                // NOTE(eddyb) this addition needs to be performed on the final
                // type, in case the niche itself can't represent all variant
                // indices (e.g. `u8` niche with more than `256` variants,
                // but enough uninhabited variants so that the remaining variants
                // fit in the niche).
                // In other words, `niche_variants.end - niche_variants.start`
                // is representable in the niche, but `niche_variants.end`
                // might not be, in extreme cases.
                let niche_discr = {
                    let relative_discr = if relative_max == 0 {
                        // HACK(eddyb) since we have only one niche, we know which
                        // one it is, and we can avoid having a dynamic value here.
                        bx.cx().const_uint(cast_to, 0)
                    } else {
                        bx.intcast(relative_discr, cast_to, false)
                    };
                    bx.add(
                        relative_discr,
                        bx.cx().const_uint(cast_to, niche_variants.start().as_u32() as u64),
                    )
                };

                bx.select(
                    is_niche,
                    niche_discr,
                    bx.cx().const_uint(cast_to, dataful_variant.as_u32() as u64),
                )
            }
        }
    }

    /// Sets the discriminant for a new value of the given case of the given
    /// representation.
    pub fn codegen_set_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &mut Bx,
        variant_index: VariantIdx
    ) {
        if self.layout.for_variant(bx.cx(), variant_index).abi.is_uninhabited() {
            return;
        }
        match self.layout.variants {
            layout::Variants::Single { index } => {
                assert_eq!(index, variant_index);
            }
            layout::Variants::Multiple {
                discr_kind: layout::DiscriminantKind::Tag,
                discr_index,
                ..
            } => {
                let ptr = self.project_field(bx, discr_index);
                let to =
                    self.layout.ty.discriminant_for_variant(bx.tcx(), variant_index).unwrap().val;
                bx.store(
                    bx.cx().const_uint_big(bx.cx().backend_type(ptr.layout), to),
                    ptr.llval,
                    ptr.align);
            }
            layout::Variants::Multiple {
                discr_kind: layout::DiscriminantKind::Niche {
                    dataful_variant,
                    ref niche_variants,
                    niche_start,
                },
                discr_index,
                ..
            } => {
                if variant_index != dataful_variant {
                    if bx.cx().sess().target.target.arch == "arm" ||
                       bx.cx().sess().target.target.arch == "aarch64" {
                        // FIXME(#34427): as workaround for LLVM bug on ARM,
                        // use memset of 0 before assigning niche value.
                        let fill_byte = bx.cx().const_u8(0);
                        let size = bx.cx().const_usize(self.layout.size.bytes());
                        bx.memset(self.llval, fill_byte, size, self.align, MemFlags::empty());
                    }

                    let niche = self.project_field(bx, discr_index);
                    let niche_llty = bx.cx().immediate_backend_type(niche.layout);
                    let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
                    let niche_value = (niche_value as u128)
                        .wrapping_add(niche_start);
                    // FIXME(eddyb): check the actual primitive type here.
                    let niche_llval = if niche_value == 0 {
                        // HACK(eddyb): using `c_null` as it works on all types.
                        bx.cx().const_null(niche_llty)
                    } else {
                        bx.cx().const_uint_big(niche_llty, niche_value)
                    };
                    OperandValue::Immediate(niche_llval).store(bx, niche);
                }
            }
        }
    }

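    /// Projects to element `llindex` of an array or slice place. The result's
    /// alignment is this place's alignment restricted by the element offset
    /// (or by the element size, if the index is not a compile-time constant).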
    pub fn project_index<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &mut Bx,
        llindex: V
    ) -> Self {
        // Statically compute the offset if we can, otherwise just use the element size,
        // as this will yield the lowest alignment.
        let layout = self.layout.field(bx, 0);
        let offset = if bx.is_const_integral(llindex) {
            layout.size.checked_mul(bx.const_to_uint(llindex), bx).unwrap_or(layout.size)
        } else {
            layout.size
        };

        PlaceRef {
            llval: bx.inbounds_gep(self.llval, &[bx.cx().const_usize(0), llindex]),
            llextra: None,
            layout,
            align: self.align.restrict_for_offset(offset),
        }
    }

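    /// Projects to the given enum variant: the layout is restricted to that
    /// variant and the pointer is cast to the variant's struct type.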
    pub fn project_downcast<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &mut Bx,
        variant_index: VariantIdx
    ) -> Self {
        let mut downcast = *self;
        downcast.layout = self.layout.for_variant(bx.cx(), variant_index);

        // Cast to the appropriate variant struct type.
        let variant_ty = bx.cx().backend_type(downcast.layout);
        downcast.llval = bx.pointercast(downcast.llval, bx.cx().type_ptr_to(variant_ty));

        downcast
    }

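    /// Marks the beginning of this place's storage lifetime (`lifetime_start`).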
    pub fn storage_live<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
        bx.lifetime_start(self.llval, self.layout.size);
    }

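    /// Marks the end of this place's storage lifetime (`lifetime_end`).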
    pub fn storage_dead<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
        bx.lifetime_end(self.llval, self.layout.size);
    }
}

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
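    /// Codegens a MIR place as a `PlaceRef`: resolves the base (a local, a
    /// promoted constant, or a static) and then applies each projection in turn.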
    pub fn codegen_place(
        &mut self,
        bx: &mut Bx,
        place_ref: &mir::PlaceRef<'_, 'tcx>
    ) -> PlaceRef<'tcx, Bx::Value> {
        debug!("codegen_place(place_ref={:?})", place_ref);
        let cx = self.cx;
        let tcx = self.cx.tcx();

        let result = match &place_ref {
            mir::PlaceRef {
                base: mir::PlaceBase::Local(index),
                projection: None,
            } => {
                match self.locals[*index] {
                    LocalRef::Place(place) => {
                        return place;
                    }
                    LocalRef::UnsizedPlace(place) => {
                        return bx.load_operand(place).deref(cx);
                    }
                    LocalRef::Operand(..) => {
                        bug!("using operand local {:?} as place", place_ref);
                    }
                }
            }
            mir::PlaceRef {
                base: mir::PlaceBase::Static(box mir::Static {
                    ty,
                    kind: mir::StaticKind::Promoted(promoted, substs),
                    def_id,
                }),
                projection: None,
            } => {
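                // Evaluate the promoted constant and codegen a reference to its
                // backing allocation; if const-evaluation fails, codegen an abort.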
                debug!("promoted={:?}, def_id={:?}, substs={:?}, self_substs={:?}",
                       promoted, def_id, substs, self.instance.substs);
                let param_env = ty::ParamEnv::reveal_all();
                let instance = Instance::new(*def_id, substs.subst(bx.tcx(), self.instance.substs));
                debug!("instance: {:?}", instance);
                let cid = mir::interpret::GlobalId {
                    instance,
                    promoted: Some(*promoted),
                };
                let mono_ty = tcx.subst_and_normalize_erasing_regions(
                    instance.substs,
                    param_env,
                    ty,
                );
                let layout = cx.layout_of(mono_ty);
                match bx.tcx().const_eval(param_env.and(cid)) {
                    Ok(val) => match val.val {
                        mir::interpret::ConstValue::ByRef { alloc, offset } => {
                            bx.cx().from_const_alloc(layout, alloc, offset)
                        }
                        _ => bug!("promoteds should have an allocation: {:?}", val),
                    },
                    Err(_) => {
                        // This is unreachable as long as runtime
                        // and compile-time agree on values.
                        // With floats, that won't always be true,
                        // so we generate an abort.
                        bx.abort();
                        let llval = bx.cx().const_undef(
                            bx.cx().type_ptr_to(bx.cx().backend_type(layout))
                        );
                        PlaceRef::new_sized(llval, layout, layout.align.abi)
                    }
                }
            }
            mir::PlaceRef {
                base: mir::PlaceBase::Static(box mir::Static {
                    ty,
                    kind: mir::StaticKind::Static,
                    def_id,
                }),
                projection: None,
            } => {
                // NB: The layout of a static may be unsized as is the case when working
                // with a static that is an extern_type.
                let layout = cx.layout_of(self.monomorphize(&ty));
                let static_ = bx.get_static(*def_id);
                PlaceRef::new_thin_place(bx, static_, layout, layout.align.abi)
            },
            mir::PlaceRef {
                base,
                projection: Some(box mir::Projection {
                    base: proj_base,
                    elem: mir::ProjectionElem::Deref,
                }),
            } => {
                // Load the pointer from its location.
                self.codegen_consume(bx, &mir::PlaceRef {
                    base,
                    projection: proj_base,
                }).deref(bx.cx())
            }
            mir::PlaceRef {
                base,
                projection: Some(projection),
            } => {
                // FIXME turn this recursion into iteration
                let cg_base = self.codegen_place(bx, &mir::PlaceRef {
                    base,
                    projection: &projection.base,
                });

                match projection.elem {
                    mir::ProjectionElem::Deref => bug!(),
                    mir::ProjectionElem::Field(ref field, _) => {
                        cg_base.project_field(bx, field.index())
                    }
                    mir::ProjectionElem::Index(index) => {
                        let index = &mir::Operand::Copy(
                            mir::Place::from(index)
                        );
                        let index = self.codegen_operand(bx, index);
                        let llindex = index.immediate();
                        cg_base.project_index(bx, llindex)
                    }
                    mir::ProjectionElem::ConstantIndex { offset,
                                                         from_end: false,
                                                         min_length: _ } => {
                        let lloffset = bx.cx().const_usize(offset as u64);
                        cg_base.project_index(bx, lloffset)
                    }
                    mir::ProjectionElem::ConstantIndex { offset,
                                                         from_end: true,
                                                         min_length: _ } => {
                        let lloffset = bx.cx().const_usize(offset as u64);
                        let lllen = cg_base.len(bx.cx());
                        let llindex = bx.sub(lllen, lloffset);
                        cg_base.project_index(bx, llindex)
                    }
                    mir::ProjectionElem::Subslice { from, to } => {
                        let mut subslice = cg_base.project_index(bx,
                            bx.cx().const_usize(from as u64));
                        let projected_ty = PlaceTy::from_ty(cg_base.layout.ty)
                            .projection_ty(tcx, &projection.elem).ty;
                        subslice.layout = bx.cx().layout_of(self.monomorphize(&projected_ty));

                        if subslice.layout.is_unsized() {
                            subslice.llextra = Some(bx.sub(cg_base.llextra.unwrap(),
                                bx.cx().const_usize((from as u64) + (to as u64))));
                        }

                        // Cast the place pointer type to the new
                        // array or slice type (`*[%_; new_len]`).
                        subslice.llval = bx.pointercast(subslice.llval,
                            bx.cx().type_ptr_to(bx.cx().backend_type(subslice.layout)));

                        subslice
                    }
                    mir::ProjectionElem::Downcast(_, v) => {
                        cg_base.project_downcast(bx, v)
                    }
                }
            }
        };
        debug!("codegen_place(place={:?}) => {:?}", place_ref, result);
        result
    }

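    /// Computes the monomorphized type of the given MIR place.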
    pub fn monomorphized_place_ty(&self, place_ref: &mir::PlaceRef<'_, 'tcx>) -> Ty<'tcx> {
        let tcx = self.cx.tcx();
        let place_ty = mir::Place::ty_from(place_ref.base, place_ref.projection, self.mir, tcx);
        self.monomorphize(&place_ty.ty)
    }
}