// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use rustc::ty::{self, Ty};
use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, VariantIdx, HasTyCtxt};
use rustc::mir;
use rustc::mir::tcx::PlaceTy;
use MemFlags;
use common::IntPredicate;
use glue;

use traits::*;

use super::{FunctionCx, LocalRef};
use super::operand::OperandValue;

#[derive(Copy, Clone, Debug)]
pub struct PlaceRef<'tcx, V> {
    /// A pointer to the contents of the place.
    pub llval: V,

    /// This place's extra data if it is unsized, or `None` if it is sized.
    pub llextra: Option<V>,

    /// The monomorphized type of this place, including variant information.
    pub layout: TyLayout<'tcx>,

    /// The alignment we know for this place.
    pub align: Align,
}

impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
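    /// Creates a `PlaceRef` for a sized value located at `llval`.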
    pub fn new_sized(
        llval: V,
        layout: TyLayout<'tcx>,
        align: Align,
    ) -> PlaceRef<'tcx, V> {
        assert!(!layout.is_unsized());
        PlaceRef {
            llval,
            llextra: None,
            layout,
            align
        }
    }

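    /// Allocates a stack slot for a sized value of the given layout and
    /// returns a `PlaceRef` to it.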
    pub fn alloca<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        layout: TyLayout<'tcx>,
        name: &str
    ) -> Self {
        debug!("alloca({:?}: {:?})", name, layout);
        assert!(!layout.is_unsized(), "tried to statically allocate unsized place");
        let tmp = bx.alloca(bx.cx().backend_type(layout), name, layout.align);
        Self::new_sized(tmp, layout, layout.align)
    }

    /// Returns a place for an indirect reference to an unsized place.
    pub fn alloca_unsized_indirect<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        layout: TyLayout<'tcx>,
        name: &str,
    ) -> Self {
        debug!("alloca_unsized_indirect({:?}: {:?})", name, layout);
        assert!(layout.is_unsized(), "tried to allocate indirect place for sized values");
        let ptr_ty = bx.cx().tcx().mk_mut_ptr(layout.ty);
        let ptr_layout = bx.cx().layout_of(ptr_ty);
        Self::alloca(bx, ptr_layout, name)
    }

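    /// Returns the length of this place, assuming an array or slice layout:
    /// the constant element count for sized arrays, or the length carried in
    /// `llextra` for unsized slices.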
    pub fn len<Cx: CodegenMethods<'tcx, Value = V>>(
        &self,
        cx: &Cx
    ) -> V {
        if let layout::FieldPlacement::Array { count, .. } = self.layout.fields {
            if self.layout.is_unsized() {
                assert_eq!(count, 0);
                self.llextra.unwrap()
            } else {
                cx.const_usize(count)
            }
        } else {
            bug!("unexpected layout `{:#?}` in PlaceRef::len", self.layout)
        }
    }
}

impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
    /// Access a field, at a point when the value's case is known.
    pub fn project_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        bx: &mut Bx,
        ix: usize,
    ) -> Self {
        let field = self.layout.field(bx.cx(), ix);
        let offset = self.layout.fields.offset(ix);
        let effective_field_align = self.align.restrict_for_offset(offset);

        let mut simple = || {
            // Unions and newtypes only use an offset of 0.
            let llval = if offset.bytes() == 0 {
                self.llval
            } else if let layout::Abi::ScalarPair(ref a, ref b) = self.layout.abi {
                // The zero-offset case is handled above, so this must be the
                // second field: its offset is the first field's size, rounded
                // up to the second field's alignment.
                assert_eq!(offset, a.value.size(bx.cx()).abi_align(b.value.align(bx.cx())));
                bx.struct_gep(self.llval, 1)
            } else {
                bx.struct_gep(self.llval, bx.cx().backend_field_index(self.layout, ix))
            };
            PlaceRef {
                // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
                llval: bx.pointercast(llval, bx.cx().type_ptr_to(bx.cx().backend_type(field))),
                llextra: if bx.cx().type_has_metadata(field.ty) {
                    self.llextra
                } else {
                    None
                },
                layout: field,
                align: effective_field_align,
            }
        };

        // Simple cases, which don't need DST adjustment:
        //   * no metadata available - just log the case
        //   * known alignment - sized types, [T], str or a foreign type
        //   * packed struct - there is no alignment padding
        match field.ty.sty {
            _ if self.llextra.is_none() => {
                debug!("unsized field `{}` of `{:?}` has no metadata for adjustment",
                    ix, self.llval);
                return simple();
            }
            _ if !field.is_unsized() => return simple(),
            ty::Slice(..) | ty::Str | ty::Foreign(..) => return simple(),
            ty::Adt(def, _) => {
                if def.repr.packed() {
                    // FIXME(eddyb) generalize the adjustment when we
                    // start supporting packing to larger alignments.
                    assert_eq!(self.layout.align.abi(), 1);
                    return simple();
                }
            }
            _ => {}
        }

        // We need to get the pointer manually now.
        // We do this by casting to a *i8, then offsetting it by the appropriate amount.
        // We do this instead of, say, simply adjusting the pointer from the result of a GEP
        // because the field may have an arbitrary alignment in the LLVM representation
        // anyway.
        //
        // To demonstrate:
        //   struct Foo<T: ?Sized> {
        //      x: u16,
        //      y: T
        //   }
        //
        // The type Foo<Foo<Trait>> is represented in LLVM as { u16, { u16, u8 }}, meaning that
        // the `y` field has 16-bit alignment.

        let meta = self.llextra;

        let unaligned_offset = bx.cx().const_usize(offset.bytes());

        // Get the alignment of the field
        let (_, unsized_align) = glue::size_and_align_of_dst(bx, field.ty, meta);

        // Bump the unaligned offset up to the appropriate alignment using the
        // following expression:
        //
        //   (unaligned offset + (align - 1)) & -align
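        //
        // For example (illustrative values): with an unaligned offset of 5 and
        // an alignment of 4, (5 + 3) & -4 == 8 & ...11100 == 8, the next
        // multiple of 4. If the offset is already aligned, adding align - 1
        // does not reach the next multiple, so the mask leaves it unchanged.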

        // Calculate offset
        let align_sub_1 = bx.sub(unsized_align, bx.cx().const_usize(1u64));
        let and_lhs = bx.add(unaligned_offset, align_sub_1);
        let and_rhs = bx.neg(unsized_align);
        let offset = bx.and(and_lhs, and_rhs);

        debug!("struct_field_ptr: DST field offset: {:?}", offset);

        // Cast and adjust pointer
        let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p());
        let byte_ptr = bx.gep(byte_ptr, &[offset]);

        // Finally, cast back to the type expected
        let ll_fty = bx.cx().backend_type(field);
        debug!("struct_field_ptr: Field type is {:?}", ll_fty);

        PlaceRef {
            llval: bx.pointercast(byte_ptr, bx.cx().type_ptr_to(ll_fty)),
            llextra: self.llextra,
            layout: field,
            align: effective_field_align,
        }
    }

    /// Obtain the actual discriminant of a value.
    pub fn codegen_get_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        bx: &mut Bx,
        cast_to: Ty<'tcx>
    ) -> V {
        let cast_to = bx.cx().immediate_backend_type(bx.cx().layout_of(cast_to));
        if self.layout.abi.is_uninhabited() {
            return bx.cx().const_undef(cast_to);
        }
        match self.layout.variants {
            layout::Variants::Single { index } => {
                let discr_val = self.layout.ty.ty_adt_def().map_or(
                    index.as_u32() as u128,
                    |def| def.discriminant_for_variant(bx.cx().tcx(), index).val);
                return bx.cx().const_uint_big(cast_to, discr_val);
            }
            layout::Variants::Tagged { .. } |
            layout::Variants::NicheFilling { .. } => {},
        }

        let discr = self.project_field(bx, 0);
        let lldiscr = bx.load_operand(discr).immediate();
        match self.layout.variants {
            layout::Variants::Single { .. } => bug!(),
            layout::Variants::Tagged { ref tag, .. } => {
                let signed = match tag.value {
                    // We use `i1` for bytes that are always `0` or `1`,
                    // e.g. `#[repr(i8)] enum E { A, B }`, but we can't
                    // let LLVM interpret the `i1` as signed, because
                    // then `i1 1` (i.e. E::B) is effectively `i8 -1`.
                    layout::Int(_, signed) => !tag.is_bool() && signed,
                    _ => false
                };
                bx.intcast(lldiscr, cast_to, signed)
            }
            layout::Variants::NicheFilling {
                dataful_variant,
                ref niche_variants,
                niche_start,
                ..
            } => {
                let niche_llty = bx.cx().immediate_backend_type(discr.layout);
                if niche_variants.start() == niche_variants.end() {
                    // FIXME(eddyb) Check the actual primitive type here.
                    let niche_llval = if niche_start == 0 {
                        // HACK(eddyb) Using `c_null` as it works on all types.
                        bx.cx().const_null(niche_llty)
                    } else {
                        bx.cx().const_uint_big(niche_llty, niche_start)
                    };
                    let select_arg = bx.icmp(IntPredicate::IntEQ, lldiscr, niche_llval);
                    bx.select(select_arg,
                        bx.cx().const_uint(cast_to, niche_variants.start().as_u32() as u64),
                        bx.cx().const_uint(cast_to, dataful_variant.as_u32() as u64))
                } else {
                    // Rebase from niche values to discriminant values.
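                    // For example (illustrative): if niche values 2..=4 encode
                    // variants 1..=3, then `niche_start` is 2 and `delta` is 1,
                    // so a loaded niche value of 3 rebases to variant index 2.
                    // Values outside the niche range rebase (with wrapping) to
                    // something above `niche_variants.end()`, so the unsigned
                    // comparison below selects the dataful variant instead.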
                    let delta = niche_start.wrapping_sub(niche_variants.start().as_u32() as u128);
                    let lldiscr = bx.sub(lldiscr, bx.cx().const_uint_big(niche_llty, delta));
                    let lldiscr_max =
                        bx.cx().const_uint(niche_llty, niche_variants.end().as_u32() as u64);
                    let select_arg = bx.icmp(IntPredicate::IntULE, lldiscr, lldiscr_max);
                    let cast = bx.intcast(lldiscr, cast_to, false);
                    bx.select(select_arg,
                        cast,
                        bx.cx().const_uint(cast_to, dataful_variant.as_u32() as u64))
                }
            }
        }
    }

    /// Set the discriminant for a new value of the given case of the given
    /// representation.
    pub fn codegen_set_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &mut Bx,
        variant_index: VariantIdx
    ) {
        if self.layout.for_variant(bx.cx(), variant_index).abi.is_uninhabited() {
            return;
        }
        match self.layout.variants {
            layout::Variants::Single { index } => {
                assert_eq!(index, variant_index);
            }
            layout::Variants::Tagged { .. } => {
                let ptr = self.project_field(bx, 0);
                let to = self.layout.ty.ty_adt_def().unwrap()
                    .discriminant_for_variant(bx.tcx(), variant_index)
                    .val;
                bx.store(
                    bx.cx().const_uint_big(bx.cx().backend_type(ptr.layout), to),
                    ptr.llval,
                    ptr.align);
            }
            layout::Variants::NicheFilling {
                dataful_variant,
                ref niche_variants,
                niche_start,
                ..
            } => {
                if variant_index != dataful_variant {
                    if bx.cx().sess().target.target.arch == "arm" ||
                       bx.cx().sess().target.target.arch == "aarch64" {
                        // Issue #34427: as a workaround for an LLVM bug on ARM,
                        // use a memset of 0 before assigning the niche value.
                        let fill_byte = bx.cx().const_u8(0);
                        let (size, align) = self.layout.size_and_align();
                        let size = bx.cx().const_usize(size.bytes());
                        bx.memset(self.llval, fill_byte, size, align, MemFlags::empty());
                    }

                    let niche = self.project_field(bx, 0);
                    let niche_llty = bx.cx().immediate_backend_type(niche.layout);
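                    // Map the variant index onto its niche value; this is the
                    // inverse of the rebasing in `codegen_get_discr` above.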
                    let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
                    let niche_value = (niche_value as u128)
                        .wrapping_add(niche_start);
                    // FIXME(eddyb) Check the actual primitive type here.
                    let niche_llval = if niche_value == 0 {
                        // HACK(eddyb) Using `c_null` as it works on all types.
                        bx.cx().const_null(niche_llty)
                    } else {
                        bx.cx().const_uint_big(niche_llty, niche_value)
                    };
                    OperandValue::Immediate(niche_llval).store(bx, niche);
                }
            }
        }
    }

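    /// Projects to the element at `llindex` of this array or slice place.
    /// The leading zero index in the GEP steps through the place pointer
    /// itself before indexing into the array.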
    pub fn project_index<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &mut Bx,
        llindex: V
    ) -> Self {
        PlaceRef {
            llval: bx.inbounds_gep(self.llval, &[bx.cx().const_usize(0), llindex]),
            llextra: None,
            layout: self.layout.field(bx.cx(), 0),
            align: self.align
        }
    }

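    /// Restricts this place to one variant of its enum layout, casting the
    /// pointer to that variant's struct type.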
    pub fn project_downcast<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &mut Bx,
        variant_index: VariantIdx
    ) -> Self {
        let mut downcast = *self;
        downcast.layout = self.layout.for_variant(bx.cx(), variant_index);

        // Cast to the appropriate variant struct type.
        let variant_ty = bx.cx().backend_type(downcast.layout);
        downcast.llval = bx.pointercast(downcast.llval, bx.cx().type_ptr_to(variant_ty));

        downcast
    }

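    /// Marks the start of this place's live range (e.g. LLVM's
    /// `lifetime.start` intrinsic in the LLVM backend).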
    pub fn storage_live<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
        bx.lifetime_start(self.llval, self.layout.size);
    }

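    /// Marks the end of this place's live range (e.g. LLVM's
    /// `lifetime.end` intrinsic in the LLVM backend).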
    pub fn storage_dead<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
        bx.lifetime_end(self.llval, self.layout.size);
    }
}

impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
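    /// Codegens a MIR place as a `PlaceRef`. Locals are looked up directly;
    /// everything else is built by recursing through the place's projections.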
    pub fn codegen_place(
        &mut self,
        bx: &mut Bx,
        place: &mir::Place<'tcx>
    ) -> PlaceRef<'tcx, Bx::Value> {
        debug!("codegen_place(place={:?})", place);

        let cx = self.cx;
        let tcx = self.cx.tcx();

        if let mir::Place::Local(index) = *place {
            match self.locals[index] {
                LocalRef::Place(place) => {
                    return place;
                }
                LocalRef::UnsizedPlace(place) => {
                    return bx.load_operand(place).deref(cx);
                }
                LocalRef::Operand(..) => {
                    bug!("using operand local {:?} as place", place);
                }
            }
        }

        let result = match *place {
            mir::Place::Local(_) => bug!(), // handled above
            mir::Place::Promoted(box (index, ty)) => {
                let param_env = ty::ParamEnv::reveal_all();
                let cid = mir::interpret::GlobalId {
                    instance: self.instance,
                    promoted: Some(index),
                };
                let layout = cx.layout_of(self.monomorphize(&ty));
                match bx.tcx().const_eval(param_env.and(cid)) {
                    Ok(val) => match val.val {
                        mir::interpret::ConstValue::ByRef(_, alloc, offset) => {
                            bx.cx().from_const_alloc(layout, alloc, offset)
                        }
                        _ => bug!("promoteds should have an allocation: {:?}", val),
                    },
                    Err(_) => {
                        // This is unreachable as long as runtime and
                        // compile-time agree on values. With floats that
                        // won't always be true, so we generate an abort.
                        let fnname = bx.cx().get_intrinsic(&("llvm.trap"));
                        bx.call(fnname, &[], None);
                        let llval = bx.cx().const_undef(
                            bx.cx().type_ptr_to(bx.cx().backend_type(layout))
                        );
                        PlaceRef::new_sized(llval, layout, layout.align)
                    }
                }
            }
            mir::Place::Static(box mir::Static { def_id, ty }) => {
                let layout = cx.layout_of(self.monomorphize(&ty));
                PlaceRef::new_sized(cx.get_static(def_id), layout, layout.align)
            },
            mir::Place::Projection(box mir::Projection {
                ref base,
                elem: mir::ProjectionElem::Deref
            }) => {
                // Load the pointer from its location.
                self.codegen_consume(bx, base).deref(bx.cx())
            }
            mir::Place::Projection(ref projection) => {
                let cg_base = self.codegen_place(bx, &projection.base);

                match projection.elem {
                    mir::ProjectionElem::Deref => bug!(),
                    mir::ProjectionElem::Field(ref field, _) => {
                        cg_base.project_field(bx, field.index())
                    }
                    mir::ProjectionElem::Index(index) => {
                        let index = &mir::Operand::Copy(mir::Place::Local(index));
                        let index = self.codegen_operand(bx, index);
                        let llindex = index.immediate();
                        cg_base.project_index(bx, llindex)
                    }
                    mir::ProjectionElem::ConstantIndex { offset,
                                                         from_end: false,
                                                         min_length: _ } => {
                        let lloffset = bx.cx().const_usize(offset as u64);
                        cg_base.project_index(bx, lloffset)
                    }
                    mir::ProjectionElem::ConstantIndex { offset,
                                                         from_end: true,
                                                         min_length: _ } => {
                        let lloffset = bx.cx().const_usize(offset as u64);
                        let lllen = cg_base.len(bx.cx());
                        let llindex = bx.sub(lllen, lloffset);
                        cg_base.project_index(bx, llindex)
                    }
                    mir::ProjectionElem::Subslice { from, to } => {
                        let mut subslice = cg_base.project_index(bx,
                            bx.cx().const_usize(from as u64));
                        let projected_ty = PlaceTy::Ty { ty: cg_base.layout.ty }
                            .projection_ty(tcx, &projection.elem).to_ty(tcx);
                        subslice.layout = bx.cx().layout_of(self.monomorphize(&projected_ty));

                        if subslice.layout.is_unsized() {
                            subslice.llextra = Some(bx.sub(cg_base.llextra.unwrap(),
                                bx.cx().const_usize((from as u64) + (to as u64))));
                        }

                        // Cast the place pointer type to the new
                        // array or slice type (*[%_; new_len]).
                        subslice.llval = bx.pointercast(subslice.llval,
                            bx.cx().type_ptr_to(bx.cx().backend_type(subslice.layout)));

                        subslice
                    }
                    mir::ProjectionElem::Downcast(_, v) => {
                        cg_base.project_downcast(bx, v)
                    }
                }
            }
        };
        debug!("codegen_place(place={:?}) => {:?}", place, result);
        result
    }

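    /// Returns the monomorphized type of the given place.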
    pub fn monomorphized_place_ty(&self, place: &mir::Place<'tcx>) -> Ty<'tcx> {
        let tcx = self.cx.tcx();
        let place_ty = place.ty(self.mir, tcx);
        self.monomorphize(&place_ty.to_ty(tcx))
    }
}