// src/librustc_codegen_llvm/mir/place.rs
// (from rust.git, commit "Beginning of moving all backend-agnostic code to rustc_codegen_ssa")
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use rustc::ty::{self, Ty};
use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, VariantIdx, HasTyCtxt};
use rustc::mir;
use rustc::mir::tcx::PlaceTy;
use builder::MemFlags;
use rustc_codegen_ssa::common::IntPredicate;
use type_of::LayoutLlvmExt;
use glue;

use interfaces::*;

use super::{FunctionCx, LocalRef};
use super::operand::OperandValue;

#[derive(Copy, Clone, Debug)]
pub struct PlaceRef<'tcx, V> {
    /// Pointer to the contents of the place
    pub llval: V,

    /// This place's extra data if it is unsized, or `None` if it is sized
    pub llextra: Option<V>,

    /// Monomorphized type of this place, including variant information
    pub layout: TyLayout<'tcx>,

    /// What alignment we know for this place
    pub align: Align,
}

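// Illustrative sketch (hypothetical values): for a sized place such as an
// `i32`, `llextra` is `None`; for an unsized place such as the pointee of a
// `&[u8]`, the fields would look roughly like
//
//     PlaceRef {
//         llval: /* pointer to the first element */,
//         llextra: Some(/* the slice length, as a usize value */),
//         layout: /* layout of [u8] */,
//         align: /* alignment of u8 */,
//     }
//
// i.e. `llval` plus `llextra` carry the same data as a fat pointer.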
impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
    pub fn new_sized(
        llval: V,
        layout: TyLayout<'tcx>,
        align: Align,
    ) -> PlaceRef<'tcx, V> {
        assert!(!layout.is_unsized());
        PlaceRef {
            llval,
            llextra: None,
            layout,
            align
        }
    }

    pub fn alloca<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &Bx,
        layout: TyLayout<'tcx>,
        name: &str
    ) -> Self {
        debug!("alloca({:?}: {:?})", name, layout);
        assert!(!layout.is_unsized(), "tried to statically allocate unsized place");
        let tmp = bx.alloca(bx.cx().backend_type(layout), name, layout.align);
        Self::new_sized(tmp, layout, layout.align)
    }

    /// Returns a place for an indirect reference to an unsized place.
    pub fn alloca_unsized_indirect<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &Bx,
        layout: TyLayout<'tcx>,
        name: &str,
    ) -> Self {
        debug!("alloca_unsized_indirect({:?}: {:?})", name, layout);
        assert!(layout.is_unsized(), "tried to allocate indirect place for sized values");
        let ptr_ty = bx.cx().tcx().mk_mut_ptr(layout.ty);
        let ptr_layout = bx.cx().layout_of(ptr_ty);
        Self::alloca(bx, ptr_layout, name)
    }

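    /// Returns the length of this place's array contents: the statically
    /// known element count for a sized array, or the length carried in
    /// `llextra` for an unsized `[T]`.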
    pub fn len<Cx: CodegenMethods<'tcx, Value = V>>(
        &self,
        cx: &Cx
    ) -> V {
        if let layout::FieldPlacement::Array { count, .. } = self.layout.fields {
            if self.layout.is_unsized() {
                assert_eq!(count, 0);
                self.llextra.unwrap()
            } else {
                cx.const_usize(count)
            }
        } else {
            bug!("unexpected layout `{:#?}` in PlaceRef::len", self.layout)
        }
    }
}

impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
    /// Access a field, at a point when the value's case is known.
    pub fn project_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self, bx: &Bx,
        ix: usize,
    ) -> Self {
        let cx = bx.cx();
        let field = self.layout.field(cx, ix);
        let offset = self.layout.fields.offset(ix);
        let effective_field_align = self.align.restrict_for_offset(offset);

        let simple = || {
            // Unions and newtypes only use an offset of 0.
            let llval = if offset.bytes() == 0 {
                self.llval
            } else if let layout::Abi::ScalarPair(ref a, ref b) = self.layout.abi {
                // Offsets have to match either first or second field.
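                // (Illustrative example: for a `ScalarPair` of `i8` and `i32`,
                // the second field sits at the `i8`'s size of 1 rounded up to
                // the `i32`'s alignment of 4, i.e. at offset 4.)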
                assert_eq!(offset, a.value.size(cx).abi_align(b.value.align(cx)));
                bx.struct_gep(self.llval, 1)
            } else {
                bx.struct_gep(self.llval, self.layout.llvm_field_index(ix))
            };
            PlaceRef {
                // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
                llval: bx.pointercast(llval, cx.type_ptr_to(cx.backend_type(field))),
                llextra: if cx.type_has_metadata(field.ty) {
                    self.llextra
                } else {
                    None
                },
                layout: field,
                align: effective_field_align,
            }
        };

        // Simple cases, which don't need DST adjustment:
        //   * no metadata available - just log the case
        //   * known alignment - sized types, [T], str or a foreign type
        //   * packed struct - there is no alignment padding
        match field.ty.sty {
            _ if self.llextra.is_none() => {
                debug!("Unsized field `{}` of `{:?}` has no metadata for adjustment",
                    ix, self.llval);
                return simple();
            }
            _ if !field.is_unsized() => return simple(),
            ty::Slice(..) | ty::Str | ty::Foreign(..) => return simple(),
            ty::Adt(def, _) => {
                if def.repr.packed() {
                    // FIXME(eddyb) generalize the adjustment when we
                    // start supporting packing to larger alignments.
                    assert_eq!(self.layout.align.abi(), 1);
                    return simple();
                }
            }
            _ => {}
        }

        // We need to get the pointer manually now.
        // We do this by casting to a *i8, then offsetting it by the appropriate amount.
        // We do this instead of, say, simply adjusting the pointer from the result of a GEP
        // because the field may have an arbitrary alignment in the LLVM representation
        // anyway.
        //
        // To demonstrate:
        //   struct Foo<T: ?Sized> {
        //      x: u16,
        //      y: T
        //   }
        //
        // The type Foo<Foo<Trait>> is represented in LLVM as { u16, { u16, u8 }}, meaning that
        // the `y` field has 16-bit alignment.

        let meta = self.llextra;

        let unaligned_offset = cx.const_usize(offset.bytes());

        // Get the alignment of the field
        let (_, unsized_align) = glue::size_and_align_of_dst(bx, field.ty, meta);

        // Bump the unaligned offset up to the appropriate alignment using the
        // following expression:
        //
        //   (unaligned offset + (align - 1)) & -align
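        //
        // Worked example (illustrative): with an unaligned offset of 2 and an
        // alignment of 4, (2 + 3) & -4 = 5 & !3 = 4, i.e. the offset is
        // rounded up to the next multiple of 4; an already-aligned offset is
        // unchanged, e.g. (4 + 3) & -4 = 4.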

        // Calculate offset
        let align_sub_1 = bx.sub(unsized_align, cx.const_usize(1u64));
        let offset = bx.and(bx.add(unaligned_offset, align_sub_1),
                            bx.neg(unsized_align));

        debug!("struct_field_ptr: DST field offset: {:?}", offset);

        // Cast and adjust pointer
        let byte_ptr = bx.pointercast(self.llval, cx.type_i8p());
        let byte_ptr = bx.gep(byte_ptr, &[offset]);

        // Finally, cast back to the type expected
        let ll_fty = cx.backend_type(field);
        debug!("struct_field_ptr: Field type is {:?}", ll_fty);

        PlaceRef {
            llval: bx.pointercast(byte_ptr, bx.cx().type_ptr_to(ll_fty)),
            llextra: self.llextra,
            layout: field,
            align: effective_field_align,
        }
    }

    /// Obtain the actual discriminant of a value.
    pub fn codegen_get_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        bx: &Bx,
        cast_to: Ty<'tcx>
    ) -> V {
        let cast_to = bx.cx().immediate_backend_type(bx.cx().layout_of(cast_to));
        if self.layout.abi.is_uninhabited() {
            return bx.cx().const_undef(cast_to);
        }
        match self.layout.variants {
            layout::Variants::Single { index } => {
                let discr_val = self.layout.ty.ty_adt_def().map_or(
                    index.as_u32() as u128,
                    |def| def.discriminant_for_variant(bx.cx().tcx(), index).val);
                return bx.cx().const_uint_big(cast_to, discr_val);
            }
            layout::Variants::Tagged { .. } |
            layout::Variants::NicheFilling { .. } => {},
        }

        let discr = self.project_field(bx, 0);
        let lldiscr = bx.load_operand(discr).immediate();
        match self.layout.variants {
            layout::Variants::Single { .. } => bug!(),
            layout::Variants::Tagged { ref tag, .. } => {
                let signed = match tag.value {
                    // We use `i1` for bytes that are always `0` or `1`,
                    // e.g. `#[repr(i8)] enum E { A, B }`, but we can't
                    // let LLVM interpret the `i1` as signed, because
                    // then `i1 1` (i.e. E::B) is effectively `i8 -1`.
                    layout::Int(_, signed) => !tag.is_bool() && signed,
                    _ => false
                };
                bx.intcast(lldiscr, cast_to, signed)
            }
            layout::Variants::NicheFilling {
                dataful_variant,
                ref niche_variants,
                niche_start,
                ..
            } => {
                let niche_llty = bx.cx().immediate_backend_type(discr.layout);
                if niche_variants.start() == niche_variants.end() {
                    // FIXME(eddyb) Check the actual primitive type here.
                    let niche_llval = if niche_start == 0 {
                        // HACK(eddyb) Using `c_null` as it works on all types.
                        bx.cx().const_null(niche_llty)
                    } else {
                        bx.cx().const_uint_big(niche_llty, niche_start)
                    };
                    bx.select(bx.icmp(IntPredicate::IntEQ, lldiscr, niche_llval),
                        bx.cx().const_uint(cast_to, niche_variants.start().as_u32() as u64),
                        bx.cx().const_uint(cast_to, dataful_variant.as_u32() as u64))
                } else {
                    // Rebase from niche values to discriminant values.
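                    // Illustrative example (hypothetical values): with
                    // `niche_variants` = 1..=3 and `niche_start` = 5, `delta`
                    // is 4, so tag values 5, 6, 7 rebase to discriminants
                    // 1, 2, 3; any other tag value fails the unsigned `<=`
                    // check below and selects `dataful_variant` instead.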
                    let delta = niche_start.wrapping_sub(niche_variants.start().as_u32() as u128);
                    let lldiscr = bx.sub(lldiscr, bx.cx().const_uint_big(niche_llty, delta));
                    let lldiscr_max =
                        bx.cx().const_uint(niche_llty, niche_variants.end().as_u32() as u64);
                    bx.select(bx.icmp(IntPredicate::IntULE, lldiscr, lldiscr_max),
                        bx.intcast(lldiscr, cast_to, false),
                        bx.cx().const_uint(cast_to, dataful_variant.as_u32() as u64))
                }
            }
        }
    }

    /// Set the discriminant for a new value of the given case of the given
    /// representation.
    pub fn codegen_set_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &Bx,
        variant_index: VariantIdx
    ) {
        if self.layout.for_variant(bx.cx(), variant_index).abi.is_uninhabited() {
            return;
        }
        match self.layout.variants {
            layout::Variants::Single { index } => {
                assert_eq!(index, variant_index);
            }
            layout::Variants::Tagged { .. } => {
                let ptr = self.project_field(bx, 0);
                let to = self.layout.ty.ty_adt_def().unwrap()
                    .discriminant_for_variant(bx.tcx(), variant_index)
                    .val;
                bx.store(
                    bx.cx().const_uint_big(bx.cx().backend_type(ptr.layout), to),
                    ptr.llval,
                    ptr.align);
            }
            layout::Variants::NicheFilling {
                dataful_variant,
                ref niche_variants,
                niche_start,
                ..
            } => {
                if variant_index != dataful_variant {
                    if bx.cx().sess().target.target.arch == "arm" ||
                       bx.cx().sess().target.target.arch == "aarch64" {
                        // Issue #34427: as a workaround for an LLVM bug on ARM,
                        // use a memset of 0 before assigning the niche value.
                        let fill_byte = bx.cx().const_u8(0);
                        let (size, align) = self.layout.size_and_align();
                        let size = bx.cx().const_usize(size.bytes());
                        bx.memset(self.llval, fill_byte, size, align, MemFlags::empty());
                    }

                    let niche = self.project_field(bx, 0);
                    let niche_llty = bx.cx().immediate_backend_type(niche.layout);
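                    // The stored tag is the variant's position within
                    // `niche_variants`, offset by `niche_start`. Illustrative
                    // example (hypothetical values): writing variant 3 with
                    // `niche_variants` = 1..=3 and `niche_start` = 5 stores
                    // (3 - 1) + 5 = 7, matching the read-side rebase in
                    // `codegen_get_discr` above.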
                    let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
                    let niche_value = (niche_value as u128)
                        .wrapping_add(niche_start);
                    // FIXME(eddyb) Check the actual primitive type here.
                    let niche_llval = if niche_value == 0 {
                        // HACK(eddyb) Using `c_null` as it works on all types.
                        bx.cx().const_null(niche_llty)
                    } else {
                        bx.cx().const_uint_big(niche_llty, niche_value)
                    };
                    OperandValue::Immediate(niche_llval).store(bx, niche);
                }
            }
        }
    }

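    /// Projects to an element of the array behind this place: an `inbounds`
    /// GEP with indices `[0, llindex]`, returning a place whose layout is the
    /// element's layout.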
    pub fn project_index<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &Bx,
        llindex: V
    ) -> Self {
        PlaceRef {
            llval: bx.inbounds_gep(self.llval, &[bx.cx().const_usize(0), llindex]),
            llextra: None,
            layout: self.layout.field(bx.cx(), 0),
            align: self.align
        }
    }

    pub fn project_downcast<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &Bx,
        variant_index: VariantIdx
    ) -> Self {
        let mut downcast = *self;
        downcast.layout = self.layout.for_variant(bx.cx(), variant_index);

        // Cast to the appropriate variant struct type.
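        // (E.g., illustratively: downcasting an enum place to one of its
        // variants reinterprets the pointer as pointing at that variant's
        // own struct layout, so later field projections index into the
        // variant's fields.)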
        let variant_ty = bx.cx().backend_type(downcast.layout);
        downcast.llval = bx.pointercast(downcast.llval, bx.cx().type_ptr_to(variant_ty));

        downcast
    }

    pub fn storage_live<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &Bx) {
        bx.lifetime_start(self.llval, self.layout.size);
    }

    pub fn storage_dead<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &Bx) {
        bx.lifetime_end(self.llval, self.layout.size);
    }
}

impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
    pub fn codegen_place(
        &mut self,
        bx: &Bx,
        place: &mir::Place<'tcx>
    ) -> PlaceRef<'tcx, Bx::Value> {
        debug!("codegen_place(place={:?})", place);

        let cx = bx.cx();
        let tcx = cx.tcx();

        if let mir::Place::Local(index) = *place {
            match self.locals[index] {
                LocalRef::Place(place) => {
                    return place;
                }
                LocalRef::UnsizedPlace(place) => {
                    return bx.load_operand(place).deref(cx);
                }
                LocalRef::Operand(..) => {
                    bug!("using operand local {:?} as place", place);
                }
            }
        }

        let result = match *place {
            mir::Place::Local(_) => bug!(), // handled above
            mir::Place::Promoted(box (index, ty)) => {
                let param_env = ty::ParamEnv::reveal_all();
                let cid = mir::interpret::GlobalId {
                    instance: self.instance,
                    promoted: Some(index),
                };
                let layout = cx.layout_of(self.monomorphize(&ty));
                match bx.tcx().const_eval(param_env.and(cid)) {
                    Ok(val) => match val.val {
                        mir::interpret::ConstValue::ByRef(_, alloc, offset) => {
                            bx.cx().from_const_alloc(layout, alloc, offset)
                        }
                        _ => bug!("promoteds should have an allocation: {:?}", val),
                    },
                    Err(_) => {
                        // This is unreachable as long as runtime and
                        // compile-time agree on values. With floats that
                        // won't always be true, so we generate an abort.
                        let fnname = bx.cx().get_intrinsic(&("llvm.trap"));
                        bx.call(fnname, &[], None);
                        let llval = bx.cx().const_undef(
                            bx.cx().type_ptr_to(bx.cx().backend_type(layout))
                        );
                        PlaceRef::new_sized(llval, layout, layout.align)
                    }
                }
            }
            mir::Place::Static(box mir::Static { def_id, ty }) => {
                let layout = cx.layout_of(self.monomorphize(&ty));
                PlaceRef::new_sized(cx.get_static(def_id), layout, layout.align)
            },
            mir::Place::Projection(box mir::Projection {
                ref base,
                elem: mir::ProjectionElem::Deref
            }) => {
                // Load the pointer from its location.
                self.codegen_consume(bx, base).deref(bx.cx())
            }
            mir::Place::Projection(ref projection) => {
                let cg_base = self.codegen_place(bx, &projection.base);

                match projection.elem {
                    mir::ProjectionElem::Deref => bug!(),
                    mir::ProjectionElem::Field(ref field, _) => {
                        cg_base.project_field(bx, field.index())
                    }
                    mir::ProjectionElem::Index(index) => {
                        let index = &mir::Operand::Copy(mir::Place::Local(index));
                        let index = self.codegen_operand(bx, index);
                        let llindex = index.immediate();
                        cg_base.project_index(bx, llindex)
                    }
                    mir::ProjectionElem::ConstantIndex { offset,
                                                         from_end: false,
                                                         min_length: _ } => {
                        let lloffset = bx.cx().const_usize(offset as u64);
                        cg_base.project_index(bx, lloffset)
                    }
                    mir::ProjectionElem::ConstantIndex { offset,
                                                         from_end: true,
                                                         min_length: _ } => {
                        let lloffset = bx.cx().const_usize(offset as u64);
                        let lllen = cg_base.len(bx.cx());
                        let llindex = bx.sub(lllen, lloffset);
                        cg_base.project_index(bx, llindex)
                    }
                    mir::ProjectionElem::Subslice { from, to } => {
                        let mut subslice = cg_base.project_index(bx,
                            bx.cx().const_usize(from as u64));
                        let projected_ty = PlaceTy::Ty { ty: cg_base.layout.ty }
                            .projection_ty(tcx, &projection.elem).to_ty(tcx);
                        subslice.layout = bx.cx().layout_of(self.monomorphize(&projected_ty));

                        if subslice.layout.is_unsized() {
                            subslice.llextra = Some(bx.sub(cg_base.llextra.unwrap(),
                                bx.cx().const_usize((from as u64) + (to as u64))));
                        }

                        // Cast the place pointer type to the new
                        // array or slice type (*[%_; new_len]).
                        subslice.llval = bx.pointercast(subslice.llval,
                            bx.cx().type_ptr_to(bx.cx().backend_type(subslice.layout)));

                        subslice
                    }
                    mir::ProjectionElem::Downcast(_, v) => {
                        cg_base.project_downcast(bx, v)
                    }
                }
            }
        };
        debug!("codegen_place(place={:?}) => {:?}", place, result);
        result
    }

    pub fn monomorphized_place_ty(&self, place: &mir::Place<'tcx>) -> Ty<'tcx> {
        let tcx = self.cx.tcx();
        let place_ty = place.ty(self.mir, tcx);
        self.monomorphize(&place_ty.to_ty(tcx))
    }
}