// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use llvm::LLVMConstInBoundsGEP;
use rustc::ty::{self, Ty};
use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, Size, VariantIdx, HasTyCtxt};
use rustc::mir;
use rustc::mir::tcx::PlaceTy;
use builder::{Builder, MemFlags};
use common::{CodegenCx, IntPredicate};
use type_of::LayoutLlvmExt;
use value::Value;
use glue;
use mir::constant::const_alloc_to_llvm;

use interfaces::*;

use super::{FunctionCx, LocalRef};
use super::operand::OperandValue;

#[derive(Copy, Clone, Debug)]
pub struct PlaceRef<'tcx, V> {
    /// A pointer to the contents of the place.
    pub llval: V,

    /// This place's extra data if it is unsized (the length of a slice or
    /// the vtable pointer of a trait object), or `None` if it is sized.
    pub llextra: Option<V>,

    /// The monomorphized type of this place, including variant information.
    pub layout: TyLayout<'tcx>,

    /// The alignment we know for this place.
    pub align: Align,
}

impl PlaceRef<'tcx, &'ll Value> {
    pub fn new_sized(
        llval: &'ll Value,
        layout: TyLayout<'tcx>,
        align: Align,
    ) -> PlaceRef<'tcx, &'ll Value> {
        assert!(!layout.is_unsized());
        PlaceRef {
            llval,
            llextra: None,
            layout,
            align
        }
    }

    pub fn from_const_alloc(
        bx: &Builder<'a, 'll, 'tcx>,
        layout: TyLayout<'tcx>,
        alloc: &mir::interpret::Allocation,
        offset: Size,
    ) -> PlaceRef<'tcx, &'ll Value> {
        let init = const_alloc_to_llvm(bx.cx(), alloc);
        let base_addr = bx.cx().static_addr_of(init, layout.align, None);

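        // Index into the allocation byte-wise: cast the base pointer to
        // `i8*`, apply a constant in-bounds GEP of `offset` bytes, then cast
        // the result to a pointer to the value's LLVM type.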
        let llval = unsafe { LLVMConstInBoundsGEP(
            bx.cx().static_bitcast(base_addr, bx.cx().type_i8p()),
            &bx.cx().const_usize(offset.bytes()),
            1,
        )};
        let llval = bx.cx().static_bitcast(llval, bx.cx().type_ptr_to(layout.llvm_type(bx.cx())));
        PlaceRef::new_sized(llval, layout, alloc.align)
    }

    pub fn alloca(bx: &Builder<'a, 'll, 'tcx>, layout: TyLayout<'tcx>, name: &str)
                  -> PlaceRef<'tcx, &'ll Value> {
        debug!("alloca({:?}: {:?})", name, layout);
        assert!(!layout.is_unsized(), "tried to statically allocate unsized place");
        let tmp = bx.alloca(layout.llvm_type(bx.cx()), name, layout.align);
        Self::new_sized(tmp, layout, layout.align)
    }

    /// Returns a place for an indirect reference to an unsized place.
    pub fn alloca_unsized_indirect(
        bx: &Builder<'a, 'll, 'tcx>,
        layout: TyLayout<'tcx>,
        name: &str,
    ) -> PlaceRef<'tcx, &'ll Value> {
        debug!("alloca_unsized_indirect({:?}: {:?})", name, layout);
        assert!(layout.is_unsized(), "tried to allocate indirect place for sized values");
        let ptr_ty = bx.cx().tcx.mk_mut_ptr(layout.ty);
        let ptr_layout = bx.cx().layout_of(ptr_ty);
        Self::alloca(bx, ptr_layout, name)
    }

    pub fn len(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Value {
        if let layout::FieldPlacement::Array { count, .. } = self.layout.fields {
            if self.layout.is_unsized() {
                assert_eq!(count, 0);
                self.llextra.unwrap()
            } else {
                cx.const_usize(count)
            }
        } else {
            bug!("unexpected layout `{:#?}` in PlaceRef::len", self.layout)
        }
    }
}

impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
    /// Access a field, at a point when the value's case is known.
    pub fn project_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        bx: &Bx,
        ix: usize,
    ) -> PlaceRef<'tcx, Bx::Value> {
        let cx = bx.cx();
        let field = self.layout.field(cx, ix);
        let offset = self.layout.fields.offset(ix);
        let effective_field_align = self.align.restrict_for_offset(offset);

        let simple = || {
            // Unions and newtypes only use an offset of 0.
            let llval = if offset.bytes() == 0 {
                self.llval
            } else if let layout::Abi::ScalarPair(ref a, ref b) = self.layout.abi {
                // A non-zero offset in a scalar pair has to be the second
                // field's, i.e. the first field's size rounded up to the
                // second field's alignment.
                assert_eq!(offset, a.value.size(cx).abi_align(b.value.align(cx)));
                bx.struct_gep(self.llval, 1)
            } else {
                bx.struct_gep(self.llval, self.layout.llvm_field_index(ix))
            };
            PlaceRef {
                // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
                llval: bx.pointercast(llval, cx.type_ptr_to(cx.backend_type(field))),
                llextra: if cx.type_has_metadata(field.ty) {
                    self.llextra
                } else {
                    None
                },
                layout: field,
                align: effective_field_align,
            }
        };

        // Simple cases, which don't need DST adjustment:
        //   * no metadata available - just log the case
        //   * known alignment - sized types, [T], str or a foreign type
        //   * packed struct - there is no alignment padding
        match field.ty.sty {
            _ if self.llextra.is_none() => {
                debug!("unsized field `{}` of `{:?}` has no metadata for adjustment",
                    ix, self.llval);
                return simple();
            }
            _ if !field.is_unsized() => return simple(),
            ty::Slice(..) | ty::Str | ty::Foreign(..) => return simple(),
            ty::Adt(def, _) => {
                if def.repr.packed() {
                    // FIXME(eddyb) generalize the adjustment when we
                    // start supporting packing to larger alignments.
                    assert_eq!(self.layout.align.abi(), 1);
                    return simple();
                }
            }
            _ => {}
        }

        // We need to get the pointer manually now.
        // We do this by casting to a *i8, then offsetting it by the appropriate amount.
        // We do this instead of, say, simply adjusting the pointer from the result of a GEP
        // because the field may have an arbitrary alignment in the LLVM representation
        // anyway.
        //
        // To demonstrate:
        //   struct Foo<T: ?Sized> {
        //      x: u16,
        //      y: T
        //   }
        //
        // The type Foo<Foo<Trait>> is represented in LLVM as { u16, { u16, u8 }}, meaning that
        // the `y` field has 16-bit alignment.

        let meta = self.llextra;

        let unaligned_offset = cx.const_usize(offset.bytes());

        // Get the alignment of the field.
        let (_, unsized_align) = glue::size_and_align_of_dst(bx, field.ty, meta);

        // Bump the unaligned offset up to the appropriate alignment using the
        // following expression:
        //
        //   (unaligned offset + (align - 1)) & -align
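        //
        // For example, an unaligned offset of 5 with an 8-byte alignment
        // rounds up to (5 + 7) & -8 == 12 & !7 == 8.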

        // Calculate the offset.
        let align_sub_1 = bx.sub(unsized_align, cx.const_usize(1u64));
        let offset = bx.and(bx.add(unaligned_offset, align_sub_1),
                            bx.neg(unsized_align));

        debug!("struct_field_ptr: DST field offset: {:?}", offset);

        // Cast and adjust the pointer.
        let byte_ptr = bx.pointercast(self.llval, cx.type_i8p());
        let byte_ptr = bx.gep(byte_ptr, &[offset]);

        // Finally, cast back to the type expected.
        let ll_fty = cx.backend_type(field);
        debug!("struct_field_ptr: Field type is {:?}", ll_fty);

        PlaceRef {
            llval: bx.pointercast(byte_ptr, bx.cx().type_ptr_to(ll_fty)),
            llextra: self.llextra,
            layout: field,
            align: effective_field_align,
        }
    }
}

impl PlaceRef<'tcx, &'ll Value> {
    /// Obtain the actual discriminant of a value.
    pub fn codegen_get_discr(
        self,
        bx: &Builder<'a, 'll, 'tcx>,
        cast_to: Ty<'tcx>
    ) -> &'ll Value {
        let cast_to = bx.cx().layout_of(cast_to).immediate_llvm_type(bx.cx());
        if self.layout.abi.is_uninhabited() {
            return bx.cx().const_undef(cast_to);
        }
        match self.layout.variants {
            layout::Variants::Single { index } => {
                let discr_val = self.layout.ty.ty_adt_def().map_or(
                    index.as_u32() as u128,
                    |def| def.discriminant_for_variant(bx.cx().tcx, index).val);
                return bx.cx().const_uint_big(cast_to, discr_val);
            }
            layout::Variants::Tagged { .. } |
            layout::Variants::NicheFilling { .. } => {},
        }

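        // For both tagged and niche-filling layouts, the tag (or niche) is
        // stored as field 0 of the layout, so load it from there.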
        let discr = self.project_field(bx, 0);
        let lldiscr = bx.load_operand(discr).immediate();
        match self.layout.variants {
            layout::Variants::Single { .. } => bug!(),
            layout::Variants::Tagged { ref tag, .. } => {
                let signed = match tag.value {
                    // We use `i1` for bytes that are always `0` or `1`,
                    // e.g. `#[repr(i8)] enum E { A, B }`, but we can't
                    // let LLVM interpret the `i1` as signed, because
                    // then `i1 1` (i.e. E::B) is effectively `i8 -1`.
                    layout::Int(_, signed) => !tag.is_bool() && signed,
                    _ => false
                };
                bx.intcast(lldiscr, cast_to, signed)
            }
            layout::Variants::NicheFilling {
                dataful_variant,
                ref niche_variants,
                niche_start,
                ..
            } => {
                let niche_llty = discr.layout.immediate_llvm_type(bx.cx());
                if niche_variants.start() == niche_variants.end() {
                    // FIXME(eddyb) Check the actual primitive type here.
                    let niche_llval = if niche_start == 0 {
                        // HACK(eddyb) Using `c_null` as it works on all types.
                        bx.cx().const_null(niche_llty)
                    } else {
                        bx.cx().const_uint_big(niche_llty, niche_start)
                    };
                    bx.select(bx.icmp(IntPredicate::IntEQ, lldiscr, niche_llval),
                        bx.cx().const_uint(cast_to, niche_variants.start().as_u32() as u64),
                        bx.cx().const_uint(cast_to, dataful_variant.as_u32() as u64))
                } else {
                    // Rebase from niche values to discriminant values.
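                    // Subtracting `delta` maps each niche value to its variant
                    // index, so the unsigned comparison below distinguishes
                    // niche variants from the dataful variant, whose values
                    // fall outside `start()..=end()` after the subtraction.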
                    let delta = niche_start.wrapping_sub(niche_variants.start().as_u32() as u128);
                    let lldiscr = bx.sub(lldiscr, bx.cx().const_uint_big(niche_llty, delta));
                    let lldiscr_max =
                        bx.cx().const_uint(niche_llty, niche_variants.end().as_u32() as u64);
                    bx.select(bx.icmp(IntPredicate::IntULE, lldiscr, lldiscr_max),
                        bx.intcast(lldiscr, cast_to, false),
                        bx.cx().const_uint(cast_to, dataful_variant.as_u32() as u64))
                }
            }
        }
    }

    /// Set the discriminant for a new value of the given case of the given
    /// representation.
    pub fn codegen_set_discr(&self, bx: &Builder<'a, 'll, 'tcx>, variant_index: VariantIdx) {
        if self.layout.for_variant(bx.cx(), variant_index).abi.is_uninhabited() {
            return;
        }
        match self.layout.variants {
            layout::Variants::Single { index } => {
                assert_eq!(index, variant_index);
            }
            layout::Variants::Tagged { .. } => {
                let ptr = self.project_field(bx, 0);
                let to = self.layout.ty.ty_adt_def().unwrap()
                    .discriminant_for_variant(bx.tcx(), variant_index)
                    .val;
                bx.store(
                    bx.cx().const_uint_big(ptr.layout.llvm_type(bx.cx()), to),
                    ptr.llval,
                    ptr.align);
            }
            layout::Variants::NicheFilling {
                dataful_variant,
                ref niche_variants,
                niche_start,
                ..
            } => {
                if variant_index != dataful_variant {
                    if bx.sess().target.target.arch == "arm" ||
                       bx.sess().target.target.arch == "aarch64" {
                        // Issue #34427: As workaround for LLVM bug on ARM,
                        // use memset of 0 before assigning niche value.
                        let fill_byte = bx.cx().const_u8(0);
                        let (size, align) = self.layout.size_and_align();
                        let size = bx.cx().const_usize(size.bytes());
                        bx.memset(self.llval, fill_byte, size, align, MemFlags::empty());
                    }

                    let niche = self.project_field(bx, 0);
                    let niche_llty = niche.layout.immediate_llvm_type(bx.cx());
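                    // Map the variant index back into the niche's value range;
                    // this is the inverse of the rebasing done in
                    // `codegen_get_discr` above.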
                    let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
                    let niche_value = (niche_value as u128)
                        .wrapping_add(niche_start);
                    // FIXME(eddyb) Check the actual primitive type here.
                    let niche_llval = if niche_value == 0 {
                        // HACK(eddyb) Using `c_null` as it works on all types.
                        bx.cx().const_null(niche_llty)
                    } else {
                        bx.cx().const_uint_big(niche_llty, niche_value)
                    };
                    OperandValue::Immediate(niche_llval).store(bx, niche);
                }
            }
        }
    }

    pub fn project_index(&self, bx: &Builder<'a, 'll, 'tcx>, llindex: &'ll Value)
                         -> PlaceRef<'tcx, &'ll Value> {
        PlaceRef {
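            // The standard LLVM GEP idiom: the leading 0 steps through the
            // pointer to the aggregate itself, then `llindex` selects the
            // element within it.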
            llval: bx.inbounds_gep(self.llval, &[bx.cx().const_usize(0), llindex]),
            llextra: None,
            layout: self.layout.field(bx.cx(), 0),
            align: self.align
        }
    }

    pub fn project_downcast(&self, bx: &Builder<'a, 'll, 'tcx>, variant_index: VariantIdx)
                            -> PlaceRef<'tcx, &'ll Value> {
        let mut downcast = *self;
        downcast.layout = self.layout.for_variant(bx.cx(), variant_index);

        // Cast to the appropriate variant struct type.
        let variant_ty = downcast.layout.llvm_type(bx.cx());
        downcast.llval = bx.pointercast(downcast.llval, bx.cx().type_ptr_to(variant_ty));

        downcast
    }

    pub fn storage_live(&self, bx: &Builder<'a, 'll, 'tcx>) {
        bx.lifetime_start(self.llval, self.layout.size);
    }

    pub fn storage_dead(&self, bx: &Builder<'a, 'll, 'tcx>) {
        bx.lifetime_end(self.llval, self.layout.size);
    }
}

impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
    pub fn codegen_place(&mut self,
                         bx: &Builder<'a, 'll, 'tcx>,
                         place: &mir::Place<'tcx>)
                         -> PlaceRef<'tcx, &'ll Value> {
        debug!("codegen_place(place={:?})", place);

        let cx = bx.cx();
        let tcx = cx.tcx;

        if let mir::Place::Local(index) = *place {
            match self.locals[index] {
                LocalRef::Place(place) => {
                    return place;
                }
                LocalRef::UnsizedPlace(place) => {
                    return bx.load_operand(place).deref(&cx);
                }
                LocalRef::Operand(..) => {
                    bug!("using operand local {:?} as place", place);
                }
            }
        }

        let result = match *place {
            mir::Place::Local(_) => bug!(), // handled above
            mir::Place::Promoted(box (index, ty)) => {
                let param_env = ty::ParamEnv::reveal_all();
                let cid = mir::interpret::GlobalId {
                    instance: self.instance,
                    promoted: Some(index),
                };
                let layout = cx.layout_of(self.monomorphize(&ty));
                match bx.tcx().const_eval(param_env.and(cid)) {
                    Ok(val) => match val.val {
                        mir::interpret::ConstValue::ByRef(_, alloc, offset) => {
                            PlaceRef::from_const_alloc(bx, layout, alloc, offset)
                        }
                        _ => bug!("promoteds should have an allocation: {:?}", val),
                    },
                    Err(_) => {
                        // This is unreachable as long as runtime and
                        // compile-time agree on values. With floats that
                        // won't always be true, so we generate an abort.
                        let fnname = bx.cx().get_intrinsic(&("llvm.trap"));
                        bx.call(fnname, &[], None);
                        let llval = bx.cx().const_undef(
                            bx.cx().type_ptr_to(layout.llvm_type(bx.cx()))
                        );
                        PlaceRef::new_sized(llval, layout, layout.align)
                    }
                }
            }
            mir::Place::Static(box mir::Static { def_id, ty }) => {
                let layout = cx.layout_of(self.monomorphize(&ty));
                PlaceRef::new_sized(cx.get_static(def_id), layout, layout.align)
            },
            mir::Place::Projection(box mir::Projection {
                ref base,
                elem: mir::ProjectionElem::Deref
            }) => {
                // Load the pointer from its location.
                self.codegen_consume(bx, base).deref(bx.cx())
            }
            mir::Place::Projection(ref projection) => {
                let cg_base = self.codegen_place(bx, &projection.base);

                match projection.elem {
                    mir::ProjectionElem::Deref => bug!(),
                    mir::ProjectionElem::Field(ref field, _) => {
                        cg_base.project_field(bx, field.index())
                    }
                    mir::ProjectionElem::Index(index) => {
                        let index = &mir::Operand::Copy(mir::Place::Local(index));
                        let index = self.codegen_operand(bx, index);
                        let llindex = index.immediate();
                        cg_base.project_index(bx, llindex)
                    }
                    mir::ProjectionElem::ConstantIndex { offset,
                                                         from_end: false,
                                                         min_length: _ } => {
                        let lloffset = bx.cx().const_usize(offset as u64);
                        cg_base.project_index(bx, lloffset)
                    }
                    mir::ProjectionElem::ConstantIndex { offset,
                                                         from_end: true,
                                                         min_length: _ } => {
                        let lloffset = bx.cx().const_usize(offset as u64);
                        let lllen = cg_base.len(bx.cx());
                        let llindex = bx.sub(lllen, lloffset);
                        cg_base.project_index(bx, llindex)
                    }
                    mir::ProjectionElem::Subslice { from, to } => {
                        let mut subslice = cg_base.project_index(bx,
                            bx.cx().const_usize(from as u64));
                        let projected_ty = PlaceTy::Ty { ty: cg_base.layout.ty }
                            .projection_ty(tcx, &projection.elem)
                            .to_ty(bx.tcx());
                        subslice.layout = bx.cx().layout_of(self.monomorphize(&projected_ty));

                        if subslice.layout.is_unsized() {
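                            // `to` counts from the end of the slice, so the
                            // new length is `len - from - to`.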
                            subslice.llextra = Some(bx.sub(cg_base.llextra.unwrap(),
                                bx.cx().const_usize((from as u64) + (to as u64))));
                        }

                        // Cast the place pointer type to the new
                        // array or slice type (*[%_; new_len]).
                        subslice.llval = bx.pointercast(subslice.llval,
                            bx.cx().type_ptr_to(subslice.layout.llvm_type(bx.cx())));

                        subslice
                    }
                    mir::ProjectionElem::Downcast(_, v) => {
                        cg_base.project_downcast(bx, v)
                    }
                }
            }
        };
        debug!("codegen_place(place={:?}) => {:?}", place, result);
        result
    }

    pub fn monomorphized_place_ty(&self, place: &mir::Place<'tcx>) -> Ty<'tcx> {
        let tcx = self.cx.tcx;
        let place_ty = place.ty(self.mir, tcx);
        self.monomorphize(&place_ty.to_ty(tcx))
    }
}