// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use llvm::{self, LLVMConstInBoundsGEP};
use rustc::ty::{self, Ty};
use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, Size, VariantIdx};
use rustc::mir;
use rustc::mir::tcx::PlaceTy;
use base;
use builder::Builder;
use common::{CodegenCx, IntPredicate};
use consts;
use type_of::LayoutLlvmExt;
use type_::Type;
use value::Value;
use glue;
use mir::constant::const_alloc_to_llvm;

use interfaces::{BuilderMethods, CommonMethods};

use super::{FunctionCx, LocalRef};
use super::operand::{OperandRef, OperandValue};

#[derive(Copy, Clone, Debug)]
pub struct PlaceRef<'tcx, V> {
    /// Pointer to the contents of the place
    pub llval: V,

    /// This place's extra data if it is unsized, or `None`
    pub llextra: Option<V>,

    /// Monomorphized type of this place, including variant information
    pub layout: TyLayout<'tcx>,

    /// What alignment we know for this place
    pub align: Align,
}

impl PlaceRef<'tcx, &'ll Value> {
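    /// Creates a place referring to a sized value at `llval`, with the
    /// given layout and alignment.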
    pub fn new_sized(
        llval: &'ll Value,
        layout: TyLayout<'tcx>,
        align: Align,
    ) -> PlaceRef<'tcx, &'ll Value> {
        assert!(!layout.is_unsized());
        PlaceRef {
            llval,
            llextra: None,
            layout,
            align
        }
    }

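    /// Codegens a constant allocation into a global and returns a place
    /// pointing `offset` bytes into it.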
    pub fn from_const_alloc(
        bx: &Builder<'a, 'll, 'tcx>,
        layout: TyLayout<'tcx>,
        alloc: &mir::interpret::Allocation,
        offset: Size,
    ) -> PlaceRef<'tcx, &'ll Value> {
        let init = const_alloc_to_llvm(bx.cx(), alloc);
        let base_addr = consts::addr_of(bx.cx(), init, layout.align, None);

        let llval = unsafe { LLVMConstInBoundsGEP(
            consts::bitcast(base_addr, Type::i8p(bx.cx())),
            &CodegenCx::c_usize(bx.cx(), offset.bytes()),
            1,
        )};
        let llval = consts::bitcast(llval, layout.llvm_type(bx.cx()).ptr_to());
        PlaceRef::new_sized(llval, layout, alloc.align)
    }

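    /// Allocates a stack slot for a sized value and returns a place
    /// referring to it.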
    pub fn alloca(bx: &Builder<'a, 'll, 'tcx>, layout: TyLayout<'tcx>, name: &str)
                  -> PlaceRef<'tcx, &'ll Value> {
        debug!("alloca({:?}: {:?})", name, layout);
        assert!(!layout.is_unsized(), "tried to statically allocate unsized place");
        let tmp = bx.alloca(layout.llvm_type(bx.cx()), name, layout.align);
        Self::new_sized(tmp, layout, layout.align)
    }

    /// Returns a place for an indirect reference to an unsized place.
    pub fn alloca_unsized_indirect(
        bx: &Builder<'a, 'll, 'tcx>,
        layout: TyLayout<'tcx>,
        name: &str,
    ) -> PlaceRef<'tcx, &'ll Value> {
        debug!("alloca_unsized_indirect({:?}: {:?})", name, layout);
        assert!(layout.is_unsized(), "tried to allocate indirect place for sized values");
        let ptr_ty = bx.cx().tcx.mk_mut_ptr(layout.ty);
        let ptr_layout = bx.cx().layout_of(ptr_ty);
        Self::alloca(bx, ptr_layout, name)
    }

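    /// Returns the length of this place, which must have array or slice
    /// layout; for an unsized slice the length comes from `llextra`.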
    pub fn len(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Value {
        if let layout::FieldPlacement::Array { count, .. } = self.layout.fields {
            if self.layout.is_unsized() {
                assert_eq!(count, 0);
                self.llextra.unwrap()
            } else {
                CodegenCx::c_usize(cx, count)
            }
        } else {
            bug!("unexpected layout `{:#?}` in PlaceRef::len", self.layout)
        }
    }

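    /// Loads the value of this place into an `OperandRef`, attaching range
    /// or nonnull metadata to scalar loads where the layout allows it.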
    pub fn load(&self, bx: &Builder<'a, 'll, 'tcx>) -> OperandRef<'tcx, &'ll Value> {
        debug!("PlaceRef::load: {:?}", self);

        assert_eq!(self.llextra.is_some(), self.layout.is_unsized());

        if self.layout.is_zst() {
            return OperandRef::new_zst(bx.cx(), self.layout);
        }

        let scalar_load_metadata = |load, scalar: &layout::Scalar| {
            let vr = scalar.valid_range.clone();
            match scalar.value {
                layout::Int(..) => {
                    let range = scalar.valid_range_exclusive(bx.cx());
                    if range.start != range.end {
                        bx.range_metadata(load, range);
                    }
                }
                layout::Pointer if vr.start() < vr.end() && !vr.contains(&0) => {
                    bx.nonnull_metadata(load);
                }
                _ => {}
            }
        };

        let val = if let Some(llextra) = self.llextra {
            OperandValue::Ref(self.llval, Some(llextra), self.align)
        } else if self.layout.is_llvm_immediate() {
            let mut const_llval = None;
            unsafe {
                if let Some(global) = llvm::LLVMIsAGlobalVariable(self.llval) {
                    if llvm::LLVMIsGlobalConstant(global) == llvm::True {
                        const_llval = llvm::LLVMGetInitializer(global);
                    }
                }
            }
            let llval = const_llval.unwrap_or_else(|| {
                let load = bx.load(self.llval, self.align);
                if let layout::Abi::Scalar(ref scalar) = self.layout.abi {
                    scalar_load_metadata(load, scalar);
                }
                load
            });
            OperandValue::Immediate(base::to_immediate(bx, llval, self.layout))
        } else if let layout::Abi::ScalarPair(ref a, ref b) = self.layout.abi {
            let load = |i, scalar: &layout::Scalar| {
                let llptr = bx.struct_gep(self.llval, i as u64);
                let load = bx.load(llptr, self.align);
                scalar_load_metadata(load, scalar);
                if scalar.is_bool() {
                    bx.trunc(load, Type::i1(bx.cx()))
                } else {
                    load
                }
            };
            OperandValue::Pair(load(0, a), load(1, b))
        } else {
            OperandValue::Ref(self.llval, None, self.align)
        };

        OperandRef { val, layout: self.layout }
    }

    /// Access a field, at a point when the value's case is known.
    pub fn project_field(
        self,
        bx: &Builder<'a, 'll, 'tcx>,
        ix: usize,
    ) -> PlaceRef<'tcx, &'ll Value> {
        let cx = bx.cx();
        let field = self.layout.field(cx, ix);
        let offset = self.layout.fields.offset(ix);
        let effective_field_align = self.align.restrict_for_offset(offset);

        let simple = || {
            // Unions and newtypes only use an offset of 0.
            let llval = if offset.bytes() == 0 {
                self.llval
            } else if let layout::Abi::ScalarPair(ref a, ref b) = self.layout.abi {
                // Offsets have to match either first or second field.
                assert_eq!(offset, a.value.size(cx).abi_align(b.value.align(cx)));
                bx.struct_gep(self.llval, 1)
            } else {
                bx.struct_gep(self.llval, self.layout.llvm_field_index(ix))
            };
            PlaceRef {
                // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
                llval: bx.pointercast(llval, field.llvm_type(cx).ptr_to()),
                llextra: if cx.type_has_metadata(field.ty) {
                    self.llextra
                } else {
                    None
                },
                layout: field,
                align: effective_field_align,
            }
        };

        // Simple cases, which don't need DST adjustment:
        //   * no metadata available - just log the case
        //   * known alignment - sized types, [T], str or a foreign type
        //   * packed struct - there is no alignment padding
        match field.ty.sty {
            _ if self.llextra.is_none() => {
                debug!("unsized field `{}` of `{:?}` has no metadata for adjustment",
                       ix, self.llval);
                return simple();
            }
            _ if !field.is_unsized() => return simple(),
            ty::Slice(..) | ty::Str | ty::Foreign(..) => return simple(),
            ty::Adt(def, _) => {
                if def.repr.packed() {
                    // FIXME(eddyb) generalize the adjustment when we
                    // start supporting packing to larger alignments.
                    assert_eq!(self.layout.align.abi(), 1);
                    return simple();
                }
            }
            _ => {}
        }

        // We need to get the pointer manually now.
        // We do this by casting to a *i8, then offsetting it by the appropriate amount.
        // We do this instead of, say, simply adjusting the pointer from the result of a GEP
        // because the field may have an arbitrary alignment in the LLVM representation
        // anyway.
        //
        // To demonstrate:
        //   struct Foo<T: ?Sized> {
        //      x: u16,
        //      y: T
        //   }
        //
        // The type Foo<Foo<Trait>> is represented in LLVM as { u16, { u16, u8 }}, meaning that
        // the `y` field has 16-bit alignment.

        let meta = self.llextra;

        let unaligned_offset = CodegenCx::c_usize(cx, offset.bytes());

        // Get the alignment of the field
        let (_, unsized_align) = glue::size_and_align_of_dst(bx, field.ty, meta);

        // Bump the unaligned offset up to the appropriate alignment using the
        // following expression:
        //
        //   (unaligned offset + (align - 1)) & -align

        // Calculate offset
        let align_sub_1 = bx.sub(unsized_align, CodegenCx::c_usize(cx, 1u64));
        let offset = bx.and(bx.add(unaligned_offset, align_sub_1),
                            bx.neg(unsized_align));

        debug!("struct_field_ptr: DST field offset: {:?}", offset);

        // Cast and adjust pointer
        let byte_ptr = bx.pointercast(self.llval, Type::i8p(cx));
        let byte_ptr = bx.gep(byte_ptr, &[offset]);

        // Finally, cast back to the type expected
        let ll_fty = field.llvm_type(cx);
        debug!("struct_field_ptr: Field type is {:?}", ll_fty);

        PlaceRef {
            llval: bx.pointercast(byte_ptr, ll_fty.ptr_to()),
            llextra: self.llextra,
            layout: field,
            align: effective_field_align,
        }
    }

    /// Obtain the actual discriminant of a value.
    pub fn codegen_get_discr(
        self,
        bx: &Builder<'a, 'll, 'tcx>,
        cast_to: Ty<'tcx>
    ) -> &'ll Value {
        let cast_to = bx.cx().layout_of(cast_to).immediate_llvm_type(bx.cx());
        if self.layout.abi.is_uninhabited() {
            return CodegenCx::c_undef(cast_to);
        }
        match self.layout.variants {
            layout::Variants::Single { index } => {
                let discr_val = self.layout.ty.ty_adt_def().map_or(
                    index.as_u32() as u128,
                    |def| def.discriminant_for_variant(bx.cx().tcx, index).val);
                return CodegenCx::c_uint_big(cast_to, discr_val);
            }
            layout::Variants::Tagged { .. } |
            layout::Variants::NicheFilling { .. } => {},
        }

        let discr = self.project_field(bx, 0);
        let lldiscr = discr.load(bx).immediate();
        match self.layout.variants {
            layout::Variants::Single { .. } => bug!(),
            layout::Variants::Tagged { ref tag, .. } => {
                let signed = match tag.value {
                    // We use `i1` for bytes that are always `0` or `1`,
                    // e.g. `#[repr(i8)] enum E { A, B }`, but we can't
                    // let LLVM interpret the `i1` as signed, because
                    // then `i1 1` (i.e. E::B) is effectively `i8 -1`.
                    layout::Int(_, signed) => !tag.is_bool() && signed,
                    _ => false
                };
                bx.intcast(lldiscr, cast_to, signed)
            }
            layout::Variants::NicheFilling {
                dataful_variant,
                ref niche_variants,
                niche_start,
                ..
            } => {
                let niche_llty = discr.layout.immediate_llvm_type(bx.cx());
                if niche_variants.start() == niche_variants.end() {
                    // FIXME(eddyb) Check the actual primitive type here.
                    let niche_llval = if niche_start == 0 {
                        // HACK(eddyb) Using `c_null` as it works on all types.
                        CodegenCx::c_null(niche_llty)
                    } else {
                        CodegenCx::c_uint_big(niche_llty, niche_start)
                    };
                    bx.select(bx.icmp(IntPredicate::IntEQ, lldiscr, niche_llval),
                        CodegenCx::c_uint(cast_to, niche_variants.start().as_u32() as u64),
                        CodegenCx::c_uint(cast_to, dataful_variant.as_u32() as u64))
                } else {
                    // Rebase from niche values to discriminant values.
                    let delta = niche_start.wrapping_sub(niche_variants.start().as_u32() as u128);
                    let lldiscr = bx.sub(lldiscr, CodegenCx::c_uint_big(niche_llty, delta));
                    let lldiscr_max =
                        CodegenCx::c_uint(niche_llty, niche_variants.end().as_u32() as u64);
                    bx.select(bx.icmp(IntPredicate::IntULE, lldiscr, lldiscr_max),
                        bx.intcast(lldiscr, cast_to, false),
                        CodegenCx::c_uint(cast_to, dataful_variant.as_u32() as u64))
                }
            }
        }
    }

    /// Set the discriminant for a new value of the given case of the given
    /// representation.
    pub fn codegen_set_discr(&self, bx: &Builder<'a, 'll, 'tcx>, variant_index: VariantIdx) {
        if self.layout.for_variant(bx.cx(), variant_index).abi.is_uninhabited() {
            return;
        }
        match self.layout.variants {
            layout::Variants::Single { index } => {
                assert_eq!(index, variant_index);
            }
            layout::Variants::Tagged { .. } => {
                let ptr = self.project_field(bx, 0);
                let to = self.layout.ty.ty_adt_def().unwrap()
                    .discriminant_for_variant(bx.tcx(), variant_index)
                    .val;
                bx.store(
                    CodegenCx::c_uint_big(ptr.layout.llvm_type(bx.cx()), to),
                    ptr.llval,
                    ptr.align);
            }
            layout::Variants::NicheFilling {
                dataful_variant,
                ref niche_variants,
                niche_start,
                ..
            } => {
                if variant_index != dataful_variant {
                    if bx.sess().target.target.arch == "arm" ||
                       bx.sess().target.target.arch == "aarch64" {
                        // Issue #34427: As workaround for LLVM bug on ARM,
                        // use memset of 0 before assigning niche value.
                        let llptr = bx.pointercast(self.llval, Type::i8(bx.cx()).ptr_to());
                        let fill_byte = CodegenCx::c_u8(bx.cx(), 0);
                        let (size, align) = self.layout.size_and_align();
                        let size = CodegenCx::c_usize(bx.cx(), size.bytes());
                        let align = CodegenCx::c_u32(bx.cx(), align.abi() as u32);
                        base::call_memset(bx, llptr, fill_byte, size, align, false);
                    }

                    let niche = self.project_field(bx, 0);
                    let niche_llty = niche.layout.immediate_llvm_type(bx.cx());
                    let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
                    let niche_value = (niche_value as u128)
                        .wrapping_add(niche_start);
                    // FIXME(eddyb) Check the actual primitive type here.
                    let niche_llval = if niche_value == 0 {
                        // HACK(eddyb) Using `c_null` as it works on all types.
                        CodegenCx::c_null(niche_llty)
                    } else {
                        CodegenCx::c_uint_big(niche_llty, niche_value)
                    };
                    OperandValue::Immediate(niche_llval).store(bx, niche);
                }
            }
        }
    }

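    /// Projects to the array or slice element at `llindex`.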
    pub fn project_index(&self, bx: &Builder<'a, 'll, 'tcx>, llindex: &'ll Value)
                         -> PlaceRef<'tcx, &'ll Value> {
        PlaceRef {
            llval: bx.inbounds_gep(self.llval, &[CodegenCx::c_usize(bx.cx(), 0), llindex]),
            llextra: None,
            layout: self.layout.field(bx.cx(), 0),
            align: self.align
        }
    }

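    /// Returns this place viewed as the given enum variant, with the
    /// pointer cast to the variant's struct type.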
    pub fn project_downcast(&self, bx: &Builder<'a, 'll, 'tcx>, variant_index: VariantIdx)
                            -> PlaceRef<'tcx, &'ll Value> {
        let mut downcast = *self;
        downcast.layout = self.layout.for_variant(bx.cx(), variant_index);

        // Cast to the appropriate variant struct type.
        let variant_ty = downcast.layout.llvm_type(bx.cx());
        downcast.llval = bx.pointercast(downcast.llval, variant_ty.ptr_to());

        downcast
    }

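    /// Emits a lifetime-start marker for this place's storage.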
    pub fn storage_live(&self, bx: &Builder<'a, 'll, 'tcx>) {
        bx.lifetime_start(self.llval, self.layout.size);
    }

    pub fn storage_dead(&self, bx: &Builder<'a, 'll, 'tcx>) {
        bx.lifetime_end(self.llval, self.layout.size);
    }
}

impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
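    /// Codegens a MIR place to a `PlaceRef`, handling locals, promoteds,
    /// statics, and projections.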
    pub fn codegen_place(&mut self,
                         bx: &Builder<'a, 'll, 'tcx>,
                         place: &mir::Place<'tcx>)
                         -> PlaceRef<'tcx, &'ll Value> {
        debug!("codegen_place(place={:?})", place);

        let cx = bx.cx();
        let tcx = cx.tcx;

        if let mir::Place::Local(index) = *place {
            match self.locals[index] {
                LocalRef::Place(place) => {
                    return place;
                }
                LocalRef::UnsizedPlace(place) => {
                    return place.load(bx).deref(&cx);
                }
                LocalRef::Operand(..) => {
                    bug!("using operand local {:?} as place", place);
                }
            }
        }

        let result = match *place {
            mir::Place::Local(_) => bug!(), // handled above
            mir::Place::Promoted(box (index, ty)) => {
                let param_env = ty::ParamEnv::reveal_all();
                let cid = mir::interpret::GlobalId {
                    instance: self.instance,
                    promoted: Some(index),
                };
                let layout = cx.layout_of(self.monomorphize(&ty));
                match bx.tcx().const_eval(param_env.and(cid)) {
                    Ok(val) => match val.val {
                        mir::interpret::ConstValue::ByRef(_, alloc, offset) => {
                            PlaceRef::from_const_alloc(bx, layout, alloc, offset)
                        }
                        _ => bug!("promoteds should have an allocation: {:?}", val),
                    },
                    Err(_) => {
                        // This is unreachable as long as runtime and
                        // compile-time agree on values. With floats that
                        // won't always be true, so we generate an abort.
                        let fnname = bx.cx().get_intrinsic(&("llvm.trap"));
                        bx.call(fnname, &[], None);
                        let llval = CodegenCx::c_undef(layout.llvm_type(bx.cx()).ptr_to());
                        PlaceRef::new_sized(llval, layout, layout.align)
                    }
                }
            }
            mir::Place::Static(box mir::Static { def_id, ty }) => {
                let layout = cx.layout_of(self.monomorphize(&ty));
                PlaceRef::new_sized(consts::get_static(cx, def_id), layout, layout.align)
            },
            mir::Place::Projection(box mir::Projection {
                ref base,
                elem: mir::ProjectionElem::Deref
            }) => {
                // Load the pointer from its location.
                self.codegen_consume(bx, base).deref(bx.cx())
            }
            mir::Place::Projection(ref projection) => {
                let cg_base = self.codegen_place(bx, &projection.base);

                match projection.elem {
                    mir::ProjectionElem::Deref => bug!(),
                    mir::ProjectionElem::Field(ref field, _) => {
                        cg_base.project_field(bx, field.index())
                    }
                    mir::ProjectionElem::Index(index) => {
                        let index = &mir::Operand::Copy(mir::Place::Local(index));
                        let index = self.codegen_operand(bx, index);
                        let llindex = index.immediate();
                        cg_base.project_index(bx, llindex)
                    }
                    mir::ProjectionElem::ConstantIndex { offset,
                                                         from_end: false,
                                                         min_length: _ } => {
                        let lloffset = CodegenCx::c_usize(bx.cx(), offset as u64);
                        cg_base.project_index(bx, lloffset)
                    }
                    mir::ProjectionElem::ConstantIndex { offset,
                                                         from_end: true,
                                                         min_length: _ } => {
                        let lloffset = CodegenCx::c_usize(bx.cx(), offset as u64);
                        let lllen = cg_base.len(bx.cx());
                        let llindex = bx.sub(lllen, lloffset);
                        cg_base.project_index(bx, llindex)
                    }
                    mir::ProjectionElem::Subslice { from, to } => {
                        let mut subslice = cg_base.project_index(bx,
                            CodegenCx::c_usize(bx.cx(), from as u64));
                        let projected_ty = PlaceTy::Ty { ty: cg_base.layout.ty }
                            .projection_ty(tcx, &projection.elem)
                            .to_ty(bx.tcx());
                        subslice.layout = bx.cx().layout_of(self.monomorphize(&projected_ty));

                        if subslice.layout.is_unsized() {
                            subslice.llextra = Some(bx.sub(cg_base.llextra.unwrap(),
                                CodegenCx::c_usize(bx.cx(), (from as u64) + (to as u64))));
                        }

                        // Cast the place pointer type to the new
                        // array or slice type (*[%_; new_len]).
                        subslice.llval = bx.pointercast(subslice.llval,
                            subslice.layout.llvm_type(bx.cx()).ptr_to());

                        subslice
                    }
                    mir::ProjectionElem::Downcast(_, v) => {
                        cg_base.project_downcast(bx, v)
                    }
                }
            }
        };
        debug!("codegen_place(place={:?}) => {:?}", place, result);
        result
    }

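    /// Returns the monomorphized type of the given MIR place.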
    pub fn monomorphized_place_ty(&self, place: &mir::Place<'tcx>) -> Ty<'tcx> {
        let tcx = self.cx.tcx;
        let place_ty = place.ty(self.mir, tcx);
        self.monomorphize(&place_ty.to_ty(tcx))
    }
}