// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use llvm::{self, LLVMConstInBoundsGEP};
use rustc::ty::{self, Ty};
use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, Size, VariantIdx};
use rustc::mir;
use rustc::mir::tcx::PlaceTy;
use base;
use builder::Builder;
use common::{CodegenCx, IntPredicate};
use consts;
use type_of::LayoutLlvmExt;
use value::Value;
use glue;
use mir::constant::const_alloc_to_llvm;

use interfaces::{BuilderMethods, CommonMethods, TypeMethods};

use super::{FunctionCx, LocalRef};
use super::operand::{OperandRef, OperandValue};

#[derive(Copy, Clone, Debug)]
pub struct PlaceRef<'tcx, V> {
    /// Pointer to the contents of the place.
    pub llval: V,

    /// This place's extra data if it is unsized, or `None` if sized.
    pub llextra: Option<V>,

    /// Monomorphized type of this place, including variant information.
    pub layout: TyLayout<'tcx>,

    /// What alignment we know for this place.
    pub align: Align,
}

impl PlaceRef<'tcx, &'ll Value> {
    pub fn new_sized(
        llval: &'ll Value,
        layout: TyLayout<'tcx>,
        align: Align,
    ) -> PlaceRef<'tcx, &'ll Value> {
        assert!(!layout.is_unsized());
        PlaceRef {
            llval,
            llextra: None,
            layout,
            align
        }
    }

    pub fn from_const_alloc(
        bx: &Builder<'a, 'll, 'tcx>,
        layout: TyLayout<'tcx>,
        alloc: &mir::interpret::Allocation,
        offset: Size,
    ) -> PlaceRef<'tcx, &'ll Value> {
        let init = const_alloc_to_llvm(bx.cx(), alloc);
        let base_addr = consts::addr_of(bx.cx(), init, layout.align, None);

        let llval = unsafe { LLVMConstInBoundsGEP(
            consts::bitcast(base_addr, bx.cx().i8p()),
            &bx.cx().const_usize(offset.bytes()),
            1,
        )};
        let llval = consts::bitcast(llval, bx.cx().ptr_to(layout.llvm_type(bx.cx())));
        PlaceRef::new_sized(llval, layout, alloc.align)
    }

    pub fn alloca(bx: &Builder<'a, 'll, 'tcx>, layout: TyLayout<'tcx>, name: &str)
                  -> PlaceRef<'tcx, &'ll Value> {
        debug!("alloca({:?}: {:?})", name, layout);
        assert!(!layout.is_unsized(), "tried to statically allocate unsized place");
        let tmp = bx.alloca(layout.llvm_type(bx.cx()), name, layout.align);
        Self::new_sized(tmp, layout, layout.align)
    }

    /// Returns a place for an indirect reference to an unsized place.
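    ///
    /// For example, for a local of type `str` this allocates a sized
    /// `*mut str` slot; the fat pointer stored in that slot then refers to
    /// the actual unsized data.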
    pub fn alloca_unsized_indirect(
        bx: &Builder<'a, 'll, 'tcx>,
        layout: TyLayout<'tcx>,
        name: &str,
    ) -> PlaceRef<'tcx, &'ll Value> {
        debug!("alloca_unsized_indirect({:?}: {:?})", name, layout);
        assert!(layout.is_unsized(), "tried to allocate indirect place for sized values");
        let ptr_ty = bx.cx().tcx.mk_mut_ptr(layout.ty);
        let ptr_layout = bx.cx().layout_of(ptr_ty);
        Self::alloca(bx, ptr_layout, name)
    }

    pub fn len(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Value {
        if let layout::FieldPlacement::Array { count, .. } = self.layout.fields {
            if self.layout.is_unsized() {
                assert_eq!(count, 0);
                self.llextra.unwrap()
            } else {
                cx.const_usize(count)
            }
        } else {
            bug!("unexpected layout `{:#?}` in PlaceRef::len", self.layout)
        }
    }

    pub fn load(&self, bx: &Builder<'a, 'll, 'tcx>) -> OperandRef<'tcx, &'ll Value> {
        debug!("PlaceRef::load: {:?}", self);

        assert_eq!(self.llextra.is_some(), self.layout.is_unsized());

        if self.layout.is_zst() {
            return OperandRef::new_zst(bx.cx(), self.layout);
        }

        let scalar_load_metadata = |load, scalar: &layout::Scalar| {
            let vr = scalar.valid_range.clone();
            match scalar.value {
                layout::Int(..) => {
                    let range = scalar.valid_range_exclusive(bx.cx());
                    if range.start != range.end {
                        bx.range_metadata(load, range);
                    }
                }
                layout::Pointer if vr.start() < vr.end() && !vr.contains(&0) => {
                    bx.nonnull_metadata(load);
                }
                _ => {}
            }
        };

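        // E.g. a `bool` is loaded as an `i8` whose valid range is 0..=1, so
        // such a load gets `!range` metadata of [0, 2), while a `&T` scalar
        // can never be 0 and gets `!nonnull` metadata instead.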
        let val = if let Some(llextra) = self.llextra {
            OperandValue::Ref(self.llval, Some(llextra), self.align)
        } else if self.layout.is_llvm_immediate() {
            let mut const_llval = None;
            unsafe {
                if let Some(global) = llvm::LLVMIsAGlobalVariable(self.llval) {
                    if llvm::LLVMIsGlobalConstant(global) == llvm::True {
                        const_llval = llvm::LLVMGetInitializer(global);
                    }
                }
            }
            let llval = const_llval.unwrap_or_else(|| {
                let load = bx.load(self.llval, self.align);
                if let layout::Abi::Scalar(ref scalar) = self.layout.abi {
                    scalar_load_metadata(load, scalar);
                }
                load
            });
            OperandValue::Immediate(base::to_immediate(bx, llval, self.layout))
        } else if let layout::Abi::ScalarPair(ref a, ref b) = self.layout.abi {
            let load = |i, scalar: &layout::Scalar| {
                let llptr = bx.struct_gep(self.llval, i as u64);
                let load = bx.load(llptr, self.align);
                scalar_load_metadata(load, scalar);
                if scalar.is_bool() {
                    bx.trunc(load, bx.cx().i1())
                } else {
                    load
                }
            };
            OperandValue::Pair(load(0, a), load(1, b))
        } else {
            OperandValue::Ref(self.llval, None, self.align)
        };

        OperandRef { val, layout: self.layout }
    }

    /// Access a field at a point when the value's case is known.
    pub fn project_field(
        self,
        bx: &Builder<'a, 'll, 'tcx>,
        ix: usize,
    ) -> PlaceRef<'tcx, &'ll Value> {
        let cx = bx.cx();
        let field = self.layout.field(cx, ix);
        let offset = self.layout.fields.offset(ix);
        let effective_field_align = self.align.restrict_for_offset(offset);

        let simple = || {
            // Unions and newtypes only use an offset of 0.
            let llval = if offset.bytes() == 0 {
                self.llval
            } else if let layout::Abi::ScalarPair(ref a, ref b) = self.layout.abi {
                // Offsets have to match either the first or the second field.
                assert_eq!(offset, a.value.size(cx).abi_align(b.value.align(cx)));
                bx.struct_gep(self.llval, 1)
            } else {
                bx.struct_gep(self.llval, self.layout.llvm_field_index(ix))
            };
            PlaceRef {
                // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
                llval: bx.pointercast(llval, cx.ptr_to(field.llvm_type(cx))),
                llextra: if cx.type_has_metadata(field.ty) {
                    self.llextra
                } else {
                    None
                },
                layout: field,
                align: effective_field_align,
            }
        };

        // Simple cases, which don't need DST adjustment:
        //   * no metadata available - just log the case
        //   * known alignment - sized types, [T], str or a foreign type
        //   * packed struct - there is no alignment padding
        match field.ty.sty {
            _ if self.llextra.is_none() => {
                debug!("unsized field `{}` of `{:?}` has no metadata for adjustment",
                    ix, self.llval);
                return simple();
            }
            _ if !field.is_unsized() => return simple(),
            ty::Slice(..) | ty::Str | ty::Foreign(..) => return simple(),
            ty::Adt(def, _) => {
                if def.repr.packed() {
                    // FIXME(eddyb) generalize the adjustment when we
                    // start supporting packing to larger alignments.
                    assert_eq!(self.layout.align.abi(), 1);
                    return simple();
                }
            }
            _ => {}
        }

        // We need to get the pointer manually now.
        // We do this by casting to a *i8, then offsetting it by the appropriate amount.
        // We do this instead of, say, simply adjusting the pointer from the result of a GEP
        // because the field may have an arbitrary alignment in the LLVM representation
        // anyway.
        //
        // To demonstrate:
        //   struct Foo<T: ?Sized> {
        //      x: u16,
        //      y: T
        //   }
        //
        // The type Foo<Foo<Trait>> is represented in LLVM as { u16, { u16, u8 }}, meaning that
        // the `y` field has 16-bit alignment.
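        //
        // If the concrete type behind `Trait` needs, say, 4-byte alignment,
        // the run-time offset of `y` must be rounded up to 4, which the
        // static LLVM struct type above cannot express; hence the manual
        // pointer arithmetic below.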

        let meta = self.llextra;

        let unaligned_offset = cx.const_usize(offset.bytes());

        // Get the alignment of the field
        let (_, unsized_align) = glue::size_and_align_of_dst(bx, field.ty, meta);

        // Bump the unaligned offset up to the appropriate alignment using the
        // following expression:
        //
        //   (unaligned offset + (align - 1)) & -align
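        //
        // For example (illustrative values only): with an unaligned offset
        // of 6 and a dynamic alignment of 8, this computes
        // (6 + 7) & -8 = 13 & !7 = 8, the next 8-byte-aligned offset.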

        // Calculate offset
        let align_sub_1 = bx.sub(unsized_align, cx.const_usize(1u64));
        let offset = bx.and(bx.add(unaligned_offset, align_sub_1),
                            bx.neg(unsized_align));

        debug!("struct_field_ptr: DST field offset: {:?}", offset);

        // Cast and adjust pointer
        let byte_ptr = bx.pointercast(self.llval, cx.i8p());
        let byte_ptr = bx.gep(byte_ptr, &[offset]);

        // Finally, cast back to the type expected
        let ll_fty = field.llvm_type(cx);
        debug!("struct_field_ptr: Field type is {:?}", ll_fty);

        PlaceRef {
            llval: bx.pointercast(byte_ptr, bx.cx().ptr_to(ll_fty)),
            llextra: self.llextra,
            layout: field,
            align: effective_field_align,
        }
    }

    /// Obtain the actual discriminant of a value.
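    ///
    /// For a niche-optimized enum such as `Option<&T>`, for instance, this
    /// loads the niche field and compares it against the niche value (see
    /// the `NicheFilling` arm below); no separate tag exists in memory.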
    pub fn codegen_get_discr(
        self,
        bx: &Builder<'a, 'll, 'tcx>,
        cast_to: Ty<'tcx>
    ) -> &'ll Value {
        let cast_to = bx.cx().layout_of(cast_to).immediate_llvm_type(bx.cx());
        if self.layout.abi.is_uninhabited() {
            return bx.cx().const_undef(cast_to);
        }
        match self.layout.variants {
            layout::Variants::Single { index } => {
                let discr_val = self.layout.ty.ty_adt_def().map_or(
                    index.as_u32() as u128,
                    |def| def.discriminant_for_variant(bx.cx().tcx, index).val);
                return bx.cx().const_uint_big(cast_to, discr_val);
            }
            layout::Variants::Tagged { .. } |
            layout::Variants::NicheFilling { .. } => {},
        }

        let discr = self.project_field(bx, 0);
        let lldiscr = discr.load(bx).immediate();
        match self.layout.variants {
            layout::Variants::Single { .. } => bug!(),
            layout::Variants::Tagged { ref tag, .. } => {
                let signed = match tag.value {
                    // We use `i1` for bytes that are always `0` or `1`,
                    // e.g. `#[repr(i8)] enum E { A, B }`, but we can't
                    // let LLVM interpret the `i1` as signed, because
                    // then `i1 1` (i.e. E::B) is effectively `i8 -1`.
                    layout::Int(_, signed) => !tag.is_bool() && signed,
                    _ => false
                };
                bx.intcast(lldiscr, cast_to, signed)
            }
            layout::Variants::NicheFilling {
                dataful_variant,
                ref niche_variants,
                niche_start,
                ..
            } => {
                let niche_llty = discr.layout.immediate_llvm_type(bx.cx());
                if niche_variants.start() == niche_variants.end() {
                    // FIXME(eddyb) Check the actual primitive type here.
                    let niche_llval = if niche_start == 0 {
                        // HACK(eddyb) Using `c_null` as it works on all types.
                        bx.cx().const_null(niche_llty)
                    } else {
                        bx.cx().const_uint_big(niche_llty, niche_start)
                    };
                    bx.select(bx.icmp(IntPredicate::IntEQ, lldiscr, niche_llval),
                        bx.cx().const_uint(cast_to, niche_variants.start().as_u32() as u64),
                        bx.cx().const_uint(cast_to, dataful_variant.as_u32() as u64))
                } else {
                    // Rebase from niche values to discriminant values.
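                    // For example (hypothetical numbers): with
                    // `niche_start = 256` and `niche_variants = 1..=3`,
                    // `delta` is 255, so a loaded niche value of 257 rebases
                    // to discriminant 2; any larger rebased value selects
                    // `dataful_variant` instead.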
                    let delta = niche_start.wrapping_sub(niche_variants.start().as_u32() as u128);
                    let lldiscr = bx.sub(lldiscr, bx.cx().const_uint_big(niche_llty, delta));
                    let lldiscr_max =
                        bx.cx().const_uint(niche_llty, niche_variants.end().as_u32() as u64);
                    bx.select(bx.icmp(IntPredicate::IntULE, lldiscr, lldiscr_max),
                        bx.intcast(lldiscr, cast_to, false),
                        bx.cx().const_uint(cast_to, dataful_variant.as_u32() as u64))
                }
            }
        }
    }

    /// Set the discriminant for a new value of the given case of the given
    /// representation.
    pub fn codegen_set_discr(&self, bx: &Builder<'a, 'll, 'tcx>, variant_index: VariantIdx) {
        if self.layout.for_variant(bx.cx(), variant_index).abi.is_uninhabited() {
            return;
        }
        match self.layout.variants {
            layout::Variants::Single { index } => {
                assert_eq!(index, variant_index);
            }
            layout::Variants::Tagged { .. } => {
                let ptr = self.project_field(bx, 0);
                let to = self.layout.ty.ty_adt_def().unwrap()
                    .discriminant_for_variant(bx.tcx(), variant_index)
                    .val;
                bx.store(
                    bx.cx().const_uint_big(ptr.layout.llvm_type(bx.cx()), to),
                    ptr.llval,
                    ptr.align);
            }
            layout::Variants::NicheFilling {
                dataful_variant,
                ref niche_variants,
                niche_start,
                ..
            } => {
                if variant_index != dataful_variant {
                    if bx.sess().target.target.arch == "arm" ||
                       bx.sess().target.target.arch == "aarch64" {
                        // Issue #34427: As workaround for LLVM bug on ARM,
                        // use memset of 0 before assigning niche value.
                        let llptr = bx.pointercast(self.llval, bx.cx().ptr_to(bx.cx().i8()));
                        let fill_byte = bx.cx().const_u8(0);
                        let (size, align) = self.layout.size_and_align();
                        let size = bx.cx().const_usize(size.bytes());
                        let align = bx.cx().const_u32(align.abi() as u32);
                        base::call_memset(bx, llptr, fill_byte, size, align, false);
                    }

                    let niche = self.project_field(bx, 0);
                    let niche_llty = niche.layout.immediate_llvm_type(bx.cx());
                    let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
                    let niche_value = (niche_value as u128)
                        .wrapping_add(niche_start);
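                    // This is the inverse of the rebasing in
                    // `codegen_get_discr`: with the same hypothetical
                    // `niche_start = 256` and `niche_variants = 1..=3`,
                    // setting variant 2 stores the niche value
                    // (2 - 1) + 256 = 257.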
                    // FIXME(eddyb) Check the actual primitive type here.
                    let niche_llval = if niche_value == 0 {
                        // HACK(eddyb) Using `c_null` as it works on all types.
                        bx.cx().const_null(niche_llty)
                    } else {
                        bx.cx().const_uint_big(niche_llty, niche_value)
                    };
                    OperandValue::Immediate(niche_llval).store(bx, niche);
                }
            }
        }
    }

    pub fn project_index(&self, bx: &Builder<'a, 'll, 'tcx>, llindex: &'ll Value)
                         -> PlaceRef<'tcx, &'ll Value> {
        PlaceRef {
            llval: bx.inbounds_gep(self.llval, &[bx.cx().const_usize(0), llindex]),
            llextra: None,
            layout: self.layout.field(bx.cx(), 0),
            align: self.align
        }
    }

    pub fn project_downcast(&self, bx: &Builder<'a, 'll, 'tcx>, variant_index: VariantIdx)
                            -> PlaceRef<'tcx, &'ll Value> {
        let mut downcast = *self;
        downcast.layout = self.layout.for_variant(bx.cx(), variant_index);

        // Cast to the appropriate variant struct type.
        let variant_ty = downcast.layout.llvm_type(bx.cx());
        downcast.llval = bx.pointercast(downcast.llval, bx.cx().ptr_to(variant_ty));

        downcast
    }

    pub fn storage_live(&self, bx: &Builder<'a, 'll, 'tcx>) {
        bx.lifetime_start(self.llval, self.layout.size);
    }

    pub fn storage_dead(&self, bx: &Builder<'a, 'll, 'tcx>) {
        bx.lifetime_end(self.llval, self.layout.size);
    }
}

impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
    pub fn codegen_place(&mut self,
                         bx: &Builder<'a, 'll, 'tcx>,
                         place: &mir::Place<'tcx>)
                         -> PlaceRef<'tcx, &'ll Value> {
        debug!("codegen_place(place={:?})", place);

        let cx = bx.cx();
        let tcx = cx.tcx;

        if let mir::Place::Local(index) = *place {
            match self.locals[index] {
                LocalRef::Place(place) => {
                    return place;
                }
                LocalRef::UnsizedPlace(place) => {
                    return place.load(bx).deref(&cx);
                }
                LocalRef::Operand(..) => {
                    bug!("using operand local {:?} as place", place);
                }
            }
        }

        let result = match *place {
            mir::Place::Local(_) => bug!(), // handled above
            mir::Place::Promoted(box (index, ty)) => {
                let param_env = ty::ParamEnv::reveal_all();
                let cid = mir::interpret::GlobalId {
                    instance: self.instance,
                    promoted: Some(index),
                };
                let layout = cx.layout_of(self.monomorphize(&ty));
                match bx.tcx().const_eval(param_env.and(cid)) {
                    Ok(val) => match val.val {
                        mir::interpret::ConstValue::ByRef(_, alloc, offset) => {
                            PlaceRef::from_const_alloc(bx, layout, alloc, offset)
                        }
                        _ => bug!("promoteds should have an allocation: {:?}", val),
                    },
                    Err(_) => {
                        // This is unreachable as long as runtime and
                        // compile-time agree on values. With floats that
                        // won't always be true, so we generate an abort.
                        let fnname = bx.cx().get_intrinsic(&("llvm.trap"));
                        bx.call(fnname, &[], None);
                        let llval = bx.cx().const_undef(bx.cx().ptr_to(layout.llvm_type(bx.cx())));
                        PlaceRef::new_sized(llval, layout, layout.align)
                    }
                }
            }
            mir::Place::Static(box mir::Static { def_id, ty }) => {
                let layout = cx.layout_of(self.monomorphize(&ty));
                PlaceRef::new_sized(consts::get_static(cx, def_id), layout, layout.align)
            },
            mir::Place::Projection(box mir::Projection {
                ref base,
                elem: mir::ProjectionElem::Deref
            }) => {
                // Load the pointer from its location.
                self.codegen_consume(bx, base).deref(bx.cx())
            }
            mir::Place::Projection(ref projection) => {
                let cg_base = self.codegen_place(bx, &projection.base);

                match projection.elem {
                    mir::ProjectionElem::Deref => bug!(),
                    mir::ProjectionElem::Field(ref field, _) => {
                        cg_base.project_field(bx, field.index())
                    }
                    mir::ProjectionElem::Index(index) => {
                        let index = &mir::Operand::Copy(mir::Place::Local(index));
                        let index = self.codegen_operand(bx, index);
                        let llindex = index.immediate();
                        cg_base.project_index(bx, llindex)
                    }
                    mir::ProjectionElem::ConstantIndex { offset,
                                                         from_end: false,
                                                         min_length: _ } => {
                        let lloffset = bx.cx().const_usize(offset as u64);
                        cg_base.project_index(bx, lloffset)
                    }
                    mir::ProjectionElem::ConstantIndex { offset,
                                                         from_end: true,
                                                         min_length: _ } => {
                        let lloffset = bx.cx().const_usize(offset as u64);
                        let lllen = cg_base.len(bx.cx());
                        let llindex = bx.sub(lllen, lloffset);
                        cg_base.project_index(bx, llindex)
                    }
                    mir::ProjectionElem::Subslice { from, to } => {
                        let mut subslice = cg_base.project_index(bx,
                            bx.cx().const_usize(from as u64));
                        let projected_ty = PlaceTy::Ty { ty: cg_base.layout.ty }
                            .projection_ty(tcx, &projection.elem)
                            .to_ty(bx.tcx());
                        subslice.layout = bx.cx().layout_of(self.monomorphize(&projected_ty));

                        if subslice.layout.is_unsized() {
                            subslice.llextra = Some(bx.sub(cg_base.llextra.unwrap(),
                                bx.cx().const_usize((from as u64) + (to as u64))));
                        }

                        // Cast the place pointer type to the new
                        // array or slice type (*[%_; new_len]).
                        subslice.llval = bx.pointercast(subslice.llval,
                            bx.cx().ptr_to(subslice.layout.llvm_type(bx.cx())));

                        subslice
                    }
                    mir::ProjectionElem::Downcast(_, v) => {
                        cg_base.project_downcast(bx, v)
                    }
                }
            }
        };
        debug!("codegen_place(place={:?}) => {:?}", place, result);
        result
    }

    pub fn monomorphized_place_ty(&self, place: &mir::Place<'tcx>) -> Ty<'tcx> {
        let tcx = self.cx.tcx;
        let place_ty = place.ty(self.mir, tcx);
        self.monomorphize(&place_ty.to_ty(tcx))
    }
}