// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use llvm::{self, LLVMConstInBoundsGEP};
use rustc::ty::{self, Ty};
use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, Size, VariantIdx};
use rustc::mir;
use rustc::mir::tcx::PlaceTy;
use base;
use builder::Builder;
use common::{CodegenCx, IntPredicate};
use type_of::LayoutLlvmExt;
use value::Value;
use glue;
use mir::constant::const_alloc_to_llvm;

use interfaces::{
    BuilderMethods, ConstMethods, BaseTypeMethods, DerivedTypeMethods, DerivedIntrinsicMethods,
    StaticMethods,
};

use super::{FunctionCx, LocalRef};
use super::operand::{OperandRef, OperandValue};

#[derive(Copy, Clone, Debug)]
pub struct PlaceRef<'tcx, V> {
    /// A pointer to the contents of the place.
    pub llval: V,

    /// This place's extra data if it is unsized, or `None` if it is sized.
    pub llextra: Option<V>,

    /// The monomorphized type of this place, including variant information.
    pub layout: TyLayout<'tcx>,

    /// The alignment we know for this place.
    pub align: Align,
}

impl PlaceRef<'tcx, &'ll Value> {
    pub fn new_sized(
        llval: &'ll Value,
        layout: TyLayout<'tcx>,
        align: Align,
    ) -> PlaceRef<'tcx, &'ll Value> {
        assert!(!layout.is_unsized());
        PlaceRef {
            llval,
            llextra: None,
            layout,
            align
        }
    }

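    /// Creates a place referring to the given constant allocation,
    /// `offset` bytes from its start.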
    pub fn from_const_alloc(
        bx: &Builder<'a, 'll, 'tcx>,
        layout: TyLayout<'tcx>,
        alloc: &mir::interpret::Allocation,
        offset: Size,
    ) -> PlaceRef<'tcx, &'ll Value> {
        let init = const_alloc_to_llvm(bx.cx(), alloc);
        let base_addr = bx.cx().static_addr_of(init, layout.align, None);

        let llval = unsafe { LLVMConstInBoundsGEP(
            bx.cx().static_bitcast(base_addr, bx.cx().type_i8p()),
            &bx.cx().const_usize(offset.bytes()),
            1,
        )};
        let llval = bx.cx().static_bitcast(llval, bx.cx().type_ptr_to(layout.llvm_type(bx.cx())));
        PlaceRef::new_sized(llval, layout, alloc.align)
    }

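    /// Allocates a stack slot for a place of the given (sized) layout.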
    pub fn alloca(bx: &Builder<'a, 'll, 'tcx>, layout: TyLayout<'tcx>, name: &str)
                  -> PlaceRef<'tcx, &'ll Value> {
        debug!("alloca({:?}: {:?})", name, layout);
        assert!(!layout.is_unsized(), "tried to statically allocate unsized place");
        let tmp = bx.alloca(layout.llvm_type(bx.cx()), name, layout.align);
        Self::new_sized(tmp, layout, layout.align)
    }

    /// Returns a place for an indirect reference to an unsized place.
    pub fn alloca_unsized_indirect(
        bx: &Builder<'a, 'll, 'tcx>,
        layout: TyLayout<'tcx>,
        name: &str,
    ) -> PlaceRef<'tcx, &'ll Value> {
        debug!("alloca_unsized_indirect({:?}: {:?})", name, layout);
        assert!(layout.is_unsized(), "tried to allocate indirect place for sized values");
        let ptr_ty = bx.cx().tcx.mk_mut_ptr(layout.ty);
        let ptr_layout = bx.cx().layout_of(ptr_ty);
        Self::alloca(bx, ptr_layout, name)
    }

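    /// Returns the length of this place, which must have array field
    /// placement; for unsized arrays the length is read from `llextra`.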
    pub fn len(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Value {
        if let layout::FieldPlacement::Array { count, .. } = self.layout.fields {
            if self.layout.is_unsized() {
                assert_eq!(count, 0);
                self.llextra.unwrap()
            } else {
                cx.const_usize(count)
            }
        } else {
            bug!("unexpected layout `{:#?}` in PlaceRef::len", self.layout)
        }
    }

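    /// Loads this place into an `OperandRef`, attaching range or nonnull
    /// metadata to scalar loads where the layout's valid range allows it.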
    pub fn load(&self, bx: &Builder<'a, 'll, 'tcx>) -> OperandRef<'tcx, &'ll Value> {
        debug!("PlaceRef::load: {:?}", self);

        assert_eq!(self.llextra.is_some(), self.layout.is_unsized());

        if self.layout.is_zst() {
            return OperandRef::new_zst(bx.cx(), self.layout);
        }

        let scalar_load_metadata = |load, scalar: &layout::Scalar| {
            let vr = scalar.valid_range.clone();
            match scalar.value {
                layout::Int(..) => {
                    let range = scalar.valid_range_exclusive(bx.cx());
                    if range.start != range.end {
                        bx.range_metadata(load, range);
                    }
                }
                layout::Pointer if vr.start() < vr.end() && !vr.contains(&0) => {
                    bx.nonnull_metadata(load);
                }
                _ => {}
            }
        };

        let val = if let Some(llextra) = self.llextra {
            OperandValue::Ref(self.llval, Some(llextra), self.align)
        } else if self.layout.is_llvm_immediate() {
            let mut const_llval = None;
            unsafe {
                if let Some(global) = llvm::LLVMIsAGlobalVariable(self.llval) {
                    if llvm::LLVMIsGlobalConstant(global) == llvm::True {
                        const_llval = llvm::LLVMGetInitializer(global);
                    }
                }
            }
            let llval = const_llval.unwrap_or_else(|| {
                let load = bx.load(self.llval, self.align);
                if let layout::Abi::Scalar(ref scalar) = self.layout.abi {
                    scalar_load_metadata(load, scalar);
                }
                load
            });
            OperandValue::Immediate(base::to_immediate(bx, llval, self.layout))
        } else if let layout::Abi::ScalarPair(ref a, ref b) = self.layout.abi {
            let load = |i, scalar: &layout::Scalar| {
                let llptr = bx.struct_gep(self.llval, i as u64);
                let load = bx.load(llptr, self.align);
                scalar_load_metadata(load, scalar);
                if scalar.is_bool() {
                    bx.trunc(load, bx.cx().type_i1())
                } else {
                    load
                }
            };
            OperandValue::Pair(load(0, a), load(1, b))
        } else {
            OperandValue::Ref(self.llval, None, self.align)
        };

        OperandRef { val, layout: self.layout }
    }

    /// Accesses a field at a point when the value's case is known.
    pub fn project_field(
        self,
        bx: &Builder<'a, 'll, 'tcx>,
        ix: usize,
    ) -> PlaceRef<'tcx, &'ll Value> {
        let cx = bx.cx();
        let field = self.layout.field(cx, ix);
        let offset = self.layout.fields.offset(ix);
        let effective_field_align = self.align.restrict_for_offset(offset);

        let simple = || {
            // Unions and newtypes only use an offset of 0.
            let llval = if offset.bytes() == 0 {
                self.llval
            } else if let layout::Abi::ScalarPair(ref a, ref b) = self.layout.abi {
                // Offsets have to match either the first or the second field.
                assert_eq!(offset, a.value.size(cx).abi_align(b.value.align(cx)));
                bx.struct_gep(self.llval, 1)
            } else {
                bx.struct_gep(self.llval, self.layout.llvm_field_index(ix))
            };
            PlaceRef {
                // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
                llval: bx.pointercast(llval, cx.type_ptr_to(field.llvm_type(cx))),
                llextra: if cx.type_has_metadata(field.ty) {
                    self.llextra
                } else {
                    None
                },
                layout: field,
                align: effective_field_align,
            }
        };

        // Simple cases, which don't need DST adjustment:
        //   * no metadata available - just log the case
        //   * known alignment - sized types, [T], str or a foreign type
        //   * packed struct - there is no alignment padding
        match field.ty.sty {
            _ if self.llextra.is_none() => {
                debug!("unsized field `{}` of `{:?}` has no metadata for adjustment",
                    ix, self.llval);
                return simple();
            }
            _ if !field.is_unsized() => return simple(),
            ty::Slice(..) | ty::Str | ty::Foreign(..) => return simple(),
            ty::Adt(def, _) => {
                if def.repr.packed() {
                    // FIXME(eddyb) generalize the adjustment when we
                    // start supporting packing to larger alignments.
                    assert_eq!(self.layout.align.abi(), 1);
                    return simple();
                }
            }
            _ => {}
        }

        // We need to compute the field pointer manually. We do this by casting
        // to a `*i8` and then offsetting it by the appropriate amount, instead
        // of, say, simply adjusting the pointer from the result of a GEP,
        // because the field may have an arbitrary alignment in the LLVM
        // representation anyway.
        //
        // To demonstrate:
        //   struct Foo<T: ?Sized> {
        //      x: u16,
        //      y: T
        //   }
        //
        // The type Foo<Foo<Trait>> is represented in LLVM as { u16, { u16, u8 }},
        // meaning that the `y` field has 16-bit alignment.

        let meta = self.llextra;

        let unaligned_offset = cx.const_usize(offset.bytes());

        // Get the alignment of the field.
        let (_, unsized_align) = glue::size_and_align_of_dst(bx, field.ty, meta);

        // Bump the unaligned offset up to the appropriate alignment using the
        // following expression:
        //
        //   (unaligned offset + (align - 1)) & -align
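        //
        // For example, with `unaligned offset == 2` and `align == 4`:
        //   (2 + 3) & -4  ==  5 & !3  ==  4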

        // Calculate offset
        let align_sub_1 = bx.sub(unsized_align, cx.const_usize(1u64));
        let offset = bx.and(bx.add(unaligned_offset, align_sub_1),
                            bx.neg(unsized_align));

        debug!("struct_field_ptr: DST field offset: {:?}", offset);

        // Cast and adjust pointer
        let byte_ptr = bx.pointercast(self.llval, cx.type_i8p());
        let byte_ptr = bx.gep(byte_ptr, &[offset]);

        // Finally, cast back to the type expected
        let ll_fty = field.llvm_type(cx);
        debug!("struct_field_ptr: Field type is {:?}", ll_fty);

        PlaceRef {
            llval: bx.pointercast(byte_ptr, bx.cx().type_ptr_to(ll_fty)),
            llextra: self.llextra,
            layout: field,
            align: effective_field_align,
        }
    }

    /// Obtain the actual discriminant of a value.
    pub fn codegen_get_discr(
        self,
        bx: &Builder<'a, 'll, 'tcx>,
        cast_to: Ty<'tcx>
    ) -> &'ll Value {
        let cast_to = bx.cx().layout_of(cast_to).immediate_llvm_type(bx.cx());
        if self.layout.abi.is_uninhabited() {
            return bx.cx().const_undef(cast_to);
        }
        match self.layout.variants {
            layout::Variants::Single { index } => {
                let discr_val = self.layout.ty.ty_adt_def().map_or(
                    index.as_u32() as u128,
                    |def| def.discriminant_for_variant(bx.cx().tcx, index).val);
                return bx.cx().const_uint_big(cast_to, discr_val);
            }
            layout::Variants::Tagged { .. } |
            layout::Variants::NicheFilling { .. } => {},
        }

        let discr = self.project_field(bx, 0);
        let lldiscr = discr.load(bx).immediate();
        match self.layout.variants {
            layout::Variants::Single { .. } => bug!(),
            layout::Variants::Tagged { ref tag, .. } => {
                let signed = match tag.value {
                    // We use `i1` for bytes that are always `0` or `1`,
                    // e.g. `#[repr(i8)] enum E { A, B }`, but we can't
                    // let LLVM interpret the `i1` as signed, because
                    // then `i1 1` (i.e. E::B) is effectively `i8 -1`.
                    layout::Int(_, signed) => !tag.is_bool() && signed,
                    _ => false
                };
                bx.intcast(lldiscr, cast_to, signed)
            }
            layout::Variants::NicheFilling {
                dataful_variant,
                ref niche_variants,
                niche_start,
                ..
            } => {
                let niche_llty = discr.layout.immediate_llvm_type(bx.cx());
                if niche_variants.start() == niche_variants.end() {
                    // FIXME(eddyb) Check the actual primitive type here.
                    let niche_llval = if niche_start == 0 {
                        // HACK(eddyb) Using `c_null` as it works on all types.
                        bx.cx().const_null(niche_llty)
                    } else {
                        bx.cx().const_uint_big(niche_llty, niche_start)
                    };
                    bx.select(bx.icmp(IntPredicate::IntEQ, lldiscr, niche_llval),
                        bx.cx().const_uint(cast_to, niche_variants.start().as_u32() as u64),
                        bx.cx().const_uint(cast_to, dataful_variant.as_u32() as u64))
                } else {
                    // Rebase from niche values to discriminant values.
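                    // For example, with `niche_start == 251` and
                    // `niche_variants == 1..=3`, `delta == 250`: a loaded niche
                    // value of 252 rebases to 2, passes the `IntULE` check
                    // against `lldiscr_max == 3` below, and is used as the
                    // discriminant; values failing the check select the
                    // dataful variant instead.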
                    let delta = niche_start.wrapping_sub(niche_variants.start().as_u32() as u128);
                    let lldiscr = bx.sub(lldiscr, bx.cx().const_uint_big(niche_llty, delta));
                    let lldiscr_max =
                        bx.cx().const_uint(niche_llty, niche_variants.end().as_u32() as u64);
                    bx.select(bx.icmp(IntPredicate::IntULE, lldiscr, lldiscr_max),
                        bx.intcast(lldiscr, cast_to, false),
                        bx.cx().const_uint(cast_to, dataful_variant.as_u32() as u64))
                }
            }
        }
    }

    /// Set the discriminant for a new value of the given case of the given
    /// representation.
    pub fn codegen_set_discr(&self, bx: &Builder<'a, 'll, 'tcx>, variant_index: VariantIdx) {
        if self.layout.for_variant(bx.cx(), variant_index).abi.is_uninhabited() {
            return;
        }
        match self.layout.variants {
            layout::Variants::Single { index } => {
                assert_eq!(index, variant_index);
            }
            layout::Variants::Tagged { .. } => {
                let ptr = self.project_field(bx, 0);
                let to = self.layout.ty.ty_adt_def().unwrap()
                    .discriminant_for_variant(bx.tcx(), variant_index)
                    .val;
                bx.store(
                    bx.cx().const_uint_big(ptr.layout.llvm_type(bx.cx()), to),
                    ptr.llval,
                    ptr.align);
            }
            layout::Variants::NicheFilling {
                dataful_variant,
                ref niche_variants,
                niche_start,
                ..
            } => {
                if variant_index != dataful_variant {
                    if bx.sess().target.target.arch == "arm" ||
                       bx.sess().target.target.arch == "aarch64" {
                        // Issue #34427: As a workaround for an LLVM bug on ARM,
                        // use a memset of 0 before assigning the niche value.
                        let llptr = bx.pointercast(
                            self.llval,
                            bx.cx().type_ptr_to(bx.cx().type_i8())
                        );
                        let fill_byte = bx.cx().const_u8(0);
                        let (size, align) = self.layout.size_and_align();
                        let size = bx.cx().const_usize(size.bytes());
                        let align = bx.cx().const_u32(align.abi() as u32);
                        base::call_memset(bx, llptr, fill_byte, size, align, false);
                    }

                    let niche = self.project_field(bx, 0);
                    let niche_llty = niche.layout.immediate_llvm_type(bx.cx());
                    let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
                    let niche_value = (niche_value as u128)
                        .wrapping_add(niche_start);
                    // FIXME(eddyb) Check the actual primitive type here.
                    let niche_llval = if niche_value == 0 {
                        // HACK(eddyb) Using `c_null` as it works on all types.
                        bx.cx().const_null(niche_llty)
                    } else {
                        bx.cx().const_uint_big(niche_llty, niche_value)
                    };
                    OperandValue::Immediate(niche_llval).store(bx, niche);
                }
            }
        }
    }

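    /// Projects to the element at `llindex` of this array or slice place.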
    pub fn project_index(&self, bx: &Builder<'a, 'll, 'tcx>, llindex: &'ll Value)
                         -> PlaceRef<'tcx, &'ll Value> {
        PlaceRef {
            llval: bx.inbounds_gep(self.llval, &[bx.cx().const_usize(0), llindex]),
            llextra: None,
            layout: self.layout.field(bx.cx(), 0),
            align: self.align
        }
    }

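    /// Downcasts this place to the given enum variant, adjusting the layout
    /// and casting the pointer to the variant's struct type.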
    pub fn project_downcast(&self, bx: &Builder<'a, 'll, 'tcx>, variant_index: VariantIdx)
                            -> PlaceRef<'tcx, &'ll Value> {
        let mut downcast = *self;
        downcast.layout = self.layout.for_variant(bx.cx(), variant_index);

        // Cast to the appropriate variant struct type.
        let variant_ty = downcast.layout.llvm_type(bx.cx());
        downcast.llval = bx.pointercast(downcast.llval, bx.cx().type_ptr_to(variant_ty));

        downcast
    }

    pub fn storage_live(&self, bx: &Builder<'a, 'll, 'tcx>) {
        bx.lifetime_start(self.llval, self.layout.size);
    }

    pub fn storage_dead(&self, bx: &Builder<'a, 'll, 'tcx>) {
        bx.lifetime_end(self.llval, self.layout.size);
    }
}

impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
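    /// Codegens a MIR place into a `PlaceRef`, handling locals, promoted
    /// constants, statics, and projections.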
    pub fn codegen_place(&mut self,
                        bx: &Builder<'a, 'll, 'tcx>,
                        place: &mir::Place<'tcx>)
                        -> PlaceRef<'tcx, &'ll Value> {
        debug!("codegen_place(place={:?})", place);

        let cx = bx.cx();
        let tcx = cx.tcx;

        if let mir::Place::Local(index) = *place {
            match self.locals[index] {
                LocalRef::Place(place) => {
                    return place;
                }
                LocalRef::UnsizedPlace(place) => {
                    return place.load(bx).deref(&cx);
                }
                LocalRef::Operand(..) => {
                    bug!("using operand local {:?} as place", place);
                }
            }
        }

        let result = match *place {
            mir::Place::Local(_) => bug!(), // handled above
            mir::Place::Promoted(box (index, ty)) => {
                let param_env = ty::ParamEnv::reveal_all();
                let cid = mir::interpret::GlobalId {
                    instance: self.instance,
                    promoted: Some(index),
                };
                let layout = cx.layout_of(self.monomorphize(&ty));
                match bx.tcx().const_eval(param_env.and(cid)) {
                    Ok(val) => match val.val {
                        mir::interpret::ConstValue::ByRef(_, alloc, offset) => {
                            PlaceRef::from_const_alloc(bx, layout, alloc, offset)
                        }
                        _ => bug!("promoteds should have an allocation: {:?}", val),
                    },
                    Err(_) => {
                        // This is unreachable as long as runtime and compile-time
                        // agree on values. With floats that won't always be true,
                        // so we generate an abort.
                        let fnname = bx.cx().get_intrinsic(&("llvm.trap"));
                        bx.call(fnname, &[], None);
                        let llval = bx.cx().const_undef(
                            bx.cx().type_ptr_to(layout.llvm_type(bx.cx()))
                        );
                        PlaceRef::new_sized(llval, layout, layout.align)
                    }
                }
            }
            mir::Place::Static(box mir::Static { def_id, ty }) => {
                let layout = cx.layout_of(self.monomorphize(&ty));
                PlaceRef::new_sized(cx.get_static(def_id), layout, layout.align)
            },
            mir::Place::Projection(box mir::Projection {
                ref base,
                elem: mir::ProjectionElem::Deref
            }) => {
                // Load the pointer from its location.
                self.codegen_consume(bx, base).deref(bx.cx())
            }
            mir::Place::Projection(ref projection) => {
                let cg_base = self.codegen_place(bx, &projection.base);

                match projection.elem {
                    mir::ProjectionElem::Deref => bug!(),
                    mir::ProjectionElem::Field(ref field, _) => {
                        cg_base.project_field(bx, field.index())
                    }
                    mir::ProjectionElem::Index(index) => {
                        let index = &mir::Operand::Copy(mir::Place::Local(index));
                        let index = self.codegen_operand(bx, index);
                        let llindex = index.immediate();
                        cg_base.project_index(bx, llindex)
                    }
                    mir::ProjectionElem::ConstantIndex { offset,
                                                         from_end: false,
                                                         min_length: _ } => {
                        let lloffset = bx.cx().const_usize(offset as u64);
                        cg_base.project_index(bx, lloffset)
                    }
                    mir::ProjectionElem::ConstantIndex { offset,
                                                         from_end: true,
                                                         min_length: _ } => {
                        let lloffset = bx.cx().const_usize(offset as u64);
                        let lllen = cg_base.len(bx.cx());
                        let llindex = bx.sub(lllen, lloffset);
                        cg_base.project_index(bx, llindex)
                    }
                    mir::ProjectionElem::Subslice { from, to } => {
                        let mut subslice = cg_base.project_index(bx,
                            bx.cx().const_usize(from as u64));
                        let projected_ty = PlaceTy::Ty { ty: cg_base.layout.ty }
                            .projection_ty(tcx, &projection.elem)
                            .to_ty(bx.tcx());
                        subslice.layout = bx.cx().layout_of(self.monomorphize(&projected_ty));

                        if subslice.layout.is_unsized() {
                            subslice.llextra = Some(bx.sub(cg_base.llextra.unwrap(),
                                bx.cx().const_usize((from as u64) + (to as u64))));
                        }

                        // Cast the place pointer type to the new
                        // array or slice type (*[%_; new_len]).
                        subslice.llval = bx.pointercast(subslice.llval,
                            bx.cx().type_ptr_to(subslice.layout.llvm_type(bx.cx())));

                        subslice
                    }
                    mir::ProjectionElem::Downcast(_, v) => {
                        cg_base.project_downcast(bx, v)
                    }
                }
            }
        };
        debug!("codegen_place(place={:?}) => {:?}", place, result);
        result
    }

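    /// Returns the monomorphized type of the given place in this function's MIR.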
    pub fn monomorphized_place_ty(&self, place: &mir::Place<'tcx>) -> Ty<'tcx> {
        let tcx = self.cx.tcx;
        let place_ty = place.ty(self.mir, tcx);
        self.monomorphize(&place_ty.to_ty(tcx))
    }
}