// src/librustc_codegen_ssa/mir/place.rs
// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use rustc::ty::{self, Ty};
use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, VariantIdx, HasTyCtxt};
use rustc::mir;
use rustc::mir::tcx::PlaceTy;
use MemFlags;
use common::IntPredicate;
use glue;

use traits::*;

use super::{FunctionCx, LocalRef};
use super::operand::OperandValue;

#[derive(Copy, Clone, Debug)]
pub struct PlaceRef<'tcx, V> {
    /// Pointer to the contents of the place
    pub llval: V,

    /// This place's extra data if it is unsized, or `None` if it is sized
    pub llextra: Option<V>,

    /// Monomorphized type of this place, including variant information
    pub layout: TyLayout<'tcx>,

    /// What alignment we know for this place
    pub align: Align,
}

impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
    pub fn new_sized(
        llval: V,
        layout: TyLayout<'tcx>,
        align: Align,
    ) -> PlaceRef<'tcx, V> {
        assert!(!layout.is_unsized());
        PlaceRef {
            llval,
            llextra: None,
            layout,
            align
        }
    }

    pub fn alloca<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        layout: TyLayout<'tcx>,
        name: &str
    ) -> Self {
        debug!("alloca({:?}: {:?})", name, layout);
        assert!(!layout.is_unsized(), "tried to statically allocate unsized place");
        let tmp = bx.alloca(bx.cx().backend_type(layout), name, layout.align.abi);
        Self::new_sized(tmp, layout, layout.align.abi)
    }

    /// Returns a place for an indirect reference to an unsized place.
    pub fn alloca_unsized_indirect<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        layout: TyLayout<'tcx>,
        name: &str,
    ) -> Self {
        debug!("alloca_unsized_indirect({:?}: {:?})", name, layout);
        assert!(layout.is_unsized(), "tried to allocate indirect place for sized values");
        let ptr_ty = bx.cx().tcx().mk_mut_ptr(layout.ty);
        let ptr_layout = bx.cx().layout_of(ptr_ty);
        Self::alloca(bx, ptr_layout, name)
    }

    pub fn len<Cx: CodegenMethods<'tcx, Value = V>>(
        &self,
        cx: &Cx
    ) -> V {
        if let layout::FieldPlacement::Array { count, .. } = self.layout.fields {
            if self.layout.is_unsized() {
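                // An unsized array layout records a `count` of 0; the actual
                // length lives in the place's extra data (e.g. the slice length).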
                assert_eq!(count, 0);
                self.llextra.unwrap()
            } else {
                cx.const_usize(count)
            }
        } else {
            bug!("unexpected layout `{:#?}` in PlaceRef::len", self.layout)
        }
    }

}

impl<'a, 'tcx: 'a, V: CodegenObject> PlaceRef<'tcx, V> {
    /// Access a field, at a point when the value's case is known.
    pub fn project_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self, bx: &mut Bx,
        ix: usize,
    ) -> Self {
        let field = self.layout.field(bx.cx(), ix);
        let offset = self.layout.fields.offset(ix);
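        // The alignment we can assume for the field is the place's own alignment
        // restricted by the field offset (i.e. limited to the largest power of
        // two that divides the offset).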
        let effective_field_align = self.align.restrict_for_offset(offset);

        let mut simple = || {
            // Unions and newtypes only use an offset of 0.
            let llval = if offset.bytes() == 0 {
                self.llval
            } else if let layout::Abi::ScalarPair(ref a, ref b) = self.layout.abi {
                // Offsets have to match either first or second field.
                assert_eq!(offset, a.value.size(bx.cx()).align_to(b.value.align(bx.cx()).abi));
                bx.struct_gep(self.llval, 1)
            } else {
                bx.struct_gep(self.llval, bx.cx().backend_field_index(self.layout, ix))
            };
            PlaceRef {
                // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
                llval: bx.pointercast(llval, bx.cx().type_ptr_to(bx.cx().backend_type(field))),
                llextra: if bx.cx().type_has_metadata(field.ty) {
                    self.llextra
                } else {
                    None
                },
                layout: field,
                align: effective_field_align,
            }
        };

        // Simple cases, which don't need DST adjustment:
        //   * no metadata available - just log the case
        //   * known alignment - sized types, [T], str or a foreign type
        //   * packed struct - there is no alignment padding
        match field.ty.sty {
            _ if self.llextra.is_none() => {
                debug!("unsized field `{}` of `{:?}` has no metadata for adjustment",
                    ix, self.llval);
                return simple();
            }
            _ if !field.is_unsized() => return simple(),
            ty::Slice(..) | ty::Str | ty::Foreign(..) => return simple(),
            ty::Adt(def, _) => {
                if def.repr.packed() {
                    // FIXME(eddyb) generalize the adjustment when we
                    // start supporting packing to larger alignments.
                    assert_eq!(self.layout.align.abi.bytes(), 1);
                    return simple();
                }
            }
            _ => {}
        }

        // We need to get the pointer manually now.
        // We do this by casting to a *i8, then offsetting it by the appropriate amount.
        // We do this instead of, say, simply adjusting the pointer from the result of a GEP
        // because the field may have an arbitrary alignment in the LLVM representation
        // anyway.
        //
        // To demonstrate:
        //   struct Foo<T: ?Sized> {
        //      x: u16,
        //      y: T
        //   }
        //
        // The type Foo<Foo<Trait>> is represented in LLVM as { u16, { u16, u8 }}, meaning that
        // the `y` field has 16-bit alignment.

        let meta = self.llextra;

        let unaligned_offset = bx.cx().const_usize(offset.bytes());

        // Get the alignment of the field
        let (_, unsized_align) = glue::size_and_align_of_dst(bx, field.ty, meta);

        // Bump the unaligned offset up to the appropriate alignment using the
        // following expression:
        //
        //   (unaligned offset + (align - 1)) & -align
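        //
        // For example, an unaligned offset of 6 with an alignment of 4 becomes
        // (6 + 3) & -4 = 8 (`-align` wrapping in two's complement).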

        // Calculate offset
        let align_sub_1 = bx.sub(unsized_align, bx.cx().const_usize(1u64));
        let and_lhs = bx.add(unaligned_offset, align_sub_1);
        let and_rhs = bx.neg(unsized_align);
        let offset = bx.and(and_lhs, and_rhs);

        debug!("struct_field_ptr: DST field offset: {:?}", offset);

        // Cast and adjust pointer
        let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p());
        let byte_ptr = bx.gep(byte_ptr, &[offset]);

        // Finally, cast back to the type expected
        let ll_fty = bx.cx().backend_type(field);
        debug!("struct_field_ptr: Field type is {:?}", ll_fty);

        PlaceRef {
            llval: bx.pointercast(byte_ptr, bx.cx().type_ptr_to(ll_fty)),
            llextra: self.llextra,
            layout: field,
            align: effective_field_align,
        }
    }

    /// Obtain the actual discriminant of a value.
    pub fn codegen_get_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        bx: &mut Bx,
        cast_to: Ty<'tcx>
    ) -> V {
        let cast_to = bx.cx().immediate_backend_type(bx.cx().layout_of(cast_to));
        if self.layout.abi.is_uninhabited() {
            return bx.cx().const_undef(cast_to);
        }
        match self.layout.variants {
            layout::Variants::Single { index } => {
                let discr_val = self.layout.ty.ty_adt_def().map_or(
                    index.as_u32() as u128,
                    |def| def.discriminant_for_variant(bx.cx().tcx(), index).val);
                return bx.cx().const_uint_big(cast_to, discr_val);
            }
            layout::Variants::Tagged { .. } |
            layout::Variants::NicheFilling { .. } => {},
        }

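        // For both tagged and niche-filling layouts, the tag (or niche) is
        // stored as field 0 of the enum's layout.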
        let discr = self.project_field(bx, 0);
        let lldiscr = bx.load_operand(discr).immediate();
        match self.layout.variants {
            layout::Variants::Single { .. } => bug!(),
            layout::Variants::Tagged { ref tag, .. } => {
                let signed = match tag.value {
                    // We use `i1` for bytes that are always `0` or `1`,
                    // e.g., `#[repr(i8)] enum E { A, B }`, but we can't
                    // let LLVM interpret the `i1` as signed, because
                    // then `i1 1` (i.e., E::B) is effectively `i8 -1`.
                    layout::Int(_, signed) => !tag.is_bool() && signed,
                    _ => false
                };
                bx.intcast(lldiscr, cast_to, signed)
            }
            layout::Variants::NicheFilling {
                dataful_variant,
                ref niche_variants,
                niche_start,
                ..
            } => {
                let niche_llty = bx.cx().immediate_backend_type(discr.layout);
                if niche_variants.start() == niche_variants.end() {
                    // FIXME(eddyb) Check the actual primitive type here.
                    let niche_llval = if niche_start == 0 {
                        // HACK(eddyb) Using `c_null` as it works on all types.
                        bx.cx().const_null(niche_llty)
                    } else {
                        bx.cx().const_uint_big(niche_llty, niche_start)
                    };
                    let select_arg = bx.icmp(IntPredicate::IntEQ, lldiscr, niche_llval);
                    bx.select(select_arg,
                        bx.cx().const_uint(cast_to, niche_variants.start().as_u32() as u64),
                        bx.cx().const_uint(cast_to, dataful_variant.as_u32() as u64))
                } else {
                    // Rebase from niche values to discriminant values.
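                    // E.g. with `niche_variants` = 3..=5 and `niche_start` = 10,
                    // the raw values 10, 11, 12 map back to variant indices
                    // 3, 4, 5 (`delta` = 7); anything outside that range is the
                    // dataful variant.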
                    let delta = niche_start.wrapping_sub(niche_variants.start().as_u32() as u128);
                    let lldiscr = bx.sub(lldiscr, bx.cx().const_uint_big(niche_llty, delta));
                    let lldiscr_max =
                        bx.cx().const_uint(niche_llty, niche_variants.end().as_u32() as u64);
                    let select_arg = bx.icmp(IntPredicate::IntULE, lldiscr, lldiscr_max);
                    let cast = bx.intcast(lldiscr, cast_to, false);
                    bx.select(select_arg,
                        cast,
                        bx.cx().const_uint(cast_to, dataful_variant.as_u32() as u64))
                }
            }
        }
    }

    /// Set the discriminant for a new value of the given case of the given
    /// representation.
    pub fn codegen_set_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &mut Bx,
        variant_index: VariantIdx
    ) {
        if self.layout.for_variant(bx.cx(), variant_index).abi.is_uninhabited() {
            return;
        }
        match self.layout.variants {
            layout::Variants::Single { index } => {
                assert_eq!(index, variant_index);
            }
            layout::Variants::Tagged { .. } => {
                let ptr = self.project_field(bx, 0);
                let to = self.layout.ty.ty_adt_def().unwrap()
                    .discriminant_for_variant(bx.tcx(), variant_index)
                    .val;
                bx.store(
                    bx.cx().const_uint_big(bx.cx().backend_type(ptr.layout), to),
                    ptr.llval,
                    ptr.align);
            }
            layout::Variants::NicheFilling {
                dataful_variant,
                ref niche_variants,
                niche_start,
                ..
            } => {
                if variant_index != dataful_variant {
                    if bx.cx().sess().target.target.arch == "arm" ||
                       bx.cx().sess().target.target.arch == "aarch64" {
                        // Issue #34427: As workaround for LLVM bug on ARM,
                        // use memset of 0 before assigning niche value.
                        let fill_byte = bx.cx().const_u8(0);
                        let size = bx.cx().const_usize(self.layout.size.bytes());
                        bx.memset(self.llval, fill_byte, size, self.align, MemFlags::empty());
                    }

                    let niche = self.project_field(bx, 0);
                    let niche_llty = bx.cx().immediate_backend_type(niche.layout);
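                    // Encode the variant as a raw niche value; this is the
                    // inverse of the rebase in `codegen_get_discr`, e.g. with
                    // `niche_variants` = 3..=5 and `niche_start` = 10, variant 4
                    // is stored as (4 - 3) + 10 = 11.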
                    let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
                    let niche_value = (niche_value as u128)
                        .wrapping_add(niche_start);
                    // FIXME(eddyb) Check the actual primitive type here.
                    let niche_llval = if niche_value == 0 {
                        // HACK(eddyb) Using `c_null` as it works on all types.
                        bx.cx().const_null(niche_llty)
                    } else {
                        bx.cx().const_uint_big(niche_llty, niche_value)
                    };
                    OperandValue::Immediate(niche_llval).store(bx, niche);
                }
            }
        }
    }

    pub fn project_index<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &mut Bx,
        llindex: V
    ) -> Self {
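        // The leading 0 index makes the GEP step through the pointer to the
        // aggregate itself before `llindex` selects the element (this matches
        // how the LLVM backend expects indexing into arrays and slices).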
        PlaceRef {
            llval: bx.inbounds_gep(self.llval, &[bx.cx().const_usize(0), llindex]),
            llextra: None,
            layout: self.layout.field(bx.cx(), 0),
            align: self.align
        }
    }

    pub fn project_downcast<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &mut Bx,
        variant_index: VariantIdx
    ) -> Self {
        let mut downcast = *self;
        downcast.layout = self.layout.for_variant(bx.cx(), variant_index);

        // Cast to the appropriate variant struct type.
        let variant_ty = bx.cx().backend_type(downcast.layout);
        downcast.llval = bx.pointercast(downcast.llval, bx.cx().type_ptr_to(variant_ty));

        downcast
    }

    pub fn storage_live<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
        bx.lifetime_start(self.llval, self.layout.size);
    }

    pub fn storage_dead<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
        bx.lifetime_end(self.llval, self.layout.size);
    }
}

impl<'a, 'tcx: 'a, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
    pub fn codegen_place(
        &mut self,
        bx: &mut Bx,
        place: &mir::Place<'tcx>
    ) -> PlaceRef<'tcx, Bx::Value> {
        debug!("codegen_place(place={:?})", place);

        let cx = self.cx;
        let tcx = self.cx.tcx();

        if let mir::Place::Local(index) = *place {
            match self.locals[index] {
                LocalRef::Place(place) => {
                    return place;
                }
                LocalRef::UnsizedPlace(place) => {
                    return bx.load_operand(place).deref(cx);
                }
                LocalRef::Operand(..) => {
                    bug!("using operand local {:?} as place", place);
                }
            }
        }

        let result = match *place {
            mir::Place::Local(_) => bug!(), // handled above
            mir::Place::Promoted(box (index, ty)) => {
                let param_env = ty::ParamEnv::reveal_all();
                let cid = mir::interpret::GlobalId {
                    instance: self.instance,
                    promoted: Some(index),
                };
                let layout = cx.layout_of(self.monomorphize(&ty));
                match bx.tcx().const_eval(param_env.and(cid)) {
                    Ok(val) => match val.val {
                        mir::interpret::ConstValue::ByRef(_, alloc, offset) => {
                            bx.cx().from_const_alloc(layout, alloc, offset)
                        }
                        _ => bug!("promoteds should have an allocation: {:?}", val),
                    },
                    Err(_) => {
                        // This is unreachable as long as runtime and compile-time
                        // agree on values. With floats that won't always be true,
                        // so we generate an abort.
                        bx.abort();
                        let llval = bx.cx().const_undef(
                            bx.cx().type_ptr_to(bx.cx().backend_type(layout))
                        );
                        PlaceRef::new_sized(llval, layout, layout.align.abi)
                    }
                }
            }
            mir::Place::Static(box mir::Static { def_id, ty }) => {
                let layout = cx.layout_of(self.monomorphize(&ty));
                PlaceRef::new_sized(bx.get_static(def_id), layout, layout.align.abi)
            },
            mir::Place::Projection(box mir::Projection {
                ref base,
                elem: mir::ProjectionElem::Deref
            }) => {
                // Load the pointer from its location.
                self.codegen_consume(bx, base).deref(bx.cx())
            }
            mir::Place::Projection(ref projection) => {
                let cg_base = self.codegen_place(bx, &projection.base);

                match projection.elem {
                    mir::ProjectionElem::Deref => bug!(),
                    mir::ProjectionElem::Field(ref field, _) => {
                        cg_base.project_field(bx, field.index())
                    }
                    mir::ProjectionElem::Index(index) => {
                        let index = &mir::Operand::Copy(mir::Place::Local(index));
                        let index = self.codegen_operand(bx, index);
                        let llindex = index.immediate();
                        cg_base.project_index(bx, llindex)
                    }
                    mir::ProjectionElem::ConstantIndex { offset,
                                                         from_end: false,
                                                         min_length: _ } => {
                        let lloffset = bx.cx().const_usize(offset as u64);
                        cg_base.project_index(bx, lloffset)
                    }
                    mir::ProjectionElem::ConstantIndex { offset,
                                                         from_end: true,
                                                         min_length: _ } => {
                        let lloffset = bx.cx().const_usize(offset as u64);
                        let lllen = cg_base.len(bx.cx());
                        let llindex = bx.sub(lllen, lloffset);
                        cg_base.project_index(bx, llindex)
                    }
                    mir::ProjectionElem::Subslice { from, to } => {
                        let mut subslice = cg_base.project_index(bx,
                            bx.cx().const_usize(from as u64));
                        let projected_ty = PlaceTy::Ty { ty: cg_base.layout.ty }
                            .projection_ty(tcx, &projection.elem).to_ty(tcx);
                        subslice.layout = bx.cx().layout_of(self.monomorphize(&projected_ty));

                        if subslice.layout.is_unsized() {
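                            // `from` and `to` count elements dropped from the
                            // front and the back of the slice respectively, so
                            // the new length is `len - from - to`.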
                            subslice.llextra = Some(bx.sub(cg_base.llextra.unwrap(),
                                bx.cx().const_usize((from as u64) + (to as u64))));
                        }

                        // Cast the place pointer type to the new
                        // array or slice type (*[%_; new_len]).
                        subslice.llval = bx.pointercast(subslice.llval,
                            bx.cx().type_ptr_to(bx.cx().backend_type(subslice.layout)));

                        subslice
                    }
                    mir::ProjectionElem::Downcast(_, v) => {
                        cg_base.project_downcast(bx, v)
                    }
                }
            }
        };
        debug!("codegen_place(place={:?}) => {:?}", place, result);
        result
    }

    pub fn monomorphized_place_ty(&self, place: &mir::Place<'tcx>) -> Ty<'tcx> {
        let tcx = self.cx.tcx();
        let place_ty = place.ty(self.mir, tcx);
        self.monomorphize(&place_ty.to_ty(tcx))
    }
}