// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use llvm::{self, LLVMConstInBoundsGEP};
use rustc::ty::{self, Ty};
use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, Size, VariantIdx};
use rustc::mir;
use rustc::mir::tcx::PlaceTy;
use base;
use builder::Builder;
use common::{CodegenCx, C_undef, C_usize, C_u8, C_u32, C_uint, C_null, C_uint_big};
use consts;
use type_of::LayoutLlvmExt;
use type_::Type;
use value::Value;
use glue;
use mir::constant::const_alloc_to_llvm;

use traits::{IntPredicate, BuilderMethods};

use super::{FunctionCx, LocalRef};
use super::operand::{OperandRef, OperandValue};

#[derive(Copy, Clone, Debug)]
pub struct PlaceRef<'tcx, V> {
    /// Pointer to the contents of the place
    pub llval: V,

    /// This place's extra data if it is unsized, or null
    pub llextra: Option<V>,

    /// Monomorphized type of this place, including variant information
    pub layout: TyLayout<'tcx>,

    /// What alignment we know for this place
    pub align: Align,
}
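
// Illustrative only (values are hypothetical): a sized place such as a `u64`
// local looks like `PlaceRef { llval: <*u64>, llextra: None, .. }`, while an
// unsized place such as a `[u8]` carries its length out-of-band:
// `PlaceRef { llval: <*u8 data>, llextra: Some(<usize len>), .. }`.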

impl PlaceRef<'tcx, &'ll Value> {
    pub fn new_sized(
        llval: &'ll Value,
        layout: TyLayout<'tcx>,
        align: Align,
    ) -> PlaceRef<'tcx, &'ll Value> {
        assert!(!layout.is_unsized());
        PlaceRef {
            llval,
            llextra: None,
            layout,
            align
        }
    }

    pub fn from_const_alloc(
        bx: &Builder<'a, 'll, 'tcx>,
        layout: TyLayout<'tcx>,
        alloc: &mir::interpret::Allocation,
        offset: Size,
    ) -> PlaceRef<'tcx, &'ll Value> {
        let init = const_alloc_to_llvm(bx.cx, alloc);
        let base_addr = consts::addr_of(bx.cx, init, layout.align, None);

        let llval = unsafe { LLVMConstInBoundsGEP(
            consts::bitcast(base_addr, Type::i8p(bx.cx)),
            &C_usize(bx.cx, offset.bytes()),
            1,
        )};
        let llval = consts::bitcast(llval, layout.llvm_type(bx.cx).ptr_to());
        PlaceRef::new_sized(llval, layout, alloc.align)
    }

    pub fn alloca(bx: &Builder<'a, 'll, 'tcx>, layout: TyLayout<'tcx>, name: &str)
                  -> PlaceRef<'tcx, &'ll Value> {
        debug!("alloca({:?}: {:?})", name, layout);
        assert!(!layout.is_unsized(), "tried to statically allocate unsized place");
        let tmp = bx.alloca(layout.llvm_type(bx.cx), name, layout.align);
        Self::new_sized(tmp, layout, layout.align)
    }

    /// Returns a place for an indirect reference to an unsized place.
    pub fn alloca_unsized_indirect(
        bx: &Builder<'a, 'll, 'tcx>,
        layout: TyLayout<'tcx>,
        name: &str,
    ) -> PlaceRef<'tcx, &'ll Value> {
        debug!("alloca_unsized_indirect({:?}: {:?})", name, layout);
        assert!(layout.is_unsized(), "tried to allocate indirect place for sized values");
        let ptr_ty = bx.cx.tcx.mk_mut_ptr(layout.ty);
        let ptr_layout = bx.cx.layout_of(ptr_ty);
        Self::alloca(bx, ptr_layout, name)
    }
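
    // For example (descriptive): a local of type `[T]` or `str` cannot be
    // `alloca`'d directly, so the slot created here has the fat-pointer type
    // `*mut [T]` / `*mut str`, and the unsized value itself lives behind it.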

    pub fn len(&self, cx: &CodegenCx<'ll, 'tcx>) -> &'ll Value {
        if let layout::FieldPlacement::Array { count, .. } = self.layout.fields {
            if self.layout.is_unsized() {
                assert_eq!(count, 0);
                self.llextra.unwrap()
            } else {
                C_usize(cx, count)
            }
        } else {
            bug!("unexpected layout `{:#?}` in PlaceRef::len", self.layout)
        }
    }

    pub fn load(&self, bx: &Builder<'a, 'll, 'tcx>) -> OperandRef<'tcx, &'ll Value> {
        debug!("PlaceRef::load: {:?}", self);

        assert_eq!(self.llextra.is_some(), self.layout.is_unsized());

        if self.layout.is_zst() {
            return OperandRef::new_zst(bx.cx, self.layout);
        }

        let scalar_load_metadata = |load, scalar: &layout::Scalar| {
            let vr = scalar.valid_range.clone();
            match scalar.value {
                layout::Int(..) => {
                    let range = scalar.valid_range_exclusive(bx.cx);
                    if range.start != range.end {
                        bx.range_metadata(load, range);
                    }
                }
                layout::Pointer if vr.start() < vr.end() && !vr.contains(&0) => {
                    bx.nonnull_metadata(load);
                }
                _ => {}
            }
        };
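
        // Concretely (descriptive of the closure above): a `bool` scalar has
        // valid range 0..=1, so its loads get `!range` metadata, while a
        // reference's valid range excludes 0, so its loads get `!nonnull`.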

        let val = if let Some(llextra) = self.llextra {
            OperandValue::Ref(self.llval, Some(llextra), self.align)
        } else if self.layout.is_llvm_immediate() {
            let mut const_llval = None;
            unsafe {
                if let Some(global) = llvm::LLVMIsAGlobalVariable(self.llval) {
                    if llvm::LLVMIsGlobalConstant(global) == llvm::True {
                        const_llval = llvm::LLVMGetInitializer(global);
                    }
                }
            }
            let llval = const_llval.unwrap_or_else(|| {
                let load = bx.load(self.llval, self.align);
                if let layout::Abi::Scalar(ref scalar) = self.layout.abi {
                    scalar_load_metadata(load, scalar);
                }
                load
            });
            OperandValue::Immediate(base::to_immediate(bx, llval, self.layout))
        } else if let layout::Abi::ScalarPair(ref a, ref b) = self.layout.abi {
            let load = |i, scalar: &layout::Scalar| {
                let llptr = bx.struct_gep(self.llval, i as u64);
                let load = bx.load(llptr, self.align);
                scalar_load_metadata(load, scalar);
                if scalar.is_bool() {
                    bx.trunc(load, Type::i1(bx.cx))
                } else {
                    load
                }
            };
            OperandValue::Pair(load(0, a), load(1, b))
        } else {
            OperandValue::Ref(self.llval, None, self.align)
        };

        OperandRef { val, layout: self.layout }
    }

    /// Access a field, at a point when the value's case is known.
    pub fn project_field(
        self,
        bx: &Builder<'a, 'll, 'tcx>,
        ix: usize,
    ) -> PlaceRef<'tcx, &'ll Value> {
        let cx = bx.cx;
        let field = self.layout.field(cx, ix);
        let offset = self.layout.fields.offset(ix);
        let effective_field_align = self.align.restrict_for_offset(offset);

        let simple = || {
            // Unions and newtypes only use an offset of 0.
            let llval = if offset.bytes() == 0 {
                self.llval
            } else if let layout::Abi::ScalarPair(ref a, ref b) = self.layout.abi {
                // Offsets have to match either first or second field.
                assert_eq!(offset, a.value.size(cx).abi_align(b.value.align(cx)));
                bx.struct_gep(self.llval, 1)
            } else {
                bx.struct_gep(self.llval, self.layout.llvm_field_index(ix))
            };
            PlaceRef {
                // HACK(eddyb) have to bitcast pointers until LLVM removes pointee types.
                llval: bx.pointercast(llval, field.llvm_type(cx).ptr_to()),
                llextra: if cx.type_has_metadata(field.ty) {
                    self.llextra
                } else {
                    None
                },
                layout: field,
                align: effective_field_align,
            }
        };

        // Simple cases, which don't need DST adjustment:
        //   * no metadata available - just log the case
        //   * known alignment - sized types, `[T]`, `str` or a foreign type
        //   * packed struct - there is no alignment padding
        match field.ty.sty {
            _ if self.llextra.is_none() => {
                debug!("unsized field `{}` of `{:?}` has no metadata for adjustment",
                    ix, self.llval);
                return simple();
            }
            _ if !field.is_unsized() => return simple(),
            ty::Slice(..) | ty::Str | ty::Foreign(..) => return simple(),
            ty::Adt(def, _) => {
                if def.repr.packed() {
                    // FIXME(eddyb) generalize the adjustment when we
                    // start supporting packing to larger alignments.
                    assert_eq!(self.layout.align.abi(), 1);
                    return simple();
                }
            }
            _ => {}
        }

        // We need to get the pointer manually now.
        // We do this by casting to a *i8, then offsetting it by the appropriate amount.
        // We do this instead of, say, simply adjusting the pointer from the result of a GEP
        // because the field may have an arbitrary alignment in the LLVM representation
        // anyway.
        //
        // To demonstrate:
        //   struct Foo<T: ?Sized> {
        //       x: u16,
        //       y: T
        //   }
        //
        // The type Foo<Foo<Trait>> is represented in LLVM as { u16, { u16, u8 }}, meaning that
        // the `y` field has 16-bit alignment.

        let meta = self.llextra;

        let unaligned_offset = C_usize(cx, offset.bytes());

        // Get the alignment of the field
        let (_, unsized_align) = glue::size_and_align_of_dst(bx, field.ty, meta);

        // Bump the unaligned offset up to the appropriate alignment using the
        // following expression:
        //
        //   (unaligned offset + (align - 1)) & -align
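        //
        // Worked example (illustrative): with an unaligned offset of 2 and a
        // field alignment of 4, (2 + 3) & -4 == 4, i.e. the offset is rounded
        // up to the next multiple of the alignment.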

        // Calculate offset
        let align_sub_1 = bx.sub(unsized_align, C_usize(cx, 1u64));
        let offset = bx.and(bx.add(unaligned_offset, align_sub_1),
                            bx.neg(unsized_align));

        debug!("struct_field_ptr: DST field offset: {:?}", offset);

        // Cast and adjust pointer
        let byte_ptr = bx.pointercast(self.llval, Type::i8p(cx));
        let byte_ptr = bx.gep(byte_ptr, &[offset]);

        // Finally, cast back to the type expected
        let ll_fty = field.llvm_type(cx);
        debug!("struct_field_ptr: Field type is {:?}", ll_fty);

        PlaceRef {
            llval: bx.pointercast(byte_ptr, ll_fty.ptr_to()),
            llextra: self.llextra,
            layout: field,
            align: effective_field_align,
        }
    }

    /// Obtain the actual discriminant of a value.
    pub fn codegen_get_discr(
        self,
        bx: &Builder<'a, 'll, 'tcx>,
        cast_to: Ty<'tcx>,
    ) -> &'ll Value {
        let cast_to = bx.cx.layout_of(cast_to).immediate_llvm_type(bx.cx);
        if self.layout.abi.is_uninhabited() {
            return C_undef(cast_to);
        }
        match self.layout.variants {
            layout::Variants::Single { index } => {
                let discr_val = self.layout.ty.ty_adt_def().map_or(
                    index.as_u32() as u128,
                    |def| def.discriminant_for_variant(bx.cx.tcx, index).val);
                return C_uint_big(cast_to, discr_val);
            }
            layout::Variants::Tagged { .. } |
            layout::Variants::NicheFilling { .. } => {},
        }

        let discr = self.project_field(bx, 0);
        let lldiscr = discr.load(bx).immediate();
        match self.layout.variants {
            layout::Variants::Single { .. } => bug!(),
            layout::Variants::Tagged { ref tag, .. } => {
                let signed = match tag.value {
                    // We use `i1` for bytes that are always `0` or `1`,
                    // e.g. `#[repr(i8)] enum E { A, B }`, but we can't
                    // let LLVM interpret the `i1` as signed, because
                    // then `i1 1` (i.e. E::B) is effectively `i8 -1`.
                    layout::Int(_, signed) => !tag.is_bool() && signed,
                    _ => false
                };
                bx.intcast(lldiscr, cast_to, signed)
            }
            layout::Variants::NicheFilling {
                dataful_variant,
                ref niche_variants,
                niche_start,
                ..
            } => {
                let niche_llty = discr.layout.immediate_llvm_type(bx.cx);
                if niche_variants.start() == niche_variants.end() {
                    // FIXME(eddyb) Check the actual primitive type here.
                    let niche_llval = if niche_start == 0 {
                        // HACK(eddyb) Using `C_null` as it works on all types.
                        C_null(niche_llty)
                    } else {
                        C_uint_big(niche_llty, niche_start)
                    };
                    bx.select(bx.icmp(IntPredicate::IntEQ, lldiscr, niche_llval),
                        C_uint(cast_to, niche_variants.start().as_u32() as u64),
                        C_uint(cast_to, dataful_variant.as_u32() as u64))
                } else {
                    // Rebase from niche values to discriminant values.
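                    // Illustrative example (the exact layout is not
                    // guaranteed): with niche variants 1..=2 and
                    // `niche_start == 254`, `delta` below is 253, so a loaded
                    // 254 maps to discriminant 1 and 255 to 2; any other
                    // value exceeds `lldiscr_max` unsigned and selects
                    // `dataful_variant`.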
                    let delta = niche_start.wrapping_sub(niche_variants.start().as_u32() as u128);
                    let lldiscr = bx.sub(lldiscr, C_uint_big(niche_llty, delta));
                    let lldiscr_max = C_uint(niche_llty, niche_variants.end().as_u32() as u64);
                    bx.select(bx.icmp(IntPredicate::IntULE, lldiscr, lldiscr_max),
                        bx.intcast(lldiscr, cast_to, false),
                        C_uint(cast_to, dataful_variant.as_u32() as u64))
                }
            }
        }
    }

    /// Set the discriminant for a new value of the given case of the given
    /// representation.
    pub fn codegen_set_discr(&self, bx: &Builder<'a, 'll, 'tcx>, variant_index: VariantIdx) {
        if self.layout.for_variant(bx.cx, variant_index).abi.is_uninhabited() {
            return;
        }
        match self.layout.variants {
            layout::Variants::Single { index } => {
                assert_eq!(index, variant_index);
            }
            layout::Variants::Tagged { .. } => {
                let ptr = self.project_field(bx, 0);
                let to = self.layout.ty.ty_adt_def().unwrap()
                    .discriminant_for_variant(bx.tcx(), variant_index)
                    .val;
                bx.store(
                    C_uint_big(ptr.layout.llvm_type(bx.cx), to),
                    ptr.llval,
                    ptr.align);
            }
            layout::Variants::NicheFilling {
                dataful_variant,
                ref niche_variants,
                niche_start,
                ..
            } => {
                if variant_index != dataful_variant {
                    if bx.sess().target.target.arch == "arm" ||
                       bx.sess().target.target.arch == "aarch64" {
                        // Issue #34427: As workaround for LLVM bug on ARM,
                        // use memset of 0 before assigning niche value.
                        let llptr = bx.pointercast(self.llval, Type::i8(bx.cx).ptr_to());
                        let fill_byte = C_u8(bx.cx, 0);
                        let (size, align) = self.layout.size_and_align();
                        let size = C_usize(bx.cx, size.bytes());
                        let align = C_u32(bx.cx, align.abi() as u32);
                        base::call_memset(bx, llptr, fill_byte, size, align, false);
                    }

                    let niche = self.project_field(bx, 0);
                    let niche_llty = niche.layout.immediate_llvm_type(bx.cx);
                    let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
                    let niche_value = (niche_value as u128)
                        .wrapping_add(niche_start);
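                    // Illustrative (inverse of the rebase in
                    // `codegen_get_discr`): with niche variants 1..=2 and
                    // `niche_start == 254`, setting variant 2 stores
                    // (2 - 1) + 254 == 255.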
                    // FIXME(eddyb) Check the actual primitive type here.
                    let niche_llval = if niche_value == 0 {
                        // HACK(eddyb) Using `C_null` as it works on all types.
                        C_null(niche_llty)
                    } else {
                        C_uint_big(niche_llty, niche_value)
                    };
                    OperandValue::Immediate(niche_llval).store(bx, niche);
                }
            }
        }
    }

    pub fn project_index(&self, bx: &Builder<'a, 'll, 'tcx>, llindex: &'ll Value)
                         -> PlaceRef<'tcx, &'ll Value> {
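        // Note (descriptive): the `[0, llindex]` form is the usual LLVM GEP
        // idiom for indexing through a pointer-to-array: the leading `0`
        // steps through the pointer, and `llindex` selects the element.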
        PlaceRef {
            llval: bx.inbounds_gep(self.llval, &[C_usize(bx.cx, 0), llindex]),
            llextra: None,
            layout: self.layout.field(bx.cx, 0),
            align: self.align
        }
    }

    pub fn project_downcast(&self, bx: &Builder<'a, 'll, 'tcx>, variant_index: VariantIdx)
                            -> PlaceRef<'tcx, &'ll Value> {
        let mut downcast = *self;
        downcast.layout = self.layout.for_variant(bx.cx, variant_index);

        // Cast to the appropriate variant struct type.
        let variant_ty = downcast.layout.llvm_type(bx.cx);
        downcast.llval = bx.pointercast(downcast.llval, variant_ty.ptr_to());

        downcast
    }

    pub fn storage_live(&self, bx: &Builder<'a, 'll, 'tcx>) {
        bx.lifetime_start(self.llval, self.layout.size);
    }

    pub fn storage_dead(&self, bx: &Builder<'a, 'll, 'tcx>) {
        bx.lifetime_end(self.llval, self.layout.size);
    }
}

impl FunctionCx<'a, 'll, 'tcx, &'ll Value> {
    pub fn codegen_place(&mut self,
                         bx: &Builder<'a, 'll, 'tcx>,
                         place: &mir::Place<'tcx>)
                         -> PlaceRef<'tcx, &'ll Value> {
        debug!("codegen_place(place={:?})", place);

        let cx = bx.cx;
        let tcx = cx.tcx;

        if let mir::Place::Local(index) = *place {
            match self.locals[index] {
                LocalRef::Place(place) => {
                    return place;
                }
                LocalRef::UnsizedPlace(place) => {
                    return place.load(bx).deref(&cx);
                }
                LocalRef::Operand(..) => {
                    bug!("using operand local {:?} as place", place);
                }
            }
        }

        let result = match *place {
            mir::Place::Local(_) => bug!(), // handled above
            mir::Place::Promoted(box (index, ty)) => {
                let param_env = ty::ParamEnv::reveal_all();
                let cid = mir::interpret::GlobalId {
                    instance: self.instance,
                    promoted: Some(index),
                };
                let layout = cx.layout_of(self.monomorphize(&ty));
                match bx.tcx().const_eval(param_env.and(cid)) {
                    Ok(val) => match val.val {
                        mir::interpret::ConstValue::ByRef(_, alloc, offset) => {
                            PlaceRef::from_const_alloc(bx, layout, alloc, offset)
                        }
                        _ => bug!("promoteds should have an allocation: {:?}", val),
                    },
                    Err(_) => {
                        // This is unreachable as long as runtime and compile-time
                        // agree on values. With floats that won't always be true,
                        // so we generate an abort.
                        let fnname = bx.cx.get_intrinsic(&("llvm.trap"));
                        bx.call(fnname, &[], None);
                        let llval = C_undef(layout.llvm_type(bx.cx).ptr_to());
                        PlaceRef::new_sized(llval, layout, layout.align)
                    }
                }
            }
            mir::Place::Static(box mir::Static { def_id, ty }) => {
                let layout = cx.layout_of(self.monomorphize(&ty));
                PlaceRef::new_sized(consts::get_static(cx, def_id), layout, layout.align)
            },
            mir::Place::Projection(box mir::Projection {
                ref base,
                elem: mir::ProjectionElem::Deref
            }) => {
                // Load the pointer from its location.
                self.codegen_consume(bx, base).deref(bx.cx)
            }
            mir::Place::Projection(ref projection) => {
                let cg_base = self.codegen_place(bx, &projection.base);

                match projection.elem {
                    mir::ProjectionElem::Deref => bug!(),
                    mir::ProjectionElem::Field(ref field, _) => {
                        cg_base.project_field(bx, field.index())
                    }
                    mir::ProjectionElem::Index(index) => {
                        let index = &mir::Operand::Copy(mir::Place::Local(index));
                        let index = self.codegen_operand(bx, index);
                        let llindex = index.immediate();
                        cg_base.project_index(bx, llindex)
                    }
                    mir::ProjectionElem::ConstantIndex { offset,
                                                         from_end: false,
                                                         min_length: _ } => {
                        let lloffset = C_usize(bx.cx, offset as u64);
                        cg_base.project_index(bx, lloffset)
                    }
                    mir::ProjectionElem::ConstantIndex { offset,
                                                         from_end: true,
                                                         min_length: _ } => {
                        let lloffset = C_usize(bx.cx, offset as u64);
                        let lllen = cg_base.len(bx.cx);
                        let llindex = bx.sub(lllen, lloffset);
                        cg_base.project_index(bx, llindex)
                    }
                    mir::ProjectionElem::Subslice { from, to } => {
                        let mut subslice = cg_base.project_index(bx,
                            C_usize(bx.cx, from as u64));
                        let projected_ty = PlaceTy::Ty { ty: cg_base.layout.ty }
                            .projection_ty(tcx, &projection.elem)
                            .to_ty(bx.tcx());
                        subslice.layout = bx.cx.layout_of(self.monomorphize(&projected_ty));

                        if subslice.layout.is_unsized() {
                            subslice.llextra = Some(bx.sub(cg_base.llextra.unwrap(),
                                C_usize(bx.cx, (from as u64) + (to as u64))));
                        }
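
                        // Note (descriptive): `Subslice { from, to }` keeps
                        // `from .. len - to`, so the adjusted length above is
                        // `len - (from + to)`.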

                        // Cast the place pointer type to the new
                        // array or slice type (`*[%_; new_len]`).
                        subslice.llval = bx.pointercast(subslice.llval,
                            subslice.layout.llvm_type(bx.cx).ptr_to());

                        subslice
                    }
                    mir::ProjectionElem::Downcast(_, v) => {
                        cg_base.project_downcast(bx, v)
                    }
                }
            }
        };
        debug!("codegen_place(place={:?}) => {:?}", place, result);
        result
    }

    pub fn monomorphized_place_ty(&self, place: &mir::Place<'tcx>) -> Ty<'tcx> {
        let tcx = self.cx.tcx;
        let place_ty = place.ty(self.mir, tcx);
        self.monomorphize(&place_ty.to_ty(tcx))
    }
}