// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use llvm::ValueRef;
use rustc::ty::{self, Ty, TypeFoldable};
use rustc::ty::layout::{self, LayoutTyper};
use rustc::mir;
use rustc::mir::tcx::LvalueTy;
use rustc_data_structures::indexed_vec::Idx;
use adt;
use builder::Builder;
use common::{self, CrateContext, C_uint};
use consts;
use machine;
use type_::Type;
use type_of;
use value::Value;
use glue;

use std::ptr;
use std::ops;

use super::{MirContext, LocalRef};
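
/// Whether a memory access to an lvalue may assume the ABI alignment of its
/// type, or must be treated as packed (only byte-aligned), e.g. because the
/// value lives inside a packed struct.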
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum Alignment {
    Packed,
    AbiAligned,
}

impl ops::BitOr for Alignment {
    type Output = Self;

    fn bitor(self, rhs: Self) -> Self {
        match (self, rhs) {
            (Alignment::Packed, _) => Alignment::Packed,
            (Alignment::AbiAligned, a) => a,
        }
    }
}

impl Alignment {
    pub fn from_packed(packed: bool) -> Self {
        if packed {
            Alignment::Packed
        } else {
            Alignment::AbiAligned
        }
    }

    pub fn to_align(self) -> Option<u32> {
        match self {
            Alignment::Packed => Some(1),
            Alignment::AbiAligned => None,
        }
    }

    pub fn min_with(self, align: u32) -> Option<u32> {
        match self {
            Alignment::Packed => Some(1),
            Alignment::AbiAligned => Some(align),
        }
    }
}

#[derive(Copy, Clone, Debug)]
pub struct LvalueRef<'tcx> {
    /// Pointer to the contents of the lvalue
    pub llval: ValueRef,

    /// This lvalue's extra data if it is unsized, or null
    pub llextra: ValueRef,

    /// Monomorphized type of this lvalue, including variant information
    pub ty: LvalueTy<'tcx>,

    /// Whether this lvalue is known to be aligned according to its layout
    pub alignment: Alignment,
}

impl<'a, 'tcx> LvalueRef<'tcx> {
    pub fn new_sized(llval: ValueRef, lvalue_ty: LvalueTy<'tcx>,
                     alignment: Alignment) -> LvalueRef<'tcx> {
        LvalueRef { llval: llval, llextra: ptr::null_mut(), ty: lvalue_ty, alignment: alignment }
    }

    pub fn new_sized_ty(llval: ValueRef, ty: Ty<'tcx>, alignment: Alignment) -> LvalueRef<'tcx> {
        LvalueRef::new_sized(llval, LvalueTy::from_ty(ty), alignment)
    }

    pub fn alloca(bcx: &Builder<'a, 'tcx>, ty: Ty<'tcx>, name: &str) -> LvalueRef<'tcx> {
        debug!("alloca({:?}: {:?})", name, ty);
        let tmp = bcx.alloca(type_of::type_of(bcx.ccx, ty), name);
        assert!(!ty.has_param_types());
        Self::new_sized_ty(tmp, ty, Alignment::AbiAligned)
    }

    pub fn len(&self, ccx: &CrateContext<'a, 'tcx>) -> ValueRef {
        let ty = self.ty.to_ty(ccx.tcx());
        match ty.sty {
            ty::TyArray(_, n) => common::C_uint(ccx, n),
            ty::TySlice(_) | ty::TyStr => {
                assert!(self.llextra != ptr::null_mut());
                self.llextra
            }
            _ => bug!("unexpected type `{}` in LvalueRef::len", ty)
        }
    }

    pub fn has_extra(&self) -> bool {
        !self.llextra.is_null()
    }
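
    /// Return a pointer to the `ix`-th field of a struct laid out as `st`,
    /// together with the alignment the caller may assume for that pointer.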
    fn struct_field_ptr(
        self,
        bcx: &Builder<'a, 'tcx>,
        st: &layout::Struct,
        fields: &Vec<Ty<'tcx>>,
        ix: usize,
        needs_cast: bool
    ) -> (ValueRef, Alignment) {
        let fty = fields[ix];
        let ccx = bcx.ccx;

        let alignment = self.alignment | Alignment::from_packed(st.packed);

        let ptr_val = if needs_cast {
            let fields = st.field_index_by_increasing_offset().map(|i| {
                type_of::in_memory_type_of(ccx, fields[i])
            }).collect::<Vec<_>>();
            let real_ty = Type::struct_(ccx, &fields[..], st.packed);
            bcx.pointercast(self.llval, real_ty.ptr_to())
        } else {
            self.llval
        };

        // Simple case - we can just GEP the field
        //   * First field - Always aligned properly
        //   * Packed struct - There is no alignment padding
        //   * Field is sized - pointer is properly aligned already
        if st.offsets[ix] == layout::Size::from_bytes(0) || st.packed ||
            bcx.ccx.shared().type_is_sized(fty) {
            return (bcx.struct_gep(ptr_val, st.memory_index[ix] as usize), alignment);
        }

        // If the type of the last field is [T] or str, then we don't need to do
        // any adjustments
        match fty.sty {
            ty::TySlice(..) | ty::TyStr => {
                return (bcx.struct_gep(ptr_val, st.memory_index[ix] as usize), alignment);
            }
            _ => ()
        }

        // There's no metadata available, log the case and just do the GEP.
        if !self.has_extra() {
            debug!("Unsized field `{}`, of `{:?}` has no metadata for adjustment",
                ix, Value(self.llval));
            return (bcx.struct_gep(ptr_val, ix), alignment);
        }

        // We need to get the pointer manually now.
        // We do this by casting to a *i8, then offsetting it by the appropriate amount.
        // We do this instead of, say, simply adjusting the pointer from the result of a GEP
        // because the field may have an arbitrary alignment in the LLVM representation
        // anyway.
        //
        // To demonstrate:
        //   struct Foo<T: ?Sized> {
        //       x: u16,
        //       y: T
        //   }
        //
        // The type Foo<Foo<Trait>> is represented in LLVM as { u16, { u16, u8 }}, meaning that
        // the `y` field has 16-bit alignment.

        let meta = self.llextra;

        let offset = st.offsets[ix].bytes();
        let unaligned_offset = C_uint(bcx.ccx, offset);

        // Get the alignment of the field
        let (_, align) = glue::size_and_align_of_dst(bcx, fty, meta);

        // Bump the unaligned offset up to the appropriate alignment using the
        // following expression:
        //
        //   (unaligned offset + (align - 1)) & -align
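        //
        // For example (illustrative numbers, not taken from a real layout):
        // with an unaligned offset of 5 and a field alignment of 4, this
        // computes (5 + 3) & -4 = 8, i.e. the offset is rounded up to the
        // next multiple of the field's alignment.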
        let align_sub_1 = bcx.sub(align, C_uint(bcx.ccx, 1u64));
        let offset = bcx.and(bcx.add(unaligned_offset, align_sub_1),
                             bcx.neg(align));

        debug!("struct_field_ptr: DST field offset: {:?}", Value(offset));

        // Cast and adjust pointer
        let byte_ptr = bcx.pointercast(ptr_val, Type::i8p(bcx.ccx));
        let byte_ptr = bcx.gep(byte_ptr, &[offset]);

        // Finally, cast back to the type expected
        let ll_fty = type_of::in_memory_type_of(bcx.ccx, fty);
        debug!("struct_field_ptr: Field type is {:?}", ll_fty);
        (bcx.pointercast(byte_ptr, ll_fty.ptr_to()), alignment)
    }

    /// Access a field, at a point when the value's case is known.
    pub fn trans_field_ptr(self, bcx: &Builder<'a, 'tcx>, ix: usize) -> (ValueRef, Alignment) {
        let discr = match self.ty {
            LvalueTy::Ty { .. } => 0,
            LvalueTy::Downcast { variant_index, .. } => variant_index,
        };
        let t = self.ty.to_ty(bcx.tcx());
        let l = bcx.ccx.layout_of(t);
        // Note: if this ever needs to generate conditionals (e.g., if we
        // decide to do some kind of cdr-coding-like non-unique repr
        // someday), it will need to return a possibly-new bcx as well.
        match *l {
            layout::Univariant { ref variant, .. } => {
                assert_eq!(discr, 0);
                self.struct_field_ptr(bcx, &variant,
                    &adt::compute_fields(bcx.ccx, t, 0, false), ix, false)
            }
            layout::Vector { count, .. } => {
                assert_eq!(discr, 0);
                assert!((ix as u64) < count);
                (bcx.struct_gep(self.llval, ix), self.alignment)
            }
            layout::General { discr: d, ref variants, .. } => {
                let mut fields = adt::compute_fields(bcx.ccx, t, discr, false);
                fields.insert(0, d.to_ty(&bcx.tcx(), false));
                self.struct_field_ptr(bcx, &variants[discr], &fields, ix + 1, true)
            }
            layout::UntaggedUnion { ref variants } => {
                let fields = adt::compute_fields(bcx.ccx, t, 0, false);
                let ty = type_of::in_memory_type_of(bcx.ccx, fields[ix]);
                (bcx.pointercast(self.llval, ty.ptr_to()),
                 self.alignment | Alignment::from_packed(variants.packed))
            }
            layout::RawNullablePointer { nndiscr, .. } |
            layout::StructWrappedNullablePointer { nndiscr, .. } if discr as u64 != nndiscr => {
                let nullfields = adt::compute_fields(bcx.ccx, t, (1-nndiscr) as usize, false);
                // The unit-like case might have a nonzero number of unit-like fields.
                // (e.g., Result or Either with () as one side.)
                let ty = type_of::type_of(bcx.ccx, nullfields[ix]);
                assert_eq!(machine::llsize_of_alloc(bcx.ccx, ty), 0);
                (bcx.pointercast(self.llval, ty.ptr_to()), Alignment::Packed)
            }
            layout::RawNullablePointer { nndiscr, .. } => {
                let nnty = adt::compute_fields(bcx.ccx, t, nndiscr as usize, false)[0];
                assert_eq!(discr as u64, nndiscr);
                let ty = type_of::type_of(bcx.ccx, nnty);
                (bcx.pointercast(self.llval, ty.ptr_to()), self.alignment)
            }
            layout::StructWrappedNullablePointer { ref nonnull, nndiscr, .. } => {
                assert_eq!(discr as u64, nndiscr);
                self.struct_field_ptr(bcx, &nonnull,
                    &adt::compute_fields(bcx.ccx, t, discr, false), ix, false)
            }
            _ => bug!("element access in type without elements: {} represented as {:#?}", t, l)
        }
    }
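
    /// Compute a pointer to element `llindex`. For pointers to `[T]` the base
    /// already points at the element type, so a single GEP index suffices; for
    /// arrays the pointer has type `*[T; N]`, so an extra leading zero index is
    /// needed to step through the array type first.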
    pub fn project_index(&self, bcx: &Builder<'a, 'tcx>, llindex: ValueRef) -> ValueRef {
        if let ty::TySlice(_) = self.ty.to_ty(bcx.tcx()).sty {
            // Slices already point to the array element type.
            bcx.inbounds_gep(self.llval, &[llindex])
        } else {
            let zero = common::C_uint(bcx.ccx, 0u64);
            bcx.inbounds_gep(self.llval, &[zero, llindex])
        }
    }
}

impl<'a, 'tcx> MirContext<'a, 'tcx> {
    pub fn trans_lvalue(&mut self,
                        bcx: &Builder<'a, 'tcx>,
                        lvalue: &mir::Lvalue<'tcx>)
                        -> LvalueRef<'tcx> {
        debug!("trans_lvalue(lvalue={:?})", lvalue);

        let ccx = bcx.ccx;
        let tcx = ccx.tcx();

        if let mir::Lvalue::Local(index) = *lvalue {
            match self.locals[index] {
                LocalRef::Lvalue(lvalue) => {
                    // This local has already been allocated as an lvalue; reuse it.
                    return lvalue;
                }
                LocalRef::Operand(..) => {
                    bug!("using operand local {:?} as lvalue", lvalue);
                }
            }
        }

        let result = match *lvalue {
            mir::Lvalue::Local(_) => bug!(), // handled above
            mir::Lvalue::Static(box mir::Static { def_id, ty }) => {
                LvalueRef::new_sized(consts::get_static(ccx, def_id),
                                     LvalueTy::from_ty(self.monomorphize(&ty)),
                                     Alignment::AbiAligned)
            }
            mir::Lvalue::Projection(box mir::Projection {
                ref base,
                elem: mir::ProjectionElem::Deref
            }) => {
                // Load the pointer from its location.
                self.trans_consume(bcx, base).deref()
            }
            mir::Lvalue::Projection(ref projection) => {
                let tr_base = self.trans_lvalue(bcx, &projection.base);
                let projected_ty = tr_base.ty.projection_ty(tcx, &projection.elem);
                let projected_ty = self.monomorphize(&projected_ty);
                let align = tr_base.alignment;

                let ((llprojected, align), llextra) = match projection.elem {
                    mir::ProjectionElem::Deref => bug!(),
                    mir::ProjectionElem::Field(ref field, _) => {
                        let llextra = if self.ccx.shared().type_is_sized(projected_ty.to_ty(tcx)) {
                            ptr::null_mut()
                        } else {
                            tr_base.llextra
                        };
                        (tr_base.trans_field_ptr(bcx, field.index()), llextra)
                    }
                    mir::ProjectionElem::Index(ref index) => {
                        let index = self.trans_operand(bcx, index);
                        let llindex = self.prepare_index(bcx, index.immediate());
                        ((tr_base.project_index(bcx, llindex), align), ptr::null_mut())
                    }
                    mir::ProjectionElem::ConstantIndex { offset,
                                                         from_end: false,
                                                         min_length: _ } => {
                        let lloffset = C_uint(bcx.ccx, offset);
                        ((tr_base.project_index(bcx, lloffset), align), ptr::null_mut())
                    }
                    mir::ProjectionElem::ConstantIndex { offset,
                                                         from_end: true,
                                                         min_length: _ } => {
                        // Indexing from the end: the actual index is `len - offset`.
                        let lloffset = C_uint(bcx.ccx, offset);
                        let lllen = tr_base.len(bcx.ccx);
                        let llindex = bcx.sub(lllen, lloffset);
                        ((tr_base.project_index(bcx, llindex), align), ptr::null_mut())
                    }
                    mir::ProjectionElem::Subslice { from, to } => {
                        let llbase = tr_base.project_index(bcx, C_uint(bcx.ccx, from));

                        let base_ty = tr_base.ty.to_ty(bcx.tcx());
                        match base_ty.sty {
                            ty::TyArray(..) => {
                                // must cast the lvalue pointer type to the new
                                // array type (*[%_; new_len]).
                                let base_ty = self.monomorphized_lvalue_ty(lvalue);
                                let llbasety = type_of::type_of(bcx.ccx, base_ty).ptr_to();
                                let llbase = bcx.pointercast(llbase, llbasety);
                                ((llbase, align), ptr::null_mut())
                            }
                            ty::TySlice(..) => {
                                assert!(tr_base.llextra != ptr::null_mut());
                                let lllen = bcx.sub(tr_base.llextra,
                                                    C_uint(bcx.ccx, from+to));
                                ((llbase, align), lllen)
                            }
                            _ => bug!("unexpected type {:?} in Subslice", base_ty)
                        }
                    }
                    mir::ProjectionElem::Downcast(..) => {
                        ((tr_base.llval, align), tr_base.llextra)
                    }
                };
                LvalueRef {
                    llval: llprojected,
                    llextra: llextra,
                    ty: projected_ty,
                    alignment: align,
                }
            }
        };
        debug!("trans_lvalue(lvalue={:?}) => {:?}", lvalue, result);
        result
    }

    /// Adjust the bitwidth of an index since LLVM is less forgiving
    /// than we are.
    ///
    /// nmatsakis: is this still necessary? Not sure.
    fn prepare_index(&mut self, bcx: &Builder<'a, 'tcx>, llindex: ValueRef) -> ValueRef {
        let index_size = machine::llbitsize_of_real(bcx.ccx, common::val_ty(llindex));
        let int_size = machine::llbitsize_of_real(bcx.ccx, bcx.ccx.int_type());
        if index_size < int_size {
            bcx.zext(llindex, bcx.ccx.int_type())
        } else if index_size > int_size {
            bcx.trunc(llindex, bcx.ccx.int_type())
        } else {
            llindex
        }
    }

    pub fn monomorphized_lvalue_ty(&self, lvalue: &mir::Lvalue<'tcx>) -> Ty<'tcx> {
        let tcx = self.ccx.tcx();
        let lvalue_ty = lvalue.ty(&self.mir, tcx);
        self.monomorphize(&lvalue_ty.to_ty(tcx))
    }
}