use rustc::ty::{self, Instance, Ty};
use rustc::ty::subst::Subst;
use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, VariantIdx, HasTyCtxt};
use rustc::mir;
use rustc::mir::tcx::PlaceTy;
use crate::MemFlags;
use crate::common::IntPredicate;
use crate::glue;
use crate::traits::*;

use super::{FunctionCx, LocalRef};
use super::operand::OperandValue;
#[derive(Copy, Clone, Debug)]
pub struct PlaceRef<'tcx, V> {
    /// Pointer to the contents of the place.
    pub llval: V,

    /// This place's extra data if it is unsized, or null.
    pub llextra: Option<V>,

    /// Monomorphized type of this place, including variant information.
    pub layout: TyLayout<'tcx>,

    /// What alignment we know for this place.
    pub align: Align,
}
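
// As an illustration of how the fields fit together (not tied to any one
// backend): for a sized place such as an `i32` local, `llval` points at the
// value and `llextra` is `None`; for a `str` place, `llextra` carries the
// length; for a `dyn Trait` place, it carries the vtable pointer.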

impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
    pub fn new_sized(
        llval: V,
        layout: TyLayout<'tcx>,
        align: Align,
    ) -> PlaceRef<'tcx, V> {
        assert!(!layout.is_unsized());
        PlaceRef {
            llval,
            llextra: None,
            layout,
            align
        }
    }

    fn new_thin_place<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        llval: V,
        layout: TyLayout<'tcx>,
        align: Align,
    ) -> PlaceRef<'tcx, V> {
        assert!(!bx.cx().type_has_metadata(layout.ty));
        PlaceRef {
            llval,
            llextra: None,
            layout,
            align
        }
    }

    pub fn alloca<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        layout: TyLayout<'tcx>,
        name: &str
    ) -> PlaceRef<'tcx, V> {
        debug!("alloca({:?}: {:?})", name, layout);
        assert!(!layout.is_unsized(), "tried to statically allocate unsized place");
        let tmp = bx.alloca(bx.cx().backend_type(layout), name, layout.align.abi);
        Self::new_sized(tmp, layout, layout.align.abi)
    }

    /// Returns a place for an indirect reference to an unsized place.
    pub fn alloca_unsized_indirect<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        layout: TyLayout<'tcx>,
        name: &str,
    ) -> PlaceRef<'tcx, V> {
        debug!("alloca_unsized_indirect({:?}: {:?})", name, layout);
        assert!(layout.is_unsized(), "tried to allocate indirect place for sized values");
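        // E.g., an unsized local of type `[T]` cannot live in a flat stack
        // slot, so we allocate a sized `*mut [T]` slot instead; the (fat)
        // pointer to the actual data is stored into that slot later.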
        let ptr_ty = bx.cx().tcx().mk_mut_ptr(layout.ty);
        let ptr_layout = bx.cx().layout_of(ptr_ty);
        Self::alloca(bx, ptr_layout, name)
    }

    pub fn len<Cx: ConstMethods<'tcx, Value = V>>(
        &self,
        cx: &Cx
    ) -> V {
        if let layout::FieldPlacement::Array { count, .. } = self.layout.fields {
            if self.layout.is_unsized() {
                assert_eq!(count, 0);
                self.llextra.unwrap()
            } else {
                cx.const_usize(count)
            }
        } else {
            bug!("unexpected layout `{:#?}` in PlaceRef::len", self.layout)
        }
    }
}

impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
    /// Accesses a field, at a point when the value's variant (case) is known.
    pub fn project_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self, bx: &mut Bx,
        ix: usize,
    ) -> PlaceRef<'tcx, V> {
        let field = self.layout.field(bx.cx(), ix);
        let offset = self.layout.fields.offset(ix);
        let effective_field_align = self.align.restrict_for_offset(offset);

        let mut simple = || {
            // Unions and newtypes only use an offset of 0.
            let llval = if offset.bytes() == 0 {
                self.llval
            } else if let layout::Abi::ScalarPair(ref a, ref b) = self.layout.abi {
                // The offset has to match either the first or the second field;
                // the first field is the `offset == 0` case above, so this must
                // be the second field's offset.
                assert_eq!(offset, a.value.size(bx.cx()).align_to(b.value.align(bx.cx()).abi));
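                // E.g., for a `ScalarPair` of two `i32`s, the first scalar has
                // size 4 and the second has ABI alignment 4, so the only
                // possible non-zero offset here is `4` (illustrative numbers;
                // the layout query is what actually decides this).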
                bx.struct_gep(self.llval, 1)
            } else {
                bx.struct_gep(self.llval, bx.cx().backend_field_index(self.layout, ix))
            };
            PlaceRef {
                // HACK(eddyb): have to bitcast pointers until LLVM removes pointee types.
                llval: bx.pointercast(llval, bx.cx().type_ptr_to(bx.cx().backend_type(field))),
                llextra: if bx.cx().type_has_metadata(field.ty) {
                    self.llextra
                } else {
                    None
                },
                layout: field,
                align: effective_field_align,
            }
        };

        // Simple cases, which don't need DST adjustment:
        //   * no metadata available - just log the case
        //   * known alignment - sized types, `[T]`, `str` or a foreign type
        //   * packed struct - there is no alignment padding
        match field.ty.sty {
            _ if self.llextra.is_none() => {
                debug!("unsized field `{}` of `{:?}` has no metadata for adjustment",
                       ix, self.llval);
                return simple();
            }
            _ if !field.is_unsized() => return simple(),
            ty::Slice(..) | ty::Str | ty::Foreign(..) => return simple(),
            ty::Adt(def, _) => {
                if def.repr.packed() {
                    // FIXME(eddyb) generalize the adjustment when we
                    // start supporting packing to larger alignments.
                    assert_eq!(self.layout.align.abi.bytes(), 1);
                    return simple();
                }
            }
            _ => {}
        }

        // We need to get the pointer manually now.
        // We do this by casting to a `*i8`, then offsetting it by the appropriate amount.
        // We do this instead of, say, simply adjusting the pointer from the result of a GEP
        // because the field may have an arbitrary alignment in the LLVM representation
        // anyway.
        //
        // To demonstrate:
        //
        //     struct Foo<T: ?Sized> {
        //         x: u16,
        //         y: T,
        //     }
        //
        // The type `Foo<Foo<dyn Trait>>` is represented in LLVM as `{ u16, { u16, u8 }}`,
        // meaning that the `y` field has 16-bit alignment.

        let meta = self.llextra;

        let unaligned_offset = bx.cx().const_usize(offset.bytes());

        // Get the alignment of the field.
        let (_, unsized_align) = glue::size_and_align_of_dst(bx, field.ty, meta);

        // Bump the unaligned offset up to the appropriate alignment using the
        // following expression:
        //
        //     (unaligned offset + (align - 1)) & -align
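        //
        // For example, rounding offset 13 up to alignment 8:
        //
        //     (13 + (8 - 1)) & -8  ==  20 & !7  ==  16
        //
        // i.e., `-align` acts as a mask that clears the low `log2(align)` bits.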

        // Calculate the offset.
        let align_sub_1 = bx.sub(unsized_align, bx.cx().const_usize(1u64));
        let and_lhs = bx.add(unaligned_offset, align_sub_1);
        let and_rhs = bx.neg(unsized_align);
        let offset = bx.and(and_lhs, and_rhs);

        debug!("struct_field_ptr: DST field offset: {:?}", offset);

        // Cast and adjust the pointer.
        let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p());
        let byte_ptr = bx.gep(byte_ptr, &[offset]);

        // Finally, cast back to the type expected.
        let ll_fty = bx.cx().backend_type(field);
        debug!("struct_field_ptr: Field type is {:?}", ll_fty);

        PlaceRef {
            llval: bx.pointercast(byte_ptr, bx.cx().type_ptr_to(ll_fty)),
            llextra: self.llextra,
            layout: field,
            align: effective_field_align,
        }
    }

    /// Obtains the actual discriminant of a value.
    pub fn codegen_get_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        bx: &mut Bx,
        cast_to: Ty<'tcx>
    ) -> V {
        let cast_to = bx.cx().immediate_backend_type(bx.cx().layout_of(cast_to));
        if self.layout.abi.is_uninhabited() {
            return bx.cx().const_undef(cast_to);
        }
        let (discr_scalar, discr_kind, discr_index) = match self.layout.variants {
            layout::Variants::Single { index } => {
                let discr_val = self.layout.ty.discriminant_for_variant(bx.cx().tcx(), index)
                    .map_or(index.as_u32() as u128, |discr| discr.val);
                return bx.cx().const_uint_big(cast_to, discr_val);
            }
            layout::Variants::Multiple { ref discr, ref discr_kind, discr_index, .. } => {
                (discr, discr_kind, discr_index)
            }
        };

        // Read the tag/niche-encoded discriminant from memory.
        let encoded_discr = self.project_field(bx, discr_index);
        let encoded_discr = bx.load_operand(encoded_discr);

        // Decode the discriminant (specifically if it's niche-encoded).
        match *discr_kind {
            layout::DiscriminantKind::Tag => {
                let signed = match discr_scalar.value {
                    // We use `i1` for bytes that are always `0` or `1`,
                    // e.g., `#[repr(i8)] enum E { A, B }`, but we can't
                    // let LLVM interpret the `i1` as signed, because
                    // then `i1 1` (i.e., `E::B`) is effectively `i8 -1`.
                    layout::Int(_, signed) => !discr_scalar.is_bool() && signed,
                    _ => false,
                };
                bx.intcast(encoded_discr.immediate(), cast_to, signed)
            }
            layout::DiscriminantKind::Niche {
                dataful_variant,
                ref niche_variants,
                niche_start,
            } => {
                // Rebase from niche values to discriminants, and check
                // whether the result is in range for the niche variants.
                let niche_llty = bx.cx().immediate_backend_type(encoded_discr.layout);
                let encoded_discr = encoded_discr.immediate();

                // We first compute the "relative discriminant" (wrt `niche_variants`),
                // that is, if `n = niche_variants.end() - niche_variants.start()`,
                // we remap `niche_start..=niche_start + n` (which may wrap around)
                // to (non-wrap-around) `0..=n`, to be able to check whether the
                // discriminant corresponds to a niche variant with one comparison.
                // We also can't go directly to the (variant index) discriminant
                // and check that it is in the range `niche_variants`, because
                // that might not fit in the same type, on top of needing an extra
                // comparison (see also the comment on `let niche_discr`).
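                //
                // As an illustrative example, consider `enum E { A(bool), B, C }`:
                // it fits in one byte, `A` is the dataful variant, and `B`/`C`
                // are niche-encoded as the invalid `bool` byte values `2` and
                // `3` (so `niche_start = 2`). Decoding a loaded byte `d` then
                // computes `relative_discr = d - 2` and checks
                // `relative_discr <= 1` to tell the niche variants apart from
                // `A`. (Illustrative values; layout decisions live in rustc's
                // layout code.)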
                let relative_discr = if niche_start == 0 {
                    // Avoid subtracting `0`, which wouldn't work for pointers.
                    // FIXME(eddyb) check the actual primitive type here.
                    encoded_discr
                } else {
                    bx.sub(encoded_discr, bx.cx().const_uint_big(niche_llty, niche_start))
                };
                let relative_max = niche_variants.end().as_u32() - niche_variants.start().as_u32();
                let is_niche = {
                    let relative_max = if relative_max == 0 {
                        // Avoid calling `const_uint`, which wouldn't work for pointers.
                        // FIXME(eddyb) check the actual primitive type here.
                        bx.cx().const_null(niche_llty)
                    } else {
                        bx.cx().const_uint(niche_llty, relative_max as u64)
                    };
                    bx.icmp(IntPredicate::IntULE, relative_discr, relative_max)
                };

                // NOTE(eddyb) this addition needs to be performed on the final
                // type, in case the niche itself can't represent all variant
                // indices (e.g. `u8` niche with more than `256` variants,
                // but enough uninhabited variants so that the remaining variants
                // fit in the niche).
                // In other words, `niche_variants.end - niche_variants.start`
                // is representable in the niche, but `niche_variants.end`
                // might not be, in extreme cases.
                let niche_discr = {
                    let relative_discr = if relative_max == 0 {
                        // HACK(eddyb) since we have only one niche, we know which
                        // one it is, and we can avoid having a dynamic value here.
                        bx.cx().const_uint(cast_to, 0)
                    } else {
                        bx.intcast(relative_discr, cast_to, false)
                    };
                    bx.add(
                        relative_discr,
                        bx.cx().const_uint(cast_to, niche_variants.start().as_u32() as u64),
                    )
                };

                bx.select(
                    is_niche,
                    niche_discr,
                    bx.cx().const_uint(cast_to, dataful_variant.as_u32() as u64),
                )
            }
        }
    }

    /// Sets the discriminant for a new value of the given case of the given
    /// representation.
    pub fn codegen_set_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &mut Bx,
        variant_index: VariantIdx
    ) {
        if self.layout.for_variant(bx.cx(), variant_index).abi.is_uninhabited() {
            return;
        }
        match self.layout.variants {
            layout::Variants::Single { index } => {
                assert_eq!(index, variant_index);
            }
            layout::Variants::Multiple {
                discr_kind: layout::DiscriminantKind::Tag,
                discr_index,
                ..
            } => {
                let ptr = self.project_field(bx, discr_index);
                let to =
                    self.layout.ty.discriminant_for_variant(bx.tcx(), variant_index).unwrap().val;
                bx.store(
                    bx.cx().const_uint_big(bx.cx().backend_type(ptr.layout), to),
                    ptr.llval,
                    ptr.align);
            }
            layout::Variants::Multiple {
                discr_kind: layout::DiscriminantKind::Niche {
                    dataful_variant,
                    ref niche_variants,
                    niche_start,
                },
                discr_index,
                ..
            } => {
                if variant_index != dataful_variant {
                    if bx.cx().sess().target.target.arch == "arm" ||
                       bx.cx().sess().target.target.arch == "aarch64" {
                        // FIXME(#34427): as a workaround for an LLVM bug on ARM,
                        // use a memset of 0 before assigning the niche value.
                        let fill_byte = bx.cx().const_u8(0);
                        let size = bx.cx().const_usize(self.layout.size.bytes());
                        bx.memset(self.llval, fill_byte, size, self.align, MemFlags::empty());
                    }

                    let niche = self.project_field(bx, discr_index);
                    let niche_llty = bx.cx().immediate_backend_type(niche.layout);
                    let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
                    let niche_value = (niche_value as u128).wrapping_add(niche_start);
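                    // E.g., for the illustrative `enum E { A(bool), B, C }`
                    // above (`niche_start = 2`, `niche_variants = 1..=2`),
                    // setting the discriminant to `C` (variant index 2)
                    // stores `(2 - 1) + 2 == 3` into the niche byte.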
                    // FIXME(eddyb): check the actual primitive type here.
                    let niche_llval = if niche_value == 0 {
                        // HACK(eddyb): using `c_null` as it works on all types.
                        bx.cx().const_null(niche_llty)
                    } else {
                        bx.cx().const_uint_big(niche_llty, niche_value)
                    };
                    OperandValue::Immediate(niche_llval).store(bx, niche);
                }
            }
        }
    }

    pub fn project_index<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &mut Bx,
        llindex: V
    ) -> PlaceRef<'tcx, V> {
        // Statically compute the offset if we can, otherwise just use the element size,
        // as this will yield the lowest alignment.
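        //
        // E.g. (illustrative numbers), indexing an 8-aligned `[u16]` place:
        // a constant index `3` gives offset 6, so the element is only known
        // to be 2-aligned; a dynamic index conservatively uses the element
        // size, which also restricts the alignment to 2.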
        let layout = self.layout.field(bx, 0);
        let offset = if bx.is_const_integral(llindex) {
            layout.size.checked_mul(bx.const_to_uint(llindex), bx).unwrap_or(layout.size)
        } else {
            layout.size
        };

        PlaceRef {
            llval: bx.inbounds_gep(self.llval, &[bx.cx().const_usize(0), llindex]),
            llextra: None,
            layout,
            align: self.align.restrict_for_offset(offset),
        }
    }

    pub fn project_downcast<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &mut Bx,
        variant_index: VariantIdx
    ) -> PlaceRef<'tcx, V> {
        let mut downcast = *self;
        downcast.layout = self.layout.for_variant(bx.cx(), variant_index);

        // Cast to the appropriate variant struct type.
        let variant_ty = bx.cx().backend_type(downcast.layout);
        downcast.llval = bx.pointercast(downcast.llval, bx.cx().type_ptr_to(variant_ty));

        downcast
    }

    pub fn storage_live<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
        bx.lifetime_start(self.llval, self.layout.size);
    }

    pub fn storage_dead<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
        bx.lifetime_end(self.llval, self.layout.size);
    }
}

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
    pub fn codegen_place(
        &mut self,
        bx: &mut Bx,
        place_ref: &mir::PlaceRef<'_, 'tcx>
    ) -> PlaceRef<'tcx, Bx::Value> {
        debug!("codegen_place(place_ref={:?})", place_ref);

        let cx = self.cx;
        let tcx = self.cx.tcx();

        let result = match &place_ref {
            mir::PlaceRef {
                base: mir::PlaceBase::Local(index),
                projection: None,
            } => {
                match self.locals[*index] {
                    LocalRef::Place(place) => {
                        return place;
                    }
                    LocalRef::UnsizedPlace(place) => {
                        return bx.load_operand(place).deref(cx);
                    }
                    LocalRef::Operand(..) => {
                        bug!("using operand local {:?} as place", place_ref);
                    }
                }
            }
            mir::PlaceRef {
                base: mir::PlaceBase::Static(box mir::Static {
                    ty,
                    kind: mir::StaticKind::Promoted(promoted, substs),
                    def_id,
                }),
                projection: None,
            } => {
                debug!("promoted={:?}, def_id={:?}, substs={:?}, self_substs={:?}",
                       promoted, def_id, substs, self.instance.substs);
                let param_env = ty::ParamEnv::reveal_all();
                let instance = Instance::new(*def_id, substs.subst(bx.tcx(), self.instance.substs));
                debug!("instance: {:?}", instance);
                let cid = mir::interpret::GlobalId {
                    instance,
                    promoted: Some(*promoted),
                };
                let mono_ty = tcx.subst_and_normalize_erasing_regions(
                    instance.substs,
                    param_env,
                    ty,
                );
                let layout = cx.layout_of(mono_ty);
                match bx.tcx().const_eval(param_env.and(cid)) {
                    Ok(val) => match val.val {
                        mir::interpret::ConstValue::ByRef { alloc, offset } => {
                            bx.cx().from_const_alloc(layout, alloc, offset)
                        }
                        _ => bug!("promoteds should have an allocation: {:?}", val),
                    },
                    Err(_) => {
                        // This is unreachable as long as runtime
                        // and compile-time agree on values.
                        // With floats that won't always be true,
                        // so we generate an abort.
                        bx.abort();
                        let llval = bx.cx().const_undef(
                            bx.cx().type_ptr_to(bx.cx().backend_type(layout))
                        );
                        PlaceRef::new_sized(llval, layout, layout.align.abi)
                    }
                }
            }
            mir::PlaceRef {
                base: mir::PlaceBase::Static(box mir::Static {
                    ty,
                    kind: mir::StaticKind::Static,
                    def_id,
                }),
                projection: None,
            } => {
                // NB: The layout of a static may be unsized, as is the case
                // when working with a static that is an `extern type`.
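                //
                // For example (requires the unstable `extern_types` feature):
                //
                //     extern "C" {
                //         type Opaque;
                //         static DATA: Opaque;
                //     }
                //
                // `DATA` has no statically known size or alignment.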
                let layout = cx.layout_of(self.monomorphize(&ty));
                let static_ = bx.get_static(*def_id);
                PlaceRef::new_thin_place(bx, static_, layout, layout.align.abi)
            }
            mir::PlaceRef {
                base,
                projection: Some(box mir::Projection {
                    base: proj_base,
                    elem: mir::ProjectionElem::Deref,
                }),
            } => {
                // Load the pointer from its location.
                self.codegen_consume(bx, &mir::PlaceRef {
                    base,
                    projection: proj_base,
                }).deref(bx.cx())
            }
            mir::PlaceRef {
                base,
                projection: Some(projection),
            } => {
                // FIXME: turn this recursion into iteration.
                let cg_base = self.codegen_place(bx, &mir::PlaceRef {
                    base,
                    projection: &projection.base,
                });

                match projection.elem {
                    mir::ProjectionElem::Deref => bug!(),
                    mir::ProjectionElem::Field(ref field, _) => {
                        cg_base.project_field(bx, field.index())
                    }
                    mir::ProjectionElem::Index(index) => {
                        let index = &mir::Operand::Copy(
                            mir::Place::from(index)
                        );
                        let index = self.codegen_operand(bx, index);
                        let llindex = index.immediate();
                        cg_base.project_index(bx, llindex)
                    }
                    mir::ProjectionElem::ConstantIndex { offset,
                                                         from_end: false,
                                                         min_length: _ } => {
                        let lloffset = bx.cx().const_usize(offset as u64);
                        cg_base.project_index(bx, lloffset)
                    }
                    mir::ProjectionElem::ConstantIndex { offset,
                                                         from_end: true,
                                                         min_length: _ } => {
                        let lloffset = bx.cx().const_usize(offset as u64);
                        let lllen = cg_base.len(bx.cx());
                        let llindex = bx.sub(lllen, lloffset);
                        cg_base.project_index(bx, llindex)
                    }
                    mir::ProjectionElem::Subslice { from, to } => {
                        let mut subslice = cg_base.project_index(bx,
                            bx.cx().const_usize(from as u64));
                        let projected_ty = PlaceTy::from_ty(cg_base.layout.ty)
                            .projection_ty(tcx, &projection.elem).ty;
                        subslice.layout = bx.cx().layout_of(self.monomorphize(&projected_ty));

                        if subslice.layout.is_unsized() {
                            subslice.llextra = Some(bx.sub(cg_base.llextra.unwrap(),
                                bx.cx().const_usize((from as u64) + (to as u64))));
                        }

                        // Cast the place pointer type to the new
                        // array or slice type (`*[%_; new_len]`).
                        subslice.llval = bx.pointercast(subslice.llval,
                            bx.cx().type_ptr_to(bx.cx().backend_type(subslice.layout)));

                        subslice
                    }
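                    // E.g., for a slice pattern `[first, rest @ .., last]`
                    // matched against a `[T]` place of dynamic length `n`,
                    // `rest` is `Subslice { from: 1, to: 1 }`: its pointer is
                    // offset by one element and its new length is `n - (1 + 1)`.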
                    mir::ProjectionElem::Downcast(_, v) => {
                        cg_base.project_downcast(bx, v)
                    }
                }
            }
        };
        debug!("codegen_place(place={:?}) => {:?}", place_ref, result);
        result
    }

    pub fn monomorphized_place_ty(&self, place_ref: &mir::PlaceRef<'_, 'tcx>) -> Ty<'tcx> {
        let tcx = self.cx.tcx();
        let place_ty = mir::Place::ty_from(place_ref.base, place_ref.projection, self.mir, tcx);
        self.monomorphize(&place_ty.ty)
    }
}