1 use super::{FunctionCx, LocalRef};
2 use super::operand::OperandValue;
5 use crate::common::IntPredicate;
9 use rustc::ty::{self, Instance, Ty};
10 use rustc::ty::layout::{self, Align, TyLayout, LayoutOf, VariantIdx, HasTyCtxt};
12 use rustc::mir::tcx::PlaceTy;
// The codegen-level representation of an MIR place: a backend pointer value
// plus the layout/alignment information needed to load from or store to it.
// NOTE(review): this span is an elided extract — the `llval` pointer field
// and the `align` field (both used by the impls below) fall on lines not
// shown here; only `llextra` and `layout` declarations are visible.
14 #[derive(Copy, Clone, Debug)]
15 pub struct PlaceRef<'tcx, V> {
16 /// A pointer to the contents of the place.
19 /// This place's extra data if it is unsized, or `None` if null.
20 pub llextra: Option<V>,
22 /// The monomorphized type of this place, including variant information.
23 pub layout: TyLayout<'tcx>,
25 /// The alignment we know for this place.
29 impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
// Constructor for a sized place; the `fn new_sized(llval: V, ...)` signature
// line itself is elided here. Sized-ness is asserted because a sized place
// needs no `llextra` metadata.
32 layout: TyLayout<'tcx>,
33 ) -> PlaceRef<'tcx, V> {
34 assert!(!layout.is_unsized());
// Uses the layout's own ABI alignment for the place.
39 align: layout.align.abi
// Like `new_sized`, but the caller supplies the alignment explicitly
// (the `align` parameter line is elided from this extract — confirm
// against the full file).
43 pub fn new_sized_aligned(
45 layout: TyLayout<'tcx>,
47 ) -> PlaceRef<'tcx, V> {
48 assert!(!layout.is_unsized());
// Builds a place from a *thin* pointer: the assert guarantees the type
// carries no pointer metadata (not a slice/trait-object/extern-type view),
// so no `llextra` is needed. Used below for statics, whose pointer is
// always thin even when the pointee layout is unsized (extern types).
57 fn new_thin_place<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
60 layout: TyLayout<'tcx>,
61 ) -> PlaceRef<'tcx, V> {
62 assert!(!bx.cx().type_has_metadata(layout.ty));
67 align: layout.align.abi
// Stack-allocates a temporary for a *sized* value and wraps it as a place.
71 // FIXME(eddyb) pass something else for the name so no work is done
72 // unless LLVM IR names are turned on (e.g. for `--emit=llvm-ir`).
73 pub fn alloca<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
75 layout: TyLayout<'tcx>,
// Unsized values cannot live in a static stack slot; callers that need
// one go through `alloca_unsized_indirect` below instead.
77 assert!(!layout.is_unsized(), "tried to statically allocate unsized place");
// Allocate with the backend's representation of the type, at the
// layout's ABI alignment.
78 let tmp = bx.alloca(bx.cx().backend_type(layout), layout.align.abi);
79 Self::new_sized(tmp, layout)
82 /// Returns a place for an indirect reference to an unsized place.
83 // FIXME(eddyb) pass something else for the name so no work is done
84 // unless LLVM IR names are turned on (e.g. for `--emit=llvm-ir`).
85 pub fn alloca_unsized_indirect<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
87 layout: TyLayout<'tcx>,
89 assert!(layout.is_unsized(), "tried to allocate indirect place for sized values");
// An unsized value can't be stack-allocated directly, so allocate a
// slot holding a (fat) `*mut T` to it instead: build the pointer type's
// layout and stack-allocate *that* — the pointer itself is sized.
90 let ptr_ty = bx.cx().tcx().mk_mut_ptr(layout.ty);
91 let ptr_layout = bx.cx().layout_of(ptr_ty);
92 Self::alloca(bx, ptr_layout)
// Returns the element count of an array/slice place as a backend value.
95 pub fn len<Cx: ConstMethods<'tcx, Value = V>>(
99 if let layout::FieldPlacement::Array { count, .. } = self.layout.fields {
100 if self.layout.is_unsized() {
// Unsized ([T]) case: the static layout records a count of 0 and
// the real length lives in the place's metadata (`llextra`).
101 assert_eq!(count, 0);
102 self.llextra.unwrap()
// Sized array: the length is a compile-time constant.
104 cx.const_usize(count)
// Anything without array field placement has no notion of `len`.
107 bug!("unexpected layout `{:#?}` in PlaceRef::len", self.layout)
112 impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
113 /// Access a field, at a point when the value's case is known.
114 pub fn project_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
118 let field = self.layout.field(bx.cx(), ix);
119 let offset = self.layout.fields.offset(ix);
// A field at offset `o` can only rely on the alignment the parent
// pointer guarantees at that offset.
120 let effective_field_align = self.align.restrict_for_offset(offset);
// Fast path: a plain GEP works whenever no dynamic (DST) offset
// adjustment is needed.
122 let mut simple = || {
123 // Unions and newtypes only use an offset of 0.
124 let llval = if offset.bytes() == 0 {
126 } else if let layout::Abi::ScalarPair(ref a, ref b) = self.layout.abi {
127 // Offsets have to match either first or second field.
128 assert_eq!(offset, a.value.size(bx.cx()).align_to(b.value.align(bx.cx()).abi));
// ScalarPair is a two-element backend aggregate; a nonzero
// offset can only be the second component.
129 bx.struct_gep(self.llval, 1)
131 bx.struct_gep(self.llval, bx.cx().backend_field_index(self.layout, ix))
134 // HACK(eddyb): have to bitcast pointers until LLVM removes pointee types.
135 llval: bx.pointercast(llval, bx.cx().type_ptr_to(bx.cx().backend_type(field))),
// Propagate metadata only if the *field* itself is a fat place.
136 llextra: if bx.cx().type_has_metadata(field.ty) {
142 align: effective_field_align,
146 // Simple cases, which don't need DST adjustment:
147 // * no metadata available - just log the case
148 // * known alignment - sized types, `[T]`, `str` or a foreign type
149 // * packed struct - there is no alignment padding
150 match field.ty.kind {
151 _ if self.llextra.is_none() => {
152 debug!("unsized field `{}`, of `{:?}` has no metadata for adjustment",
156 _ if !field.is_unsized() => return simple(),
157 ty::Slice(..) | ty::Str | ty::Foreign(..) => return simple(),
159 if def.repr.packed() {
160 // FIXME(eddyb) generalize the adjustment when we
161 // start supporting packing to larger alignments.
162 assert_eq!(self.layout.align.abi.bytes(), 1);
// Slow path: an unsized field of a non-packed struct — its offset
// depends on the dynamic alignment of the unsized tail.
169 // We need to get the pointer manually now.
170 // We do this by casting to a `*i8`, then offsetting it by the appropriate amount.
171 // We do this instead of, say, simply adjusting the pointer from the result of a GEP
172 // because the field may have an arbitrary alignment in the LLVM representation
177 // struct Foo<T: ?Sized> {
182 // The type `Foo<Foo<Trait>>` is represented in LLVM as `{ u16, { u16, u8 }}`, meaning that
183 // the `y` field has 16-bit alignment.
185 let meta = self.llextra;
187 let unaligned_offset = bx.cx().const_usize(offset.bytes());
189 // Get the alignment of the field
190 let (_, unsized_align) = glue::size_and_align_of_dst(bx, field.ty, meta);
192 // Bump the unaligned offset up to the appropriate alignment using the
193 // following expression:
195 // (unaligned offset + (align - 1)) & -align
198 let align_sub_1 = bx.sub(unsized_align, bx.cx().const_usize(1u64));
199 let and_lhs = bx.add(unaligned_offset, align_sub_1);
200 let and_rhs = bx.neg(unsized_align);
// Round the offset up to the field's dynamic alignment.
201 let offset = bx.and(and_lhs, and_rhs);
203 debug!("struct_field_ptr: DST field offset: {:?}", offset);
205 // Cast and adjust pointer.
206 let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p());
207 let byte_ptr = bx.gep(byte_ptr, &[offset]);
209 // Finally, cast back to the type expected.
210 let ll_fty = bx.cx().backend_type(field);
211 debug!("struct_field_ptr: Field type is {:?}", ll_fty);
214 llval: bx.pointercast(byte_ptr, bx.cx().type_ptr_to(ll_fty)),
// The unsized field inherits the parent's metadata.
215 llextra: self.llextra,
217 align: effective_field_align,
221 /// Obtain the actual discriminant of a value.
222 pub fn codegen_get_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
// The result is produced at the caller-requested `cast_to` type.
227 let cast_to = bx.cx().immediate_backend_type(bx.cx().layout_of(cast_to));
// Uninhabited value: any answer is fine, it can never be observed.
228 if self.layout.abi.is_uninhabited() {
229 return bx.cx().const_undef(cast_to);
231 let (discr_scalar, discr_kind, discr_index) = match self.layout.variants {
// Single-variant layout: the discriminant is a compile-time constant
// (the declared discriminant if any, else the variant index).
232 layout::Variants::Single { index } => {
233 let discr_val = self.layout.ty.discriminant_for_variant(bx.cx().tcx(), index)
234 .map_or(index.as_u32() as u128, |discr| discr.val);
235 return bx.cx().const_uint_big(cast_to, discr_val);
237 layout::Variants::Multiple { ref discr, ref discr_kind, discr_index, .. } => {
238 (discr, discr_kind, discr_index)
242 // Read the tag/niche-encoded discriminant from memory.
243 let encoded_discr = self.project_field(bx, discr_index);
244 let encoded_discr = bx.load_operand(encoded_discr);
246 // Decode the discriminant (specifically if it's niche-encoded).
// Direct tag: just an integer cast to the requested type, with
// careful handling of signedness.
248 layout::DiscriminantKind::Tag => {
249 let signed = match discr_scalar.value {
250 // We use `i1` for bytes that are always `0` or `1`,
251 // e.g., `#[repr(i8)] enum E { A, B }`, but we can't
252 // let LLVM interpret the `i1` as signed, because
253 // then `i1 1` (i.e., `E::B`) is effectively `i8 -1`.
254 layout::Int(_, signed) => !discr_scalar.is_bool() && signed,
257 bx.intcast(encoded_discr.immediate(), cast_to, signed)
259 layout::DiscriminantKind::Niche {
264 // Rebase from niche values to discriminants, and check
265 // whether the result is in range for the niche variants.
266 let niche_llty = bx.cx().immediate_backend_type(encoded_discr.layout);
267 let encoded_discr = encoded_discr.immediate();
269 // We first compute the "relative discriminant" (wrt `niche_variants`),
270 // that is, if `n = niche_variants.end() - niche_variants.start()`,
271 // we remap `niche_start..=niche_start + n` (which may wrap around)
272 // to (non-wrap-around) `0..=n`, to be able to check whether the
273 // discriminant corresponds to a niche variant with one comparison.
274 // We also can't go directly to the (variant index) discriminant
275 // and check that it is in the range `niche_variants`, because
276 // that might not fit in the same type, on top of needing an extra
277 // comparison (see also the comment on `let niche_discr`).
278 let relative_discr = if niche_start == 0 {
279 // Avoid subtracting `0`, which wouldn't work for pointers.
280 // FIXME(eddyb) check the actual primitive type here.
283 bx.sub(encoded_discr, bx.cx().const_uint_big(niche_llty, niche_start))
285 let relative_max = niche_variants.end().as_u32() - niche_variants.start().as_u32();
287 let relative_max = if relative_max == 0 {
288 // Avoid calling `const_uint`, which wouldn't work for pointers.
289 // FIXME(eddyb) check the actual primitive type here.
290 bx.cx().const_null(niche_llty)
292 bx.cx().const_uint(niche_llty, relative_max as u64)
// `relative_discr <= relative_max` iff the value encodes a niche
// variant (as opposed to the dataful variant).
294 bx.icmp(IntPredicate::IntULE, relative_discr, relative_max)
297 // NOTE(eddyb) this addition needs to be performed on the final
298 // type, in case the niche itself can't represent all variant
299 // indices (e.g. `u8` niche with more than `256` variants,
300 // but enough uninhabited variants so that the remaining variants
301 // fit in the niche).
302 // In other words, `niche_variants.end - niche_variants.start`
303 // is representable in the niche, but `niche_variants.end`
304 // might not be, in extreme cases.
306 let relative_discr = if relative_max == 0 {
307 // HACK(eddyb) since we have only one niche, we know which
308 // one it is, and we can avoid having a dynamic value here.
309 bx.cx().const_uint(cast_to, 0)
311 bx.intcast(relative_discr, cast_to, false)
// Selected (on elided lines, presumably via a `select` between the
// niche-derived discriminant and the dataful variant's index —
// confirm against the full file).
315 bx.cx().const_uint(cast_to, niche_variants.start().as_u32() as u64),
322 bx.cx().const_uint(cast_to, dataful_variant.as_u32() as u64),
328 /// Sets the discriminant for a new value of the given case of the given
330 pub fn codegen_set_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
333 variant_index: VariantIdx
// Setting the discriminant of an uninhabited variant is unreachable;
// the body emitted here (on elided lines) plays it safe rather than
// treating it as immediate UB.
335 if self.layout.for_variant(bx.cx(), variant_index).abi.is_uninhabited() {
336 // We play it safe by using a well-defined `abort`, but we could go for immediate UB
337 // if that turns out to be helpful.
341 match self.layout.variants {
// Single variant: nothing is stored; just check it's the right one.
342 layout::Variants::Single { index } => {
343 assert_eq!(index, variant_index);
345 layout::Variants::Multiple {
346 discr_kind: layout::DiscriminantKind::Tag,
// Direct tag: store the variant's declared discriminant value into
// the tag field.
350 let ptr = self.project_field(bx, discr_index);
352 self.layout.ty.discriminant_for_variant(bx.tcx(), variant_index).unwrap().val;
354 bx.cx().const_uint_big(bx.cx().backend_type(ptr.layout), to),
358 layout::Variants::Multiple {
359 discr_kind: layout::DiscriminantKind::Niche {
// Niche encoding: only non-dataful variants need an explicit write;
// the dataful variant is implied by any non-niche bit pattern.
367 if variant_index != dataful_variant {
368 if bx.cx().sess().target.target.arch == "arm" ||
369 bx.cx().sess().target.target.arch == "aarch64" {
370 // FIXME(#34427): as workaround for LLVM bug on ARM,
371 // use memset of 0 before assigning niche value.
372 let fill_byte = bx.cx().const_u8(0);
373 let size = bx.cx().const_usize(self.layout.size.bytes());
374 bx.memset(self.llval, fill_byte, size, self.align, MemFlags::empty());
377 let niche = self.project_field(bx, discr_index);
378 let niche_llty = bx.cx().immediate_backend_type(niche.layout);
// Encode: niche value = (variant_index - niche_variants.start)
// + niche_start, with wrapping to mirror the decode path.
379 let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
380 let niche_value = (niche_value as u128)
381 .wrapping_add(niche_start);
382 // FIXME(eddyb): check the actual primitive type here.
383 let niche_llval = if niche_value == 0 {
384 // HACK(eddyb): using `c_null` as it works on all types.
385 bx.cx().const_null(niche_llty)
387 bx.cx().const_uint_big(niche_llty, niche_value)
389 OperandValue::Immediate(niche_llval).store(bx, niche);
// Projects to element `llindex` of an array/slice place.
395 pub fn project_index<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
400 // Statically compute the offset if we can, otherwise just use the element size,
401 // as this will yield the lowest alignment.
402 let layout = self.layout.field(bx, 0);
403 let offset = if let Some(llindex) = bx.const_to_opt_uint(llindex) {
// Overflow here means the index is out of bounds anyway; fall back
// to the single-element size for alignment purposes.
404 layout.size.checked_mul(llindex, bx).unwrap_or(layout.size)
// `gep [0, llindex]` steps into the array and selects the element.
410 llval: bx.inbounds_gep(self.llval, &[bx.cx().const_usize(0), llindex]),
// The element only gets the alignment the parent guarantees at `offset`.
413 align: self.align.restrict_for_offset(offset),
// Narrows this place to a specific enum variant: same pointer and
// alignment, but with the variant's layout and backend pointer type.
417 pub fn project_downcast<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
420 variant_index: VariantIdx
// `PlaceRef` is `Copy`, so this is a cheap by-value copy to mutate.
422 let mut downcast = *self;
423 downcast.layout = self.layout.for_variant(bx.cx(), variant_index);
425 // Cast to the appropriate variant struct type.
426 let variant_ty = bx.cx().backend_type(downcast.layout);
427 downcast.llval = bx.pointercast(downcast.llval, bx.cx().type_ptr_to(variant_ty));
// Marks the start of this place's storage lifetime (backend lifetime
// intrinsic) for the full size of its layout.
432 pub fn storage_live<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
433 bx.lifetime_start(self.llval, self.layout.size);
// Marks the end of this place's storage lifetime.
436 pub fn storage_dead<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
437 bx.lifetime_end(self.llval, self.layout.size);
441 impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
// Lowers an MIR place reference (base + projection slice) to a codegen
// `PlaceRef`, peeling projections off the end recursively.
442 pub fn codegen_place(
445 place_ref: &mir::PlaceRef<'_, 'tcx>
446 ) -> PlaceRef<'tcx, Bx::Value> {
447 debug!("codegen_place(place_ref={:?})", place_ref);
449 let tcx = self.cx.tcx();
451 let result = match &place_ref {
// Bare local with no projection: resolve from the per-local table.
453 base: mir::PlaceBase::Local(index),
456 match self.locals[*index] {
457 LocalRef::Place(place) => {
// Unsized local: the slot holds a fat pointer; load and deref it.
460 LocalRef::UnsizedPlace(place) => {
461 return bx.load_operand(place).deref(cx);
// An SSA operand local has no memory location to take a place of.
463 LocalRef::Operand(..) => {
464 bug!("using operand local {:?} as place", place_ref);
// Promoted static: const-eval the promoted and materialize its
// allocation as a place.
469 base: mir::PlaceBase::Static(box mir::Static {
471 kind: mir::StaticKind::Promoted(promoted, substs),
476 let param_env = ty::ParamEnv::reveal_all();
477 let instance = Instance::new(*def_id, self.monomorphize(substs));
478 let cid = mir::interpret::GlobalId {
480 promoted: Some(*promoted),
482 let layout = cx.layout_of(self.monomorphize(&ty));
483 match bx.tcx().const_eval(param_env.and(cid)) {
484 Ok(val) => match val.val {
485 ty::ConstKind::Value(mir::interpret::ConstValue::ByRef {
488 bx.cx().from_const_alloc(layout, alloc, offset)
490 _ => bug!("promoteds should have an allocation: {:?}", val),
493 // This is unreachable as long as runtime
494 // and compile-time agree perfectly.
495 // With floats that won't always be true,
496 // so we generate a (safe) abort.
498 // We still have to return a place but it doesn't matter,
499 // this code is unreachable.
500 let llval = bx.cx().const_undef(
501 bx.cx().type_ptr_to(bx.cx().backend_type(layout))
503 PlaceRef::new_sized(llval, layout)
508 base: mir::PlaceBase::Static(box mir::Static {
510 kind: mir::StaticKind::Static,
515 // NB: The layout of a static may be unsized as is the case when working
516 // with a static that is an extern_type.
517 let layout = cx.layout_of(self.monomorphize(&ty));
518 let static_ = bx.get_static(*def_id);
519 PlaceRef::new_thin_place(bx, static_, layout)
// Trailing `Deref`: codegen the rest as an operand (the pointer)
// and dereference it.
523 projection: [proj_base @ .., mir::ProjectionElem::Deref],
525 // Load the pointer from its location.
526 self.codegen_consume(bx, &mir::PlaceRef {
528 projection: proj_base,
// Any other trailing projection: recurse on the base place, then
// apply the final element to it.
533 projection: [proj_base @ .., elem],
535 // FIXME turn this recursion into iteration
536 let cg_base = self.codegen_place(bx, &mir::PlaceRef {
538 projection: proj_base,
// `Deref` is always handled by the dedicated arm above.
542 mir::ProjectionElem::Deref => bug!(),
543 mir::ProjectionElem::Field(ref field, _) => {
544 cg_base.project_field(bx, field.index())
546 mir::ProjectionElem::Index(index) => {
// The index is itself a local; codegen it as an operand.
547 let index = &mir::Operand::Copy(
548 mir::Place::from(*index)
550 let index = self.codegen_operand(bx, index);
551 let llindex = index.immediate();
552 cg_base.project_index(bx, llindex)
// Constant index measured from the front of the array/slice.
554 mir::ProjectionElem::ConstantIndex { offset,
557 let lloffset = bx.cx().const_usize(*offset as u64);
558 cg_base.project_index(bx, lloffset)
// Constant index measured from the end: index = len - offset.
560 mir::ProjectionElem::ConstantIndex { offset,
563 let lloffset = bx.cx().const_usize(*offset as u64);
564 let lllen = cg_base.len(bx.cx());
565 let llindex = bx.sub(lllen, lloffset);
566 cg_base.project_index(bx, llindex)
568 mir::ProjectionElem::Subslice { from, to } => {
// Start the subslice at `from`, then fix up layout/metadata.
569 let mut subslice = cg_base.project_index(bx,
570 bx.cx().const_usize(*from as u64));
571 let projected_ty = PlaceTy::from_ty(cg_base.layout.ty)
572 .projection_ty(tcx, elem).ty;
573 subslice.layout = bx.cx().layout_of(self.monomorphize(&projected_ty));
575 if subslice.layout.is_unsized() {
// New length = old length - from - to (elements dropped
// from both ends).
576 subslice.llextra = Some(bx.sub(cg_base.llextra.unwrap(),
577 bx.cx().const_usize((*from as u64) + (*to as u64))));
580 // Cast the place pointer type to the new
581 // array or slice type (`*[%_; new_len]`).
582 subslice.llval = bx.pointercast(subslice.llval,
583 bx.cx().type_ptr_to(bx.cx().backend_type(subslice.layout)));
587 mir::ProjectionElem::Downcast(_, v) => {
588 cg_base.project_downcast(bx, *v)
593 debug!("codegen_place(place={:?}) => {:?}", place_ref, result);
// Computes the fully-monomorphized type of an MIR place reference by
// applying its projection to the base type, then substituting the current
// instance's type parameters.
597 pub fn monomorphized_place_ty(&self, place_ref: &mir::PlaceRef<'_, 'tcx>) -> Ty<'tcx> {
598 let tcx = self.cx.tcx();
599 let place_ty = mir::Place::ty_from(
601 place_ref.projection,
605 self.monomorphize(&place_ty.ty)