use super::operand::OperandValue;
use super::{FunctionCx, LocalRef};

use crate::common::IntPredicate;
use crate::glue;
use crate::traits::*;
use crate::MemFlags;

use rustc_middle::mir;
use rustc_middle::mir::tcx::PlaceTy;
use rustc_middle::ty::layout::{HasTyCtxt, TyAndLayout};
use rustc_middle::ty::{self, Ty};
use rustc_target::abi::{Abi, Align, DiscriminantKind, FieldsShape, Int};
use rustc_target::abi::{LayoutOf, VariantIdx, Variants};

#[derive(Copy, Clone, Debug)]
pub struct PlaceRef<'tcx, V> {
    /// A pointer to the contents of the place.
    pub llval: V,

    /// This place's extra data if it is unsized, or `None` if it is sized.
    pub llextra: Option<V>,

    /// The monomorphized type of this place, including variant information.
    pub layout: TyAndLayout<'tcx>,

    /// The alignment we know for this place.
    pub align: Align,
}

impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
    pub fn new_sized(llval: V, layout: TyAndLayout<'tcx>) -> PlaceRef<'tcx, V> {
        assert!(!layout.is_unsized());
        PlaceRef { llval, llextra: None, layout, align: layout.align.abi }
    }

    pub fn new_sized_aligned(
        llval: V,
        layout: TyAndLayout<'tcx>,
        align: Align,
    ) -> PlaceRef<'tcx, V> {
        assert!(!layout.is_unsized());
        PlaceRef { llval, llextra: None, layout, align }
    }

    // FIXME(eddyb) pass something else for the name so no work is done
    // unless LLVM IR names are turned on (e.g. for `--emit=llvm-ir`).
    pub fn alloca<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        layout: TyAndLayout<'tcx>,
    ) -> Self {
        assert!(!layout.is_unsized(), "tried to statically allocate unsized place");
        let tmp = bx.alloca(bx.cx().backend_type(layout), layout.align.abi);
        Self::new_sized(tmp, layout)
    }

    /// Returns a place for an indirect reference to an unsized place.
    // FIXME(eddyb) pass something else for the name so no work is done
    // unless LLVM IR names are turned on (e.g. for `--emit=llvm-ir`).
    pub fn alloca_unsized_indirect<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        layout: TyAndLayout<'tcx>,
    ) -> Self {
        assert!(layout.is_unsized(), "tried to allocate indirect place for sized values");
        let ptr_ty = bx.cx().tcx().mk_mut_ptr(layout.ty);
        let ptr_layout = bx.cx().layout_of(ptr_ty);
        Self::alloca(bx, ptr_layout)
    }

    pub fn len<Cx: ConstMethods<'tcx, Value = V>>(&self, cx: &Cx) -> V {
        if let FieldsShape::Array { count, .. } = self.layout.fields {
            if self.layout.is_unsized() {
                assert_eq!(count, 0);
                self.llextra.unwrap()
            } else {
                cx.const_usize(count)
            }
        } else {
            bug!("unexpected layout `{:#?}` in PlaceRef::len", self.layout)
        }
    }
}

impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
    /// Accesses a field, at a point when the value's case is known.
    pub fn project_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        bx: &mut Bx,
        ix: usize,
    ) -> Self {
        let field = self.layout.field(bx.cx(), ix);
        let offset = self.layout.fields.offset(ix);
        let effective_field_align = self.align.restrict_for_offset(offset);

        let mut simple = || {
            // Unions and newtypes only use an offset of 0.
            let llval = if offset.bytes() == 0 {
                self.llval
            } else if let Abi::ScalarPair(ref a, ref b) = self.layout.abi {
                // Offsets have to match either the first or the second field.
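                // E.g. (illustrative layout) for a pair of `u32` and `u64`, the
                // second field lives at `size_of::<u32>()` rounded up to
                // `align_of::<u64>()`, i.e. at offset 8.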
                assert_eq!(offset, a.value.size(bx.cx()).align_to(b.value.align(bx.cx()).abi));
                bx.struct_gep(self.llval, 1)
            } else {
                bx.struct_gep(self.llval, bx.cx().backend_field_index(self.layout, ix))
            };
            PlaceRef {
                // HACK(eddyb): have to bitcast pointers until LLVM removes pointee types.
                llval: bx.pointercast(llval, bx.cx().type_ptr_to(bx.cx().backend_type(field))),
                llextra: if bx.cx().type_has_metadata(field.ty) { self.llextra } else { None },
                layout: field,
                align: effective_field_align,
            }
        };

        // Simple cases, which don't need DST adjustment:
        //   * no metadata available - just log the case
        //   * known alignment - sized types, `[T]`, `str` or a foreign type
        //   * packed struct - there is no alignment padding
        match field.ty.kind {
            _ if self.llextra.is_none() => {
                debug!(
                    "unsized field `{}` of `{:?}` has no metadata for adjustment",
                    ix, self.llval
                );
                return simple();
            }
            _ if !field.is_unsized() => return simple(),
            ty::Slice(..) | ty::Str | ty::Foreign(..) => return simple(),
            ty::Adt(def, _) => {
                if def.repr.packed() {
                    // FIXME(eddyb) generalize the adjustment when we
                    // start supporting packing to larger alignments.
                    assert_eq!(self.layout.align.abi.bytes(), 1);
                    return simple();
                }
            }
            _ => {}
        }

        // We need to get the pointer manually now.
        // We do this by casting to a `*i8`, then offsetting it by the appropriate amount.
        // We do this instead of, say, simply adjusting the pointer from the result of a GEP
        // because the field may have an arbitrary alignment in the LLVM representation
        // anyway.
        //
        // To demonstrate:
        //
        //     struct Foo<T: ?Sized> {
        //         x: u16,
        //         y: T,
        //     }
        //
        // The type `Foo<Foo<dyn Trait>>` is represented in LLVM as `{ u16, { u16, u8 } }`,
        // meaning that the `y` field has 16-bit alignment.

        let meta = self.llextra;

        let unaligned_offset = bx.cx().const_usize(offset.bytes());

        // Get the alignment of the field.
        let (_, unsized_align) = glue::size_and_align_of_dst(bx, field.ty, meta);

        // Bump the unaligned offset up to the appropriate alignment using the
        // following expression:
        //
        //     (unaligned offset + (align - 1)) & -align
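        //
        // E.g. (illustrative numbers) with `unaligned offset = 5` and `align = 4`:
        // `(5 + 3) & -4 == 8 & 0b...11100 == 8`, the next multiple of 4.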

        // Calculate the adjusted offset.
        let align_sub_1 = bx.sub(unsized_align, bx.cx().const_usize(1u64));
        let and_lhs = bx.add(unaligned_offset, align_sub_1);
        let and_rhs = bx.neg(unsized_align);
        let offset = bx.and(and_lhs, and_rhs);

        debug!("struct_field_ptr: DST field offset: {:?}", offset);

        // Cast and adjust the pointer.
        let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p());
        let byte_ptr = bx.gep(byte_ptr, &[offset]);

        // Finally, cast back to the expected type.
        let ll_fty = bx.cx().backend_type(field);
        debug!("struct_field_ptr: Field type is {:?}", ll_fty);

        PlaceRef {
            llval: bx.pointercast(byte_ptr, bx.cx().type_ptr_to(ll_fty)),
            llextra: self.llextra,
            layout: field,
            align: effective_field_align,
        }
    }

    /// Obtains the actual discriminant of a value.
    pub fn codegen_get_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        bx: &mut Bx,
        cast_to: Ty<'tcx>,
    ) -> V {
        let cast_to = bx.cx().immediate_backend_type(bx.cx().layout_of(cast_to));
        if self.layout.abi.is_uninhabited() {
            return bx.cx().const_undef(cast_to);
        }
        let (discr_scalar, discr_kind, discr_index) = match self.layout.variants {
            Variants::Single { index } => {
                let discr_val = self
                    .layout
                    .ty
                    .discriminant_for_variant(bx.cx().tcx(), index)
                    .map_or(index.as_u32() as u128, |discr| discr.val);
                return bx.cx().const_uint_big(cast_to, discr_val);
            }
            Variants::Multiple { ref discr, ref discr_kind, discr_index, .. } => {
                (discr, discr_kind, discr_index)
            }
        };

        // Read the tag/niche-encoded discriminant from memory.
        let encoded_discr = self.project_field(bx, discr_index);
        let encoded_discr = bx.load_operand(encoded_discr);

        // Decode the discriminant (specifically if it's niche-encoded).
        match *discr_kind {
            DiscriminantKind::Tag => {
                let signed = match discr_scalar.value {
                    // We use `i1` for bytes that are always `0` or `1`,
                    // e.g., `#[repr(i8)] enum E { A, B }`, but we can't
                    // let LLVM interpret the `i1` as signed, because
                    // then `i1 1` (i.e., `E::B`) is effectively `i8 -1`.
                    Int(_, signed) => !discr_scalar.is_bool() && signed,
                    _ => false,
                };
                bx.intcast(encoded_discr.immediate(), cast_to, signed)
            }
            DiscriminantKind::Niche { dataful_variant, ref niche_variants, niche_start } => {
                // Rebase from niche values to discriminants, and check
                // whether the result is in range for the niche variants.
                let niche_llty = bx.cx().immediate_backend_type(encoded_discr.layout);
                let encoded_discr = encoded_discr.immediate();

                // We first compute the "relative discriminant" (wrt `niche_variants`),
                // that is, if `n = niche_variants.end() - niche_variants.start()`,
                // we remap `niche_start..=niche_start + n` (which may wrap around)
                // to (non-wrap-around) `0..=n`, to be able to check whether the
                // discriminant corresponds to a niche variant with one comparison.
                // We also can't go directly to the (variant index) discriminant
                // and check that it is in the range `niche_variants`, because
                // that might not fit in the same type, on top of needing an extra
                // comparison (see also the comment on `let niche_discr`).
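                // E.g. (illustrative numbers) with a `u8` niche, `niche_start = 254`
                // and three niche variants: the encoded values 254, 255 and 0
                // (wrapping around) map to relative discriminants 0, 1 and 2.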
                let relative_discr = if niche_start == 0 {
                    // Avoid subtracting `0`, which wouldn't work for pointers.
                    // FIXME(eddyb) check the actual primitive type here.
                    encoded_discr
                } else {
                    bx.sub(encoded_discr, bx.cx().const_uint_big(niche_llty, niche_start))
                };
                let relative_max = niche_variants.end().as_u32() - niche_variants.start().as_u32();
                let is_niche = {
                    let relative_max = if relative_max == 0 {
                        // Avoid calling `const_uint`, which wouldn't work for pointers.
                        // FIXME(eddyb) check the actual primitive type here.
                        bx.cx().const_null(niche_llty)
                    } else {
                        bx.cx().const_uint(niche_llty, relative_max as u64)
                    };
                    bx.icmp(IntPredicate::IntULE, relative_discr, relative_max)
                };

                // NOTE(eddyb) this addition needs to be performed on the final
                // type, in case the niche itself can't represent all variant
                // indices (e.g. `u8` niche with more than `256` variants,
                // but enough uninhabited variants so that the remaining variants
                // fit in the niche).
                // In other words, `niche_variants.end - niche_variants.start`
                // is representable in the niche, but `niche_variants.end`
                // might not be, in extreme cases.
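                // E.g. (illustrative numbers) `niche_variants = 200..=440` with a
                // `u8` niche: the relative values `0..=240` fit in the `u8`, but
                // the variant index `440` itself does not, so the addition below
                // has to happen after the cast to `cast_to`.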
                let niche_discr = {
                    let relative_discr = if relative_max == 0 {
                        // HACK(eddyb) since we have only one niche, we know which
                        // one it is, and we can avoid having a dynamic value here.
                        bx.cx().const_uint(cast_to, 0)
                    } else {
                        bx.intcast(relative_discr, cast_to, false)
                    };
                    bx.add(
                        relative_discr,
                        bx.cx().const_uint(cast_to, niche_variants.start().as_u32() as u64),
                    )
                };

                bx.select(
                    is_niche,
                    niche_discr,
                    bx.cx().const_uint(cast_to, dataful_variant.as_u32() as u64),
                )
            }
        }
    }

    /// Sets the discriminant for a new value of the given case of the given
    /// representation.
    pub fn codegen_set_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &mut Bx,
        variant_index: VariantIdx,
    ) {
        if self.layout.for_variant(bx.cx(), variant_index).abi.is_uninhabited() {
            // We play it safe by using a well-defined `abort`, but we could go for immediate UB
            // if that turns out to be helpful.
            bx.abort();
            return;
        }
        match self.layout.variants {
            Variants::Single { index } => {
                assert_eq!(index, variant_index);
            }
            Variants::Multiple { discr_kind: DiscriminantKind::Tag, discr_index, .. } => {
                let ptr = self.project_field(bx, discr_index);
                let to =
                    self.layout.ty.discriminant_for_variant(bx.tcx(), variant_index).unwrap().val;
                bx.store(
                    bx.cx().const_uint_big(bx.cx().backend_type(ptr.layout), to),
                    ptr.llval,
                    ptr.align,
                );
            }
            Variants::Multiple {
                discr_kind:
                    DiscriminantKind::Niche { dataful_variant, ref niche_variants, niche_start },
                discr_index,
                ..
            } => {
                if variant_index != dataful_variant {
                    if bx.cx().sess().target.target.arch == "arm"
                        || bx.cx().sess().target.target.arch == "aarch64"
                    {
                        // FIXME(#34427): as a workaround for an LLVM bug on ARM,
                        // use a memset of 0 before assigning the niche value.
                        let fill_byte = bx.cx().const_u8(0);
                        let size = bx.cx().const_usize(self.layout.size.bytes());
                        bx.memset(self.llval, fill_byte, size, self.align, MemFlags::empty());
                    }

                    let niche = self.project_field(bx, discr_index);
                    let niche_llty = bx.cx().immediate_backend_type(niche.layout);
                    let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
                    let niche_value = (niche_value as u128).wrapping_add(niche_start);
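                    // E.g. (illustrative layout) for `Option<&T>`: `None` is variant 0,
                    // `niche_variants.start()` and `niche_start` are both 0, so the
                    // niche value is 0 and the discriminant is stored as a null pointer.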
                    // FIXME(eddyb): check the actual primitive type here.
                    let niche_llval = if niche_value == 0 {
                        // HACK(eddyb): using `const_null` as it works on all types.
                        bx.cx().const_null(niche_llty)
                    } else {
                        bx.cx().const_uint_big(niche_llty, niche_value)
                    };
                    OperandValue::Immediate(niche_llval).store(bx, niche);
                }
            }
        }
    }

    pub fn project_index<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &mut Bx,
        llindex: V,
    ) -> Self {
        // Statically compute the offset if we can, otherwise just use the element size,
        // as this will yield the lowest alignment.
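        // (Every element offset is a multiple of the element size, so the largest
        // power of two dividing the size is a valid alignment for any index.)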
        let layout = self.layout.field(bx, 0);
        let offset = if let Some(llindex) = bx.const_to_opt_uint(llindex) {
            layout.size.checked_mul(llindex, bx).unwrap_or(layout.size)
        } else {
            layout.size
        };

        PlaceRef {
            llval: bx.inbounds_gep(self.llval, &[bx.cx().const_usize(0), llindex]),
            llextra: None,
            layout,
            align: self.align.restrict_for_offset(offset),
        }
    }

    pub fn project_downcast<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &mut Bx,
        variant_index: VariantIdx,
    ) -> Self {
        let mut downcast = *self;
        downcast.layout = self.layout.for_variant(bx.cx(), variant_index);

        // Cast to the appropriate variant struct type.
        let variant_ty = bx.cx().backend_type(downcast.layout);
        downcast.llval = bx.pointercast(downcast.llval, bx.cx().type_ptr_to(variant_ty));

        downcast
    }

    pub fn storage_live<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
        bx.lifetime_start(self.llval, self.layout.size);
    }

    pub fn storage_dead<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
        bx.lifetime_end(self.llval, self.layout.size);
    }
}

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
    pub fn codegen_place(
        &mut self,
        bx: &mut Bx,
        place_ref: mir::PlaceRef<'tcx>,
    ) -> PlaceRef<'tcx, Bx::Value> {
        debug!("codegen_place(place_ref={:?})", place_ref);
        let cx = self.cx;
        let tcx = self.cx.tcx();

        let result = match place_ref {
            mir::PlaceRef { local, projection: [] } => match self.locals[local] {
                LocalRef::Place(place) => {
                    return place;
                }
                LocalRef::UnsizedPlace(place) => {
                    return bx.load_operand(place).deref(cx);
                }
                LocalRef::Operand(..) => {
                    bug!("using operand local {:?} as place", place_ref);
                }
            },
            mir::PlaceRef { local, projection: [proj_base @ .., mir::ProjectionElem::Deref] } => {
                // Load the pointer from its location.
                self.codegen_consume(bx, mir::PlaceRef { local, projection: proj_base })
                    .deref(bx.cx())
            }
            mir::PlaceRef { local, projection: [proj_base @ .., elem] } => {
                // FIXME turn this recursion into iteration
                let cg_base =
                    self.codegen_place(bx, mir::PlaceRef { local, projection: proj_base });

                match elem {
                    mir::ProjectionElem::Deref => bug!(),
                    mir::ProjectionElem::Field(ref field, _) => {
                        cg_base.project_field(bx, field.index())
                    }
                    mir::ProjectionElem::Index(index) => {
                        let index = &mir::Operand::Copy(mir::Place::from(*index));
                        let index = self.codegen_operand(bx, index);
                        let llindex = index.immediate();
                        cg_base.project_index(bx, llindex)
                    }
                    mir::ProjectionElem::ConstantIndex {
                        offset,
                        from_end: false,
                        min_length: _,
                    } => {
                        let lloffset = bx.cx().const_usize(*offset as u64);
                        cg_base.project_index(bx, lloffset)
                    }
                    mir::ProjectionElem::ConstantIndex {
                        offset,
                        from_end: true,
                        min_length: _,
                    } => {
                        let lloffset = bx.cx().const_usize(*offset as u64);
                        let lllen = cg_base.len(bx.cx());
                        let llindex = bx.sub(lllen, lloffset);
                        cg_base.project_index(bx, llindex)
                    }
                    mir::ProjectionElem::Subslice { from, to, from_end } => {
                        let mut subslice =
                            cg_base.project_index(bx, bx.cx().const_usize(*from as u64));
                        let projected_ty =
                            PlaceTy::from_ty(cg_base.layout.ty).projection_ty(tcx, elem).ty;
                        subslice.layout = bx.cx().layout_of(self.monomorphize(&projected_ty));

                        if subslice.layout.is_unsized() {
                            assert!(from_end, "slice subslices should be `from_end`");
                            subslice.llextra = Some(bx.sub(
                                cg_base.llextra.unwrap(),
                                bx.cx().const_usize((*from as u64) + (*to as u64)),
                            ));
                        }

                        // Cast the place pointer type to the new
                        // array or slice type (`*[%_; new_len]`).
                        subslice.llval = bx.pointercast(
                            subslice.llval,
                            bx.cx().type_ptr_to(bx.cx().backend_type(subslice.layout)),
                        );

                        subslice
                    }
                    mir::ProjectionElem::Downcast(_, v) => cg_base.project_downcast(bx, *v),
                }
            }
        };
        debug!("codegen_place(place_ref={:?}) => {:?}", place_ref, result);
        result
    }

    pub fn monomorphized_place_ty(&self, place_ref: mir::PlaceRef<'tcx>) -> Ty<'tcx> {
        let tcx = self.cx.tcx();
        let place_ty = mir::Place::ty_from(place_ref.local, place_ref.projection, *self.mir, tcx);
        self.monomorphize(&place_ty.ty)
    }
}