use super::operand::OperandValue;
use super::{FunctionCx, LocalRef};

use crate::common::IntPredicate;
use crate::glue;
use crate::traits::*;

use rustc_middle::mir;
use rustc_middle::mir::tcx::PlaceTy;
use rustc_middle::ty::layout::{HasTyCtxt, LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, Ty};
use rustc_target::abi::{Abi, Align, FieldsShape, Int, Pointer, TagEncoding};
use rustc_target::abi::{VariantIdx, Variants};
#[derive(Copy, Clone, Debug)]
pub struct PlaceRef<'tcx, V> {
    /// A pointer to the contents of the place.
    pub llval: V,

    /// This place's extra data if it is unsized, or `None` if null.
    pub llextra: Option<V>,

    /// The monomorphized type of this place, including variant information.
    pub layout: TyAndLayout<'tcx>,

    /// The alignment we know for this place.
    pub align: Align,
}
impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
    pub fn new_sized(llval: V, layout: TyAndLayout<'tcx>) -> PlaceRef<'tcx, V> {
        assert!(layout.is_sized());
        PlaceRef { llval, llextra: None, layout, align: layout.align.abi }
    }

    pub fn new_sized_aligned(
        llval: V,
        layout: TyAndLayout<'tcx>,
        align: Align,
    ) -> PlaceRef<'tcx, V> {
        assert!(layout.is_sized());
        PlaceRef { llval, llextra: None, layout, align }
    }

    // FIXME(eddyb) pass something else for the name so no work is done
    // unless LLVM IR names are turned on (e.g. for `--emit=llvm-ir`).
    pub fn alloca<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        layout: TyAndLayout<'tcx>,
    ) -> Self {
        assert!(layout.is_sized(), "tried to statically allocate unsized place");
        let tmp = bx.alloca(bx.cx().backend_type(layout), layout.align.abi);
        Self::new_sized(tmp, layout)
    }

    /// Returns a place for an indirect reference to an unsized place.
    // FIXME(eddyb) pass something else for the name so no work is done
    // unless LLVM IR names are turned on (e.g. for `--emit=llvm-ir`).
    pub fn alloca_unsized_indirect<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        bx: &mut Bx,
        layout: TyAndLayout<'tcx>,
    ) -> Self {
        assert!(layout.is_unsized(), "tried to allocate indirect place for sized values");
        let ptr_ty = bx.cx().tcx().mk_mut_ptr(layout.ty);
        let ptr_layout = bx.cx().layout_of(ptr_ty);
        Self::alloca(bx, ptr_layout)
    }

    pub fn len<Cx: ConstMethods<'tcx, Value = V>>(&self, cx: &Cx) -> V {
        if let FieldsShape::Array { count, .. } = self.layout.fields {
            if self.layout.is_unsized() {
                assert_eq!(count, 0);
                self.llextra.unwrap()
            } else {
                cx.const_usize(count)
            }
        } else {
            bug!("unexpected layout `{:#?}` in PlaceRef::len", self.layout)
        }
    }
}

impl<'a, 'tcx, V: CodegenObject> PlaceRef<'tcx, V> {
    /// Access a field, at a point when the value's case is known.
    pub fn project_field<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        bx: &mut Bx,
        ix: usize,
    ) -> Self {
        let field = self.layout.field(bx.cx(), ix);
        let offset = self.layout.fields.offset(ix);
        let effective_field_align = self.align.restrict_for_offset(offset);

        let mut simple = || {
            let llval = match self.layout.abi {
                _ if offset.bytes() == 0 => {
                    // Unions and newtypes only use an offset of 0.
                    // Also handles the first field of Scalar, ScalarPair, and Vector layouts.
                    self.llval
                }
                Abi::ScalarPair(a, b)
                    if offset == a.size(bx.cx()).align_to(b.align(bx.cx()).abi) =>
                {
                    // Offset matches second field.
                    let ty = bx.backend_type(self.layout);
                    bx.struct_gep(ty, self.llval, 1)
                }
                Abi::Scalar(_) | Abi::ScalarPair(..) | Abi::Vector { .. } if field.is_zst() => {
                    // ZST fields are not included in Scalar, ScalarPair, and Vector layouts, so manually offset the pointer.
                    let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p());
                    bx.gep(bx.cx().type_i8(), byte_ptr, &[bx.const_usize(offset.bytes())])
                }
                Abi::Scalar(_) | Abi::ScalarPair(..) => {
                    // All fields of Scalar and ScalarPair layouts must have been handled by this point.
                    // Vector layouts have additional fields for each element of the vector, so don't panic in that case.
                    bug!(
                        "offset of non-ZST field `{:?}` does not match layout `{:#?}`",
                        field,
                        self.layout
                    );
                }
                _ => {
                    let ty = bx.backend_type(self.layout);
                    bx.struct_gep(ty, self.llval, bx.cx().backend_field_index(self.layout, ix))
                }
            };
            PlaceRef {
                // HACK(eddyb): have to bitcast pointers until LLVM removes pointee types.
                llval: bx.pointercast(llval, bx.cx().type_ptr_to(bx.cx().backend_type(field))),
                llextra: if bx.cx().type_has_metadata(field.ty) { self.llextra } else { None },
                layout: field,
                align: effective_field_align,
            }
        };

        // Simple cases, which don't need DST adjustment:
        //   * no metadata available - just log the case
        //   * known alignment - sized types, `[T]`, `str` or a foreign type
        //   * packed struct - there is no alignment padding
        match field.ty.kind() {
            _ if self.llextra.is_none() => {
                debug!(
                    "unsized field `{}` of `{:?}` has no metadata for adjustment",
                    ix, self.llval
                );
                return simple();
            }
            _ if field.is_sized() => return simple(),
            ty::Slice(..) | ty::Str | ty::Foreign(..) => return simple(),
            ty::Adt(def, _) => {
                if def.repr().packed() {
                    // FIXME(eddyb) generalize the adjustment when we
                    // start supporting packing to larger alignments.
                    assert_eq!(self.layout.align.abi.bytes(), 1);
                    return simple();
                }
            }
            _ => {}
        }

        // We need to get the pointer manually now.
        // We do this by casting to a `*i8`, then offsetting it by the appropriate amount.
        // We do this instead of, say, simply adjusting the pointer from the result of a GEP
        // because the field may have an arbitrary alignment in the LLVM representation
        // anyway.
        //
        // To demonstrate:
        //
        //     struct Foo<T: ?Sized> {
        //         x: u16,
        //         y: T
        //     }
        //
        // The type `Foo<Foo<Trait>>` is represented in LLVM as `{ u16, { u16, u8 }}`, meaning that
        // the `y` field has 16-bit alignment.

        let meta = self.llextra;

        let unaligned_offset = bx.cx().const_usize(offset.bytes());

        // Get the alignment of the field
        let (_, unsized_align) = glue::size_and_align_of_dst(bx, field.ty, meta);

        // Bump the unaligned offset up to the appropriate alignment
        let offset = round_up_const_value_to_alignment(bx, unaligned_offset, unsized_align);

        debug!("struct_field_ptr: DST field offset: {:?}", offset);

        // Cast and adjust pointer.
        let byte_ptr = bx.pointercast(self.llval, bx.cx().type_i8p());
        let byte_ptr = bx.gep(bx.cx().type_i8(), byte_ptr, &[offset]);

        // Finally, cast back to the type expected.
        let ll_fty = bx.cx().backend_type(field);
        debug!("struct_field_ptr: Field type is {:?}", ll_fty);

        PlaceRef {
            llval: bx.pointercast(byte_ptr, bx.cx().type_ptr_to(ll_fty)),
            llextra: self.llextra,
            layout: field,
            align: effective_field_align,
        }
    }

    /// Obtain the actual discriminant of a value.
    #[instrument(level = "trace", skip(bx))]
    pub fn codegen_get_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        self,
        bx: &mut Bx,
        cast_to: Ty<'tcx>,
    ) -> V {
        let dl = &bx.tcx().data_layout;
        let cast_to_layout = bx.cx().layout_of(cast_to);
        let cast_to_size = cast_to_layout.layout.size();
        let cast_to = bx.cx().immediate_backend_type(cast_to_layout);
        if self.layout.abi.is_uninhabited() {
            return bx.cx().const_undef(cast_to);
        }
        let (tag_scalar, tag_encoding, tag_field) = match self.layout.variants {
            Variants::Single { index } => {
                let discr_val = self
                    .layout
                    .ty
                    .discriminant_for_variant(bx.cx().tcx(), index)
                    .map_or(index.as_u32() as u128, |discr| discr.val);
                return bx.cx().const_uint_big(cast_to, discr_val);
            }
            Variants::Multiple { tag, ref tag_encoding, tag_field, .. } => {
                (tag, tag_encoding, tag_field)
            }
        };

        // Read the tag/niche-encoded discriminant from memory.
        let tag = self.project_field(bx, tag_field);
        let tag_op = bx.load_operand(tag);
        let tag_imm = tag_op.immediate();

        // Decode the discriminant (specifically if it's niche-encoded).
        match *tag_encoding {
            TagEncoding::Direct => {
                let signed = match tag_scalar.primitive() {
                    // We use `i1` for bytes that are always `0` or `1`,
                    // e.g., `#[repr(i8)] enum E { A, B }`, but we can't
                    // let LLVM interpret the `i1` as signed, because
                    // then `i1 1` (i.e., `E::B`) is effectively `i8 -1`.
                    Int(_, signed) => !tag_scalar.is_bool() && signed,
                    _ => false,
                };
                bx.intcast(tag_imm, cast_to, signed)
            }
            TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start } => {
                // Cast to an integer so we don't have to treat a pointer as a
                // pointer-sized value.
                let (tag, tag_llty) = match tag_scalar.primitive() {
                    // FIXME(erikdesjardins): handle non-default addrspace ptr sizes
                    Pointer(_) => {
                        let t = bx.type_from_integer(dl.ptr_sized_integer());
                        let tag = bx.ptrtoint(tag_imm, t);
                        (tag, t)
                    }
                    _ => (tag_imm, bx.cx().immediate_backend_type(tag_op.layout)),
                };

                let tag_size = tag_scalar.size(bx.cx());
                let max_unsigned = tag_size.unsigned_int_max();
                let max_signed = tag_size.signed_int_max() as u128;
                let min_signed = max_signed + 1;
                let relative_max = niche_variants.end().as_u32() - niche_variants.start().as_u32();
                let niche_end = niche_start.wrapping_add(relative_max as u128) & max_unsigned;
                let range = tag_scalar.valid_range(bx.cx());

                let sle = |lhs: u128, rhs: u128| -> bool {
                    // Signed and unsigned comparisons give the same results,
                    // except that in signed comparisons an integer with the
                    // sign bit set is less than one with the sign bit clear.
                    // Toggle the sign bit to do a signed comparison.
                    (lhs ^ min_signed) <= (rhs ^ min_signed)
                };
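
                // For example, with a 1-byte tag (`min_signed == 0x80`),
                // `sle(0xff, 0x01)` computes `0x7f <= 0x81`, which is true,
                // matching the signed view `-1 <= 1`, whereas the plain
                // unsigned comparison `0xff <= 0x01` would be false.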

                // We have a subrange `niche_start..=niche_end` inside `range`.
                // If the value of the tag is inside this subrange, it's a
                // "niche value", an increment of the discriminant. Otherwise it
                // indicates the untagged variant.
                // A general algorithm to extract the discriminant from the tag
                // is:
                //     relative_tag = tag - niche_start
                //     is_niche = relative_tag <= (ule) relative_max
                //     discr = if is_niche {
                //         cast(relative_tag) + niche_variants.start()
                //     } else {
                //         untagged_variant
                //     }
                // However, we will likely be able to emit simpler code.
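                //
                // As an illustration (the exact layout is a compiler detail):
                // for an enum like `enum E { A(bool), B, C }`, the unused
                // `bool` values can encode `B` and `C`, giving roughly
                // `niche_start = 2`, `relative_max = 1`, `untagged_variant = A`.
                // A tag of 3 yields `relative_tag = 1`, a niche value, so
                // `discr = 1 + niche_variants.start()`, i.e. `C`; tags 0 and 1
                // fall outside the subrange and decode to `A`.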

                // Find the least and greatest values in `range`, considered
                // both as signed and unsigned.
                let (low_unsigned, high_unsigned) = if range.start <= range.end {
                    (range.start, range.end)
                } else {
                    (0, max_unsigned)
                };
                let (low_signed, high_signed) = if sle(range.start, range.end) {
                    (range.start, range.end)
                } else {
                    (min_signed, max_signed)
                };

                let niches_ule = niche_start <= niche_end;
                let niches_sle = sle(niche_start, niche_end);
                let cast_smaller = cast_to_size <= tag_size;

                // In the algorithm above, we can change
                //     cast(relative_tag) + niche_variants.start()
                // into
                //     cast(tag + (niche_variants.start() - niche_start))
                // if either the casted type is no larger than the original
                // type, or if the niche values are contiguous (in either the
                // signed or unsigned sense).
                let can_incr = cast_smaller || niches_ule || niches_sle;

                let data_for_boundary_niche = || -> Option<(IntPredicate, u128)> {
                    if !can_incr {
                        None
                    } else if niche_start == low_unsigned {
                        Some((IntPredicate::IntULE, niche_end))
                    } else if niche_end == high_unsigned {
                        Some((IntPredicate::IntUGE, niche_start))
                    } else if niche_start == low_signed {
                        Some((IntPredicate::IntSLE, niche_end))
                    } else if niche_end == high_signed {
                        Some((IntPredicate::IntSGE, niche_start))
                    } else {
                        None
                    }
                };

                let (is_niche, tagged_discr, delta) = if relative_max == 0 {
                    // Best case scenario: only one tagged variant. This will
                    // likely become just a comparison and a jump.
                    // The algorithm is:
                    //     is_niche = tag == niche_start
                    //     discr = if is_niche {
                    //         niche_start
                    //     } else {
                    //         untagged_variant
                    //     }
                    let niche_start = bx.cx().const_uint_big(tag_llty, niche_start);
                    let is_niche = bx.icmp(IntPredicate::IntEQ, tag, niche_start);
                    let tagged_discr =
                        bx.cx().const_uint(cast_to, niche_variants.start().as_u32() as u64);
                    (is_niche, tagged_discr, 0)
                } else if let Some((predicate, constant)) = data_for_boundary_niche() {
                    // The niche values are either the lowest or the highest in
                    // `range`. We can avoid the first subtraction in the
                    // algorithm.
                    // The algorithm is now this:
                    //     is_niche = tag <= niche_end
                    //     discr = if is_niche {
                    //         cast(tag + (niche_variants.start() - niche_start))
                    //     } else {
                    //         untagged_variant
                    //     }
                    // (the first line may instead be tag >= niche_start,
                    // and may be a signed or unsigned comparison)
                    // The arithmetic must be done before the cast, so we can
                    // have the correct wrapping behavior. See issue #104519 for
                    // the consequences of getting this wrong.
                    let is_niche =
                        bx.icmp(predicate, tag, bx.cx().const_uint_big(tag_llty, constant));
                    let delta = (niche_variants.start().as_u32() as u128).wrapping_sub(niche_start);
                    let incr_tag = if delta == 0 {
                        tag
                    } else {
                        bx.add(tag, bx.cx().const_uint_big(tag_llty, delta))
                    };

                    let cast_tag = if cast_smaller {
                        bx.intcast(incr_tag, cast_to, false)
                    } else if niches_ule {
                        bx.zext(incr_tag, cast_to)
                    } else {
                        bx.sext(incr_tag, cast_to)
                    };

                    (is_niche, cast_tag, 0)
                } else {
                    // The special cases don't apply, so we'll have to go with
                    // the general algorithm.
                    let relative_discr = bx.sub(tag, bx.cx().const_uint_big(tag_llty, niche_start));
                    let cast_tag = bx.intcast(relative_discr, cast_to, false);
                    let is_niche = bx.icmp(
                        IntPredicate::IntULE,
                        relative_discr,
                        bx.cx().const_uint(tag_llty, relative_max as u64),
                    );
                    (is_niche, cast_tag, niche_variants.start().as_u32() as u128)
                };

                let tagged_discr = if delta == 0 {
                    tagged_discr
                } else {
                    bx.add(tagged_discr, bx.cx().const_uint_big(cast_to, delta))
                };

                let discr = bx.select(
                    is_niche,
                    tagged_discr,
                    bx.cx().const_uint(cast_to, untagged_variant.as_u32() as u64),
                );

                // In principle we could insert assumes on the possible range of `discr`, but
                // currently in LLVM this seems to be a pessimization.

                discr
            }
        }
    }

    /// Sets the discriminant for a new value of the given case of the given
    /// representation.
    pub fn codegen_set_discr<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &mut Bx,
        variant_index: VariantIdx,
    ) {
        if self.layout.for_variant(bx.cx(), variant_index).abi.is_uninhabited() {
            // We play it safe by using a well-defined `abort`, but we could go for immediate UB
            // if that turns out to be helpful.
            bx.abort();
            return;
        }
        match self.layout.variants {
            Variants::Single { index } => {
                assert_eq!(index, variant_index);
            }
            Variants::Multiple { tag_encoding: TagEncoding::Direct, tag_field, .. } => {
                let ptr = self.project_field(bx, tag_field);
                let to =
                    self.layout.ty.discriminant_for_variant(bx.tcx(), variant_index).unwrap().val;
                bx.store(
                    bx.cx().const_uint_big(bx.cx().backend_type(ptr.layout), to),
                    ptr.llval,
                    ptr.align,
                );
            }
            Variants::Multiple {
                tag_encoding:
                    TagEncoding::Niche { untagged_variant, ref niche_variants, niche_start },
                tag_field,
                ..
            } => {
                if variant_index != untagged_variant {
                    let niche = self.project_field(bx, tag_field);
                    let niche_llty = bx.cx().immediate_backend_type(niche.layout);
                    let niche_value = variant_index.as_u32() - niche_variants.start().as_u32();
                    let niche_value = (niche_value as u128).wrapping_add(niche_start);
                    // FIXME(eddyb): check the actual primitive type here.
                    let niche_llval = if niche_value == 0 {
                        // HACK(eddyb): using `c_null` as it works on all types.
                        bx.cx().const_null(niche_llty)
                    } else {
                        bx.cx().const_uint_big(niche_llty, niche_value)
                    };
                    OperandValue::Immediate(niche_llval).store(bx, niche);
                }
            }
        }
    }

    pub fn project_index<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &mut Bx,
        llindex: V,
    ) -> Self {
        // Statically compute the offset if we can, otherwise just use the element size,
        // as this will yield the lowest alignment.
        let layout = self.layout.field(bx, 0);
        let offset = if let Some(llindex) = bx.const_to_opt_uint(llindex) {
            layout.size.checked_mul(llindex, bx).unwrap_or(layout.size)
        } else {
            layout.size
        };

        PlaceRef {
            llval: bx.inbounds_gep(
                bx.cx().backend_type(self.layout),
                self.llval,
                &[bx.cx().const_usize(0), llindex],
            ),
            llextra: None,
            layout,
            align: self.align.restrict_for_offset(offset),
        }
    }

    pub fn project_downcast<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &mut Bx,
        variant_index: VariantIdx,
    ) -> Self {
        let mut downcast = *self;
        downcast.layout = self.layout.for_variant(bx.cx(), variant_index);

        // Cast to the appropriate variant struct type.
        let variant_ty = bx.cx().backend_type(downcast.layout);
        downcast.llval = bx.pointercast(downcast.llval, bx.cx().type_ptr_to(variant_ty));

        downcast
    }

    pub fn project_type<Bx: BuilderMethods<'a, 'tcx, Value = V>>(
        &self,
        bx: &mut Bx,
        ty: Ty<'tcx>,
    ) -> Self {
        let mut downcast = *self;
        downcast.layout = bx.cx().layout_of(ty);

        // Cast to the appropriate type.
        let variant_ty = bx.cx().backend_type(downcast.layout);
        downcast.llval = bx.pointercast(downcast.llval, bx.cx().type_ptr_to(variant_ty));

        downcast
    }

    pub fn storage_live<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
        bx.lifetime_start(self.llval, self.layout.size);
    }

    pub fn storage_dead<Bx: BuilderMethods<'a, 'tcx, Value = V>>(&self, bx: &mut Bx) {
        bx.lifetime_end(self.llval, self.layout.size);
    }
}

impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
    #[instrument(level = "trace", skip(self, bx))]
    pub fn codegen_place(
        &mut self,
        bx: &mut Bx,
        place_ref: mir::PlaceRef<'tcx>,
    ) -> PlaceRef<'tcx, Bx::Value> {
        let cx = self.cx;
        let tcx = self.cx.tcx();

        let mut base = 0;
        let mut cg_base = match self.locals[place_ref.local] {
            LocalRef::Place(place) => place,
            LocalRef::UnsizedPlace(place) => bx.load_operand(place).deref(cx),
            LocalRef::Operand(..) => {
                if place_ref.has_deref() {
                    base = 1;
                    let cg_base = self.codegen_consume(
                        bx,
                        mir::PlaceRef { projection: &place_ref.projection[..0], ..place_ref },
                    );
                    cg_base.deref(bx.cx())
                } else {
                    bug!("using operand local {:?} as place", place_ref);
                }
            }
        };
        for elem in place_ref.projection[base..].iter() {
            cg_base = match *elem {
                mir::ProjectionElem::Deref => bx.load_operand(cg_base).deref(bx.cx()),
                mir::ProjectionElem::Field(ref field, _) => {
                    cg_base.project_field(bx, field.index())
                }
                mir::ProjectionElem::OpaqueCast(ty) => cg_base.project_type(bx, ty),
                mir::ProjectionElem::Index(index) => {
                    let index = &mir::Operand::Copy(mir::Place::from(index));
                    let index = self.codegen_operand(bx, index);
                    let llindex = index.immediate();
                    cg_base.project_index(bx, llindex)
                }
                mir::ProjectionElem::ConstantIndex { offset, from_end: false, min_length: _ } => {
                    let lloffset = bx.cx().const_usize(offset as u64);
                    cg_base.project_index(bx, lloffset)
                }
                mir::ProjectionElem::ConstantIndex { offset, from_end: true, min_length: _ } => {
                    let lloffset = bx.cx().const_usize(offset as u64);
                    let lllen = cg_base.len(bx.cx());
                    let llindex = bx.sub(lllen, lloffset);
                    cg_base.project_index(bx, llindex)
                }
                mir::ProjectionElem::Subslice { from, to, from_end } => {
                    let mut subslice = cg_base.project_index(bx, bx.cx().const_usize(from as u64));
                    let projected_ty =
                        PlaceTy::from_ty(cg_base.layout.ty).projection_ty(tcx, *elem).ty;
                    subslice.layout = bx.cx().layout_of(self.monomorphize(projected_ty));

                    if subslice.layout.is_unsized() {
                        assert!(from_end, "slice subslices should be `from_end`");
                        subslice.llextra = Some(bx.sub(
                            cg_base.llextra.unwrap(),
                            bx.cx().const_usize((from as u64) + (to as u64)),
                        ));
                    }

                    // Cast the place pointer type to the new
                    // array or slice type (`*[%_; new_len]`).
                    subslice.llval = bx.pointercast(
                        subslice.llval,
                        bx.cx().type_ptr_to(bx.cx().backend_type(subslice.layout)),
                    );

                    subslice
                }
                mir::ProjectionElem::Downcast(_, v) => cg_base.project_downcast(bx, v),
            };
        }
        debug!("codegen_place(place={:?}) => {:?}", place_ref, cg_base);
        cg_base
    }

    pub fn monomorphized_place_ty(&self, place_ref: mir::PlaceRef<'tcx>) -> Ty<'tcx> {
        let tcx = self.cx.tcx();
        let place_ty = place_ref.ty(self.mir, tcx);
        self.monomorphize(place_ty.ty)
    }
}

fn round_up_const_value_to_alignment<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>>(
    bx: &mut Bx,
    value: Bx::Value,
    align: Bx::Value,
) -> Bx::Value {
    // In pseudo code:
    //
    //     if value & (align - 1) == 0 {
    //         value
    //     } else {
    //         (value & !(align - 1)) + align
    //     }
    //
    // Usually this is written without branches as
    //
    //     (value + align - 1) & !(align - 1)
    //
    // But this formula cannot take advantage of constant `value`. E.g. if `value` is known
    // at compile time to be `1`, this expression should be optimized to `align`. However,
    // the optimization only holds if `align` is a power of two. Since the optimizer doesn't know
    // that `align` is a power of two, it cannot perform this optimization.
    //
    // Instead we use
    //
    //     value + (-value & (align - 1))
    //
    // Since `align` is used only once, the expression can be optimized. For `value = 0`
    // it's optimized to `0` even in debug mode.
    //
    // NB: The previous version of this code used
    //
    //     (value + align - 1) & -align
    //
    // Even though `-align == !(align - 1)`, LLVM failed to optimize this even for
    // `value = 0`. Bug report: https://bugs.llvm.org/show_bug.cgi?id=48559
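    //
    // For example, with `value = 5` and `align = 4`: `-5 & 3 == 3`, so the
    // result is `5 + 3 == 8`; with `value = 8`, `-8 & 3 == 0`, so `8` is
    // returned unchanged.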
    let one = bx.const_usize(1);
    let align_minus_1 = bx.sub(align, one);
    let neg_value = bx.neg(value);
    let offset = bx.and(neg_value, align_minus_1);
    bx.add(value, offset)
}