//! Definition of [`CValue`] and [`CPlace`]

use crate::prelude::*;

use cranelift_codegen::ir::immediates::Offset32;
fn codegen_field<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    base: Pointer,
    extra: Option<Value>,
    layout: TyAndLayout<'tcx>,
    field: mir::Field,
) -> (Pointer, TyAndLayout<'tcx>) {
    let field_offset = layout.fields.offset(field.index());
    let field_layout = layout.field(&*fx, field.index());

    let simple = |fx: &mut FunctionCx<'_, '_, '_>| {
        (base.offset_i64(fx, i64::try_from(field_offset.bytes()).unwrap()), field_layout)
    };

    if let Some(extra) = extra {
        if !field_layout.is_unsized() {
            return simple(fx);
        }
        match field_layout.ty.kind() {
            ty::Slice(..) | ty::Str | ty::Foreign(..) => simple(fx),
            ty::Adt(def, _) if def.repr().packed() => {
                assert_eq!(layout.align.abi.bytes(), 1);
                simple(fx)
            }
            _ => {
                // We have to align the offset for DSTs.
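                // A sketch of the branchless align-up computed below (plain arithmetic,
                // not the generated code), assuming `align` is a power of two:
                //
                //     fn align_up(offset: u64, align: u64) -> u64 {
                //         (offset + align - 1) & align.wrapping_neg()
                //     }
                //
                // e.g. align_up(5, 4) == 8 and align_up(8, 4) == 8. The `isub` from zero
                // below computes the two's complement negation `align.wrapping_neg()`.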
                let unaligned_offset = field_offset.bytes();
                let (_, unsized_align) =
                    crate::unsize::size_and_align_of_dst(fx, field_layout, extra);

                let one = fx.bcx.ins().iconst(fx.pointer_type, 1);
                let align_sub_1 = fx.bcx.ins().isub(unsized_align, one);
                let and_lhs = fx.bcx.ins().iadd_imm(align_sub_1, unaligned_offset as i64);
                let zero = fx.bcx.ins().iconst(fx.pointer_type, 0);
                let and_rhs = fx.bcx.ins().isub(zero, unsized_align);
                let offset = fx.bcx.ins().band(and_lhs, and_rhs);

                (base.offset_value(fx, offset), field_layout)
            }
        }
    } else {
        simple(fx)
    }
}
fn scalar_pair_calculate_b_offset(tcx: TyCtxt<'_>, a_scalar: Scalar, b_scalar: Scalar) -> Offset32 {
    let b_offset = a_scalar.size(&tcx).align_to(b_scalar.align(&tcx).abi);
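    // A sketch of this arithmetic as a plain function (hypothetical helper, not used
    // here): the second scalar sits at the first scalar's size rounded up to the
    // second's alignment,
    //
    //     fn b_offset(a_size: u64, b_align: u64) -> u64 {
    //         (a_size + b_align - 1) / b_align * b_align
    //     }
    //
    // e.g. for an (i8, i32) pair: b_offset(1, 4) == 4.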
    Offset32::new(b_offset.bytes().try_into().unwrap())
}

#[derive(Debug, Copy, Clone)]
pub(crate) struct CValue<'tcx>(CValueInner, TyAndLayout<'tcx>);

#[derive(Debug, Copy, Clone)]
enum CValueInner {
    ByRef(Pointer, Option<Value>),
    ByVal(Value),
    ByValPair(Value, Value),
}
impl<'tcx> CValue<'tcx> {
    pub(crate) fn by_ref(ptr: Pointer, layout: TyAndLayout<'tcx>) -> CValue<'tcx> {
        CValue(CValueInner::ByRef(ptr, None), layout)
    }

    pub(crate) fn by_ref_unsized(
        ptr: Pointer,
        meta: Value,
        layout: TyAndLayout<'tcx>,
    ) -> CValue<'tcx> {
        CValue(CValueInner::ByRef(ptr, Some(meta)), layout)
    }

    pub(crate) fn by_val(value: Value, layout: TyAndLayout<'tcx>) -> CValue<'tcx> {
        CValue(CValueInner::ByVal(value), layout)
    }

    pub(crate) fn by_val_pair(
        value: Value,
        extra: Value,
        layout: TyAndLayout<'tcx>,
    ) -> CValue<'tcx> {
        CValue(CValueInner::ByValPair(value, extra), layout)
    }

    pub(crate) fn layout(&self) -> TyAndLayout<'tcx> {
        self.1
    }

    pub(crate) fn force_stack(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> (Pointer, Option<Value>) {
        let layout = self.layout();
        match self.0 {
            CValueInner::ByRef(ptr, meta) => (ptr, meta),
            CValueInner::ByVal(_) | CValueInner::ByValPair(_, _) => {
                let cplace = CPlace::new_stack_slot(fx, layout);
                cplace.write_cvalue(fx, self);
                (cplace.to_ptr(), None)
            }
        }
    }

    pub(crate) fn try_to_ptr(self) -> Option<(Pointer, Option<Value>)> {
        match self.0 {
            CValueInner::ByRef(ptr, meta) => Some((ptr, meta)),
            CValueInner::ByVal(_) | CValueInner::ByValPair(_, _) => None,
        }
    }
    /// Load a value whose `layout.abi` is [`Abi::Scalar`].
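    ///
    /// A usage sketch (illustrative names, not a fixed API contract):
    /// ```ignore
    /// // `val` is a single Cranelift SSA value, e.g. an I64 for a `u64`.
    /// let val = some_cvalue.load_scalar(fx);
    /// ```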
    pub(crate) fn load_scalar(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> Value {
        let layout = self.layout();
        match self.0 {
            CValueInner::ByRef(ptr, None) => {
                let clif_ty = match layout.abi {
                    Abi::Scalar(scalar) => scalar_to_clif_type(fx.tcx, scalar),
                    Abi::Vector { element, count } => scalar_to_clif_type(fx.tcx, element)
                        .by(u16::try_from(count).unwrap())
                        .unwrap(),
                    _ => unreachable!("{:?}", layout.ty),
                };
                let mut flags = MemFlags::new();
                flags.notrap();
                ptr.load(fx, clif_ty, flags)
            }
            CValueInner::ByVal(value) => value,
            CValueInner::ByRef(_, Some(_)) => bug!("load_scalar for unsized value not allowed"),
            CValueInner::ByValPair(_, _) => bug!("Please use load_scalar_pair for ByValPair"),
        }
    }
    /// Load a value pair whose `layout.abi` is [`Abi::ScalarPair`].
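    ///
    /// A usage sketch (illustrative): for a value whose ABI is a scalar pair, e.g. a
    /// `&[u8]` fat pointer, the two halves come back as separate SSA values:
    /// ```ignore
    /// let (data_ptr, len) = slice_cvalue.load_scalar_pair(fx);
    /// ```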
    pub(crate) fn load_scalar_pair(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> (Value, Value) {
        let layout = self.layout();
        match self.0 {
            CValueInner::ByRef(ptr, None) => {
                let (a_scalar, b_scalar) = match layout.abi {
                    Abi::ScalarPair(a, b) => (a, b),
                    _ => unreachable!("load_scalar_pair({:?})", self),
                };
                let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
                let clif_ty1 = scalar_to_clif_type(fx.tcx, a_scalar);
                let clif_ty2 = scalar_to_clif_type(fx.tcx, b_scalar);
                let mut flags = MemFlags::new();
                flags.notrap();
                let val1 = ptr.load(fx, clif_ty1, flags);
                let val2 = ptr.offset(fx, b_offset).load(fx, clif_ty2, flags);
                (val1, val2)
            }
            CValueInner::ByRef(_, Some(_)) => {
                bug!("load_scalar_pair for unsized value not allowed")
            }
            CValueInner::ByVal(_) => bug!("Please use load_scalar for ByVal"),
            CValueInner::ByValPair(val1, val2) => (val1, val2),
        }
    }
    pub(crate) fn value_field(
        self,
        fx: &mut FunctionCx<'_, '_, 'tcx>,
        field: mir::Field,
    ) -> CValue<'tcx> {
        let layout = self.1;
        match self.0 {
            CValueInner::ByVal(val) => match layout.abi {
                Abi::Vector { element: _, count } => {
                    let count = u8::try_from(count).expect("SIMD type with more than 255 lanes???");
                    let field = u8::try_from(field.index()).unwrap();
                    assert!(field < count);
                    let lane = fx.bcx.ins().extractlane(val, field);
                    let field_layout = layout.field(&*fx, usize::from(field));
                    CValue::by_val(lane, field_layout)
                }
                _ => unreachable!("value_field for ByVal with abi {:?}", layout.abi),
            },
            CValueInner::ByValPair(val1, val2) => match layout.abi {
                Abi::ScalarPair(_, _) => {
                    let val = match field.as_u32() {
                        0 => val1,
                        1 => val2,
                        _ => bug!("field should be 0 or 1"),
                    };
                    let field_layout = layout.field(&*fx, usize::from(field));
                    CValue::by_val(val, field_layout)
                }
                _ => unreachable!("value_field for ByValPair with abi {:?}", layout.abi),
            },
            CValueInner::ByRef(ptr, None) => {
                let (field_ptr, field_layout) = codegen_field(fx, ptr, None, layout, field);
                CValue::by_ref(field_ptr, field_layout)
            }
            CValueInner::ByRef(_, Some(_)) => todo!(),
        }
    }
    /// Like [`CValue::value_field`] except handling ADTs containing a single array field in a way
    /// such that you can access individual lanes.
    pub(crate) fn value_lane(
        self,
        fx: &mut FunctionCx<'_, '_, 'tcx>,
        lane_idx: u64,
    ) -> CValue<'tcx> {
        let layout = self.1;
        assert!(layout.ty.is_simd());
        let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
        let lane_layout = fx.layout_of(lane_ty);
        assert!(lane_idx < lane_count);
        match self.0 {
            CValueInner::ByVal(val) => match layout.abi {
                Abi::Vector { element: _, count: _ } => {
                    assert!(lane_count <= u8::MAX.into(), "SIMD type with more than 255 lanes???");
                    let lane_idx = u8::try_from(lane_idx).unwrap();
                    let lane = fx.bcx.ins().extractlane(val, lane_idx);
                    CValue::by_val(lane, lane_layout)
                }
                _ => unreachable!("value_lane for ByVal with abi {:?}", layout.abi),
            },
            CValueInner::ByValPair(_, _) => unreachable!(),
            CValueInner::ByRef(ptr, None) => {
                let field_offset = lane_layout.size * lane_idx;
                let field_ptr = ptr.offset_i64(fx, i64::try_from(field_offset.bytes()).unwrap());
                CValue::by_ref(field_ptr, lane_layout)
            }
            CValueInner::ByRef(_, Some(_)) => unreachable!(),
        }
    }
    pub(crate) fn unsize_value(self, fx: &mut FunctionCx<'_, '_, 'tcx>, dest: CPlace<'tcx>) {
        crate::unsize::coerce_unsized_into(fx, self, dest);
    }
    /// If `ty` is signed, `const_val` must already be sign extended.
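    ///
    /// For example (illustrative bit patterns): an `i8` constant `-1` must arrive as a
    /// `ScalarInt` already holding `0xFF`; this function does not re-extend it.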
    pub(crate) fn const_val(
        fx: &mut FunctionCx<'_, '_, 'tcx>,
        layout: TyAndLayout<'tcx>,
        const_val: ty::ScalarInt,
    ) -> CValue<'tcx> {
        assert_eq!(const_val.size(), layout.size, "{:#?}: {:?}", const_val, layout);
        use cranelift_codegen::ir::immediates::{Ieee32, Ieee64};

        let clif_ty = fx.clif_type(layout.ty).unwrap();

        if let ty::Bool = layout.ty.kind() {
            assert!(
                const_val == ty::ScalarInt::FALSE || const_val == ty::ScalarInt::TRUE,
                "Invalid bool 0x{:032X}",
                const_val
            );
        }
        let val = match layout.ty.kind() {
            ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
                let const_val = const_val.to_bits(layout.size).unwrap();
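                // Sketch of the split below: Cranelift has no native 128-bit constant, so
                // the value is built from two 64-bit halves and reassembled with `iconcat`:
                //     let v: u128 = 0x0123_4567_89AB_CDEF_0000_0000_DEAD_BEEF;
                //     let lsb = v as u64;         // 0x0000_0000_DEAD_BEEF
                //     let msb = (v >> 64) as u64; // 0x0123_4567_89AB_CDEF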
                let lsb = fx.bcx.ins().iconst(types::I64, const_val as u64 as i64);
                let msb = fx.bcx.ins().iconst(types::I64, (const_val >> 64) as u64 as i64);
                fx.bcx.ins().iconcat(lsb, msb)
            }
            ty::Bool | ty::Char | ty::Uint(_) | ty::Int(_) | ty::Ref(..) | ty::RawPtr(..) => {
                fx.bcx.ins().iconst(clif_ty, const_val.to_bits(layout.size).unwrap() as i64)
            }
            ty::Float(FloatTy::F32) => {
                fx.bcx.ins().f32const(Ieee32::with_bits(u32::try_from(const_val).unwrap()))
            }
            ty::Float(FloatTy::F64) => {
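                // e.g. 1.0f64 has the IEEE 754 bit pattern 0x3FF0_0000_0000_0000, so
                // Ieee64::with_bits(0x3FF0_0000_0000_0000) reproduces exactly 1.0.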
                fx.bcx.ins().f64const(Ieee64::with_bits(u64::try_from(const_val).unwrap()))
            }
            _ => panic!(
                "CValue::const_val for non-bool/char/float/integer/pointer type {:?} is not allowed",
                layout.ty
            ),
        };

        CValue::by_val(val, layout)
    }

    pub(crate) fn cast_pointer_to(self, layout: TyAndLayout<'tcx>) -> Self {
        assert!(matches!(self.layout().ty.kind(), ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..)));
        assert!(matches!(layout.ty.kind(), ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..)));
        assert_eq!(self.layout().abi, layout.abi);
        CValue(self.0, layout)
    }
}
/// A place where a value can be written to or read from.
#[derive(Debug, Copy, Clone)]
pub(crate) struct CPlace<'tcx> {
    inner: CPlaceInner,
    layout: TyAndLayout<'tcx>,
}

#[derive(Debug, Copy, Clone)]
pub(crate) enum CPlaceInner {
    Var(Local, Variable),
    VarPair(Local, Variable, Variable),
    VarLane(Local, Variable, u8),
    Addr(Pointer, Option<Value>),
}

impl<'tcx> CPlace<'tcx> {
    pub(crate) fn layout(&self) -> TyAndLayout<'tcx> {
        self.layout
    }

    pub(crate) fn inner(&self) -> &CPlaceInner {
        &self.inner
    }
    pub(crate) fn new_stack_slot(
        fx: &mut FunctionCx<'_, '_, 'tcx>,
        layout: TyAndLayout<'tcx>,
    ) -> CPlace<'tcx> {
        assert!(!layout.is_unsized());
        if layout.size.bytes() == 0 {
            return CPlace {
                inner: CPlaceInner::Addr(Pointer::dangling(layout.align.pref), None),
                layout,
            };
        }
        if layout.size.bytes() >= u64::from(u32::MAX - 16) {
            fx.tcx
                .sess
                .fatal(&format!("values of type {} are too big to store on the stack", layout.ty));
        }

        let stack_slot = fx.bcx.create_stack_slot(StackSlotData {
            kind: StackSlotKind::ExplicitSlot,
            // FIXME Don't force the size to a multiple of 16 bytes once Cranelift gets a way to
            // specify stack slot alignment.
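            // (sketch) the round-up below: e.g. a 20-byte value gets a 32-byte slot,
            // since (20 + 15) / 16 == 2 and 2 * 16 == 32.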
            size: (u32::try_from(layout.size.bytes()).unwrap() + 15) / 16 * 16,
        });
        CPlace { inner: CPlaceInner::Addr(Pointer::stack_slot(stack_slot), None), layout }
    }
    pub(crate) fn new_var(
        fx: &mut FunctionCx<'_, '_, 'tcx>,
        local: Local,
        layout: TyAndLayout<'tcx>,
    ) -> CPlace<'tcx> {
        let var = Variable::with_u32(fx.next_ssa_var);
        fx.next_ssa_var += 1;
        fx.bcx.declare_var(var, fx.clif_type(layout.ty).unwrap());
        CPlace { inner: CPlaceInner::Var(local, var), layout }
    }

    pub(crate) fn new_var_pair(
        fx: &mut FunctionCx<'_, '_, 'tcx>,
        local: Local,
        layout: TyAndLayout<'tcx>,
    ) -> CPlace<'tcx> {
        let var1 = Variable::with_u32(fx.next_ssa_var);
        fx.next_ssa_var += 1;
        let var2 = Variable::with_u32(fx.next_ssa_var);
        fx.next_ssa_var += 1;

        let (ty1, ty2) = fx.clif_pair_type(layout.ty).unwrap();
        fx.bcx.declare_var(var1, ty1);
        fx.bcx.declare_var(var2, ty2);
        CPlace { inner: CPlaceInner::VarPair(local, var1, var2), layout }
    }

    pub(crate) fn for_ptr(ptr: Pointer, layout: TyAndLayout<'tcx>) -> CPlace<'tcx> {
        CPlace { inner: CPlaceInner::Addr(ptr, None), layout }
    }

    pub(crate) fn for_ptr_with_extra(
        ptr: Pointer,
        extra: Value,
        layout: TyAndLayout<'tcx>,
    ) -> CPlace<'tcx> {
        CPlace { inner: CPlaceInner::Addr(ptr, Some(extra)), layout }
    }
    pub(crate) fn to_cvalue(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> CValue<'tcx> {
        let layout = self.layout();
        match self.inner {
            CPlaceInner::Var(_local, var) => {
                let val = fx.bcx.use_var(var);
                //fx.bcx.set_val_label(val, cranelift_codegen::ir::ValueLabel::new(var.index()));
                CValue::by_val(val, layout)
            }
            CPlaceInner::VarPair(_local, var1, var2) => {
                let val1 = fx.bcx.use_var(var1);
                //fx.bcx.set_val_label(val1, cranelift_codegen::ir::ValueLabel::new(var1.index()));
                let val2 = fx.bcx.use_var(var2);
                //fx.bcx.set_val_label(val2, cranelift_codegen::ir::ValueLabel::new(var2.index()));
                CValue::by_val_pair(val1, val2, layout)
            }
            CPlaceInner::VarLane(_local, var, lane) => {
                let val = fx.bcx.use_var(var);
                //fx.bcx.set_val_label(val, cranelift_codegen::ir::ValueLabel::new(var.index()));
                let val = fx.bcx.ins().extractlane(val, lane);
                CValue::by_val(val, layout)
            }
            CPlaceInner::Addr(ptr, extra) => {
                if let Some(extra) = extra {
                    CValue::by_ref_unsized(ptr, extra, layout)
                } else {
                    CValue::by_ref(ptr, layout)
                }
            }
        }
    }

    pub(crate) fn to_ptr(self) -> Pointer {
        match self.to_ptr_maybe_unsized() {
            (ptr, None) => ptr,
            (_, Some(_)) => bug!("Expected sized cplace, found {:?}", self),
        }
    }

    pub(crate) fn to_ptr_maybe_unsized(self) -> (Pointer, Option<Value>) {
        match self.inner {
            CPlaceInner::Addr(ptr, extra) => (ptr, extra),
            CPlaceInner::Var(_, _)
            | CPlaceInner::VarPair(_, _, _)
            | CPlaceInner::VarLane(_, _, _) => bug!("Expected CPlace::Addr, found {:?}", self),
        }
    }
    pub(crate) fn write_cvalue(self, fx: &mut FunctionCx<'_, '_, 'tcx>, from: CValue<'tcx>) {
        assert_assignable(fx, from.layout().ty, self.layout().ty, 16);

        self.write_cvalue_maybe_transmute(fx, from, "write_cvalue");
    }

    pub(crate) fn write_cvalue_transmute(
        self,
        fx: &mut FunctionCx<'_, '_, 'tcx>,
        from: CValue<'tcx>,
    ) {
        self.write_cvalue_maybe_transmute(fx, from, "write_cvalue_transmute");
    }

    fn write_cvalue_maybe_transmute(
        self,
        fx: &mut FunctionCx<'_, '_, 'tcx>,
        from: CValue<'tcx>,
        method: &'static str,
    ) {
        fn transmute_value<'tcx>(
            fx: &mut FunctionCx<'_, '_, 'tcx>,
            var: Variable,
            data: Value,
            dst_ty: Type,
        ) {
            let src_ty = fx.bcx.func.dfg.value_type(data);
            assert_eq!(src_ty.bytes(), dst_ty.bytes(), "write_cvalue_transmute: {:?} -> {:?}", src_ty, dst_ty);
            let data = match (src_ty, dst_ty) {
                (_, _) if src_ty == dst_ty => data,

                // This is a `write_cvalue_transmute`.
                (types::I32, types::F32)
                | (types::F32, types::I32)
                | (types::I64, types::F64)
                | (types::F64, types::I64) => fx.bcx.ins().bitcast(dst_ty, data),
                _ if src_ty.is_vector() && dst_ty.is_vector() => {
                    fx.bcx.ins().raw_bitcast(dst_ty, data)
                }
                _ if src_ty.is_vector() || dst_ty.is_vector() => {
                    // FIXME do something more efficient for transmutes between vectors and integers.
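                    // The store+load round-trip below is a byte-level type pun: `data` is
                    // written at `src_ty` and the same bytes are read back at `dst_ty`,
                    // i.e. a memcpy-style transmute through a stack slot.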
                    let stack_slot = fx.bcx.create_stack_slot(StackSlotData {
                        kind: StackSlotKind::ExplicitSlot,
                        // FIXME Don't force the size to a multiple of 16 bytes once Cranelift gets a way to
                        // specify stack slot alignment.
                        size: (src_ty.bytes() + 15) / 16 * 16,
                    });
                    let ptr = Pointer::stack_slot(stack_slot);
                    ptr.store(fx, data, MemFlags::trusted());
                    ptr.load(fx, dst_ty, MemFlags::trusted())
                }

                // `CValue`s should never contain SSA-only types, so if you ended
                // up here having seen an error like `B1 -> I8`, then before
                // calling `write_cvalue` you need to add a `bint` instruction.
                _ => unreachable!("write_cvalue_transmute: {:?} -> {:?}", src_ty, dst_ty),
            };
            //fx.bcx.set_val_label(data, cranelift_codegen::ir::ValueLabel::new(var.index()));
            fx.bcx.def_var(var, data);
        }

        assert_eq!(self.layout().size, from.layout().size);
        if fx.clif_comments.enabled() {
            use cranelift_codegen::cursor::{Cursor, CursorPosition};
            let cur_block = match fx.bcx.cursor().position() {
                CursorPosition::After(block) => block,
                _ => unreachable!(),
            };
            fx.add_comment(
                fx.bcx.func.layout.last_inst(cur_block).unwrap(),
                format!("{}: {:?}: {:?} <- {:?}: {:?}", method, self.inner(), self.layout().ty, from.0, from.layout().ty),
            );
        }

        let dst_layout = self.layout();
        let to_ptr = match self.inner {
            CPlaceInner::Var(_local, var) => {
                if let ty::Array(element, len) = dst_layout.ty.kind() {
                    // Can only happen for vector types
                    let len =
                        u16::try_from(len.eval_usize(fx.tcx, ParamEnv::reveal_all())).unwrap();
                    let vector_ty = fx.clif_type(*element).unwrap().by(len).unwrap();

                    let data = match from.0 {
                        CValueInner::ByRef(ptr, None) => {
                            let mut flags = MemFlags::new();
                            flags.notrap();
                            ptr.load(fx, vector_ty, flags)
                        }
                        CValueInner::ByVal(_)
                        | CValueInner::ByValPair(_, _)
                        | CValueInner::ByRef(_, Some(_)) => bug!("array should be ByRef"),
                    };

                    fx.bcx.def_var(var, data);
                    return;
                }

                let data = CValue(from.0, dst_layout).load_scalar(fx);
                let dst_ty = fx.clif_type(self.layout().ty).unwrap();
                transmute_value(fx, var, data, dst_ty);
                return;
            }
            CPlaceInner::VarPair(_local, var1, var2) => {
                let (data1, data2) = CValue(from.0, dst_layout).load_scalar_pair(fx);
                let (dst_ty1, dst_ty2) = fx.clif_pair_type(self.layout().ty).unwrap();
                transmute_value(fx, var1, data1, dst_ty1);
                transmute_value(fx, var2, data2, dst_ty2);
                return;
            }
            CPlaceInner::VarLane(_local, var, lane) => {
                let data = from.load_scalar(fx);

                // First get the old vector
                let vector = fx.bcx.use_var(var);
                //fx.bcx.set_val_label(vector, cranelift_codegen::ir::ValueLabel::new(var.index()));

                // Next insert the written lane into the vector
                let vector = fx.bcx.ins().insertlane(vector, data, lane);

                // Finally write the new vector
                //fx.bcx.set_val_label(vector, cranelift_codegen::ir::ValueLabel::new(var.index()));
                fx.bcx.def_var(var, vector);

                return;
            }
            CPlaceInner::Addr(ptr, None) => {
                if dst_layout.size == Size::ZERO || dst_layout.abi == Abi::Uninhabited {
                    return;
                }
                ptr
            }
            CPlaceInner::Addr(_, Some(_)) => bug!("Can't write value to unsized place {:?}", self),
        };

        let mut flags = MemFlags::new();
        flags.notrap();
        match from.layout().abi {
            // FIXME make Abi::Vector work too
            Abi::Scalar(_) => {
                let val = from.load_scalar(fx);
                to_ptr.store(fx, val, flags);
                return;
            }
            Abi::ScalarPair(a_scalar, b_scalar) => {
                let (value, extra) = from.load_scalar_pair(fx);
                let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
                to_ptr.store(fx, value, flags);
                to_ptr.offset(fx, b_offset).store(fx, extra, flags);
                return;
            }
            _ => {}
        }

        match from.0 {
            CValueInner::ByVal(val) => {
                to_ptr.store(fx, val, flags);
            }
            CValueInner::ByValPair(_, _) => {
                bug!("Non ScalarPair abi {:?} for ByValPair CValue", dst_layout.abi);
            }
            CValueInner::ByRef(from_ptr, None) => {
                let from_addr = from_ptr.get_addr(fx);
                let to_addr = to_ptr.get_addr(fx);
                let src_layout = from.1;
                let size = dst_layout.size.bytes();
                let src_align = src_layout.align.abi.bytes() as u8;
                let dst_align = dst_layout.align.abi.bytes() as u8;
                fx.bcx
                    .emit_small_memory_copy(fx.target_config(), to_addr, from_addr, size, dst_align, src_align, true, flags);
            }
            CValueInner::ByRef(_, Some(_)) => todo!(),
        }
    }
    pub(crate) fn place_field(
        self,
        fx: &mut FunctionCx<'_, '_, 'tcx>,
        field: mir::Field,
    ) -> CPlace<'tcx> {
        let layout = self.layout();
        match self.inner {
            CPlaceInner::Var(local, var) => match layout.ty.kind() {
                ty::Array(_, _) => {
                    // Can only happen for vector types
                    return CPlace {
                        inner: CPlaceInner::VarLane(local, var, field.as_u32().try_into().unwrap()),
                        layout: layout.field(fx, field.as_u32().try_into().unwrap()),
                    };
                }
                ty::Adt(adt_def, substs) if layout.ty.is_simd() => {
                    let f0_ty = adt_def.non_enum_variant().fields[0].ty(fx.tcx, substs);

                    match f0_ty.kind() {
                        ty::Array(_, _) => {
                            assert_eq!(field.as_u32(), 0);
                            return CPlace {
                                inner: CPlaceInner::Var(local, var),
                                layout: layout.field(fx, field.as_u32().try_into().unwrap()),
                            };
                        }
                        _ => {
                            return CPlace {
                                inner: CPlaceInner::VarLane(
                                    local,
                                    var,
                                    field.as_u32().try_into().unwrap(),
                                ),
                                layout: layout.field(fx, field.as_u32().try_into().unwrap()),
                            };
                        }
                    }
                }
                _ => {}
            },
            CPlaceInner::VarPair(local, var1, var2) => {
                let layout = layout.field(&*fx, field.index());

                match field.as_u32() {
                    0 => return CPlace { inner: CPlaceInner::Var(local, var1), layout },
                    1 => return CPlace { inner: CPlaceInner::Var(local, var2), layout },
                    _ => unreachable!("field should be 0 or 1"),
                }
            }
            _ => {}
        }

        let (base, extra) = self.to_ptr_maybe_unsized();
        let (field_ptr, field_layout) = codegen_field(fx, base, extra, layout, field);
        if field_layout.is_unsized() {
            if let ty::Foreign(_) = field_layout.ty.kind() {
                assert!(extra.is_none());
                CPlace::for_ptr(field_ptr, field_layout)
            } else {
                CPlace::for_ptr_with_extra(field_ptr, extra.unwrap(), field_layout)
            }
        } else {
            CPlace::for_ptr(field_ptr, field_layout)
        }
    }

    /// Like [`CPlace::place_field`] except handling ADTs containing a single array field in a way
    /// such that you can access individual lanes.
    pub(crate) fn place_lane(
        self,
        fx: &mut FunctionCx<'_, '_, 'tcx>,
        lane_idx: u64,
    ) -> CPlace<'tcx> {
        let layout = self.layout();
        assert!(layout.ty.is_simd());
        let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
        let lane_layout = fx.layout_of(lane_ty);
        assert!(lane_idx < lane_count);

        match self.inner {
            CPlaceInner::Var(local, var) => {
                assert!(matches!(layout.abi, Abi::Vector { .. }));
                CPlace {
                    inner: CPlaceInner::VarLane(local, var, lane_idx.try_into().unwrap()),
                    layout: lane_layout,
                }
            }
            CPlaceInner::VarPair(_, _, _) => unreachable!(),
            CPlaceInner::VarLane(_, _, _) => unreachable!(),
            CPlaceInner::Addr(ptr, None) => {
                let field_offset = lane_layout.size * lane_idx;
                let field_ptr = ptr.offset_i64(fx, i64::try_from(field_offset.bytes()).unwrap());
                CPlace::for_ptr(field_ptr, lane_layout)
            }
            CPlaceInner::Addr(_, Some(_)) => unreachable!(),
        }
    }
    pub(crate) fn place_index(
        self,
        fx: &mut FunctionCx<'_, '_, 'tcx>,
        index: Value,
    ) -> CPlace<'tcx> {
        let (elem_layout, ptr) = match self.layout().ty.kind() {
            ty::Array(elem_ty, _) => (fx.layout_of(*elem_ty), self.to_ptr()),
            ty::Slice(elem_ty) => (fx.layout_of(*elem_ty), self.to_ptr_maybe_unsized().0),
            _ => bug!("place_index({:?})", self.layout().ty),
        };

        let offset = fx.bcx.ins().imul_imm(index, elem_layout.size.bytes() as i64);
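        // (sketch) the byte offset just computed is `index * size_of::<Elem>()`, e.g.
        // indexing a `[u32]` at index 3 gives 3 * 4 == 12 bytes past the base pointer.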
        CPlace::for_ptr(ptr.offset_value(fx, offset), elem_layout)
    }
    pub(crate) fn place_deref(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> CPlace<'tcx> {
        let inner_layout = fx.layout_of(self.layout().ty.builtin_deref(true).unwrap().ty);
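        // (sketch) wide pointers (e.g. `&[u8]`, `&dyn Trait`) carry metadata, so the
        // pointee place keeps (addr, extra); thin pointers (e.g. `&u32`) load only addr.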
        if has_ptr_meta(fx.tcx, inner_layout.ty) {
            let (addr, extra) = self.to_cvalue(fx).load_scalar_pair(fx);
            CPlace::for_ptr_with_extra(Pointer::new(addr), extra, inner_layout)
        } else {
            CPlace::for_ptr(Pointer::new(self.to_cvalue(fx).load_scalar(fx)), inner_layout)
        }
    }
    pub(crate) fn place_ref(
        self,
        fx: &mut FunctionCx<'_, '_, 'tcx>,
        layout: TyAndLayout<'tcx>,
    ) -> CValue<'tcx> {
        if has_ptr_meta(fx.tcx, self.layout().ty) {
            let (ptr, extra) = self.to_ptr_maybe_unsized();
            CValue::by_val_pair(
                ptr.get_addr(fx),
                extra.expect("unsized type without metadata"),
                layout,
            )
        } else {
            CValue::by_val(self.to_ptr().get_addr(fx), layout)
        }
    }

    pub(crate) fn downcast_variant(
        self,
        fx: &FunctionCx<'_, '_, 'tcx>,
        variant: VariantIdx,
    ) -> Self {
        assert!(!self.layout().is_unsized());
        let layout = self.layout().for_variant(fx, variant);
        CPlace { inner: self.inner, layout }
    }
}
pub(crate) fn assert_assignable<'tcx>(
    fx: &FunctionCx<'_, '_, 'tcx>,
    from_ty: Ty<'tcx>,
    to_ty: Ty<'tcx>,
    limit: usize,
) {
    if limit == 0 {
        // assert_assignable exists solely to catch bugs in cg_clif. it isn't necessary for
        // soundness. don't attempt to check deep types to avoid exponential behavior in certain
        // cases.
        return;
    }
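    // (sketch) callers seed `limit` with 16 (see `write_cvalue` above) and each structural
    // recursion below passes `limit - 1`, bounding how deep the check can go.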
    match (from_ty.kind(), to_ty.kind()) {
        (ty::Ref(_, a, _), ty::Ref(_, b, _))
        | (
            ty::RawPtr(TypeAndMut { ty: a, mutbl: _ }),
            ty::RawPtr(TypeAndMut { ty: b, mutbl: _ }),
        ) => {
            assert_assignable(fx, *a, *b, limit - 1);
        }
        (ty::Ref(_, a, _), ty::RawPtr(TypeAndMut { ty: b, mutbl: _ }))
        | (ty::RawPtr(TypeAndMut { ty: a, mutbl: _ }), ty::Ref(_, b, _)) => {
            assert_assignable(fx, *a, *b, limit - 1);
        }
        (ty::FnPtr(_), ty::FnPtr(_)) => {
            let from_sig = fx.tcx.normalize_erasing_late_bound_regions(
                ParamEnv::reveal_all(),
                from_ty.fn_sig(fx.tcx),
            );
            let to_sig = fx
                .tcx
                .normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), to_ty.fn_sig(fx.tcx));
            assert_eq!(
                from_sig, to_sig,
                "Can't write fn ptr with incompatible sig {:?} to place with sig {:?}\n\n{:#?}",
                from_sig, to_sig, fx,
            );
            // fn(&T) -> for<'l> fn(&'l T) is allowed
        }
        (&ty::Dynamic(from_traits, _), &ty::Dynamic(to_traits, _)) => {
            for (from, to) in from_traits.iter().zip(to_traits) {
                let from =
                    fx.tcx.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), from);
                let to = fx.tcx.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), to);
                assert_eq!(
                    from, to,
                    "Can't write trait object of incompatible traits {:?} to place with traits {:?}\n\n{:#?}",
                    from_traits, to_traits, fx,
                );
            }
            // dyn for<'r> Trait<'r> -> dyn Trait<'_> is allowed
        }
        (&ty::Tuple(types_a), &ty::Tuple(types_b)) => {
            let mut types_a = types_a.iter();
            let mut types_b = types_b.iter();
            loop {
                match (types_a.next(), types_b.next()) {
                    (Some(a), Some(b)) => assert_assignable(fx, a, b, limit - 1),
                    (None, None) => return,
                    (Some(_), None) | (None, Some(_)) => panic!("{:#?}/{:#?}", from_ty, to_ty),
                }
            }
        }
        (&ty::Adt(adt_def_a, substs_a), &ty::Adt(adt_def_b, substs_b))
            if adt_def_a.did() == adt_def_b.did() =>
        {
            let mut types_a = substs_a.types();
            let mut types_b = substs_b.types();
            loop {
                match (types_a.next(), types_b.next()) {
                    (Some(a), Some(b)) => assert_assignable(fx, a, b, limit - 1),
                    (None, None) => return,
                    (Some(_), None) | (None, Some(_)) => panic!("{:#?}/{:#?}", from_ty, to_ty),
                }
            }
        }
        (ty::Array(a, _), ty::Array(b, _)) => assert_assignable(fx, *a, *b, limit - 1),
        (&ty::Closure(def_id_a, substs_a), &ty::Closure(def_id_b, substs_b))
            if def_id_a == def_id_b =>
        {
            let mut types_a = substs_a.types();
            let mut types_b = substs_b.types();
            loop {
                match (types_a.next(), types_b.next()) {
                    (Some(a), Some(b)) => assert_assignable(fx, a, b, limit - 1),
                    (None, None) => return,
                    (Some(_), None) | (None, Some(_)) => panic!("{:#?}/{:#?}", from_ty, to_ty),
                }
            }
        }
        (ty::Param(_), _) | (_, ty::Param(_)) if fx.tcx.sess.opts.unstable_opts.polymorphize => {
            // No way to check if it is correct or not with polymorphization enabled
        }
        _ => {
            assert_eq!(
                from_ty, to_ty,
                "Can't write value with incompatible type {:?} to place with type {:?}\n\n{:#?}",
                from_ty, to_ty, fx,
            );
        }
    }
}