//! Definition of [`CValue`] and [`CPlace`]

use crate::prelude::*;

use cranelift_codegen::ir::immediates::Offset32;

fn codegen_field<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    base: Pointer,
    extra: Option<Value>,
    layout: TyAndLayout<'tcx>,
    field: mir::Field,
) -> (Pointer, TyAndLayout<'tcx>) {
    let field_offset = layout.fields.offset(field.index());
    let field_layout = layout.field(&*fx, field.index());

    let simple = |fx: &mut FunctionCx<'_, '_, '_>| {
        (base.offset_i64(fx, i64::try_from(field_offset.bytes()).unwrap()), field_layout)
    };

    if let Some(extra) = extra {
        if field_layout.is_sized() {
            return simple(fx);
        }
        match field_layout.ty.kind() {
            ty::Slice(..) | ty::Str | ty::Foreign(..) => simple(fx),
            ty::Adt(def, _) if def.repr().packed() => {
                assert_eq!(layout.align.abi.bytes(), 1);
                simple(fx)
            }
            _ => {
                // We have to align the offset for DSTs.
                let unaligned_offset = field_offset.bytes();
                let (_, unsized_align) =
                    crate::unsize::size_and_align_of_dst(fx, field_layout, extra);
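
                // The instructions below compute
                // `(unaligned_offset + unsized_align - 1) & -unsized_align`,
                // the usual bit trick for rounding up to a power-of-two alignment.
                // For example, an unaligned offset of 5 with an alignment of 4
                // yields (5 + 3) & !3 = 8.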
                let one = fx.bcx.ins().iconst(fx.pointer_type, 1);
                let align_sub_1 = fx.bcx.ins().isub(unsized_align, one);
                let and_lhs = fx.bcx.ins().iadd_imm(align_sub_1, unaligned_offset as i64);
                let zero = fx.bcx.ins().iconst(fx.pointer_type, 0);
                let and_rhs = fx.bcx.ins().isub(zero, unsized_align);
                let offset = fx.bcx.ins().band(and_lhs, and_rhs);

                (base.offset_value(fx, offset), field_layout)
            }
        }
    } else {
        simple(fx)
    }
}
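
/// Compute the offset of the second scalar of a `ScalarPair` layout: the size of
/// the first scalar rounded up to the alignment of the second. For example, for
/// the fat pointer `&[u8]` on a 64-bit target both scalars are 8 bytes, so the
/// length lives at offset 8.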
fn scalar_pair_calculate_b_offset(tcx: TyCtxt<'_>, a_scalar: Scalar, b_scalar: Scalar) -> Offset32 {
    let b_offset = a_scalar.size(&tcx).align_to(b_scalar.align(&tcx).abi);
    Offset32::new(b_offset.bytes().try_into().unwrap())
}

/// A [`Value`] with an attached [`TyAndLayout`].
#[derive(Debug, Copy, Clone)]
pub(crate) struct CValue<'tcx>(CValueInner, TyAndLayout<'tcx>);

#[derive(Debug, Copy, Clone)]
enum CValueInner {
    ByRef(Pointer, Option<Value>),
    ByVal(Value),
    ByValPair(Value, Value),
}

impl<'tcx> CValue<'tcx> {
    pub(crate) fn by_ref(ptr: Pointer, layout: TyAndLayout<'tcx>) -> CValue<'tcx> {
        CValue(CValueInner::ByRef(ptr, None), layout)
    }

    pub(crate) fn by_ref_unsized(
        ptr: Pointer,
        meta: Value,
        layout: TyAndLayout<'tcx>,
    ) -> CValue<'tcx> {
        CValue(CValueInner::ByRef(ptr, Some(meta)), layout)
    }

    pub(crate) fn by_val(value: Value, layout: TyAndLayout<'tcx>) -> CValue<'tcx> {
        CValue(CValueInner::ByVal(value), layout)
    }

    pub(crate) fn by_val_pair(
        value: Value,
        extra: Value,
        layout: TyAndLayout<'tcx>,
    ) -> CValue<'tcx> {
        CValue(CValueInner::ByValPair(value, extra), layout)
    }

    pub(crate) fn layout(&self) -> TyAndLayout<'tcx> {
        self.1
    }

    pub(crate) fn force_stack(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> (Pointer, Option<Value>) {
        let layout = self.1;
        match self.0 {
            CValueInner::ByRef(ptr, meta) => (ptr, meta),
            CValueInner::ByVal(_) | CValueInner::ByValPair(_, _) => {
                let cplace = CPlace::new_stack_slot(fx, layout);
                cplace.write_cvalue(fx, self);
                (cplace.to_ptr(), None)
            }
        }
    }

    /// Forces the data value of a dyn* value to the stack and returns a pointer to it as well as the
    /// vtable pointer.
    pub(crate) fn dyn_star_force_data_on_stack(
        self,
        fx: &mut FunctionCx<'_, '_, 'tcx>,
    ) -> (Value, Value) {
        assert!(self.1.ty.is_dyn_star());
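
        // A dyn* value has a `ScalarPair` ABI: the first scalar is the pointer-sized
        // data and the second is the vtable pointer, just like a fat pointer.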
        match self.0 {
            CValueInner::ByRef(ptr, None) => {
                let (a_scalar, b_scalar) = match self.1.abi {
                    Abi::ScalarPair(a, b) => (a, b),
                    _ => unreachable!("dyn_star_force_data_on_stack({:?})", self),
                };
                let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
                let clif_ty2 = scalar_to_clif_type(fx.tcx, b_scalar);
                let mut flags = MemFlags::new();
                flags.set_notrap();
                let vtable = ptr.offset(fx, b_offset).load(fx, clif_ty2, flags);
                (ptr.get_addr(fx), vtable)
            }
            CValueInner::ByValPair(data, vtable) => {
                let stack_slot = fx.bcx.create_sized_stack_slot(StackSlotData {
                    kind: StackSlotKind::ExplicitSlot,
                    // FIXME Don't force the size to a multiple of 16 bytes once Cranelift gets a way to
                    // specify stack slot alignment.
                    size: (u32::try_from(fx.target_config.pointer_type().bytes()).unwrap() + 15)
                        / 16
                        * 16,
                });
                let data_ptr = Pointer::stack_slot(stack_slot);
                let mut flags = MemFlags::new();
                flags.set_notrap();
                data_ptr.store(fx, data, flags);

                (data_ptr.get_addr(fx), vtable)
            }
            CValueInner::ByRef(_, Some(_)) | CValueInner::ByVal(_) => {
                unreachable!("dyn_star_force_data_on_stack({:?})", self)
            }
        }
    }

    pub(crate) fn try_to_ptr(self) -> Option<(Pointer, Option<Value>)> {
        match self.0 {
            CValueInner::ByRef(ptr, meta) => Some((ptr, meta)),
            CValueInner::ByVal(_) | CValueInner::ByValPair(_, _) => None,
        }
    }

    /// Load a value whose `layout.abi` is scalar.
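    /// For example a `u32` or a thin pointer loads as a single value; fat pointers
    /// and other `ScalarPair` values must use [`CValue::load_scalar_pair`] instead.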
    pub(crate) fn load_scalar(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> Value {
        let layout = self.1;
        match self.0 {
            CValueInner::ByRef(ptr, None) => {
                let clif_ty = match layout.abi {
                    Abi::Scalar(scalar) => scalar_to_clif_type(fx.tcx, scalar),
                    Abi::Vector { element, count } => scalar_to_clif_type(fx.tcx, element)
                        .by(u32::try_from(count).unwrap())
                        .unwrap(),
                    _ => unreachable!("{:?}", layout.ty),
                };
                let mut flags = MemFlags::new();
                flags.set_notrap();
                ptr.load(fx, clif_ty, flags)
            }
            CValueInner::ByVal(value) => value,
            CValueInner::ByRef(_, Some(_)) => bug!("load_scalar for unsized value not allowed"),
            CValueInner::ByValPair(_, _) => bug!("Please use load_scalar_pair for ByValPair"),
        }
    }

    /// Load a value pair whose `layout.abi` is scalar pair.
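    /// For example `&[u8]` loads as its data pointer plus its length.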
    pub(crate) fn load_scalar_pair(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> (Value, Value) {
        let layout = self.1;
        match self.0 {
            CValueInner::ByRef(ptr, None) => {
                let (a_scalar, b_scalar) = match layout.abi {
                    Abi::ScalarPair(a, b) => (a, b),
                    _ => unreachable!("load_scalar_pair({:?})", self),
                };
                let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
                let clif_ty1 = scalar_to_clif_type(fx.tcx, a_scalar);
                let clif_ty2 = scalar_to_clif_type(fx.tcx, b_scalar);
                let mut flags = MemFlags::new();
                flags.set_notrap();
                let val1 = ptr.load(fx, clif_ty1, flags);
                let val2 = ptr.offset(fx, b_offset).load(fx, clif_ty2, flags);
                (val1, val2)
            }
            CValueInner::ByRef(_, Some(_)) => {
                bug!("load_scalar_pair for unsized value not allowed")
            }
            CValueInner::ByVal(_) => bug!("Please use load_scalar for ByVal"),
            CValueInner::ByValPair(val1, val2) => (val1, val2),
        }
    }

    pub(crate) fn value_field(
        self,
        fx: &mut FunctionCx<'_, '_, 'tcx>,
        field: mir::Field,
    ) -> CValue<'tcx> {
        let layout = self.1;
        match self.0 {
            CValueInner::ByVal(val) => match layout.abi {
                Abi::Vector { element: _, count } => {
                    let count = u8::try_from(count).expect("SIMD type with more than 255 lanes???");
                    let field = u8::try_from(field.index()).unwrap();
                    assert!(field < count);
                    let lane = fx.bcx.ins().extractlane(val, field);
                    let field_layout = layout.field(&*fx, usize::from(field));
                    CValue::by_val(lane, field_layout)
                }
                _ => unreachable!("value_field for ByVal with abi {:?}", layout.abi),
            },
            CValueInner::ByValPair(val1, val2) => match layout.abi {
                Abi::ScalarPair(_, _) => {
                    let val = match field.as_u32() {
                        0 => val1,
                        1 => val2,
                        _ => bug!("field should be 0 or 1"),
                    };
                    let field_layout = layout.field(&*fx, usize::from(field));
                    CValue::by_val(val, field_layout)
                }
                _ => unreachable!("value_field for ByValPair with abi {:?}", layout.abi),
            },
            CValueInner::ByRef(ptr, None) => {
                let (field_ptr, field_layout) = codegen_field(fx, ptr, None, layout, field);
                CValue::by_ref(field_ptr, field_layout)
            }
            CValueInner::ByRef(_, Some(_)) => todo!(),
        }
    }

    /// Like [`CValue::value_field`] except handling ADTs containing a single array field in a way
    /// such that you can access individual lanes.
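    /// This matters for `#[repr(simd)]` types like `std::simd::Simd<f32, 4>`, which
    /// wrap a single `[f32; 4]` array field.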
    pub(crate) fn value_lane(
        self,
        fx: &mut FunctionCx<'_, '_, 'tcx>,
        lane_idx: u64,
    ) -> CValue<'tcx> {
        let layout = self.1;
        assert!(layout.ty.is_simd());
        let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
        let lane_layout = fx.layout_of(lane_ty);
        assert!(lane_idx < lane_count);
        match self.0 {
            CValueInner::ByVal(val) => match layout.abi {
                Abi::Vector { element: _, count: _ } => {
                    assert!(lane_count <= u8::MAX.into(), "SIMD type with more than 255 lanes???");
                    let lane_idx = u8::try_from(lane_idx).unwrap();
                    let lane = fx.bcx.ins().extractlane(val, lane_idx);
                    CValue::by_val(lane, lane_layout)
                }
                _ => unreachable!("value_lane for ByVal with abi {:?}", layout.abi),
            },
            CValueInner::ByValPair(_, _) => unreachable!(),
            CValueInner::ByRef(ptr, None) => {
                let field_offset = lane_layout.size * lane_idx;
                let field_ptr = ptr.offset_i64(fx, i64::try_from(field_offset.bytes()).unwrap());
                CValue::by_ref(field_ptr, lane_layout)
            }
            CValueInner::ByRef(_, Some(_)) => unreachable!(),
        }
    }

    pub(crate) fn unsize_value(self, fx: &mut FunctionCx<'_, '_, 'tcx>, dest: CPlace<'tcx>) {
        crate::unsize::coerce_unsized_into(fx, self, dest);
    }

    pub(crate) fn coerce_dyn_star(self, fx: &mut FunctionCx<'_, '_, 'tcx>, dest: CPlace<'tcx>) {
        crate::unsize::coerce_dyn_star(fx, self, dest);
    }

    /// If `ty` is signed, `const_val` must already be sign extended.
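    /// For example `-1i16` must be passed as the two sign-extended bytes `0xFFFF`.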
    pub(crate) fn const_val(
        fx: &mut FunctionCx<'_, '_, 'tcx>,
        layout: TyAndLayout<'tcx>,
        const_val: ty::ScalarInt,
    ) -> CValue<'tcx> {
        assert_eq!(const_val.size(), layout.size, "{:#?}: {:?}", const_val, layout);
        use cranelift_codegen::ir::immediates::{Ieee32, Ieee64};

        let clif_ty = fx.clif_type(layout.ty).unwrap();

        if let ty::Bool = layout.ty.kind() {
            assert!(
                const_val == ty::ScalarInt::FALSE || const_val == ty::ScalarInt::TRUE,
                "Invalid bool 0x{:032X}",
                const_val
            );
        }

        let val = match layout.ty.kind() {
            ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
                let const_val = const_val.to_bits(layout.size).unwrap();
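                // Cranelift has no 128 bit immediates, so materialize the constant
                // as two 64 bit halves and concatenate them.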
                let lsb = fx.bcx.ins().iconst(types::I64, const_val as u64 as i64);
                let msb = fx.bcx.ins().iconst(types::I64, (const_val >> 64) as u64 as i64);
                fx.bcx.ins().iconcat(lsb, msb)
            }
            ty::Bool | ty::Char | ty::Uint(_) | ty::Int(_) | ty::Ref(..) | ty::RawPtr(..) => {
                fx.bcx.ins().iconst(clif_ty, const_val.to_bits(layout.size).unwrap() as i64)
            }
            ty::Float(FloatTy::F32) => {
                fx.bcx.ins().f32const(Ieee32::with_bits(u32::try_from(const_val).unwrap()))
            }
            ty::Float(FloatTy::F64) => {
                fx.bcx.ins().f64const(Ieee64::with_bits(u64::try_from(const_val).unwrap()))
            }
            _ => panic!(
                "CValue::const_val for non bool/char/float/integer/pointer type {:?} is not allowed",
                layout.ty
            ),
        };

        CValue::by_val(val, layout)
    }

    pub(crate) fn cast_pointer_to(self, layout: TyAndLayout<'tcx>) -> Self {
        assert!(matches!(self.layout().ty.kind(), ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..)));
        assert!(matches!(layout.ty.kind(), ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..)));
        assert_eq!(self.layout().abi, layout.abi);
        CValue(self.0, layout)
    }
}

/// A place where you can write a value to or read a value from.
#[derive(Debug, Copy, Clone)]
pub(crate) struct CPlace<'tcx> {
    inner: CPlaceInner,
    layout: TyAndLayout<'tcx>,
}

#[derive(Debug, Copy, Clone)]
pub(crate) enum CPlaceInner {
    /// The place is backed by a single SSA variable.
    Var(Local, Variable),
    /// The place is split across two SSA variables, e.g. for a fat pointer.
    VarPair(Local, Variable, Variable),
    /// The place is a single lane of a vector stored in an SSA variable.
    VarLane(Local, Variable, u8),
    /// The place lives in memory at the given address, with optional metadata
    /// for unsized places.
    Addr(Pointer, Option<Value>),
}

impl<'tcx> CPlace<'tcx> {
    pub(crate) fn layout(&self) -> TyAndLayout<'tcx> {
        self.layout
    }

    pub(crate) fn inner(&self) -> &CPlaceInner {
        &self.inner
    }

    pub(crate) fn new_stack_slot(
        fx: &mut FunctionCx<'_, '_, 'tcx>,
        layout: TyAndLayout<'tcx>,
    ) -> CPlace<'tcx> {
        assert!(layout.is_sized());
        if layout.size.bytes() == 0 {
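            // Zero-sized places are never actually read from or written to, so a
            // well-aligned dangling pointer is all that is needed.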
            return CPlace {
                inner: CPlaceInner::Addr(Pointer::dangling(layout.align.pref), None),
                layout,
            };
        }

        if layout.size.bytes() >= u64::from(u32::MAX - 16) {
            fx.tcx
                .sess
                .fatal(&format!("values of type {} are too big to store on the stack", layout.ty));
        }

        let stack_slot = fx.bcx.create_sized_stack_slot(StackSlotData {
            kind: StackSlotKind::ExplicitSlot,
            // FIXME Don't force the size to a multiple of 16 bytes once Cranelift gets a way to
            // specify stack slot alignment.
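            // Round the size up to a multiple of 16; e.g. a 20 byte value gets a
            // 32 byte slot.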
            size: (u32::try_from(layout.size.bytes()).unwrap() + 15) / 16 * 16,
        });
        CPlace { inner: CPlaceInner::Addr(Pointer::stack_slot(stack_slot), None), layout }
    }

    pub(crate) fn new_var(
        fx: &mut FunctionCx<'_, '_, 'tcx>,
        local: Local,
        layout: TyAndLayout<'tcx>,
    ) -> CPlace<'tcx> {
        let var = Variable::from_u32(fx.next_ssa_var);
        fx.next_ssa_var += 1;
        fx.bcx.declare_var(var, fx.clif_type(layout.ty).unwrap());
        CPlace { inner: CPlaceInner::Var(local, var), layout }
    }

    pub(crate) fn new_var_pair(
        fx: &mut FunctionCx<'_, '_, 'tcx>,
        local: Local,
        layout: TyAndLayout<'tcx>,
    ) -> CPlace<'tcx> {
        let var1 = Variable::from_u32(fx.next_ssa_var);
        fx.next_ssa_var += 1;
        let var2 = Variable::from_u32(fx.next_ssa_var);
        fx.next_ssa_var += 1;

        let (ty1, ty2) = fx.clif_pair_type(layout.ty).unwrap();
        fx.bcx.declare_var(var1, ty1);
        fx.bcx.declare_var(var2, ty2);
        CPlace { inner: CPlaceInner::VarPair(local, var1, var2), layout }
    }

    pub(crate) fn for_ptr(ptr: Pointer, layout: TyAndLayout<'tcx>) -> CPlace<'tcx> {
        CPlace { inner: CPlaceInner::Addr(ptr, None), layout }
    }

    pub(crate) fn for_ptr_with_extra(
        ptr: Pointer,
        extra: Value,
        layout: TyAndLayout<'tcx>,
    ) -> CPlace<'tcx> {
        CPlace { inner: CPlaceInner::Addr(ptr, Some(extra)), layout }
    }

    pub(crate) fn to_cvalue(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> CValue<'tcx> {
        let layout = self.layout();
        match self.inner {
            CPlaceInner::Var(_local, var) => {
                let val = fx.bcx.use_var(var);
                //fx.bcx.set_val_label(val, cranelift_codegen::ir::ValueLabel::new(var.index()));
                CValue::by_val(val, layout)
            }
            CPlaceInner::VarPair(_local, var1, var2) => {
                let val1 = fx.bcx.use_var(var1);
                //fx.bcx.set_val_label(val1, cranelift_codegen::ir::ValueLabel::new(var1.index()));
                let val2 = fx.bcx.use_var(var2);
                //fx.bcx.set_val_label(val2, cranelift_codegen::ir::ValueLabel::new(var2.index()));
                CValue::by_val_pair(val1, val2, layout)
            }
            CPlaceInner::VarLane(_local, var, lane) => {
                let val = fx.bcx.use_var(var);
                //fx.bcx.set_val_label(val, cranelift_codegen::ir::ValueLabel::new(var.index()));
                let val = fx.bcx.ins().extractlane(val, lane);
                CValue::by_val(val, layout)
            }
            CPlaceInner::Addr(ptr, extra) => {
                if let Some(extra) = extra {
                    CValue::by_ref_unsized(ptr, extra, layout)
                } else {
                    CValue::by_ref(ptr, layout)
                }
            }
        }
    }

    pub(crate) fn to_ptr(self) -> Pointer {
        match self.to_ptr_maybe_unsized() {
            (ptr, None) => ptr,
            (_, Some(_)) => bug!("Expected sized cplace, found {:?}", self),
        }
    }

    pub(crate) fn to_ptr_maybe_unsized(self) -> (Pointer, Option<Value>) {
        match self.inner {
            CPlaceInner::Addr(ptr, extra) => (ptr, extra),
            CPlaceInner::Var(_, _)
            | CPlaceInner::VarPair(_, _, _)
            | CPlaceInner::VarLane(_, _, _) => bug!("Expected CPlace::Addr, found {:?}", self),
        }
    }

    pub(crate) fn write_cvalue(self, fx: &mut FunctionCx<'_, '_, 'tcx>, from: CValue<'tcx>) {
        assert_assignable(fx, from.layout().ty, self.layout().ty, 16);

        self.write_cvalue_maybe_transmute(fx, from, "write_cvalue");
    }

    pub(crate) fn write_cvalue_transmute(
        self,
        fx: &mut FunctionCx<'_, '_, 'tcx>,
        from: CValue<'tcx>,
    ) {
        self.write_cvalue_maybe_transmute(fx, from, "write_cvalue_transmute");
    }

    fn write_cvalue_maybe_transmute(
        self,
        fx: &mut FunctionCx<'_, '_, 'tcx>,
        from: CValue<'tcx>,
        method: &'static str,
    ) {
        fn transmute_value<'tcx>(
            fx: &mut FunctionCx<'_, '_, 'tcx>,
            var: Variable,
            data: Value,
            dst_ty: Type,
        ) {
            let src_ty = fx.bcx.func.dfg.value_type(data);
            assert_eq!(
                src_ty.bytes(),
                dst_ty.bytes(),
                "write_cvalue_transmute: {:?} -> {:?}",
                src_ty,
                dst_ty,
            );
            let data = match (src_ty, dst_ty) {
                (_, _) if src_ty == dst_ty => data,

                // This is a `write_cvalue_transmute`.
                (types::I32, types::F32)
                | (types::F32, types::I32)
                | (types::I64, types::F64)
                | (types::F64, types::I64) => fx.bcx.ins().bitcast(dst_ty, data),
                _ if src_ty.is_vector() && dst_ty.is_vector() => fx.bcx.ins().bitcast(dst_ty, data),
                _ if src_ty.is_vector() || dst_ty.is_vector() => {
                    // FIXME do something more efficient for transmutes between vectors and integers.
                    let stack_slot = fx.bcx.create_sized_stack_slot(StackSlotData {
                        kind: StackSlotKind::ExplicitSlot,
                        // FIXME Don't force the size to a multiple of 16 bytes once Cranelift gets a way to
                        // specify stack slot alignment.
                        size: (src_ty.bytes() + 15) / 16 * 16,
                    });
                    let ptr = Pointer::stack_slot(stack_slot);
                    ptr.store(fx, data, MemFlags::trusted());
                    ptr.load(fx, dst_ty, MemFlags::trusted())
                }

                // `CValue`s should never contain SSA-only types, so if you ended
                // up here having seen an error like `B1 -> I8`, then before
                // calling `write_cvalue` you need to add a `bint` instruction.
                _ => unreachable!("write_cvalue_transmute: {:?} -> {:?}", src_ty, dst_ty),
            };
            //fx.bcx.set_val_label(data, cranelift_codegen::ir::ValueLabel::new(var.index()));
            fx.bcx.def_var(var, data);
        }

        assert_eq!(self.layout().size, from.layout().size);

        if fx.clif_comments.enabled() {
            use cranelift_codegen::cursor::{Cursor, CursorPosition};
            let cur_block = match fx.bcx.cursor().position() {
                CursorPosition::After(block) => block,
                _ => unreachable!(),
            };
            fx.add_comment(
                fx.bcx.func.layout.last_inst(cur_block).unwrap(),
                format!(
                    "{}: {:?}: {:?} <- {:?}: {:?}",
                    method,
                    self.inner(),
                    self.layout().ty,
                    from.0,
                    from.layout().ty
                ),
            );
        }

        let dst_layout = self.layout();
        let to_ptr = match self.inner {
            CPlaceInner::Var(_local, var) => {
                if let ty::Array(element, len) = dst_layout.ty.kind() {
                    // Can only happen for vector types
                    let len =
                        u32::try_from(len.eval_usize(fx.tcx, ParamEnv::reveal_all())).unwrap();
                    let vector_ty = fx.clif_type(*element).unwrap().by(len).unwrap();

                    let data = match from.0 {
                        CValueInner::ByRef(ptr, None) => {
                            let mut flags = MemFlags::new();
                            flags.set_notrap();
                            ptr.load(fx, vector_ty, flags)
                        }
                        CValueInner::ByVal(_)
                        | CValueInner::ByValPair(_, _)
                        | CValueInner::ByRef(_, Some(_)) => bug!("array should be ByRef"),
                    };

                    fx.bcx.def_var(var, data);
                    return;
                }
                let data = CValue(from.0, dst_layout).load_scalar(fx);
                let dst_ty = fx.clif_type(self.layout().ty).unwrap();
                transmute_value(fx, var, data, dst_ty);
                return;
            }
            CPlaceInner::VarPair(_local, var1, var2) => {
                let (ptr, meta) = from.force_stack(fx);
                assert!(meta.is_none());
                let (data1, data2) =
                    CValue(CValueInner::ByRef(ptr, None), dst_layout).load_scalar_pair(fx);
                let (dst_ty1, dst_ty2) = fx.clif_pair_type(self.layout().ty).unwrap();
                transmute_value(fx, var1, data1, dst_ty1);
                transmute_value(fx, var2, data2, dst_ty2);
                return;
            }
            CPlaceInner::VarLane(_local, var, lane) => {
                let data = from.load_scalar(fx);

                // First get the old vector
                let vector = fx.bcx.use_var(var);
                //fx.bcx.set_val_label(vector, cranelift_codegen::ir::ValueLabel::new(var.index()));

                // Next insert the written lane into the vector
                let vector = fx.bcx.ins().insertlane(vector, data, lane);

                // Finally write the new vector
                //fx.bcx.set_val_label(vector, cranelift_codegen::ir::ValueLabel::new(var.index()));
                fx.bcx.def_var(var, vector);

                return;
            }
            CPlaceInner::Addr(ptr, None) => {
                if dst_layout.size == Size::ZERO || dst_layout.abi == Abi::Uninhabited {
                    return;
                }
                ptr
            }
            CPlaceInner::Addr(_, Some(_)) => bug!("Can't write value to unsized place {:?}", self),
        };

        let mut flags = MemFlags::new();
        flags.set_notrap();
        match from.layout().abi {
            // FIXME make Abi::Vector work too
            Abi::Scalar(_) => {
                let val = from.load_scalar(fx);
                to_ptr.store(fx, val, flags);
                return;
            }
            Abi::ScalarPair(a_scalar, b_scalar) => {
                let (value, extra) = from.load_scalar_pair(fx);
                let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
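                // e.g. for `&[u8]` this stores the data pointer at offset 0 and the
                // length at `b_offset`.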
                to_ptr.store(fx, value, flags);
                to_ptr.offset(fx, b_offset).store(fx, extra, flags);
                return;
            }
            _ => {}
        }

        match from.0 {
            CValueInner::ByVal(val) => {
                to_ptr.store(fx, val, flags);
            }
            CValueInner::ByValPair(_, _) => {
                bug!("Non ScalarPair abi {:?} for ByValPair CValue", dst_layout.abi);
            }
            CValueInner::ByRef(from_ptr, None) => {
                let from_addr = from_ptr.get_addr(fx);
                let to_addr = to_ptr.get_addr(fx);
                let src_layout = from.1;
                let size = dst_layout.size.bytes();
                let src_align = src_layout.align.abi.bytes() as u8;
                let dst_align = dst_layout.align.abi.bytes() as u8;
                fx.bcx.emit_small_memory_copy(
                    fx.target_config,
                    to_addr,
                    from_addr,
                    size,
                    dst_align,
                    src_align,
                    /* non_overlapping */ true,
                    flags,
                );
            }
            CValueInner::ByRef(_, Some(_)) => todo!(),
        }
    }

    pub(crate) fn place_opaque_cast(
        self,
        fx: &mut FunctionCx<'_, '_, 'tcx>,
        ty: Ty<'tcx>,
    ) -> CPlace<'tcx> {
        CPlace { inner: self.inner, layout: fx.layout_of(ty) }
    }

    pub(crate) fn place_field(
        self,
        fx: &mut FunctionCx<'_, '_, 'tcx>,
        field: mir::Field,
    ) -> CPlace<'tcx> {
        let layout = self.layout();

        match self.inner {
            CPlaceInner::Var(local, var) => match layout.ty.kind() {
                ty::Array(_, _) => {
                    // Can only happen for vector types
                    return CPlace {
                        inner: CPlaceInner::VarLane(local, var, field.as_u32().try_into().unwrap()),
                        layout: layout.field(fx, field.as_u32().try_into().unwrap()),
                    };
                }
                ty::Adt(adt_def, substs) if layout.ty.is_simd() => {
                    let f0_ty = adt_def.non_enum_variant().fields[0].ty(fx.tcx, substs);

                    match f0_ty.kind() {
                        ty::Array(_, _) => {
                            assert_eq!(field.as_u32(), 0);
                            return CPlace {
                                inner: CPlaceInner::Var(local, var),
                                layout: layout.field(fx, field.as_u32().try_into().unwrap()),
                            };
                        }
                        _ => {
                            return CPlace {
                                inner: CPlaceInner::VarLane(
                                    local,
                                    var,
                                    field.as_u32().try_into().unwrap(),
                                ),
                                layout: layout.field(fx, field.as_u32().try_into().unwrap()),
                            };
                        }
                    }
                }
                _ => {}
            },
            CPlaceInner::VarPair(local, var1, var2) => {
                let layout = layout.field(&*fx, field.index());

                match field.as_u32() {
                    0 => return CPlace { inner: CPlaceInner::Var(local, var1), layout },
                    1 => return CPlace { inner: CPlaceInner::Var(local, var2), layout },
                    _ => unreachable!("field should be 0 or 1"),
                }
            }
            _ => {}
        }

        let (base, extra) = self.to_ptr_maybe_unsized();

        let (field_ptr, field_layout) = codegen_field(fx, base, extra, layout, field);
        if field_layout.is_unsized() {
            if let ty::Foreign(_) = field_layout.ty.kind() {
                assert!(extra.is_none());
                CPlace::for_ptr(field_ptr, field_layout)
            } else {
                CPlace::for_ptr_with_extra(field_ptr, extra.unwrap(), field_layout)
            }
        } else {
            CPlace::for_ptr(field_ptr, field_layout)
        }
    }

    /// Like [`CPlace::place_field`] except handling ADTs containing a single array field in a way
    /// such that you can access individual lanes.
    pub(crate) fn place_lane(
        self,
        fx: &mut FunctionCx<'_, '_, 'tcx>,
        lane_idx: u64,
    ) -> CPlace<'tcx> {
        let layout = self.layout();
        assert!(layout.ty.is_simd());
        let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
        let lane_layout = fx.layout_of(lane_ty);
        assert!(lane_idx < lane_count);

        match self.inner {
            CPlaceInner::Var(local, var) => {
                assert!(matches!(layout.abi, Abi::Vector { .. }));
                CPlace {
                    inner: CPlaceInner::VarLane(local, var, lane_idx.try_into().unwrap()),
                    layout: lane_layout,
                }
            }
            CPlaceInner::VarPair(_, _, _) => unreachable!(),
            CPlaceInner::VarLane(_, _, _) => unreachable!(),
            CPlaceInner::Addr(ptr, None) => {
                let field_offset = lane_layout.size * lane_idx;
                let field_ptr = ptr.offset_i64(fx, i64::try_from(field_offset.bytes()).unwrap());
                CPlace::for_ptr(field_ptr, lane_layout)
            }
            CPlaceInner::Addr(_, Some(_)) => unreachable!(),
        }
    }

    pub(crate) fn place_index(
        self,
        fx: &mut FunctionCx<'_, '_, 'tcx>,
        index: Value,
    ) -> CPlace<'tcx> {
        let (elem_layout, ptr) = match self.layout().ty.kind() {
            ty::Array(elem_ty, _) => (fx.layout_of(*elem_ty), self.to_ptr()),
            ty::Slice(elem_ty) => (fx.layout_of(*elem_ty), self.to_ptr_maybe_unsized().0),
            _ => bug!("place_index({:?})", self.layout().ty),
        };

        // The byte offset of the element is `index * size_of::<Elem>()`.
        let offset = fx.bcx.ins().imul_imm(index, elem_layout.size.bytes() as i64);

        CPlace::for_ptr(ptr.offset_value(fx, offset), elem_layout)
    }

    pub(crate) fn place_deref(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> CPlace<'tcx> {
        let inner_layout = fx.layout_of(self.layout().ty.builtin_deref(true).unwrap().ty);
        if has_ptr_meta(fx.tcx, inner_layout.ty) {
            let (addr, extra) = self.to_cvalue(fx).load_scalar_pair(fx);
            CPlace::for_ptr_with_extra(Pointer::new(addr), extra, inner_layout)
        } else {
            CPlace::for_ptr(Pointer::new(self.to_cvalue(fx).load_scalar(fx)), inner_layout)
        }
    }

    pub(crate) fn place_ref(
        self,
        fx: &mut FunctionCx<'_, '_, 'tcx>,
        layout: TyAndLayout<'tcx>,
    ) -> CValue<'tcx> {
        if has_ptr_meta(fx.tcx, self.layout().ty) {
            let (ptr, extra) = self.to_ptr_maybe_unsized();
            CValue::by_val_pair(
                ptr.get_addr(fx),
                extra.expect("unsized type without metadata"),
                layout,
            )
        } else {
            CValue::by_val(self.to_ptr().get_addr(fx), layout)
        }
    }

    pub(crate) fn downcast_variant(
        self,
        fx: &FunctionCx<'_, '_, 'tcx>,
        variant: VariantIdx,
    ) -> Self {
        assert!(self.layout().is_sized());
        let layout = self.layout().for_variant(fx, variant);
        CPlace { inner: self.inner, layout }
    }
}

#[track_caller]
pub(crate) fn assert_assignable<'tcx>(
    fx: &FunctionCx<'_, '_, 'tcx>,
    from_ty: Ty<'tcx>,
    to_ty: Ty<'tcx>,
    limit: usize,
) {
    if limit == 0 {
        // assert_assignable exists solely to catch bugs in cg_clif. It isn't necessary for
        // soundness. Don't attempt to check deep types, to avoid exponential behavior in
        // certain cases.
        return;
    }
    match (from_ty.kind(), to_ty.kind()) {
        (ty::Ref(_, a, _), ty::Ref(_, b, _))
        | (
            ty::RawPtr(TypeAndMut { ty: a, mutbl: _ }),
            ty::RawPtr(TypeAndMut { ty: b, mutbl: _ }),
        ) => {
            assert_assignable(fx, *a, *b, limit - 1);
        }
        (ty::Ref(_, a, _), ty::RawPtr(TypeAndMut { ty: b, mutbl: _ }))
        | (ty::RawPtr(TypeAndMut { ty: a, mutbl: _ }), ty::Ref(_, b, _)) => {
            assert_assignable(fx, *a, *b, limit - 1);
        }
        (ty::FnPtr(_), ty::FnPtr(_)) => {
            let from_sig = fx.tcx.normalize_erasing_late_bound_regions(
                ParamEnv::reveal_all(),
                from_ty.fn_sig(fx.tcx),
            );
            let to_sig = fx
                .tcx
                .normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), to_ty.fn_sig(fx.tcx));
            assert_eq!(
                from_sig, to_sig,
                "Can't write fn ptr with incompatible sig {:?} to place with sig {:?}\n\n{:#?}",
                from_sig, to_sig, fx,
            );
            // fn(&T) -> for<'l> fn(&'l T) is allowed
        }
        (&ty::Dynamic(from_traits, _, _from_kind), &ty::Dynamic(to_traits, _, _to_kind)) => {
            // FIXME(dyn-star): Do the right thing with DynKinds
            for (from, to) in from_traits.iter().zip(to_traits) {
                let from =
                    fx.tcx.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), from);
                let to = fx.tcx.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), to);
                assert_eq!(
                    from, to,
                    "Can't write trait object of incompatible traits {:?} to place with traits {:?}\n\n{:#?}",
                    from_traits, to_traits, fx,
                );
            }
            // dyn for<'r> Trait<'r> -> dyn Trait<'_> is allowed
        }
        (&ty::Tuple(types_a), &ty::Tuple(types_b)) => {
            let mut types_a = types_a.iter();
            let mut types_b = types_b.iter();
            loop {
                match (types_a.next(), types_b.next()) {
                    (Some(a), Some(b)) => assert_assignable(fx, a, b, limit - 1),
                    (None, None) => return,
                    (Some(_), None) | (None, Some(_)) => panic!("{:#?}/{:#?}", from_ty, to_ty),
                }
            }
        }
        (&ty::Adt(adt_def_a, substs_a), &ty::Adt(adt_def_b, substs_b))
            if adt_def_a.did() == adt_def_b.did() =>
        {
            let mut types_a = substs_a.types();
            let mut types_b = substs_b.types();
            loop {
                match (types_a.next(), types_b.next()) {
                    (Some(a), Some(b)) => assert_assignable(fx, a, b, limit - 1),
                    (None, None) => return,
                    (Some(_), None) | (None, Some(_)) => panic!("{:#?}/{:#?}", from_ty, to_ty),
                }
            }
        }
        (ty::Array(a, _), ty::Array(b, _)) => assert_assignable(fx, *a, *b, limit - 1),
        (&ty::Closure(def_id_a, substs_a), &ty::Closure(def_id_b, substs_b))
            if def_id_a == def_id_b =>
        {
            let mut types_a = substs_a.types();
            let mut types_b = substs_b.types();
            loop {
                match (types_a.next(), types_b.next()) {
                    (Some(a), Some(b)) => assert_assignable(fx, a, b, limit - 1),
                    (None, None) => return,
                    (Some(_), None) | (None, Some(_)) => panic!("{:#?}/{:#?}", from_ty, to_ty),
                }
            }
        }
        (ty::Param(_), _) | (_, ty::Param(_)) if fx.tcx.sess.opts.unstable_opts.polymorphize => {
            // No way to check if it is correct or not with polymorphization enabled
        }
        _ => {
            assert_eq!(
                from_ty, to_ty,
                "Can't write value with incompatible type {:?} to place with type {:?}\n\n{:#?}",
                from_ty, to_ty, fx,
            );
        }
    }
}