1 //! Definition of [`CValue`] and [`CPlace`]
5 use cranelift_codegen::entity::EntityRef;
6 use cranelift_codegen::ir::immediates::Offset32;
// Compute the pointer to and the layout of a field of a value stored in
// memory. `extra` carries the pointer metadata (e.g. slice length / vtable)
// when the base value is unsized; it is needed below to compute the dynamic
// alignment of a DST tail. Returns (field pointer, field layout).
8 fn codegen_field<'tcx>(
9 fx: &mut FunctionCx<'_, '_, 'tcx>,
12 layout: TyAndLayout<'tcx>,
14 ) -> (Pointer, TyAndLayout<'tcx>) {
// Static byte offset of the field inside `layout`, and the field's own layout.
15 let field_offset = layout.fields.offset(field.index());
16 let field_layout = layout.field(&*fx, field.index());
// Fast path: field sits at a statically known byte offset from `base`.
18 let simple = |fx: &mut FunctionCx<'_, '_, '_>| {
19 (base.offset_i64(fx, i64::try_from(field_offset.bytes()).unwrap()), field_layout)
// The base value is unsized (it has pointer metadata).
22 if let Some(extra) = extra {
// A sized field of an unsized value still has a statically known offset.
23 if !field_layout.is_unsized() {
26 match field_layout.ty.kind() {
// Slices, str and extern types have a statically known alignment,
// so the static offset is already correct.
27 ty::Slice(..) | ty::Str | ty::Foreign(..) => simple(fx),
28 ty::Adt(def, _) if def.repr.packed() => {
// NOTE(review): only packed(1) is handled here; other packed
// alignments are presumably rejected upstream — confirm.
29 assert_eq!(layout.align.abi.bytes(), 1);
33 // We have to align the offset for DST's
34 let unaligned_offset = field_offset.bytes();
// Dynamic alignment of the unsized field, computed at runtime
// from the metadata in `extra`.
35 let (_, unsized_align) =
36 crate::unsize::size_and_align_of_dst(fx, field_layout, extra);
// Emit offset = (unaligned_offset + align - 1) & -align as clif
// instructions (round the static offset up to the dynamic align).
38 let one = fx.bcx.ins().iconst(pointer_ty(fx.tcx), 1);
39 let align_sub_1 = fx.bcx.ins().isub(unsized_align, one);
40 let and_lhs = fx.bcx.ins().iadd_imm(align_sub_1, unaligned_offset as i64);
41 let zero = fx.bcx.ins().iconst(pointer_ty(fx.tcx), 0);
42 let and_rhs = fx.bcx.ins().isub(zero, unsized_align);
43 let offset = fx.bcx.ins().band(and_lhs, and_rhs);
45 (base.offset_value(fx, offset), field_layout)
// Byte offset of the second component of a `ScalarPair`: the first scalar's
// size rounded up to the second scalar's ABI alignment.
53 fn scalar_pair_calculate_b_offset(
58 let b_offset = a_scalar.value.size(&tcx).align_to(b_scalar.value.align(&tcx).abi);
59 Offset32::new(b_offset.bytes().try_into().unwrap())
63 #[derive(Debug, Copy, Clone)]
// An rvalue in codegen: the representation (`CValueInner`) paired with the
// value's type-and-layout.
64 pub(crate) struct CValue<'tcx>(CValueInner, TyAndLayout<'tcx>);
66 #[derive(Debug, Copy, Clone)]
// `ByRef`: value lives in memory; the `Option<Value>` is the unsized
// metadata (length / vtable), `None` for sized values.
68 ByRef(Pointer, Option<Value>),
// Two immediate SSA values, for `Abi::ScalarPair` values.
70 ByValPair(Value, Value),
// Constructors, accessors and projections for [`CValue`].
73 impl<'tcx> CValue<'tcx> {
// Sized value stored in memory behind `ptr`.
74 pub(crate) fn by_ref(ptr: Pointer, layout: TyAndLayout<'tcx>) -> CValue<'tcx> {
75 CValue(CValueInner::ByRef(ptr, None), layout)
// Unsized value in memory: pointer plus metadata (`meta`).
78 pub(crate) fn by_ref_unsized(
81 layout: TyAndLayout<'tcx>,
83 CValue(CValueInner::ByRef(ptr, Some(meta)), layout)
// Immediate scalar value held in a single SSA `Value`.
86 pub(crate) fn by_val(value: Value, layout: TyAndLayout<'tcx>) -> CValue<'tcx> {
87 CValue(CValueInner::ByVal(value), layout)
// Immediate scalar-pair value held in two SSA `Value`s.
90 pub(crate) fn by_val_pair(
93 layout: TyAndLayout<'tcx>,
95 CValue(CValueInner::ByValPair(value, extra), layout)
98 pub(crate) fn layout(&self) -> TyAndLayout<'tcx> {
// Get a pointer to this value, spilling immediates to a fresh stack slot
// when necessary. Second tuple element is the unsized metadata, if any.
103 pub(crate) fn force_stack(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> (Pointer, Option<Value>) {
106 CValueInner::ByRef(ptr, meta) => (ptr, meta),
107 CValueInner::ByVal(_) | CValueInner::ByValPair(_, _) => {
// Immediates have no address; write them to a stack slot first.
108 let cplace = CPlace::new_stack_slot(fx, layout);
109 cplace.write_cvalue(fx, self);
110 (cplace.to_ptr(), None)
// Non-spilling variant of `force_stack`: returns `None` for immediates.
115 pub(crate) fn try_to_ptr(self) -> Option<(Pointer, Option<Value>)> {
117 CValueInner::ByRef(ptr, meta) => Some((ptr, meta)),
118 CValueInner::ByVal(_) | CValueInner::ByValPair(_, _) => None,
122 /// Load a value with layout.abi of scalar
123 pub(crate) fn load_scalar(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> Value {
126 CValueInner::ByRef(ptr, None) => {
// Pick the clif type to load based on the layout's ABI.
127 let clif_ty = match layout.abi {
128 Abi::Scalar(ref scalar) => scalar_to_clif_type(fx.tcx, scalar.clone()),
129 Abi::Vector { ref element, count } => {
130 scalar_to_clif_type(fx.tcx, element.clone())
131 .by(u16::try_from(count).unwrap())
134 _ => unreachable!("{:?}", layout.ty),
136 let mut flags = MemFlags::new();
138 ptr.load(fx, clif_ty, flags)
140 CValueInner::ByVal(value) => value,
141 CValueInner::ByRef(_, Some(_)) => bug!("load_scalar for unsized value not allowed"),
142 CValueInner::ByValPair(_, _) => bug!("Please use load_scalar_pair for ByValPair"),
146 /// Load a value pair with layout.abi of scalar pair
147 pub(crate) fn load_scalar_pair(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> (Value, Value) {
150 CValueInner::ByRef(ptr, None) => {
151 let (a_scalar, b_scalar) = match &layout.abi {
152 Abi::ScalarPair(a, b) => (a, b),
153 _ => unreachable!("load_scalar_pair({:?})", self),
// Second component sits at the first component's size rounded up to
// the second component's alignment.
155 let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
156 let clif_ty1 = scalar_to_clif_type(fx.tcx, a_scalar.clone());
157 let clif_ty2 = scalar_to_clif_type(fx.tcx, b_scalar.clone());
158 let mut flags = MemFlags::new();
160 let val1 = ptr.load(fx, clif_ty1, flags);
161 let val2 = ptr.offset(fx, b_offset).load(fx, clif_ty2, flags);
164 CValueInner::ByRef(_, Some(_)) => {
165 bug!("load_scalar_pair for unsized value not allowed")
167 CValueInner::ByVal(_) => bug!("Please use load_scalar for ByVal"),
168 CValueInner::ByValPair(val1, val2) => (val1, val2),
// Project to a field of this value, producing a new `CValue`.
172 pub(crate) fn value_field(
174 fx: &mut FunctionCx<'_, '_, 'tcx>,
// A ByVal can only be a vector here; "field" means a SIMD lane.
179 CValueInner::ByVal(val) => match layout.abi {
180 Abi::Vector { element: _, count } => {
181 let count = u8::try_from(count).expect("SIMD type with more than 255 lanes???");
182 let field = u8::try_from(field.index()).unwrap();
183 assert!(field < count);
184 let lane = fx.bcx.ins().extractlane(val, field);
185 let field_layout = layout.field(&*fx, usize::from(field));
186 CValue::by_val(lane, field_layout)
188 _ => unreachable!("value_field for ByVal with abi {:?}", layout.abi),
// A ByValPair field is one of the two scalar components (index 0 or 1).
190 CValueInner::ByValPair(val1, val2) => match layout.abi {
191 Abi::ScalarPair(_, _) => {
192 let val = match field.as_u32() {
195 _ => bug!("field should be 0 or 1"),
197 let field_layout = layout.field(&*fx, usize::from(field));
198 CValue::by_val(val, field_layout)
200 _ => unreachable!("value_field for ByValPair with abi {:?}", layout.abi),
// In-memory values delegate to the shared field-projection helper.
202 CValueInner::ByRef(ptr, None) => {
203 let (field_ptr, field_layout) = codegen_field(fx, ptr, None, layout, field);
204 CValue::by_ref(field_ptr, field_layout)
// Field projection on an unsized by-ref value is not implemented yet.
206 CValueInner::ByRef(_, Some(_)) => todo!(),
// Perform an unsizing coercion (e.g. `&[T; N]` -> `&[T]`) into `dest`.
210 pub(crate) fn unsize_value(self, fx: &mut FunctionCx<'_, '_, 'tcx>, dest: CPlace<'tcx>) {
211 crate::unsize::coerce_unsized_into(fx, self, dest);
214 /// If `ty` is signed, `const_val` must already be sign extended.
215 pub(crate) fn const_val(
216 fx: &mut FunctionCx<'_, '_, 'tcx>,
217 layout: TyAndLayout<'tcx>,
218 const_val: ty::ScalarInt,
220 assert_eq!(const_val.size(), layout.size, "{:#?}: {:?}", const_val, layout);
221 use cranelift_codegen::ir::immediates::{Ieee32, Ieee64};
223 let clif_ty = fx.clif_type(layout.ty).unwrap();
// Sanity-check bool constants: only 0 and 1 are valid bit patterns.
225 if let ty::Bool = layout.ty.kind() {
227 const_val == ty::ScalarInt::FALSE || const_val == ty::ScalarInt::TRUE,
228 "Invalid bool 0x{:032X}",
233 let val = match layout.ty.kind() {
// 128-bit integers are built from two 64-bit halves via `iconcat`.
234 ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
235 let const_val = const_val.to_bits(layout.size).unwrap();
236 let lsb = fx.bcx.ins().iconst(types::I64, const_val as u64 as i64);
237 let msb = fx.bcx.ins().iconst(types::I64, (const_val >> 64) as u64 as i64);
238 fx.bcx.ins().iconcat(lsb, msb)
240 ty::Bool | ty::Char | ty::Uint(_) | ty::Int(_) | ty::Ref(..) | ty::RawPtr(..) => {
241 fx.bcx.ins().iconst(clif_ty, const_val.to_bits(layout.size).unwrap() as i64)
// Floats are materialized from their raw bit patterns.
243 ty::Float(FloatTy::F32) => {
244 fx.bcx.ins().f32const(Ieee32::with_bits(u32::try_from(const_val).unwrap()))
246 ty::Float(FloatTy::F64) => {
247 fx.bcx.ins().f64const(Ieee64::with_bits(u64::try_from(const_val).unwrap()))
250 "CValue::const_val for non bool/char/float/integer/pointer type {:?} is not allowed",
255 CValue::by_val(val, layout)
// Reinterpret a pointer-like value as a different pointer-like type.
// Both layouts must share the same ABI; only the type changes, not the bits.
258 pub(crate) fn cast_pointer_to(self, layout: TyAndLayout<'tcx>) -> Self {
259 assert!(matches!(self.layout().ty.kind(), ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..)));
260 assert!(matches!(layout.ty.kind(), ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..)));
261 assert_eq!(self.layout().abi, layout.abi);
262 CValue(self.0, layout)
266 /// A place where you can write a value to or read a value from
267 #[derive(Debug, Copy, Clone)]
268 pub(crate) struct CPlace<'tcx> {
270 layout: TyAndLayout<'tcx>,
273 #[derive(Debug, Copy, Clone)]
274 pub(crate) enum CPlaceInner {
// Whole value stored in a single SSA variable; `Local` is presumably the
// MIR local this variable belongs to — confirm against callers.
275 Var(Local, Variable),
// Scalar-pair value stored in two SSA variables.
276 VarPair(Local, Variable, Variable),
// One lane (the `u8` index) of a vector stored in an SSA variable.
277 VarLane(Local, Variable, u8),
// In-memory place: pointer plus optional unsized metadata.
278 Addr(Pointer, Option<Value>),
// Constructors, reads, writes and projections for [`CPlace`].
281 impl<'tcx> CPlace<'tcx> {
282 pub(crate) fn layout(&self) -> TyAndLayout<'tcx> {
286 pub(crate) fn inner(&self) -> &CPlaceInner {
// Zero-sized place: a dangling but well-aligned address, never dereferenced.
290 pub(crate) fn no_place(layout: TyAndLayout<'tcx>) -> CPlace<'tcx> {
291 CPlace { inner: CPlaceInner::Addr(Pointer::dangling(layout.align.pref), None), layout }
// Allocate a stack slot large enough for `layout` and return it as a place.
294 pub(crate) fn new_stack_slot(
295 fx: &mut FunctionCx<'_, '_, 'tcx>,
296 layout: TyAndLayout<'tcx>,
298 assert!(!layout.is_unsized());
// ZSTs need no storage at all.
299 if layout.size.bytes() == 0 {
300 return CPlace::no_place(layout);
303 let stack_slot = fx.bcx.create_stack_slot(StackSlotData {
304 kind: StackSlotKind::ExplicitSlot,
305 // FIXME Don't force the size to a multiple of 16 bytes once Cranelift gets a way to
306 // specify stack slot alignment.
307 size: (u32::try_from(layout.size.bytes()).unwrap() + 15) / 16 * 16,
310 CPlace { inner: CPlaceInner::Addr(Pointer::stack_slot(stack_slot), None), layout }
// Place backed by a single fresh SSA variable (scalar locals).
313 pub(crate) fn new_var(
314 fx: &mut FunctionCx<'_, '_, 'tcx>,
316 layout: TyAndLayout<'tcx>,
318 let var = Variable::with_u32(fx.next_ssa_var);
319 fx.next_ssa_var += 1;
320 fx.bcx.declare_var(var, fx.clif_type(layout.ty).unwrap());
321 CPlace { inner: CPlaceInner::Var(local, var), layout }
// Place backed by two fresh SSA variables (scalar-pair locals).
324 pub(crate) fn new_var_pair(
325 fx: &mut FunctionCx<'_, '_, 'tcx>,
327 layout: TyAndLayout<'tcx>,
329 let var1 = Variable::with_u32(fx.next_ssa_var);
330 fx.next_ssa_var += 1;
331 let var2 = Variable::with_u32(fx.next_ssa_var);
332 fx.next_ssa_var += 1;
334 let (ty1, ty2) = fx.clif_pair_type(layout.ty).unwrap();
335 fx.bcx.declare_var(var1, ty1);
336 fx.bcx.declare_var(var2, ty2);
337 CPlace { inner: CPlaceInner::VarPair(local, var1, var2), layout }
// Sized place at an existing address.
340 pub(crate) fn for_ptr(ptr: Pointer, layout: TyAndLayout<'tcx>) -> CPlace<'tcx> {
341 CPlace { inner: CPlaceInner::Addr(ptr, None), layout }
// Unsized place: address plus metadata (`extra`).
344 pub(crate) fn for_ptr_with_extra(
347 layout: TyAndLayout<'tcx>,
349 CPlace { inner: CPlaceInner::Addr(ptr, Some(extra)), layout }
// Read the current contents of this place as a `CValue`.
352 pub(crate) fn to_cvalue(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> CValue<'tcx> {
353 let layout = self.layout();
// SSA-variable places read the variable(s) directly, no memory access.
355 CPlaceInner::Var(_local, var) => {
356 let val = fx.bcx.use_var(var);
357 //fx.bcx.set_val_label(val, cranelift_codegen::ir::ValueLabel::new(var.index()));
358 CValue::by_val(val, layout)
360 CPlaceInner::VarPair(_local, var1, var2) => {
361 let val1 = fx.bcx.use_var(var1);
362 //fx.bcx.set_val_label(val1, cranelift_codegen::ir::ValueLabel::new(var1.index()));
363 let val2 = fx.bcx.use_var(var2);
364 //fx.bcx.set_val_label(val2, cranelift_codegen::ir::ValueLabel::new(var2.index()));
365 CValue::by_val_pair(val1, val2, layout)
// Extract the selected lane from the vector variable.
367 CPlaceInner::VarLane(_local, var, lane) => {
368 let val = fx.bcx.use_var(var);
369 //fx.bcx.set_val_label(val, cranelift_codegen::ir::ValueLabel::new(var.index()));
370 let val = fx.bcx.ins().extractlane(val, lane);
371 CValue::by_val(val, layout)
// Memory places become by-ref values, keeping any unsized metadata.
373 CPlaceInner::Addr(ptr, extra) => {
374 if let Some(extra) = extra {
375 CValue::by_ref_unsized(ptr, extra, layout)
377 CValue::by_ref(ptr, layout)
// Address of this place; `bug!`s for unsized places (and, via
// `to_ptr_maybe_unsized`, for SSA-variable places).
383 pub(crate) fn to_ptr(self) -> Pointer {
384 match self.to_ptr_maybe_unsized() {
386 (_, Some(_)) => bug!("Expected sized cplace, found {:?}", self),
// Address plus optional unsized metadata; only valid for `Addr` places.
390 pub(crate) fn to_ptr_maybe_unsized(self) -> (Pointer, Option<Value>) {
392 CPlaceInner::Addr(ptr, extra) => (ptr, extra),
393 CPlaceInner::Var(_, _)
394 | CPlaceInner::VarPair(_, _, _)
395 | CPlaceInner::VarLane(_, _, _) => bug!("Expected CPlace::Addr, found {:?}", self),
// Type-checked store: `from`'s type must be assignable to this place's type.
399 pub(crate) fn write_cvalue(self, fx: &mut FunctionCx<'_, '_, 'tcx>, from: CValue<'tcx>) {
400 assert_assignable(fx, from.layout().ty, self.layout().ty);
402 self.write_cvalue_maybe_transmute(fx, from, "write_cvalue");
// Store that allows source and destination types to differ (transmute),
// provided their layouts are compatible (same size, checked below).
405 pub(crate) fn write_cvalue_transmute(
407 fx: &mut FunctionCx<'_, '_, 'tcx>,
410 self.write_cvalue_maybe_transmute(fx, from, "write_cvalue_transmute");
// Shared implementation of `write_cvalue` / `write_cvalue_transmute`.
// `method` is only used in debug-assertion diagnostics.
413 fn write_cvalue_maybe_transmute(
415 fx: &mut FunctionCx<'_, '_, 'tcx>,
417 #[cfg_attr(not(debug_assertions), allow(unused_variables))] method: &'static str,
// Define `var` to `data`, bit-casting between same-sized clif types
// when the source and destination types differ.
419 fn transmute_value<'tcx>(
420 fx: &mut FunctionCx<'_, '_, 'tcx>,
425 let src_ty = fx.bcx.func.dfg.value_type(data);
429 "write_cvalue_transmute: {:?} -> {:?}",
433 let data = match (src_ty, dst_ty) {
434 (_, _) if src_ty == dst_ty => data,
436 // This is a `write_cvalue_transmute`.
// int <-> float of the same width: a plain bitcast suffices.
437 (types::I32, types::F32)
438 | (types::F32, types::I32)
439 | (types::I64, types::F64)
440 | (types::F64, types::I64) => fx.bcx.ins().bitcast(dst_ty, data),
441 _ if src_ty.is_vector() && dst_ty.is_vector() => {
442 fx.bcx.ins().raw_bitcast(dst_ty, data)
444 _ if src_ty.is_vector() || dst_ty.is_vector() => {
445 // FIXME do something more efficient for transmutes between vectors and integers.
// Round-trip through a stack slot to change the value's type.
446 let stack_slot = fx.bcx.create_stack_slot(StackSlotData {
447 kind: StackSlotKind::ExplicitSlot,
448 // FIXME Don't force the size to a multiple of 16 bytes once Cranelift gets a way to
449 // specify stack slot alignment.
450 size: (src_ty.bytes() + 15) / 16 * 16,
453 let ptr = Pointer::stack_slot(stack_slot);
454 ptr.store(fx, data, MemFlags::trusted());
455 ptr.load(fx, dst_ty, MemFlags::trusted())
457 _ => unreachable!("write_cvalue_transmute: {:?} -> {:?}", src_ty, dst_ty),
459 //fx.bcx.set_val_label(data, cranelift_codegen::ir::ValueLabel::new(var.index()));
460 fx.bcx.def_var(var, data);
// A transmute is only legal between layouts of identical size.
463 assert_eq!(self.layout().size, from.layout().size);
// Debug aid: annotate the last emitted instruction with what is being
// written where (only under debug assertions).
465 #[cfg(debug_assertions)]
467 use cranelift_codegen::cursor::{Cursor, CursorPosition};
468 let cur_block = match fx.bcx.cursor().position() {
469 CursorPosition::After(block) => block,
473 fx.bcx.func.layout.last_inst(cur_block).unwrap(),
475 "{}: {:?}: {:?} <- {:?}: {:?}",
485 let dst_layout = self.layout();
486 let to_ptr = match self.inner {
// SSA destinations: (re)define the variable(s) directly; no memory store.
487 CPlaceInner::Var(_local, var) => {
// Reinterpret `from` under the destination layout before loading.
488 let data = CValue(from.0, dst_layout).load_scalar(fx);
489 let dst_ty = fx.clif_type(self.layout().ty).unwrap();
490 transmute_value(fx, var, data, dst_ty);
493 CPlaceInner::VarPair(_local, var1, var2) => {
494 let (data1, data2) = CValue(from.0, dst_layout).load_scalar_pair(fx);
495 let (dst_ty1, dst_ty2) = fx.clif_pair_type(self.layout().ty).unwrap();
496 transmute_value(fx, var1, data1, dst_ty1);
497 transmute_value(fx, var2, data2, dst_ty2);
// Lane destination: read-modify-write the whole vector variable.
500 CPlaceInner::VarLane(_local, var, lane) => {
501 let data = from.load_scalar(fx);
503 // First get the old vector
504 let vector = fx.bcx.use_var(var);
505 //fx.bcx.set_val_label(vector, cranelift_codegen::ir::ValueLabel::new(var.index()));
507 // Next insert the written lane into the vector
508 let vector = fx.bcx.ins().insertlane(vector, data, lane);
510 // Finally write the new vector
511 //fx.bcx.set_val_label(vector, cranelift_codegen::ir::ValueLabel::new(var.index()));
512 fx.bcx.def_var(var, vector);
// Memory destination: nothing to write for ZSTs / uninhabited types.
516 CPlaceInner::Addr(ptr, None) => {
517 if dst_layout.size == Size::ZERO || dst_layout.abi == Abi::Uninhabited {
522 CPlaceInner::Addr(_, Some(_)) => bug!("Can't write value to unsized place {:?}", self),
525 let mut flags = MemFlags::new();
// Store strategy is picked from the *source* ABI: scalar store,
// scalar-pair store, or byte copy.
527 match from.layout().abi {
528 // FIXME make Abi::Vector work too
530 let val = from.load_scalar(fx);
531 to_ptr.store(fx, val, flags);
534 Abi::ScalarPair(ref a_scalar, ref b_scalar) => {
535 let (value, extra) = from.load_scalar_pair(fx);
536 let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
537 to_ptr.store(fx, value, flags);
538 to_ptr.offset(fx, b_offset).store(fx, extra, flags);
545 CValueInner::ByVal(val) => {
546 to_ptr.store(fx, val, flags);
548 CValueInner::ByValPair(_, _) => {
549 bug!("Non ScalarPair abi {:?} for ByValPair CValue", dst_layout.abi);
// In-memory source: copy the bytes with a small memcpy.
551 CValueInner::ByRef(from_ptr, None) => {
552 let from_addr = from_ptr.get_addr(fx);
553 let to_addr = to_ptr.get_addr(fx);
554 let src_layout = from.1;
// NOTE(review): the copy length is the *destination* size; for a
// transmute src and dst sizes are asserted equal above.
555 let size = dst_layout.size.bytes();
556 let src_align = src_layout.align.abi.bytes() as u8;
557 let dst_align = dst_layout.align.abi.bytes() as u8;
558 fx.bcx.emit_small_memory_copy(
559 fx.cx.module.target_config(),
568 CValueInner::ByRef(_, Some(_)) => todo!(),
// Project to a field of this place.
572 pub(crate) fn place_field(
574 fx: &mut FunctionCx<'_, '_, 'tcx>,
577 let layout = self.layout();
// Vector held in an SSA variable: a field is a lane.
580 CPlaceInner::Var(local, var) => {
581 if let Abi::Vector { .. } = layout.abi {
583 inner: CPlaceInner::VarLane(local, var, field.as_u32().try_into().unwrap()),
584 layout: layout.field(fx, field.as_u32().try_into().unwrap()),
// Scalar pair in SSA variables: fields 0/1 map to var1/var2.
588 CPlaceInner::VarPair(local, var1, var2) => {
589 let layout = layout.field(&*fx, field.index());
591 match field.as_u32() {
592 0 => return CPlace { inner: CPlaceInner::Var(local, var1), layout },
593 1 => return CPlace { inner: CPlaceInner::Var(local, var2), layout },
594 _ => unreachable!("field should be 0 or 1"),
// Otherwise fall back to address arithmetic via `codegen_field`.
600 let (base, extra) = self.to_ptr_maybe_unsized();
602 let (field_ptr, field_layout) = codegen_field(fx, base, extra, layout, field);
// An unsized field inherits the base place's metadata.
603 if field_layout.is_unsized() {
604 CPlace::for_ptr_with_extra(field_ptr, extra.unwrap(), field_layout)
606 CPlace::for_ptr(field_ptr, field_layout)
// Index into an array or slice place by a runtime `index`.
610 pub(crate) fn place_index(
612 fx: &mut FunctionCx<'_, '_, 'tcx>,
615 let (elem_layout, ptr) = match self.layout().ty.kind() {
616 ty::Array(elem_ty, _) => (fx.layout_of(elem_ty), self.to_ptr()),
// NOTE(review): the slice length metadata is discarded here — no bounds
// check is emitted at this level; presumably callers handle that.
617 ty::Slice(elem_ty) => (fx.layout_of(elem_ty), self.to_ptr_maybe_unsized().0),
618 _ => bug!("place_index({:?})", self.layout().ty),
// Byte offset = index * element size.
621 let offset = fx.bcx.ins().imul_imm(index, elem_layout.size.bytes() as i64);
623 CPlace::for_ptr(ptr.offset_value(fx, offset), elem_layout)
// Dereference a pointer-typed place, yielding the pointee place.
626 pub(crate) fn place_deref(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> CPlace<'tcx> {
627 let inner_layout = fx.layout_of(self.layout().ty.builtin_deref(true).unwrap().ty);
// Fat pointers carry their metadata into the resulting unsized place.
628 if has_ptr_meta(fx.tcx, inner_layout.ty) {
629 let (addr, extra) = self.to_cvalue(fx).load_scalar_pair(fx);
630 CPlace::for_ptr_with_extra(Pointer::new(addr), extra, inner_layout)
632 CPlace::for_ptr(Pointer::new(self.to_cvalue(fx).load_scalar(fx)), inner_layout)
// Take the address of this place as a value of pointer type `layout`.
636 pub(crate) fn place_ref(
638 fx: &mut FunctionCx<'_, '_, 'tcx>,
639 layout: TyAndLayout<'tcx>,
// Unsized places produce a fat pointer (address + metadata).
641 if has_ptr_meta(fx.tcx, self.layout().ty) {
642 let (ptr, extra) = self.to_ptr_maybe_unsized();
645 extra.expect("unsized type without metadata"),
649 CValue::by_val(self.to_ptr().get_addr(fx), layout)
// View this place as a specific enum variant: same storage, only the
// layout is replaced with the variant's layout.
653 pub(crate) fn downcast_variant(
655 fx: &FunctionCx<'_, '_, 'tcx>,
658 assert!(!self.layout().is_unsized());
659 let layout = self.layout().for_variant(fx, variant);
660 CPlace { inner: self.inner, layout }
// Debug check that a value of `from_ty` may be written to a place of `to_ty`.
// Types differing only in lifetimes/late-bound regions are considered
// assignable; genuinely incompatible types trigger a panic with diagnostics.
665 pub(crate) fn assert_assignable<'tcx>(
666 fx: &FunctionCx<'_, '_, 'tcx>,
670 match (from_ty.kind(), to_ty.kind()) {
// References / raw pointers: recurse on the pointee, ignoring regions
// and mutability.
671 (ty::Ref(_, a, _), ty::Ref(_, b, _))
673 ty::RawPtr(TypeAndMut { ty: a, mutbl: _ }),
674 ty::RawPtr(TypeAndMut { ty: b, mutbl: _ }),
676 assert_assignable(fx, a, b);
// Mixed ref <-> raw-pointer assignments also recurse on the pointee.
678 (ty::Ref(_, a, _), ty::RawPtr(TypeAndMut { ty: b, mutbl: _ }))
679 | (ty::RawPtr(TypeAndMut { ty: a, mutbl: _ }), ty::Ref(_, b, _)) => {
680 assert_assignable(fx, a, b);
// Function pointers: compare signatures after erasing late-bound regions.
682 (ty::FnPtr(_), ty::FnPtr(_)) => {
683 let from_sig = fx.tcx.normalize_erasing_late_bound_regions(
684 ParamEnv::reveal_all(),
685 from_ty.fn_sig(fx.tcx),
689 .normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), to_ty.fn_sig(fx.tcx));
692 "Can't write fn ptr with incompatible sig {:?} to place with sig {:?}\n\n{:#?}",
693 from_sig, to_sig, fx,
695 // fn(&T) -> for<'l> fn(&'l T) is allowed
// Trait objects: compare the trait lists pairwise after erasing
// late-bound regions.
697 (&ty::Dynamic(from_traits, _), &ty::Dynamic(to_traits, _)) => {
698 for (from, to) in from_traits.iter().zip(to_traits) {
700 fx.tcx.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), from);
701 let to = fx.tcx.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), to);
704 "Can't write trait object of incompatible traits {:?} to place with traits {:?}\n\n{:#?}",
705 from_traits, to_traits, fx,
708 // dyn for<'r> Trait<'r> -> dyn Trait<'_> is allowed
713 "Can't write value with incompatible type {:?} to place with type {:?}\n\n{:#?}",