diff --git a/src/value_and_place.rs b/src/value_and_place.rs
index 847c5fc7870..52541d5b496 100644
--- a/src/value_and_place.rs
+++ b/src/value_and_place.rs
@@ -1,10 +1,11 @@
+//! Definition of [`CValue`] and [`CPlace`]
+
 use crate::prelude::*;
 
-use cranelift_codegen::entity::EntityRef;
 use cranelift_codegen::ir::immediates::Offset32;
 
 fn codegen_field<'tcx>(
-    fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
+    fx: &mut FunctionCx<'_, '_, 'tcx>,
     base: Pointer,
     extra: Option<Value>,
     layout: TyAndLayout<'tcx>,
@@ -13,22 +14,19 @@ fn codegen_field<'tcx>(
     let field_offset = layout.fields.offset(field.index());
     let field_layout = layout.field(&*fx, field.index());
 
-    let simple = |fx: &mut FunctionCx<'_, '_, _>| {
-        (
-            base.offset_i64(fx, i64::try_from(field_offset.bytes()).unwrap()),
-            field_layout,
-        )
+    let simple = |fx: &mut FunctionCx<'_, '_, '_>| {
+        (base.offset_i64(fx, i64::try_from(field_offset.bytes()).unwrap()), field_layout)
     };
 
     if let Some(extra) = extra {
         if !field_layout.is_unsized() {
             return simple(fx);
         }
-        match field_layout.ty.kind {
-            ty::Slice(..) | ty::Str | ty::Foreign(..) => return simple(fx),
+        match field_layout.ty.kind() {
+            ty::Slice(..) | ty::Str | ty::Foreign(..) => simple(fx),
             ty::Adt(def, _) if def.repr.packed() => {
                 assert_eq!(layout.align.abi.bytes(), 1);
-                return simple(fx);
+                simple(fx)
             }
             _ => {
                 // We have to align the offset for DST's
@@ -36,10 +34,10 @@ fn codegen_field<'tcx>(
                 let (_, unsized_align) =
                     crate::unsize::size_and_align_of_dst(fx, field_layout, extra);
 
-                let one = fx.bcx.ins().iconst(pointer_ty(fx.tcx), 1);
+                let one = fx.bcx.ins().iconst(fx.pointer_type, 1);
                 let align_sub_1 = fx.bcx.ins().isub(unsized_align, one);
                 let and_lhs = fx.bcx.ins().iadd_imm(align_sub_1, unaligned_offset as i64);
-                let zero = fx.bcx.ins().iconst(pointer_ty(fx.tcx), 0);
+                let zero = fx.bcx.ins().iconst(fx.pointer_type, 0);
                 let and_rhs = fx.bcx.ins().isub(zero, unsized_align);
                 let offset = fx.bcx.ins().band(and_lhs, and_rhs);
 
@@ -51,15 +49,8 @@ fn codegen_field<'tcx>(
     }
 }
 
-fn scalar_pair_calculate_b_offset(
-    tcx: TyCtxt<'_>,
-    a_scalar: &Scalar,
-    b_scalar: &Scalar,
-) -> Offset32 {
-    let b_offset = a_scalar
-        .value
-        .size(&tcx)
-        .align_to(b_scalar.value.align(&tcx).abi);
+fn scalar_pair_calculate_b_offset(tcx: TyCtxt<'_>, a_scalar: Scalar, b_scalar: Scalar) -> Offset32 {
+    let b_offset = a_scalar.value.size(&tcx).align_to(b_scalar.value.align(&tcx).abi);
     Offset32::new(b_offset.bytes().try_into().unwrap())
 }
 
@@ -104,10 +95,7 @@ pub(crate) fn layout(&self) -> TyAndLayout<'tcx> {
     }
 
     // FIXME remove
-    pub(crate) fn force_stack(
-        self,
-        fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
-    ) -> (Pointer, Option<Value>) {
+    pub(crate) fn force_stack(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> (Pointer, Option<Value>) {
         let layout = self.1;
         match self.0 {
             CValueInner::ByRef(ptr, meta) => (ptr, meta),
@@ -127,20 +115,20 @@ pub(crate) fn try_to_ptr(self) -> Option<(Pointer, Option<Value>)> {
     }
 
     /// Load a value with layout.abi of scalar
-    pub(crate) fn load_scalar(self, fx: &mut FunctionCx<'_, 'tcx, impl Backend>) -> Value {
+    pub(crate) fn load_scalar(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> Value {
         let layout = self.1;
         match self.0 {
             CValueInner::ByRef(ptr, None) => {
                 let clif_ty = match layout.abi {
-                    Abi::Scalar(ref scalar) => scalar_to_clif_type(fx.tcx, scalar.clone()),
-                    Abi::Vector { ref element, count } => {
-                        scalar_to_clif_type(fx.tcx, element.clone())
-                            .by(u16::try_from(count).unwrap())
-                            .unwrap()
-                    }
+                    Abi::Scalar(scalar) => scalar_to_clif_type(fx.tcx, scalar),
+                    Abi::Vector { element, count } => scalar_to_clif_type(fx.tcx, element)
+                        .by(u16::try_from(count).unwrap())
+                        .unwrap(),
                     _ => unreachable!("{:?}", layout.ty),
                 };
-                ptr.load(fx, clif_ty, MemFlags::new())
+                let mut flags = MemFlags::new();
+                flags.set_notrap();
+                ptr.load(fx, clif_ty, flags)
             }
             CValueInner::ByVal(value) => value,
             CValueInner::ByRef(_, Some(_)) => bug!("load_scalar for unsized value not allowed"),
@@ -149,22 +137,21 @@ pub(crate) fn load_scalar(self, fx: &mut FunctionCx<'_, 'tcx, impl Backend>) ->
     }
 
     /// Load a value pair with layout.abi of scalar pair
-    pub(crate) fn load_scalar_pair(
-        self,
-        fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
-    ) -> (Value, Value) {
+    pub(crate) fn load_scalar_pair(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> (Value, Value) {
         let layout = self.1;
         match self.0 {
             CValueInner::ByRef(ptr, None) => {
-                let (a_scalar, b_scalar) = match &layout.abi {
+                let (a_scalar, b_scalar) = match layout.abi {
                     Abi::ScalarPair(a, b) => (a, b),
                     _ => unreachable!("load_scalar_pair({:?})", self),
                 };
                 let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
-                let clif_ty1 = scalar_to_clif_type(fx.tcx, a_scalar.clone());
-                let clif_ty2 = scalar_to_clif_type(fx.tcx, b_scalar.clone());
-                let val1 = ptr.load(fx, clif_ty1, MemFlags::new());
-                let val2 = ptr.offset(fx, b_offset).load(fx, clif_ty2, MemFlags::new());
+                let clif_ty1 = scalar_to_clif_type(fx.tcx, a_scalar);
+                let clif_ty2 = scalar_to_clif_type(fx.tcx, b_scalar);
+                let mut flags = MemFlags::new();
+                flags.set_notrap();
+                let val1 = ptr.load(fx, clif_ty1, flags);
+                let val2 = ptr.offset(fx, b_offset).load(fx, clif_ty2, flags);
                 (val1, val2)
             }
             CValueInner::ByRef(_, Some(_)) => {
@@ -177,7 +164,7 @@ pub(crate) fn load_scalar_pair(
 
     pub(crate) fn value_field(
         self,
-        fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
+        fx: &mut FunctionCx<'_, '_, 'tcx>,
         field: mir::Field,
     ) -> CValue<'tcx> {
         let layout = self.1;
@@ -213,50 +200,70 @@ pub(crate) fn value_field(
         }
     }
 
-    pub(crate) fn unsize_value(
+    /// Like [`CValue::value_field`] except handling ADTs containing a single array field in a way
+    /// such that you can access individual lanes.
+    pub(crate) fn value_lane(
         self,
-        fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
-        dest: CPlace<'tcx>,
-    ) {
+        fx: &mut FunctionCx<'_, '_, 'tcx>,
+        lane_idx: u64,
+    ) -> CValue<'tcx> {
+        let layout = self.1;
+        assert!(layout.ty.is_simd());
+        let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
+        let lane_layout = fx.layout_of(lane_ty);
+        assert!(lane_idx < lane_count);
+        match self.0 {
+            CValueInner::ByVal(val) => match layout.abi {
+                Abi::Vector { element: _, count: _ } => {
+                    assert!(lane_count <= u8::MAX.into(), "SIMD type with more than 255 lanes???");
+                    let lane_idx = u8::try_from(lane_idx).unwrap();
+                    let lane = fx.bcx.ins().extractlane(val, lane_idx);
+                    CValue::by_val(lane, lane_layout)
+                }
+                _ => unreachable!("value_lane for ByVal with abi {:?}", layout.abi),
+            },
+            CValueInner::ByValPair(_, _) => unreachable!(),
+            CValueInner::ByRef(ptr, None) => {
+                let field_offset = lane_layout.size * lane_idx;
+                let field_ptr = ptr.offset_i64(fx, i64::try_from(field_offset.bytes()).unwrap());
+                CValue::by_ref(field_ptr, lane_layout)
+            }
+            CValueInner::ByRef(_, Some(_)) => unreachable!(),
+        }
+    }
+
+    pub(crate) fn unsize_value(self, fx: &mut FunctionCx<'_, '_, 'tcx>, dest: CPlace<'tcx>) {
         crate::unsize::coerce_unsized_into(fx, self, dest);
     }
 
     /// If `ty` is signed, `const_val` must already be sign extended.
     pub(crate) fn const_val(
-        fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
+        fx: &mut FunctionCx<'_, '_, 'tcx>,
         layout: TyAndLayout<'tcx>,
-        const_val: u128,
+        const_val: ty::ScalarInt,
     ) -> CValue<'tcx> {
+        assert_eq!(const_val.size(), layout.size, "{:#?}: {:?}", const_val, layout);
         use cranelift_codegen::ir::immediates::{Ieee32, Ieee64};
 
         let clif_ty = fx.clif_type(layout.ty).unwrap();
 
-        match layout.ty.kind {
-            ty::Bool => {
-                assert!(
-                    const_val == 0 || const_val == 1,
-                    "Invalid bool 0x{:032X}",
-                    const_val
-                );
-            }
-            _ => {}
+        if let ty::Bool = layout.ty.kind() {
+            assert!(
+                const_val == ty::ScalarInt::FALSE || const_val == ty::ScalarInt::TRUE,
+                "Invalid bool 0x{:032X}",
+                const_val
+            );
         }
 
-        let val = match layout.ty.kind {
+        let val = match layout.ty.kind() {
             ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
+                let const_val = const_val.to_bits(layout.size).unwrap();
                 let lsb = fx.bcx.ins().iconst(types::I64, const_val as u64 as i64);
-                let msb = fx
-                    .bcx
-                    .ins()
-                    .iconst(types::I64, (const_val >> 64) as u64 as i64);
+                let msb = fx.bcx.ins().iconst(types::I64, (const_val >> 64) as u64 as i64);
                 fx.bcx.ins().iconcat(lsb, msb)
             }
-            ty::Bool | ty::Char | ty::Uint(_) | ty::Int(_) | ty::Ref(..)
-            | ty::RawPtr(..) => {
-                fx
-                    .bcx
-                    .ins()
-                    .iconst(clif_ty, u64::try_from(const_val).expect("uint") as i64)
+            ty::Bool | ty::Char | ty::Uint(_) | ty::Int(_) | ty::Ref(..) | ty::RawPtr(..) => {
+                fx.bcx.ins().iconst(clif_ty, const_val.to_bits(layout.size).unwrap() as i64)
             }
             ty::Float(FloatTy::F32) => {
                 fx.bcx.ins().f32const(Ieee32::with_bits(u32::try_from(const_val).unwrap()))
@@ -274,14 +281,8 @@ pub(crate) fn const_val(
     }
 
     pub(crate) fn cast_pointer_to(self, layout: TyAndLayout<'tcx>) -> Self {
-        assert!(matches!(
-            self.layout().ty.kind,
-            ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..)
-        ));
-        assert!(matches!(
-            layout.ty.kind,
-            ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..)
-        ));
+        assert!(matches!(self.layout().ty.kind(), ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..)));
+        assert!(matches!(layout.ty.kind(), ty::Ref(..) | ty::RawPtr(..) | ty::FnPtr(..)));
         assert_eq!(self.layout().abi, layout.abi);
         CValue(self.0, layout)
     }
@@ -311,49 +312,40 @@ pub(crate) fn inner(&self) -> &CPlaceInner {
         &self.inner
     }
 
-    pub(crate) fn no_place(layout: TyAndLayout<'tcx>) -> CPlace<'tcx> {
-        CPlace {
-            inner: CPlaceInner::Addr(Pointer::dangling(layout.align.pref), None),
-            layout,
-        }
-    }
-
     pub(crate) fn new_stack_slot(
-        fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
+        fx: &mut FunctionCx<'_, '_, 'tcx>,
         layout: TyAndLayout<'tcx>,
     ) -> CPlace<'tcx> {
         assert!(!layout.is_unsized());
         if layout.size.bytes() == 0 {
-            return CPlace::no_place(layout);
+            return CPlace {
+                inner: CPlaceInner::Addr(Pointer::dangling(layout.align.pref), None),
+                layout,
+            };
         }
 
         let stack_slot = fx.bcx.create_stack_slot(StackSlotData {
             kind: StackSlotKind::ExplicitSlot,
-            size: layout.size.bytes() as u32,
-            offset: None,
+            // FIXME Don't force the size to a multiple of 16 bytes once Cranelift gets a way to
+            // specify stack slot alignment.
+            size: (u32::try_from(layout.size.bytes()).unwrap() + 15) / 16 * 16,
         });
-        CPlace {
-            inner: CPlaceInner::Addr(Pointer::stack_slot(stack_slot), None),
-            layout,
-        }
+        CPlace { inner: CPlaceInner::Addr(Pointer::stack_slot(stack_slot), None), layout }
     }
 
     pub(crate) fn new_var(
-        fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
+        fx: &mut FunctionCx<'_, '_, 'tcx>,
        local: Local,
        layout: TyAndLayout<'tcx>,
    ) -> CPlace<'tcx> {
        let var = Variable::with_u32(fx.next_ssa_var);
        fx.next_ssa_var += 1;
        fx.bcx.declare_var(var, fx.clif_type(layout.ty).unwrap());
-        CPlace {
-            inner: CPlaceInner::Var(local, var),
-            layout,
-        }
+        CPlace { inner: CPlaceInner::Var(local, var), layout }
    }
 
     pub(crate) fn new_var_pair(
-        fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
+        fx: &mut FunctionCx<'_, '_, 'tcx>,
         local: Local,
         layout: TyAndLayout<'tcx>,
     ) -> CPlace<'tcx> {
@@ -365,17 +357,11 @@ pub(crate) fn new_var_pair(
         let (ty1, ty2) = fx.clif_pair_type(layout.ty).unwrap();
         fx.bcx.declare_var(var1, ty1);
         fx.bcx.declare_var(var2, ty2);
-        CPlace {
-            inner: CPlaceInner::VarPair(local, var1, var2),
-            layout,
-        }
+        CPlace { inner: CPlaceInner::VarPair(local, var1, var2), layout }
     }
 
     pub(crate) fn for_ptr(ptr: Pointer, layout: TyAndLayout<'tcx>) -> CPlace<'tcx> {
-        CPlace {
-            inner: CPlaceInner::Addr(ptr, None),
-            layout,
-        }
+        CPlace { inner: CPlaceInner::Addr(ptr, None), layout }
     }
 
     pub(crate) fn for_ptr_with_extra(
@@ -383,34 +369,27 @@ pub(crate) fn for_ptr_with_extra(
         extra: Value,
         layout: TyAndLayout<'tcx>,
     ) -> CPlace<'tcx> {
-        CPlace {
-            inner: CPlaceInner::Addr(ptr, Some(extra)),
-            layout,
-        }
+        CPlace { inner: CPlaceInner::Addr(ptr, Some(extra)), layout }
     }
 
-    pub(crate) fn to_cvalue(self, fx: &mut FunctionCx<'_, 'tcx, impl Backend>) -> CValue<'tcx> {
+    pub(crate) fn to_cvalue(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> CValue<'tcx> {
         let layout = self.layout();
         match self.inner {
             CPlaceInner::Var(_local, var) => {
                 let val = fx.bcx.use_var(var);
-                fx.bcx
-                    .set_val_label(val, cranelift_codegen::ir::ValueLabel::new(var.index()));
+                //fx.bcx.set_val_label(val, cranelift_codegen::ir::ValueLabel::new(var.index()));
                 CValue::by_val(val, layout)
             }
             CPlaceInner::VarPair(_local, var1, var2) => {
                 let val1 = fx.bcx.use_var(var1);
-                fx.bcx
-                    .set_val_label(val1, cranelift_codegen::ir::ValueLabel::new(var1.index()));
+                //fx.bcx.set_val_label(val1, cranelift_codegen::ir::ValueLabel::new(var1.index()));
                 let val2 = fx.bcx.use_var(var2);
-                fx.bcx
-                    .set_val_label(val2, cranelift_codegen::ir::ValueLabel::new(var2.index()));
+                //fx.bcx.set_val_label(val2, cranelift_codegen::ir::ValueLabel::new(var2.index()));
                 CValue::by_val_pair(val1, val2, layout)
             }
             CPlaceInner::VarLane(_local, var, lane) => {
                 let val = fx.bcx.use_var(var);
-                fx.bcx
-                    .set_val_label(val, cranelift_codegen::ir::ValueLabel::new(var.index()));
+                //fx.bcx.set_val_label(val, cranelift_codegen::ir::ValueLabel::new(var.index()));
                 let val = fx.bcx.ins().extractlane(val, lane);
                 CValue::by_val(val, layout)
             }
@@ -440,67 +419,7 @@ pub(crate) fn to_ptr_maybe_unsized(self) -> (Pointer, Option<Value>) {
         }
     }
 
-    pub(crate) fn write_cvalue(
-        self,
-        fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
-        from: CValue<'tcx>,
-    ) {
-        fn assert_assignable<'tcx>(
-            fx: &FunctionCx<'_, 'tcx, impl Backend>,
-            from_ty: Ty<'tcx>,
-            to_ty: Ty<'tcx>,
-        ) {
-            match (&from_ty.kind, &to_ty.kind) {
-                (ty::Ref(_, a, _), ty::Ref(_, b, _))
-                | (
-                    ty::RawPtr(TypeAndMut { ty: a, mutbl: _ }),
-                    ty::RawPtr(TypeAndMut { ty: b, mutbl: _ }),
-                ) => {
-                    assert_assignable(fx, a, b);
-                }
-                (ty::FnPtr(_), ty::FnPtr(_)) => {
-                    let from_sig = fx.tcx.normalize_erasing_late_bound_regions(
-                        ParamEnv::reveal_all(),
-                        &from_ty.fn_sig(fx.tcx),
-                    );
-                    let to_sig = fx.tcx.normalize_erasing_late_bound_regions(
-                        ParamEnv::reveal_all(),
-                        &to_ty.fn_sig(fx.tcx),
-                    );
-                    assert_eq!(
-                        from_sig, to_sig,
-                        "Can't write fn ptr with incompatible sig {:?} to place with sig {:?}\n\n{:#?}",
-                        from_sig, to_sig, fx,
-                    );
-                    // fn(&T) -> for<'l> fn(&'l T) is allowed
-                }
-                (ty::Dynamic(from_traits, _), ty::Dynamic(to_traits, _)) => {
-                    let from_traits = fx
-                        .tcx
-                        .normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), from_traits);
-                    let to_traits = fx
-                        .tcx
-                        .normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), to_traits);
-                    assert_eq!(
-                        from_traits, to_traits,
-                        "Can't write trait object of incompatible traits {:?} to place with traits {:?}\n\n{:#?}",
-                        from_traits, to_traits, fx,
-                    );
-                    // dyn for<'r> Trait<'r> -> dyn Trait<'_> is allowed
-                }
-                _ => {
-                    assert_eq!(
-                        from_ty,
-                        to_ty,
-                        "Can't write value with incompatible type {:?} to place with type {:?}\n\n{:#?}",
-                        from_ty,
-                        to_ty,
-                        fx,
-                    );
-                }
-            }
-        }
-
+    pub(crate) fn write_cvalue(self, fx: &mut FunctionCx<'_, '_, 'tcx>, from: CValue<'tcx>) {
         assert_assignable(fx, from.layout().ty, self.layout().ty);
 
         self.write_cvalue_maybe_transmute(fx, from, "write_cvalue");
@@ -508,7 +427,7 @@ fn assert_assignable<'tcx>(
 
     pub(crate) fn write_cvalue_transmute(
         self,
-        fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
+        fx: &mut FunctionCx<'_, '_, 'tcx>,
         from: CValue<'tcx>,
     ) {
         self.write_cvalue_maybe_transmute(fx, from, "write_cvalue_transmute");
@@ -516,17 +435,24 @@ pub(crate) fn write_cvalue_transmute(
 
     fn write_cvalue_maybe_transmute(
         self,
-        fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
+        fx: &mut FunctionCx<'_, '_, 'tcx>,
         from: CValue<'tcx>,
-        #[cfg_attr(not(debug_assertions), allow(unused_variables))] method: &'static str,
+        method: &'static str,
     ) {
         fn transmute_value<'tcx>(
-            fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
+            fx: &mut FunctionCx<'_, '_, 'tcx>,
             var: Variable,
             data: Value,
             dst_ty: Type,
         ) {
             let src_ty = fx.bcx.func.dfg.value_type(data);
+            assert_eq!(
+                src_ty.bytes(),
+                dst_ty.bytes(),
+                "write_cvalue_transmute: {:?} -> {:?}",
+                src_ty,
+                dst_ty,
+            );
             let data = match (src_ty, dst_ty) {
                 (_, _) if src_ty == dst_ty => data,
 
@@ -538,17 +464,31 @@ fn transmute_value<'tcx>(
                 _ if src_ty.is_vector() && dst_ty.is_vector() => {
                     fx.bcx.ins().raw_bitcast(dst_ty, data)
                 }
+                _ if src_ty.is_vector() || dst_ty.is_vector() => {
+                    // FIXME do something more efficient for transmutes between vectors and integers.
+                    let stack_slot = fx.bcx.create_stack_slot(StackSlotData {
+                        kind: StackSlotKind::ExplicitSlot,
+                        // FIXME Don't force the size to a multiple of 16 bytes once Cranelift gets a way to
+                        // specify stack slot alignment.
+                        size: (src_ty.bytes() + 15) / 16 * 16,
+                    });
+                    let ptr = Pointer::stack_slot(stack_slot);
+                    ptr.store(fx, data, MemFlags::trusted());
+                    ptr.load(fx, dst_ty, MemFlags::trusted())
+                }
+
+                // `CValue`s should never contain SSA-only types, so if you ended
+                // up here having seen an error like `B1 -> I8`, then before
+                // calling `write_cvalue` you need to add a `bint` instruction.
                 _ => unreachable!("write_cvalue_transmute: {:?} -> {:?}", src_ty, dst_ty),
             };
-            fx.bcx
-                .set_val_label(data, cranelift_codegen::ir::ValueLabel::new(var.index()));
+            //fx.bcx.set_val_label(data, cranelift_codegen::ir::ValueLabel::new(var.index()));
             fx.bcx.def_var(var, data);
         }
 
         assert_eq!(self.layout().size, from.layout().size);
 
-        #[cfg(debug_assertions)]
-        {
+        if fx.clif_comments.enabled() {
             use cranelift_codegen::cursor::{Cursor, CursorPosition};
             let cur_block = match fx.bcx.cursor().position() {
                 CursorPosition::After(block) => block,
@@ -570,6 +510,26 @@ fn transmute_value<'tcx>(
         let dst_layout = self.layout();
         let to_ptr = match self.inner {
             CPlaceInner::Var(_local, var) => {
+                if let ty::Array(element, len) = dst_layout.ty.kind() {
+                    // Can only happen for vector types
+                    let len =
+                        u16::try_from(len.eval_usize(fx.tcx, ParamEnv::reveal_all())).unwrap();
+                    let vector_ty = fx.clif_type(element).unwrap().by(len).unwrap();
+
+                    let data = match from.0 {
+                        CValueInner::ByRef(ptr, None) => {
+                            let mut flags = MemFlags::new();
+                            flags.set_notrap();
+                            ptr.load(fx, vector_ty, flags)
+                        }
+                        CValueInner::ByVal(_)
+                        | CValueInner::ByValPair(_, _)
+                        | CValueInner::ByRef(_, Some(_)) => bug!("array should be ByRef"),
+                    };
+
+                    fx.bcx.def_var(var, data);
+                    return;
+                }
                 let data = CValue(from.0, dst_layout).load_scalar(fx);
                 let dst_ty = fx.clif_type(self.layout().ty).unwrap();
                 transmute_value(fx, var, data, dst_ty);
@@ -587,15 +547,13 @@ fn transmute_value<'tcx>(
 
                 // First get the old vector
                 let vector = fx.bcx.use_var(var);
-                fx.bcx
-                    .set_val_label(vector, cranelift_codegen::ir::ValueLabel::new(var.index()));
+                //fx.bcx.set_val_label(vector, cranelift_codegen::ir::ValueLabel::new(var.index()));
 
                 // Next insert the written lane into the vector
                 let vector = fx.bcx.ins().insertlane(vector, data, lane);
 
                 // Finally write the new vector
-                fx.bcx
-                    .set_val_label(vector, cranelift_codegen::ir::ValueLabel::new(var.index()));
+                //fx.bcx.set_val_label(vector, cranelift_codegen::ir::ValueLabel::new(var.index()));
                 fx.bcx.def_var(var, vector);
 
                 return;
@@ -609,20 +567,20 @@ fn transmute_value<'tcx>(
             CPlaceInner::Addr(_, Some(_)) => bug!("Can't write value to unsized place {:?}", self),
         };
 
+        let mut flags = MemFlags::new();
+        flags.set_notrap();
         match from.layout().abi {
             // FIXME make Abi::Vector work too
             Abi::Scalar(_) => {
                 let val = from.load_scalar(fx);
-                to_ptr.store(fx, val, MemFlags::new());
+                to_ptr.store(fx, val, flags);
                 return;
             }
-            Abi::ScalarPair(ref a_scalar, ref b_scalar) => {
+            Abi::ScalarPair(a_scalar, b_scalar) => {
                 let (value, extra) = from.load_scalar_pair(fx);
                 let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
-                to_ptr.store(fx, value, MemFlags::new());
-                to_ptr
-                    .offset(fx, b_offset)
-                    .store(fx, extra, MemFlags::new());
+                to_ptr.store(fx, value, flags);
+                to_ptr.offset(fx, b_offset).store(fx, extra, flags);
                 return;
             }
             _ => {}
         }
@@ -630,13 +588,10 @@ fn transmute_value<'tcx>(
 
         match from.0 {
             CValueInner::ByVal(val) => {
-                to_ptr.store(fx, val, MemFlags::new());
+                to_ptr.store(fx, val, flags);
             }
             CValueInner::ByValPair(_, _) => {
-                bug!(
-                    "Non ScalarPair abi {:?} for ByValPair CValue",
-                    dst_layout.abi
-                );
+                bug!("Non ScalarPair abi {:?} for ByValPair CValue", dst_layout.abi);
             }
             CValueInner::ByRef(from_ptr, None) => {
                 let from_addr = from_ptr.get_addr(fx);
@@ -646,13 +601,14 @@ fn transmute_value<'tcx>(
                 let src_align = src_layout.align.abi.bytes() as u8;
                 let dst_align = dst_layout.align.abi.bytes() as u8;
                 fx.bcx.emit_small_memory_copy(
-                    fx.cx.module.target_config(),
+                    fx.target_config,
                     to_addr,
                     from_addr,
                     size,
                     dst_align,
                     src_align,
                     true,
+                    MemFlags::trusted(),
                 );
             }
             CValueInner::ByRef(_, Some(_)) => todo!(),
@@ -661,36 +617,51 @@ fn transmute_value<'tcx>(
 
     pub(crate) fn place_field(
         self,
-        fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
+        fx: &mut FunctionCx<'_, '_, 'tcx>,
         field: mir::Field,
     ) -> CPlace<'tcx> {
         let layout = self.layout();
 
         match self.inner {
-            CPlaceInner::Var(local, var) => {
-                if let Abi::Vector { .. } = layout.abi {
+            CPlaceInner::Var(local, var) => match layout.ty.kind() {
+                ty::Array(_, _) => {
+                    // Can only happen for vector types
                     return CPlace {
                         inner: CPlaceInner::VarLane(local, var, field.as_u32().try_into().unwrap()),
                         layout: layout.field(fx, field.as_u32().try_into().unwrap()),
                     };
                 }
-            }
+                ty::Adt(adt_def, substs) if layout.ty.is_simd() => {
+                    let f0_ty = adt_def.non_enum_variant().fields[0].ty(fx.tcx, substs);
+
+                    match f0_ty.kind() {
+                        ty::Array(_, _) => {
+                            assert_eq!(field.as_u32(), 0);
+                            return CPlace {
+                                inner: CPlaceInner::Var(local, var),
+                                layout: layout.field(fx, field.as_u32().try_into().unwrap()),
+                            };
+                        }
+                        _ => {
+                            return CPlace {
+                                inner: CPlaceInner::VarLane(
+                                    local,
+                                    var,
+                                    field.as_u32().try_into().unwrap(),
+                                ),
+                                layout: layout.field(fx, field.as_u32().try_into().unwrap()),
+                            };
+                        }
+                    }
+                }
+                _ => {}
+            },
             CPlaceInner::VarPair(local, var1, var2) => {
                 let layout = layout.field(&*fx, field.index());
 
                 match field.as_u32() {
-                    0 => {
-                        return CPlace {
-                            inner: CPlaceInner::Var(local, var1),
-                            layout,
-                        }
-                    }
-                    1 => {
-                        return CPlace {
-                            inner: CPlaceInner::Var(local, var2),
-                            layout,
-                        }
-                    }
+                    0 => return CPlace { inner: CPlaceInner::Var(local, var1), layout },
+                    1 => return CPlace { inner: CPlaceInner::Var(local, var2), layout },
                     _ => unreachable!("field should be 0 or 1"),
                 }
             }
@@ -707,41 +678,67 @@ pub(crate) fn place_field(
         }
     }
 
+    /// Like [`CPlace::place_field`] except handling ADTs containing a single array field in a way
+    /// such that you can access individual lanes.
+    pub(crate) fn place_lane(
+        self,
+        fx: &mut FunctionCx<'_, '_, 'tcx>,
+        lane_idx: u64,
+    ) -> CPlace<'tcx> {
+        let layout = self.layout();
+        assert!(layout.ty.is_simd());
+        let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
+        let lane_layout = fx.layout_of(lane_ty);
+        assert!(lane_idx < lane_count);
+
+        match self.inner {
+            CPlaceInner::Var(local, var) => {
+                assert!(matches!(layout.abi, Abi::Vector { .. }));
})); + CPlace { + inner: CPlaceInner::VarLane(local, var, lane_idx.try_into().unwrap()), + layout: lane_layout, + } + } + CPlaceInner::VarPair(_, _, _) => unreachable!(), + CPlaceInner::VarLane(_, _, _) => unreachable!(), + CPlaceInner::Addr(ptr, None) => { + let field_offset = lane_layout.size * lane_idx; + let field_ptr = ptr.offset_i64(fx, i64::try_from(field_offset.bytes()).unwrap()); + CPlace::for_ptr(field_ptr, lane_layout) + } + CPlaceInner::Addr(_, Some(_)) => unreachable!(), + } + } + pub(crate) fn place_index( self, - fx: &mut FunctionCx<'_, 'tcx, impl Backend>, + fx: &mut FunctionCx<'_, '_, 'tcx>, index: Value, ) -> CPlace<'tcx> { - let (elem_layout, ptr) = match self.layout().ty.kind { + let (elem_layout, ptr) = match self.layout().ty.kind() { ty::Array(elem_ty, _) => (fx.layout_of(elem_ty), self.to_ptr()), ty::Slice(elem_ty) => (fx.layout_of(elem_ty), self.to_ptr_maybe_unsized().0), _ => bug!("place_index({:?})", self.layout().ty), }; - let offset = fx - .bcx - .ins() - .imul_imm(index, elem_layout.size.bytes() as i64); + let offset = fx.bcx.ins().imul_imm(index, elem_layout.size.bytes() as i64); CPlace::for_ptr(ptr.offset_value(fx, offset), elem_layout) } - pub(crate) fn place_deref(self, fx: &mut FunctionCx<'_, 'tcx, impl Backend>) -> CPlace<'tcx> { + pub(crate) fn place_deref(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> CPlace<'tcx> { let inner_layout = fx.layout_of(self.layout().ty.builtin_deref(true).unwrap().ty); if has_ptr_meta(fx.tcx, inner_layout.ty) { let (addr, extra) = self.to_cvalue(fx).load_scalar_pair(fx); CPlace::for_ptr_with_extra(Pointer::new(addr), extra, inner_layout) } else { - CPlace::for_ptr( - Pointer::new(self.to_cvalue(fx).load_scalar(fx)), - inner_layout, - ) + CPlace::for_ptr(Pointer::new(self.to_cvalue(fx).load_scalar(fx)), inner_layout) } } pub(crate) fn place_ref( self, - fx: &mut FunctionCx<'_, 'tcx, impl Backend>, + fx: &mut FunctionCx<'_, '_, 'tcx>, layout: TyAndLayout<'tcx>, ) -> CValue<'tcx> { if has_ptr_meta(fx.tcx, self.layout().ty) { @@ -758,14 +755,80 @@ pub(crate) fn place_ref( pub(crate) fn downcast_variant( self, - fx: &FunctionCx<'_, 'tcx, impl Backend>, + fx: &FunctionCx<'_, '_, 'tcx>, variant: VariantIdx, ) -> Self { assert!(!self.layout().is_unsized()); let layout = self.layout().for_variant(fx, variant); - CPlace { - inner: self.inner, - layout, + CPlace { inner: self.inner, layout } + } +} + +#[track_caller] +pub(crate) fn assert_assignable<'tcx>( + fx: &FunctionCx<'_, '_, 'tcx>, + from_ty: Ty<'tcx>, + to_ty: Ty<'tcx>, +) { + match (from_ty.kind(), to_ty.kind()) { + (ty::Ref(_, a, _), ty::Ref(_, b, _)) + | ( + ty::RawPtr(TypeAndMut { ty: a, mutbl: _ }), + ty::RawPtr(TypeAndMut { ty: b, mutbl: _ }), + ) => { + assert_assignable(fx, a, b); + } + (ty::Ref(_, a, _), ty::RawPtr(TypeAndMut { ty: b, mutbl: _ })) + | (ty::RawPtr(TypeAndMut { ty: a, mutbl: _ }), ty::Ref(_, b, _)) => { + assert_assignable(fx, a, b); + } + (ty::FnPtr(_), ty::FnPtr(_)) => { + let from_sig = fx.tcx.normalize_erasing_late_bound_regions( + ParamEnv::reveal_all(), + from_ty.fn_sig(fx.tcx), + ); + let to_sig = fx + .tcx + .normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), to_ty.fn_sig(fx.tcx)); + assert_eq!( + from_sig, to_sig, + "Can't write fn ptr with incompatible sig {:?} to place with sig {:?}\n\n{:#?}", + from_sig, to_sig, fx, + ); + // fn(&T) -> for<'l> fn(&'l T) is allowed + } + (&ty::Dynamic(from_traits, _), &ty::Dynamic(to_traits, _)) => { + for (from, to) in from_traits.iter().zip(to_traits) { + let from = + 
+                    fx.tcx.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), from);
+                let to = fx.tcx.normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), to);
+                assert_eq!(
+                    from, to,
+                    "Can't write trait object of incompatible traits {:?} to place with traits {:?}\n\n{:#?}",
+                    from_traits, to_traits, fx,
+                );
+            }
+            // dyn for<'r> Trait<'r> -> dyn Trait<'_> is allowed
+        }
+        (&ty::Adt(adt_def_a, substs_a), &ty::Adt(adt_def_b, substs_b))
+            if adt_def_a.did == adt_def_b.did =>
+        {
+            let mut types_a = substs_a.types();
+            let mut types_b = substs_b.types();
+            loop {
+                match (types_a.next(), types_b.next()) {
+                    (Some(a), Some(b)) => assert_assignable(fx, a, b),
+                    (None, None) => return,
+                    (Some(_), None) | (None, Some(_)) => panic!("{:#?}/{:#?}", from_ty, to_ty),
+                }
+            }
+        }
+        _ => {
+            assert_eq!(
+                from_ty, to_ty,
+                "Can't write value with incompatible type {:?} to place with type {:?}\n\n{:#?}",
+                from_ty, to_ty, fx,
+            );
+        }
+    }
+}
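
Note on the unsized-field arm of `codegen_field`: the iconst/isub/iadd_imm/band sequence emitted there is a branch-free "round up to alignment", `offset = (unaligned_offset + align - 1) & -align`, needed because the tail field's alignment is only known at runtime. Below is a minimal plain-Rust sketch of the same arithmetic; the helper name and the test values are illustrative, not part of the patch:

    /// Round `unaligned_offset` up to `align` (a power of two), mirroring the
    /// instruction sequence emitted in `codegen_field` for the unsized tail
    /// field of a DST: (offset + align - 1) & -align.
    fn align_up(unaligned_offset: u64, align: u64) -> u64 {
        debug_assert!(align.is_power_of_two());
        let align_sub_1 = align - 1; // isub(unsized_align, one)
        let and_lhs = align_sub_1 + unaligned_offset; // iadd_imm(align_sub_1, unaligned_offset)
        let and_rhs = 0u64.wrapping_sub(align); // isub(zero, unsized_align), i.e. -align
        and_lhs & and_rhs // band(and_lhs, and_rhs)
    }

    fn main() {
        assert_eq!(align_up(5, 8), 8); // 5 rounded up to an 8-byte boundary
        assert_eq!(align_up(8, 8), 8); // already-aligned offsets are unchanged
        assert_eq!(align_up(9, 4), 12);
    }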
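Note on `scalar_pair_calculate_b_offset`: the second half of a `ScalarPair` lives at the first scalar's size rounded up to the second scalar's ABI alignment, which is the offset `load_scalar_pair` and `write_cvalue_maybe_transmute` apply via `ptr.offset(fx, b_offset)`. A small sketch of that layout rule; the helper and the example component pairs are illustrative assumptions, not taken from the patch:

    /// Offset of the `b` component of a scalar pair: the size of `a` aligned
    /// up to the ABI alignment of `b`, as in `scalar_pair_calculate_b_offset`.
    fn b_offset(a_size: u64, b_align: u64) -> u64 {
        (a_size + b_align - 1) & !(b_align - 1)
    }

    fn main() {
        // A pair like (u8, u32): the second component starts at byte 4, not 1.
        assert_eq!(b_offset(1, 4), 4);
        // A pair like (u32, u64): the second component starts at byte 8.
        assert_eq!(b_offset(4, 8), 8);
        // A pair like (u64, u32): no padding needed, it starts right at byte 8.
        assert_eq!(b_offset(8, 4), 8);
    }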