Make it possible to use value_field for SIMD values stored ByVal
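
The headline change is in `CValue::value_field`: a `ByVal` SIMD value now gets its
lane read directly with Cranelift's `extractlane`, where previously such a value fell
through to the `bug!` arm and therefore had to live in memory (ByRef) before any
field access could be generated. A standalone toy model of the dispatch the new code
performs (names and types are invented for illustration only; the real code operates
on Cranelift SSA values and the backend's `Pointer` type):

    // Toy model, not backend code.
    #[derive(Clone, Debug, PartialEq)]
    enum ModelValue {
        ByVal(Vec<f32>),        // stand-in for a SIMD value held in a register
        ByRef { addr: usize },  // stand-in for a value living in memory
    }

    fn model_value_field(v: &ModelValue, field: usize, field_offset: usize) -> ModelValue {
        match v {
            // New path: extract the requested lane, no stack traffic (models `extractlane`).
            ModelValue::ByVal(lanes) => ModelValue::ByVal(vec![lanes[field]]),
            // Existing path: adjust the address by the field offset (models `codegen_field`).
            ModelValue::ByRef { addr } => ModelValue::ByRef { addr: addr + field_offset },
        }
    }

    fn main() {
        let v = ModelValue::ByVal(vec![1.0, 2.0, 3.0, 4.0]);
        assert_eq!(model_value_field(&v, 2, 0), ModelValue::ByVal(vec![3.0]));
    }
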
diff --git a/src/value_and_place.rs b/src/value_and_place.rs
index b1da6e2b4ea0c1a4af538675162c57d9a059fa07..cb23c814b5d2d6bd038c6821a85da12de075e2eb 100644
--- a/src/value_and_place.rs
+++ b/src/value_and_place.rs
@@ -1,26 +1,63 @@
 use crate::prelude::*;
 
+use cranelift_codegen::ir::immediates::Offset32;
+
 fn codegen_field<'tcx>(
     fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
-    base: Value,
+    base: Pointer,
+    extra: Option<Value>,
     layout: TyLayout<'tcx>,
     field: mir::Field,
-) -> (Value, TyLayout<'tcx>) {
+) -> (Pointer, TyLayout<'tcx>) {
     let field_offset = layout.fields.offset(field.index());
-    let field_ty = layout.field(&*fx, field.index());
-    if field_offset.bytes() > 0 {
+    let field_layout = layout.field(&*fx, field.index());
+
+    let simple = |fx: &mut FunctionCx<_>| {
         (
-            fx.bcx.ins().iadd_imm(base, field_offset.bytes() as i64),
-            field_ty,
+            base.offset_i64(fx, i64::try_from(field_offset.bytes()).unwrap()),
+            field_layout,
         )
+    };
+
+    if let Some(extra) = extra {
+        if !field_layout.is_unsized() {
+            return simple(fx);
+        }
+        match field_layout.ty.kind {
+            ty::Slice(..) | ty::Str | ty::Foreign(..) => return simple(fx),
+            ty::Adt(def, _) if def.repr.packed() => {
+                assert_eq!(layout.align.abi.bytes(), 1);
+                return simple(fx);
+            }
+            _ => {
+                // We have to align the offset for DSTs
+                let unaligned_offset = field_offset.bytes();
+                let (_, unsized_align) = crate::unsize::size_and_align_of_dst(fx, field_layout.ty, extra);
+
+                let one = fx.bcx.ins().iconst(pointer_ty(fx.tcx), 1);
+                let align_sub_1 = fx.bcx.ins().isub(unsized_align, one);
+                let and_lhs = fx.bcx.ins().iadd_imm(align_sub_1, unaligned_offset as i64);
+                let zero = fx.bcx.ins().iconst(pointer_ty(fx.tcx), 0);
+                let and_rhs = fx.bcx.ins().isub(zero, unsized_align);
+                let offset = fx.bcx.ins().band(and_lhs, and_rhs);
+
+                (
+                    base.offset_value(fx, offset),
+                    field_layout,
+                )
+            }
+        }
     } else {
-        (base, field_ty)
+        simple(fx)
     }
 }
 
-fn scalar_pair_calculate_b_offset(tcx: TyCtxt<'_>, a_scalar: &Scalar, b_scalar: &Scalar) -> i32 {
-    let b_offset = a_scalar.value.size(&tcx).align_to(b_scalar.value.align(&tcx).abi);
-    b_offset.bytes().try_into().unwrap()
+fn scalar_pair_calculate_b_offset(tcx: TyCtxt<'_>, a_scalar: &Scalar, b_scalar: &Scalar) -> Offset32 {
+    let b_offset = a_scalar
+        .value
+        .size(&tcx)
+        .align_to(b_scalar.value.align(&tcx).abi);
+    Offset32::new(b_offset.bytes().try_into().unwrap())
 }
 
 /// A read-only value
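
Both new helpers above lean on the same round-up-to-alignment rule: the DST branch of
`codegen_field` aligns the field offset to the dynamically computed alignment of the
unsized tail (the `iadd_imm`/`isub`/`band` sequence), and `scalar_pair_calculate_b_offset`
aligns the first scalar's size to the second scalar's ABI alignment. A plain-Rust sketch
of that identity, just to spell out what the generated code computes:

    /// `(offset + align - 1) & -align`, valid for power-of-two alignments; this is
    /// what the band sequence above evaluates at runtime and what `Size::align_to`
    /// evaluates at compile time.
    fn align_up(offset: u64, align: u64) -> u64 {
        assert!(align.is_power_of_two());
        (offset + align - 1) & align.wrapping_neg()
    }

    fn main() {
        assert_eq!(align_up(5, 8), 8); // DST field at raw offset 5, tail alignment 8
        assert_eq!(align_up(1, 4), 4); // ScalarPair such as (u8, u32): b lands at offset 4
        assert_eq!(align_up(8, 8), 8); // fat pointer: metadata right after the data pointer
    }
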
@@ -29,14 +66,14 @@ fn scalar_pair_calculate_b_offset(tcx: TyCtxt<'_>, a_scalar: &Scalar, b_scalar:
 
 #[derive(Debug, Copy, Clone)]
 enum CValueInner {
-    ByRef(Value),
+    ByRef(Pointer),
     ByVal(Value),
     ByValPair(Value, Value),
 }
 
 impl<'tcx> CValue<'tcx> {
-    pub fn by_ref(value: Value, layout: TyLayout<'tcx>) -> CValue<'tcx> {
-        CValue(CValueInner::ByRef(value), layout)
+    pub fn by_ref(ptr: Pointer, layout: TyLayout<'tcx>) -> CValue<'tcx> {
+        CValue(CValueInner::ByRef(ptr), layout)
     }
 
     pub fn by_val(value: Value, layout: TyLayout<'tcx>) -> CValue<'tcx> {
@@ -51,21 +88,31 @@ pub fn layout(&self) -> TyLayout<'tcx> {
         self.1
     }
 
-    pub fn force_stack<'a>(self, fx: &mut FunctionCx<'_, 'tcx, impl Backend>) -> Value {
+    pub fn force_stack<'a>(self, fx: &mut FunctionCx<'_, 'tcx, impl Backend>) -> Pointer {
         let layout = self.1;
         match self.0 {
-            CValueInner::ByRef(value) => value,
+            CValueInner::ByRef(ptr) => ptr,
             CValueInner::ByVal(_) | CValueInner::ByValPair(_, _) => {
                 let cplace = CPlace::new_stack_slot(fx, layout.ty);
                 cplace.write_cvalue(fx, self);
-                cplace.to_addr(fx)
+                cplace.to_ptr(fx)
             }
         }
     }
 
     pub fn try_to_addr(self) -> Option<Value> {
         match self.0 {
-            CValueInner::ByRef(addr) => Some(addr),
+            CValueInner::ByRef(ptr) => {
+                if let Some((base_addr, offset)) = ptr.try_get_addr_and_offset() {
+                    if offset == Offset32::new(0) {
+                        Some(base_addr)
+                    } else {
+                        None
+                    }
+                } else {
+                    None
+                }
+            }
             CValueInner::ByVal(_) | CValueInner::ByValPair(_, _) => None,
         }
     }
@@ -74,13 +121,16 @@ pub fn try_to_addr(self) -> Option<Value> {
     pub fn load_scalar<'a>(self, fx: &mut FunctionCx<'_, 'tcx, impl Backend>) -> Value {
         let layout = self.1;
         match self.0 {
-            CValueInner::ByRef(addr) => {
-                let scalar = match layout.abi {
-                    layout::Abi::Scalar(ref scalar) => scalar.clone(),
+            CValueInner::ByRef(ptr) => {
+                let clif_ty = match layout.abi {
+                    layout::Abi::Scalar(ref scalar) => scalar_to_clif_type(fx.tcx, scalar.clone()),
+                    layout::Abi::Vector { ref element, count } => {
+                        scalar_to_clif_type(fx.tcx, element.clone())
+                            .by(u16::try_from(count).unwrap()).unwrap()
+                    }
                     _ => unreachable!(),
                 };
-                let clif_ty = scalar_to_clif_type(fx.tcx, scalar);
-                fx.bcx.ins().load(clif_ty, MemFlags::new(), addr, 0)
+                ptr.load(fx, clif_ty, MemFlags::new())
             }
             CValueInner::ByVal(value) => value,
             CValueInner::ByValPair(_, _) => bug!("Please use load_scalar_pair for ByValPair"),
@@ -88,24 +138,22 @@ pub fn load_scalar<'a>(self, fx: &mut FunctionCx<'_, 'tcx, impl Backend>) -> Val
     }
 
     /// Load a value pair with layout.abi of scalar pair
-    pub fn load_scalar_pair<'a>(self, fx: &mut FunctionCx<'_, 'tcx, impl Backend>) -> (Value, Value) {
+    pub fn load_scalar_pair<'a>(
+        self,
+        fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
+    ) -> (Value, Value) {
         let layout = self.1;
         match self.0 {
-            CValueInner::ByRef(addr) => {
+            CValueInner::ByRef(ptr) => {
                 let (a_scalar, b_scalar) = match &layout.abi {
                     layout::Abi::ScalarPair(a, b) => (a, b),
-                    _ => unreachable!(),
+                    _ => unreachable!("load_scalar_pair({:?})", self),
                 };
                 let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
                 let clif_ty1 = scalar_to_clif_type(fx.tcx, a_scalar.clone());
                 let clif_ty2 = scalar_to_clif_type(fx.tcx, b_scalar.clone());
-                let val1 = fx.bcx.ins().load(clif_ty1, MemFlags::new(), addr, 0);
-                let val2 = fx.bcx.ins().load(
-                    clif_ty2,
-                    MemFlags::new(),
-                    addr,
-                    b_offset,
-                );
+                let val1 = ptr.load(fx, clif_ty1, MemFlags::new());
+                let val2 = ptr.offset(fx, b_offset).load(fx, clif_ty2, MemFlags::new());
                 (val1, val2)
             }
             CValueInner::ByVal(_) => bug!("Please use load_scalar for ByVal"),
@@ -119,13 +167,26 @@ pub fn value_field<'a>(
         field: mir::Field,
     ) -> CValue<'tcx> {
         let layout = self.1;
-        let base = match self.0 {
-            CValueInner::ByRef(addr) => addr,
+        match self.0 {
+            CValueInner::ByVal(val) => {
+                match layout.abi {
+                    layout::Abi::Vector { element: _, count } => {
+                        let count = u8::try_from(count).expect("SIMD type with more than 255 lanes???");
+                        let field = u8::try_from(field.index()).unwrap();
+                        assert!(field < count);
+                        let lane = fx.bcx.ins().extractlane(val, field);
+                        let field_layout = layout.field(&*fx, usize::from(field));
+                        CValue::by_val(lane, field_layout)
+                    }
+                    _ => unreachable!("value_field for ByVal with abi {:?}", layout.abi),
+                }
+            }
+            CValueInner::ByRef(ptr) => {
+                let (field_ptr, field_layout) = codegen_field(fx, ptr, None, layout, field);
+                CValue::by_ref(field_ptr, field_layout)
+            }
             _ => bug!("place_field for {:?}", self),
-        };
-
-        let (field_ptr, field_layout) = codegen_field(fx, base, layout, field);
-        CValue::by_ref(field_ptr, field_layout)
+        }
     }
 
     pub fn unsize_value<'a>(self, fx: &mut FunctionCx<'_, 'tcx, impl Backend>, dest: CPlace<'tcx>) {
@@ -141,23 +202,32 @@ pub fn const_val<'a>(
         let clif_ty = fx.clif_type(ty).unwrap();
         let layout = fx.layout_of(ty);
 
-        let val = match ty.sty {
+        let val = match ty.kind {
             ty::TyKind::Uint(UintTy::U128) | ty::TyKind::Int(IntTy::I128) => {
                 let lsb = fx.bcx.ins().iconst(types::I64, const_val as u64 as i64);
-                let msb = fx.bcx.ins().iconst(types::I64, (const_val >> 64) as u64 as i64);
+                let msb = fx
+                    .bcx
+                    .ins()
+                    .iconst(types::I64, (const_val >> 64) as u64 as i64);
                 fx.bcx.ins().iconcat(lsb, msb)
             }
             ty::TyKind::Bool => {
-                assert!(const_val == 0 || const_val == 1, "Invalid bool 0x{:032X}", const_val);
+                assert!(
+                    const_val == 0 || const_val == 1,
+                    "Invalid bool 0x{:032X}",
+                    const_val
+                );
                 fx.bcx.ins().iconst(types::I8, const_val as i64)
             }
-            ty::TyKind::Uint(_) | ty::TyKind::Ref(..) | ty::TyKind::RawPtr(.. )=> {
-                fx.bcx.ins().iconst(clif_ty, u64::try_from(const_val).expect("uint") as i64)
-            }
-            ty::TyKind::Int(_) => {
-                fx.bcx.ins().iconst(clif_ty, const_val as i128 as i64)
-            }
-            _ => panic!("CValue::const_val for non bool/integer/pointer type {:?} is not allowed", ty),
+            ty::TyKind::Uint(_) | ty::TyKind::Ref(..) | ty::TyKind::RawPtr(..) => fx
+                .bcx
+                .ins()
+                .iconst(clif_ty, u64::try_from(const_val).expect("uint") as i64),
+            ty::TyKind::Int(_) => fx.bcx.ins().iconst(clif_ty, const_val as i128 as i64),
+            _ => panic!(
+                "CValue::const_val for non bool/integer/pointer type {:?} is not allowed",
+                ty
+            ),
         };
 
         CValue::by_val(val, layout)
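
The `U128`/`I128` arm above materializes the constant as two `I64` halves joined with
`iconcat`; the casts pick out exactly the low and high 64 bits. A quick check of that
split in plain Rust:

    fn main() {
        let const_val: u128 = 0x1122_3344_5566_7788_99aa_bbcc_ddee_ff00;
        let lsb = const_val as u64;         // low half, as in `const_val as u64 as i64`
        let msb = (const_val >> 64) as u64; // high half, as in `(const_val >> 64) as u64 as i64`
        assert_eq!(((msb as u128) << 64) | lsb as u128, const_val);
    }
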
@@ -178,8 +248,7 @@ pub struct CPlace<'tcx> {
 #[derive(Debug, Copy, Clone)]
 pub enum CPlaceInner {
     Var(Local),
-    Addr(Value, Option<Value>),
-    Stack(StackSlot),
+    Addr(Pointer, Option<Value>),
     NoPlace,
 }
 
@@ -193,9 +262,9 @@ pub fn inner(&self) -> &CPlaceInner {
     }
 
     pub fn no_place(layout: TyLayout<'tcx>) -> CPlace<'tcx> {
-        CPlace{
+        CPlace {
             inner: CPlaceInner::NoPlace,
-            layout
+            layout,
         }
     }
 
@@ -218,7 +287,7 @@ pub fn new_stack_slot(
             offset: None,
         });
         CPlace {
-            inner: CPlaceInner::Stack(stack_slot),
+            inner: CPlaceInner::Addr(Pointer::stack_slot(stack_slot), None),
             layout,
         }
     }
@@ -236,16 +305,16 @@ pub fn new_var(
         }
     }
 
-    pub fn for_addr(addr: Value, layout: TyLayout<'tcx>) -> CPlace<'tcx> {
+    pub fn for_ptr(ptr: Pointer, layout: TyLayout<'tcx>) -> CPlace<'tcx> {
         CPlace {
-            inner: CPlaceInner::Addr(addr, None),
+            inner: CPlaceInner::Addr(ptr, None),
             layout,
         }
     }
 
-    pub fn for_addr_with_extra(addr: Value, extra: Value, layout: TyLayout<'tcx>) -> CPlace<'tcx> {
+    pub fn for_ptr_with_extra(ptr: Pointer, extra: Value, layout: TyLayout<'tcx>) -> CPlace<'tcx> {
         CPlace {
-            inner: CPlaceInner::Addr(addr, Some(extra)),
+            inner: CPlaceInner::Addr(ptr, Some(extra)),
             layout,
         }
     }
@@ -253,64 +322,79 @@ pub fn for_addr_with_extra(addr: Value, extra: Value, layout: TyLayout<'tcx>) ->
     pub fn to_cvalue(self, fx: &mut FunctionCx<'_, 'tcx, impl Backend>) -> CValue<'tcx> {
         let layout = self.layout();
         match self.inner {
-            CPlaceInner::Var(var) => CValue::by_val(fx.bcx.use_var(mir_var(var)), layout),
-            CPlaceInner::Addr(addr, extra) => {
+            CPlaceInner::Var(var) => {
+                let val = fx.bcx.use_var(mir_var(var));
+                fx.bcx.set_val_label(val, cranelift_codegen::ir::ValueLabel::from_u32(var.as_u32()));
+                CValue::by_val(val, layout)
+            }
+            CPlaceInner::Addr(ptr, extra) => {
                 assert!(extra.is_none(), "unsized values are not yet supported");
-                CValue::by_ref(addr, layout)
+                CValue::by_ref(ptr, layout)
             }
-            CPlaceInner::Stack(stack_slot) => CValue::by_ref(
-                fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, 0),
-                layout,
-            ),
             CPlaceInner::NoPlace => CValue::by_ref(
-                fx.bcx
-                    .ins()
-                    .iconst(fx.pointer_type, fx.pointer_type.bytes() as i64),
+                Pointer::const_addr(fx, i64::try_from(self.layout.align.pref.bytes()).unwrap()),
                 layout,
             ),
         }
     }
 
-    pub fn to_addr(self, fx: &mut FunctionCx<'_, 'tcx, impl Backend>) -> Value {
-        match self.to_addr_maybe_unsized(fx) {
-            (addr, None) => addr,
+    pub fn to_ptr(self, fx: &mut FunctionCx<'_, 'tcx, impl Backend>) -> Pointer {
+        match self.to_ptr_maybe_unsized(fx) {
+            (ptr, None) => ptr,
             (_, Some(_)) => bug!("Expected sized cplace, found {:?}", self),
         }
     }
 
-    pub fn to_addr_maybe_unsized(
+    pub fn to_ptr_maybe_unsized(
         self,
         fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
-    ) -> (Value, Option<Value>) {
+    ) -> (Pointer, Option<Value>) {
         match self.inner {
-            CPlaceInner::Addr(addr, extra) => (addr, extra),
-            CPlaceInner::Stack(stack_slot) => (
-                fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, 0),
-                None,
-            ),
-            CPlaceInner::NoPlace => (fx.bcx.ins().iconst(fx.pointer_type, 45), None),
+            CPlaceInner::Addr(ptr, extra) => (ptr, extra),
+            CPlaceInner::NoPlace => {
+                (
+                    Pointer::const_addr(fx, i64::try_from(self.layout.align.pref.bytes()).unwrap()),
+                    None,
+                )
+            }
             CPlaceInner::Var(_) => bug!("Expected CPlace::Addr, found CPlace::Var"),
         }
     }
 
     pub fn write_cvalue(self, fx: &mut FunctionCx<'_, 'tcx, impl Backend>, from: CValue<'tcx>) {
-        use rustc::hir::Mutability::*;
+        #[cfg(debug_assertions)]
+        {
+            use cranelift_codegen::cursor::{Cursor, CursorPosition};
+            let cur_ebb = match fx.bcx.cursor().position() {
+                CursorPosition::After(ebb) => ebb,
+                _ => unreachable!(),
+            };
+            fx.add_comment(
+                fx.bcx.func.layout.last_inst(cur_ebb).unwrap(),
+                format!("write_cvalue: {:?} <- {:?}", self, from),
+            );
+        }
 
         let from_ty = from.layout().ty;
         let to_ty = self.layout().ty;
 
-        fn assert_assignable<'tcx>(fx: &FunctionCx<'_, 'tcx, impl Backend>, from_ty: Ty<'tcx>, to_ty: Ty<'tcx>) {
-            match (&from_ty.sty, &to_ty.sty) {
-                (ty::Ref(_, t, MutImmutable), ty::Ref(_, u, MutImmutable))
-                | (ty::Ref(_, t, MutMutable), ty::Ref(_, u, MutImmutable))
-                | (ty::Ref(_, t, MutMutable), ty::Ref(_, u, MutMutable)) => {
+        fn assert_assignable<'tcx>(
+            fx: &FunctionCx<'_, 'tcx, impl Backend>,
+            from_ty: Ty<'tcx>,
+            to_ty: Ty<'tcx>,
+        ) {
+            match (&from_ty.kind, &to_ty.kind) {
+                (ty::Ref(_, t, Mutability::Not), ty::Ref(_, u, Mutability::Not))
+                | (ty::Ref(_, t, Mutability::Mut), ty::Ref(_, u, Mutability::Not))
+                | (ty::Ref(_, t, Mutability::Mut), ty::Ref(_, u, Mutability::Mut)) => {
                     assert_assignable(fx, t, u);
                     // &mut T -> &T is allowed
                     // &'a T -> &'b T is allowed
                 }
-                (ty::Ref(_, _, MutImmutable), ty::Ref(_, _, MutMutable)) => {
-                    panic!("Cant assign value of type {} to place of type {}", from_ty, to_ty)
-                }
+                (ty::Ref(_, _, Mutability::Not), ty::Ref(_, _, Mutability::Mut)) => panic!(
+                    "Can't assign value of type {} to place of type {}",
+                    from_ty, to_ty
+                ),
                 (ty::FnPtr(_), ty::FnPtr(_)) => {
                     let from_sig = fx.tcx.normalize_erasing_late_bound_regions(
                         ParamEnv::reveal_all(),
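
The `NoPlace` arms in the hunk above hand out `Pointer::const_addr` of the type's
preferred alignment for zero-sized places: no storage is allocated, just a non-null,
well-aligned dangling address. This is the same trick the standard library uses for
`NonNull::dangling` (which uses the ABI alignment, but the idea is identical):

    use std::ptr::NonNull;

    fn main() {
        // A dangling but non-null, well-aligned address, fine for zero-sized accesses.
        let p = NonNull::<u64>::dangling();
        assert_ne!(p.as_ptr() as usize, 0);
        assert_eq!(p.as_ptr() as usize % std::mem::align_of::<u64>(), 0);
    }
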
@@ -328,14 +412,12 @@ fn assert_assignable<'tcx>(fx: &FunctionCx<'_, 'tcx, impl Backend>, from_ty: Ty<
                     // fn(&T) -> for<'l> fn(&'l T) is allowed
                 }
                 (ty::Dynamic(from_traits, _), ty::Dynamic(to_traits, _)) => {
-                    let from_traits = fx.tcx.normalize_erasing_late_bound_regions(
-                        ParamEnv::reveal_all(),
-                        from_traits,
-                    );
-                    let to_traits = fx.tcx.normalize_erasing_late_bound_regions(
-                        ParamEnv::reveal_all(),
-                        to_traits,
-                    );
+                    let from_traits = fx
+                        .tcx
+                        .normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), from_traits);
+                    let to_traits = fx
+                        .tcx
+                        .normalize_erasing_late_bound_regions(ParamEnv::reveal_all(), to_traits);
                     assert_eq!(
                         from_traits, to_traits,
                         "Can't write trait object of incompatible traits {:?} to place with traits {:?}\n\n{:#?}",
@@ -359,16 +441,14 @@ fn assert_assignable<'tcx>(fx: &FunctionCx<'_, 'tcx, impl Backend>, from_ty: Ty<
         assert_assignable(fx, from_ty, to_ty);
 
         let dst_layout = self.layout();
-        let addr = match self.inner {
+        let to_ptr = match self.inner {
             CPlaceInner::Var(var) => {
                 let data = from.load_scalar(fx);
+                fx.bcx.set_val_label(data, cranelift_codegen::ir::ValueLabel::from_u32(var.as_u32()));
                 fx.bcx.def_var(mir_var(var), data);
                 return;
             }
-            CPlaceInner::Addr(addr, None) => addr,
-            CPlaceInner::Stack(stack_slot) => {
-                fx.bcx.ins().stack_addr(fx.pointer_type, stack_slot, 0)
-            }
+            CPlaceInner::Addr(ptr, None) => ptr,
             CPlaceInner::NoPlace => {
                 if dst_layout.abi != Abi::Uninhabited {
                     assert_eq!(dst_layout.size.bytes(), 0, "{:?}", dst_layout);
@@ -380,34 +460,29 @@ fn assert_assignable<'tcx>(fx: &FunctionCx<'_, 'tcx, impl Backend>, from_ty: Ty<
 
         match from.0 {
             CValueInner::ByVal(val) => {
-                fx.bcx.ins().store(MemFlags::new(), val, addr, 0);
+                to_ptr.store(fx, val, MemFlags::new());
             }
-            CValueInner::ByValPair(value, extra) => {
-                match dst_layout.abi {
-                    Abi::ScalarPair(ref a_scalar, ref b_scalar) => {
-                        let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
-                        fx.bcx.ins().store(MemFlags::new(), value, addr, 0);
-                        fx.bcx.ins().store(
-                            MemFlags::new(),
-                            extra,
-                            addr,
-                            b_offset,
-                        );
-                    }
-                    _ => bug!(
-                        "Non ScalarPair abi {:?} for ByValPair CValue",
-                        dst_layout.abi
-                    ),
+            CValueInner::ByValPair(value, extra) => match dst_layout.abi {
+                Abi::ScalarPair(ref a_scalar, ref b_scalar) => {
+                    let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
+                    to_ptr.store(fx, value, MemFlags::new());
+                    to_ptr.offset(fx, b_offset).store(fx, extra, MemFlags::new());
                 }
-            }
-            CValueInner::ByRef(from_addr) => {
+                _ => bug!(
+                    "Non ScalarPair abi {:?} for ByValPair CValue",
+                    dst_layout.abi
+                ),
+            },
+            CValueInner::ByRef(from_ptr) => {
+                let from_addr = from_ptr.get_addr(fx);
+                let to_addr = to_ptr.get_addr(fx);
                 let src_layout = from.1;
                 let size = dst_layout.size.bytes();
                 let src_align = src_layout.align.abi.bytes() as u8;
                 let dst_align = dst_layout.align.abi.bytes() as u8;
                 fx.bcx.emit_small_memcpy(
                     fx.module.target_config(),
-                    addr,
+                    to_addr,
                     from_addr,
                     size,
                     dst_align,
@@ -423,13 +498,13 @@ pub fn place_field(
         field: mir::Field,
     ) -> CPlace<'tcx> {
         let layout = self.layout();
-        let (base, extra) = self.to_addr_maybe_unsized(fx);
+        let (base, extra) = self.to_ptr_maybe_unsized(fx);
 
-        let (field_ptr, field_layout) = codegen_field(fx, base, layout, field);
+        let (field_ptr, field_layout) = codegen_field(fx, base, extra, layout, field);
         if field_layout.is_unsized() {
-            CPlace::for_addr_with_extra(field_ptr, extra.unwrap(), field_layout)
+            CPlace::for_ptr_with_extra(field_ptr, extra.unwrap(), field_layout)
         } else {
-            CPlace::for_addr(field_ptr, field_layout)
+            CPlace::for_ptr(field_ptr, field_layout)
         }
     }
 
@@ -438,9 +513,9 @@ pub fn place_index(
         fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
         index: Value,
     ) -> CPlace<'tcx> {
-        let (elem_layout, addr) = match self.layout().ty.sty {
-            ty::Array(elem_ty, _) => (fx.layout_of(elem_ty), self.to_addr(fx)),
-            ty::Slice(elem_ty) => (fx.layout_of(elem_ty), self.to_addr_maybe_unsized(fx).0),
+        let (elem_layout, ptr) = match self.layout().ty.kind {
+            ty::Array(elem_ty, _) => (fx.layout_of(elem_ty), self.to_ptr(fx)),
+            ty::Slice(elem_ty) => (fx.layout_of(elem_ty), self.to_ptr_maybe_unsized(fx).0),
             _ => bug!("place_index({:?})", self.layout().ty),
         };
 
@@ -449,26 +524,30 @@ pub fn place_index(
             .ins()
             .imul_imm(index, elem_layout.size.bytes() as i64);
 
-        CPlace::for_addr(fx.bcx.ins().iadd(addr, offset), elem_layout)
+        CPlace::for_ptr(ptr.offset_value(fx, offset), elem_layout)
     }
 
     pub fn place_deref(self, fx: &mut FunctionCx<'_, 'tcx, impl Backend>) -> CPlace<'tcx> {
         let inner_layout = fx.layout_of(self.layout().ty.builtin_deref(true).unwrap().ty);
-        if !inner_layout.is_unsized() {
-            CPlace::for_addr(self.to_cvalue(fx).load_scalar(fx), inner_layout)
-        } else {
+        if has_ptr_meta(fx.tcx, inner_layout.ty) {
             let (addr, extra) = self.to_cvalue(fx).load_scalar_pair(fx);
-            CPlace::for_addr_with_extra(addr, extra, inner_layout)
+            CPlace::for_ptr_with_extra(Pointer::new(addr), extra, inner_layout)
+        } else {
+            CPlace::for_ptr(Pointer::new(self.to_cvalue(fx).load_scalar(fx)), inner_layout)
         }
     }
 
     pub fn write_place_ref(self, fx: &mut FunctionCx<'_, 'tcx, impl Backend>, dest: CPlace<'tcx>) {
-        if !self.layout().is_unsized() {
-            let ptr = CValue::by_val(self.to_addr(fx), dest.layout());
+        if has_ptr_meta(fx.tcx, self.layout().ty) {
+            let (ptr, extra) = self.to_ptr_maybe_unsized(fx);
+            let ptr = CValue::by_val_pair(
+                ptr.get_addr(fx),
+                extra.expect("unsized type without metadata"),
+                dest.layout(),
+            );
             dest.write_cvalue(fx, ptr);
         } else {
-            let (value, extra) = self.to_addr_maybe_unsized(fx);
-            let ptr = CValue::by_val_pair(value, extra.expect("unsized type without metadata"), dest.layout());
+            let ptr = CValue::by_val(self.to_ptr(fx).get_addr(fx), dest.layout());
             dest.write_cvalue(fx, ptr);
         }
     }
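
The last two hunks switch `place_deref` and `write_place_ref` from asking
`is_unsized()` to asking `has_ptr_meta(fx.tcx, ty)`, i.e. "does a pointer to this type
carry metadata", which differs from unsizedness for thin-pointer unsized types such as
`extern` types and is presumably the point of the change. A quick stable-Rust check of
the fat vs. thin distinction the code now keys on:

    use std::fmt::Debug;
    use std::mem::size_of;

    fn main() {
        assert_eq!(size_of::<&u64>(), size_of::<usize>());           // thin: no metadata
        assert_eq!(size_of::<&[u8]>(), 2 * size_of::<usize>());      // fat: pointer + length
        assert_eq!(size_of::<&dyn Debug>(), 2 * size_of::<usize>()); // fat: pointer + vtable
    }
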