Slightly reduce the number of fx.module references
diff --git a/src/value_and_place.rs b/src/value_and_place.rs
index 171f39805f8963ffaad26ce92566cb3f369111d6..52541d5b4964015d866f6b456ea5216c8ff45549 100644
--- a/src/value_and_place.rs
+++ b/src/value_and_place.rs
@@ -34,10 +34,10 @@ fn codegen_field<'tcx>(
                 let (_, unsized_align) =
                     crate::unsize::size_and_align_of_dst(fx, field_layout, extra);
 
-                let one = fx.bcx.ins().iconst(pointer_ty(fx.tcx), 1);
+                let one = fx.bcx.ins().iconst(fx.pointer_type, 1);
                 let align_sub_1 = fx.bcx.ins().isub(unsized_align, one);
                 let and_lhs = fx.bcx.ins().iadd_imm(align_sub_1, unaligned_offset as i64);
-                let zero = fx.bcx.ins().iconst(pointer_ty(fx.tcx), 0);
+                let zero = fx.bcx.ins().iconst(fx.pointer_type, 0);
                 let and_rhs = fx.bcx.ins().isub(zero, unsized_align);
                 let offset = fx.bcx.ins().band(and_lhs, and_rhs);
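// The three instructions above implement the usual round-up-to-dynamic-
// alignment pattern; as plain arithmetic (a sketch, for a power-of-two
// `unsized_align`):
//
//     offset = (unaligned_offset + unsized_align - 1) & -unsized_align
//
// e.g. unaligned_offset = 5, unsized_align = 8: (5 + 8 - 1) & -8 == 8.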
 
@@ -49,11 +49,7 @@ fn codegen_field<'tcx>(
     }
 }
 
-fn scalar_pair_calculate_b_offset(
-    tcx: TyCtxt<'_>,
-    a_scalar: &Scalar,
-    b_scalar: &Scalar,
-) -> Offset32 {
+fn scalar_pair_calculate_b_offset(tcx: TyCtxt<'_>, a_scalar: Scalar, b_scalar: Scalar) -> Offset32 {
     let b_offset = a_scalar.value.size(&tcx).align_to(b_scalar.value.align(&tcx).abi);
     Offset32::new(b_offset.bytes().try_into().unwrap())
 }
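// A worked example of the calculation above (a sketch): for a scalar pair
// laid out like (u8, u64), the second component starts at the first one's
// size rounded up to the second one's ABI alignment:
//
//     b_offset = align_to(1 /* size_of::<u8>() */, 8 /* align_of::<u64>() */) == 8
//
// so the pair occupies bytes 0..1 and 8..16.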
@@ -124,12 +120,10 @@ pub(crate) fn load_scalar(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> Value {
         match self.0 {
             CValueInner::ByRef(ptr, None) => {
                 let clif_ty = match layout.abi {
-                    Abi::Scalar(ref scalar) => scalar_to_clif_type(fx.tcx, scalar.clone()),
-                    Abi::Vector { ref element, count } => {
-                        scalar_to_clif_type(fx.tcx, element.clone())
-                            .by(u16::try_from(count).unwrap())
-                            .unwrap()
-                    }
+                    Abi::Scalar(scalar) => scalar_to_clif_type(fx.tcx, scalar),
+                    Abi::Vector { element, count } => scalar_to_clif_type(fx.tcx, element)
+                        .by(u16::try_from(count).unwrap())
+                        .unwrap(),
                     _ => unreachable!("{:?}", layout.ty),
                 };
                 let mut flags = MemFlags::new();
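// `Type::by` builds a Cranelift vector type from a lane type; a minimal
// sketch of the mapping used above (assuming `cranelift_codegen::ir::types`):
//
//     types::F32.by(4) == Some(types::F32X4)
//     types::I64.by(2) == Some(types::I64X2)
//
// It returns `None` when no matching vector type exists, hence the `unwrap()`.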
@@ -147,13 +141,13 @@ pub(crate) fn load_scalar_pair(self, fx: &mut FunctionCx<'_, '_, 'tcx>) -> (Valu
         let layout = self.1;
         match self.0 {
             CValueInner::ByRef(ptr, None) => {
-                let (a_scalar, b_scalar) = match &layout.abi {
+                let (a_scalar, b_scalar) = match layout.abi {
                     Abi::ScalarPair(a, b) => (a, b),
                     _ => unreachable!("load_scalar_pair({:?})", self),
                 };
                 let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
-                let clif_ty1 = scalar_to_clif_type(fx.tcx, a_scalar.clone());
-                let clif_ty2 = scalar_to_clif_type(fx.tcx, b_scalar.clone());
+                let clif_ty1 = scalar_to_clif_type(fx.tcx, a_scalar);
+                let clif_ty2 = scalar_to_clif_type(fx.tcx, b_scalar);
                 let mut flags = MemFlags::new();
                 flags.set_notrap();
                 let val1 = ptr.load(fx, clif_ty1, flags);
@@ -206,6 +200,38 @@ pub(crate) fn value_field(
         }
     }
 
+    /// Like [`CValue::value_field`] except handling ADTs containing a single array field in a way
+    /// such that you can access individual lanes.
+    pub(crate) fn value_lane(
+        self,
+        fx: &mut FunctionCx<'_, '_, 'tcx>,
+        lane_idx: u64,
+    ) -> CValue<'tcx> {
+        let layout = self.1;
+        assert!(layout.ty.is_simd());
+        let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
+        let lane_layout = fx.layout_of(lane_ty);
+        assert!(lane_idx < lane_count);
+        match self.0 {
+            CValueInner::ByVal(val) => match layout.abi {
+                Abi::Vector { element: _, count: _ } => {
+                    assert!(lane_count <= u8::MAX.into(), "SIMD type with more than 255 lanes???");
+                    let lane_idx = u8::try_from(lane_idx).unwrap();
+                    let lane = fx.bcx.ins().extractlane(val, lane_idx);
+                    CValue::by_val(lane, lane_layout)
+                }
+                _ => unreachable!("value_lane for ByVal with abi {:?}", layout.abi),
+            },
+            CValueInner::ByValPair(_, _) => unreachable!(),
+            CValueInner::ByRef(ptr, None) => {
+                let field_offset = lane_layout.size * lane_idx;
+                let field_ptr = ptr.offset_i64(fx, i64::try_from(field_offset.bytes()).unwrap());
+                CValue::by_ref(field_ptr, lane_layout)
+            }
+            CValueInner::ByRef(_, Some(_)) => unreachable!(),
+        }
+    }
+
     pub(crate) fn unsize_value(self, fx: &mut FunctionCx<'_, '_, 'tcx>, dest: CPlace<'tcx>) {
         crate::unsize::coerce_unsized_into(fx, self, dest);
     }
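// A sketch of how the two new lane accessors combine, assuming hypothetical
// `val: CValue` and `ret: CPlace` of the same SIMD type, with `ineg` standing
// in for any per-lane operation:
//
//     let (lane_count, lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
//     let lane_layout = fx.layout_of(lane_ty);
//     for lane_idx in 0..lane_count {
//         let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
//         let res = fx.bcx.ins().ineg(lane);
//         ret.place_lane(fx, lane_idx).write_cvalue(fx, CValue::by_val(res, lane_layout));
//     }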
@@ -286,17 +312,16 @@ pub(crate) fn inner(&self) -> &CPlaceInner {
         &self.inner
     }
 
-    pub(crate) fn no_place(layout: TyAndLayout<'tcx>) -> CPlace<'tcx> {
-        CPlace { inner: CPlaceInner::Addr(Pointer::dangling(layout.align.pref), None), layout }
-    }
-
     pub(crate) fn new_stack_slot(
         fx: &mut FunctionCx<'_, '_, 'tcx>,
         layout: TyAndLayout<'tcx>,
     ) -> CPlace<'tcx> {
         assert!(!layout.is_unsized());
         if layout.size.bytes() == 0 {
-            return CPlace::no_place(layout);
+            return CPlace {
+                inner: CPlaceInner::Addr(Pointer::dangling(layout.align.pref), None),
+                layout,
+            };
         }
 
         let stack_slot = fx.bcx.create_stack_slot(StackSlotData {
@@ -304,7 +329,6 @@ pub(crate) fn new_stack_slot(
             // FIXME Don't force the size to a multiple of 16 bytes once Cranelift gets a way to
             // specify stack slot alignment.
             size: (u32::try_from(layout.size.bytes()).unwrap() + 15) / 16 * 16,
-            offset: None,
         });
         CPlace { inner: CPlaceInner::Addr(Pointer::stack_slot(stack_slot), None), layout }
     }
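// The rounding above, `(size + 15) / 16 * 16`, pads each slot to the next
// multiple of 16 bytes so the default slot alignment always suffices, e.g.
// 1 -> 16, 20 -> 32, 32 -> 32.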
@@ -447,12 +471,15 @@ fn transmute_value<'tcx>(
                         // FIXME Don't force the size to a multiple of 16 bytes once Cranelift gets a way to
                         // specify stack slot alignment.
                         size: (src_ty.bytes() + 15) / 16 * 16,
-                        offset: None,
                     });
                     let ptr = Pointer::stack_slot(stack_slot);
                     ptr.store(fx, data, MemFlags::trusted());
                     ptr.load(fx, dst_ty, MemFlags::trusted())
                 }
+
+                // `CValue`s should never contain SSA-only types, so if you ended
+                // up here having seen an error like `B1 -> I8`, then before
+                // calling `write_cvalue` you need to add a `bint` instruction.
                 _ => unreachable!("write_cvalue_transmute: {:?} -> {:?}", src_ty, dst_ty),
             };
             //fx.bcx.set_val_label(data, cranelift_codegen::ir::ValueLabel::new(var.index()));
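// A sketch of the fix the comment above asks for, assuming a hypothetical
// `B1` value `bool_val` and destination `dest: CPlace` of type `bool`:
//
//     let int_val = fx.bcx.ins().bint(types::I8, bool_val); // B1 -> I8
//     dest.write_cvalue(fx, CValue::by_val(int_val, fx.layout_of(fx.tcx.types.bool)));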
@@ -483,6 +510,26 @@ fn transmute_value<'tcx>(
         let dst_layout = self.layout();
         let to_ptr = match self.inner {
             CPlaceInner::Var(_local, var) => {
+                if let ty::Array(element, len) = dst_layout.ty.kind() {
+                    // Can only happen for vector types
+                    let len =
+                        u16::try_from(len.eval_usize(fx.tcx, ParamEnv::reveal_all())).unwrap();
+                    let vector_ty = fx.clif_type(element).unwrap().by(len).unwrap();
+
+                    let data = match from.0 {
+                        CValueInner::ByRef(ptr, None) => {
+                            let mut flags = MemFlags::new();
+                            flags.set_notrap();
+                            ptr.load(fx, vector_ty, flags)
+                        }
+                        CValueInner::ByVal(_)
+                        | CValueInner::ByValPair(_, _)
+                        | CValueInner::ByRef(_, Some(_)) => bug!("array should be ByRef"),
+                    };
+
+                    fx.bcx.def_var(var, data);
+                    return;
+                }
                 let data = CValue(from.0, dst_layout).load_scalar(fx);
                 let dst_ty = fx.clif_type(self.layout().ty).unwrap();
                 transmute_value(fx, var, data, dst_ty);
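// This branch pairs with the `place_field` change below: for a type like
//
//     #[repr(simd)]
//     struct F32x4([f32; 4]); // hypothetical portable-SIMD style struct
//
// projecting field 0 out of an ssa `Var` place yields a place whose layout
// type is the array `[f32; 4]`, so writing through it takes the path above
// and moves the whole value as a single `F32X4` vector.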
@@ -529,7 +576,7 @@ fn transmute_value<'tcx>(
                 to_ptr.store(fx, val, flags);
                 return;
             }
-            Abi::ScalarPair(ref a_scalar, ref b_scalar) => {
+            Abi::ScalarPair(a_scalar, b_scalar) => {
                 let (value, extra) = from.load_scalar_pair(fx);
                 let b_offset = scalar_pair_calculate_b_offset(fx.tcx, a_scalar, b_scalar);
                 to_ptr.store(fx, value, flags);
@@ -554,7 +601,7 @@ fn transmute_value<'tcx>(
                 let src_align = src_layout.align.abi.bytes() as u8;
                 let dst_align = dst_layout.align.abi.bytes() as u8;
                 fx.bcx.emit_small_memory_copy(
-                    fx.module.target_config(),
+                    fx.target_config,
                     to_addr,
                     from_addr,
                     size,
@@ -576,14 +623,39 @@ pub(crate) fn place_field(
         let layout = self.layout();
 
         match self.inner {
-            CPlaceInner::Var(local, var) => {
-                if let Abi::Vector { .. } = layout.abi {
+            CPlaceInner::Var(local, var) => match layout.ty.kind() {
+                ty::Array(_, _) => {
+                    // Can only happen for vector types
                     return CPlace {
                         inner: CPlaceInner::VarLane(local, var, field.as_u32().try_into().unwrap()),
                         layout: layout.field(fx, field.as_u32().try_into().unwrap()),
                     };
                 }
-            }
+                ty::Adt(adt_def, substs) if layout.ty.is_simd() => {
+                    let f0_ty = adt_def.non_enum_variant().fields[0].ty(fx.tcx, substs);
+
+                    match f0_ty.kind() {
+                        ty::Array(_, _) => {
+                            assert_eq!(field.as_u32(), 0);
+                            return CPlace {
+                                inner: CPlaceInner::Var(local, var),
+                                layout: layout.field(fx, field.as_u32().try_into().unwrap()),
+                            };
+                        }
+                        _ => {
+                            return CPlace {
+                                inner: CPlaceInner::VarLane(
+                                    local,
+                                    var,
+                                    field.as_u32().try_into().unwrap(),
+                                ),
+                                layout: layout.field(fx, field.as_u32().try_into().unwrap()),
+                            };
+                        }
+                    }
+                }
+                _ => {}
+            },
             CPlaceInner::VarPair(local, var1, var2) => {
                 let layout = layout.field(&*fx, field.index());
 
@@ -606,6 +678,38 @@ pub(crate) fn place_field(
         }
     }
 
+    /// Like [`CPlace::place_field`] except handling ADTs containing a single array field in a way
+    /// such that you can access individual lanes.
+    pub(crate) fn place_lane(
+        self,
+        fx: &mut FunctionCx<'_, '_, 'tcx>,
+        lane_idx: u64,
+    ) -> CPlace<'tcx> {
+        let layout = self.layout();
+        assert!(layout.ty.is_simd());
+        let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
+        let lane_layout = fx.layout_of(lane_ty);
+        assert!(lane_idx < lane_count);
+
+        match self.inner {
+            CPlaceInner::Var(local, var) => {
+                assert!(matches!(layout.abi, Abi::Vector { .. }));
+                CPlace {
+                    inner: CPlaceInner::VarLane(local, var, lane_idx.try_into().unwrap()),
+                    layout: lane_layout,
+                }
+            }
+            CPlaceInner::VarPair(_, _, _) => unreachable!(),
+            CPlaceInner::VarLane(_, _, _) => unreachable!(),
+            CPlaceInner::Addr(ptr, None) => {
+                let field_offset = lane_layout.size * lane_idx;
+                let field_ptr = ptr.offset_i64(fx, i64::try_from(field_offset.bytes()).unwrap());
+                CPlace::for_ptr(field_ptr, lane_layout)
+            }
+            CPlaceInner::Addr(_, Some(_)) => unreachable!(),
+        }
+    }
+
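// A hypothetical `simd_insert`-style use of `place_lane` (a sketch): replace
// one lane of a SIMD place while leaving the others untouched:
//
//     ret.place_lane(fx, 2).write_cvalue(fx, new_lane_val);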
     pub(crate) fn place_index(
         self,
         fx: &mut FunctionCx<'_, '_, 'tcx>,