git.lizzy.rs Git - rust.git/commitdiff
organize intrinsics into groups
authorRalf Jung <post@ralfj.de>
Sun, 12 Apr 2020 09:01:59 +0000 (11:01 +0200)
committerRalf Jung <post@ralfj.de>
Sun, 12 Apr 2020 09:01:59 +0000 (11:01 +0200)
src/shims/intrinsics.rs

index 844eac398de818f1853b35bcdf73c5cb19f9f235..de34f1a7b6cefec5d33923dc8a81e300919aa516 100644 (file)
@@ -37,183 +37,9 @@ fn call_intrinsic(
             Some(p) => p,
         };
 
+        // Then handle terminating intrinsics.
         match intrinsic_name {
-            "try" => return this.handle_try(args, dest, ret),
-
-            "arith_offset" => {
-                let offset = this.read_scalar(args[1])?.to_machine_isize(this)?;
-                let ptr = this.read_scalar(args[0])?.not_undef()?;
-
-                let pointee_ty = substs.type_at(0);
-                let pointee_size = i64::try_from(this.layout_of(pointee_ty)?.size.bytes()).unwrap();
-                let offset = offset.overflowing_mul(pointee_size).0;
-                let result_ptr = ptr.ptr_wrapping_signed_offset(offset, this);
-                this.write_scalar(result_ptr, dest)?;
-            }
-
-            "assume" => {
-                let cond = this.read_scalar(args[0])?.to_bool()?;
-                if !cond {
-                    throw_ub_format!("`assume` intrinsic called with `false`");
-                }
-            }
-
-            "volatile_load" => {
-                let place = this.deref_operand(args[0])?;
-                this.copy_op(place.into(), dest)?;
-            }
-
-            "volatile_store" => {
-                let place = this.deref_operand(args[0])?;
-                this.copy_op(args[1], place.into())?;
-            }
-
-            #[rustfmt::skip]
-            | "atomic_load"
-            | "atomic_load_relaxed"
-            | "atomic_load_acq"
-            => {
-                let place = this.deref_operand(args[0])?;
-                let val = this.read_scalar(place.into())?; // make sure it fits into a scalar; otherwise it cannot be atomic
-
-                // Check alignment requirements. Atomics must always be aligned to their size,
-                // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
-                // be 8-aligned).
-                let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
-                this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
-
-                this.write_scalar(val, dest)?;
-            }
-
-            #[rustfmt::skip]
-            | "atomic_store"
-            | "atomic_store_relaxed"
-            | "atomic_store_rel"
-            => {
-                let place = this.deref_operand(args[0])?;
-                let val = this.read_scalar(args[1])?; // make sure it fits into a scalar; otherwise it cannot be atomic
-
-                // Check alignment requirements. Atomics must always be aligned to their size,
-                // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
-                // be 8-aligned).
-                let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
-                this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
-
-                this.write_scalar(val, place.into())?;
-            }
-
-            #[rustfmt::skip]
-            | "atomic_fence_acq"
-            | "atomic_fence_rel"
-            | "atomic_fence_acqrel"
-            | "atomic_fence"
-            | "atomic_singlethreadfence_acq"
-            | "atomic_singlethreadfence_rel"
-            | "atomic_singlethreadfence_acqrel"
-            | "atomic_singlethreadfence"
-            => {
-                // we are inherently singlethreaded and singlecored, this is a nop
-            }
-
-            _ if intrinsic_name.starts_with("atomic_xchg") => {
-                let place = this.deref_operand(args[0])?;
-                let new = this.read_scalar(args[1])?;
-                let old = this.read_scalar(place.into())?;
-
-                // Check alignment requirements. Atomics must always be aligned to their size,
-                // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
-                // be 8-aligned).
-                let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
-                this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
-
-                this.write_scalar(old, dest)?; // old value is returned
-                this.write_scalar(new, place.into())?;
-            }
-
-            _ if intrinsic_name.starts_with("atomic_cxchg") => {
-                let place = this.deref_operand(args[0])?;
-                let expect_old = this.read_immediate(args[1])?; // read as immediate for the sake of `binary_op()`
-                let new = this.read_scalar(args[2])?;
-                let old = this.read_immediate(place.into())?; // read as immediate for the sake of `binary_op()`
-
-                // Check alignment requirements. Atomics must always be aligned to their size,
-                // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
-                // be 8-aligned).
-                let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
-                this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
-
-                // `binary_op` will bail if either of them is not a scalar.
-                let eq = this.overflowing_binary_op(mir::BinOp::Eq, old, expect_old)?.0;
-                let res = Immediate::ScalarPair(old.to_scalar_or_undef(), eq.into());
-                // Return old value.
-                this.write_immediate(res, dest)?;
-                // Update ptr depending on comparison.
-                if eq.to_bool()? {
-                    this.write_scalar(new, place.into())?;
-                }
-            }
-
-            #[rustfmt::skip]
-            | "atomic_or"
-            | "atomic_or_acq"
-            | "atomic_or_rel"
-            | "atomic_or_acqrel"
-            | "atomic_or_relaxed"
-            | "atomic_xor"
-            | "atomic_xor_acq"
-            | "atomic_xor_rel"
-            | "atomic_xor_acqrel"
-            | "atomic_xor_relaxed"
-            | "atomic_and"
-            | "atomic_and_acq"
-            | "atomic_and_rel"
-            | "atomic_and_acqrel"
-            | "atomic_and_relaxed"
-            | "atomic_nand"
-            | "atomic_nand_acq"
-            | "atomic_nand_rel"
-            | "atomic_nand_acqrel"
-            | "atomic_nand_relaxed"
-            | "atomic_xadd"
-            | "atomic_xadd_acq"
-            | "atomic_xadd_rel"
-            | "atomic_xadd_acqrel"
-            | "atomic_xadd_relaxed"
-            | "atomic_xsub"
-            | "atomic_xsub_acq"
-            | "atomic_xsub_rel"
-            | "atomic_xsub_acqrel"
-            | "atomic_xsub_relaxed"
-            => {
-                let place = this.deref_operand(args[0])?;
-                if !place.layout.ty.is_integral() {
-                    bug!("Atomic arithmetic operations only work on integer types");
-                }
-                let rhs = this.read_immediate(args[1])?;
-                let old = this.read_immediate(place.into())?;
-
-                // Check alignment requirements. Atomics must always be aligned to their size,
-                // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
-                // be 8-aligned).
-                let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
-                this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
-
-                this.write_immediate(*old, dest)?; // old value is returned
-                let (op, neg) = match intrinsic_name.split('_').nth(1).unwrap() {
-                    "or" => (mir::BinOp::BitOr, false),
-                    "xor" => (mir::BinOp::BitXor, false),
-                    "and" => (mir::BinOp::BitAnd, false),
-                    "xadd" => (mir::BinOp::Add, false),
-                    "xsub" => (mir::BinOp::Sub, false),
-                    "nand" => (mir::BinOp::BitAnd, true),
-                    _ => bug!(),
-                };
-                // Atomics wrap around on overflow.
-                let val = this.binary_op(op, old, rhs)?;
-                let val = if neg { this.unary_op(mir::UnOp::Not, val)? } else { val };
-                this.write_immediate(*val, place.into())?;
-            }
-
+            // Raw memory accesses
             #[rustfmt::skip]
             | "copy"
             | "copy_nonoverlapping"
@@ -240,6 +66,51 @@ fn call_intrinsic(
                 }
             }
 
+            "move_val_init" => {
+                let place = this.deref_operand(args[0])?;
+                this.copy_op(args[1], place.into())?;
+            }
+
+            "volatile_load" => {
+                let place = this.deref_operand(args[0])?;
+                this.copy_op(place.into(), dest)?;
+            }
+            "volatile_store" => {
+                let place = this.deref_operand(args[0])?;
+                this.copy_op(args[1], place.into())?;
+            }
+
+            "write_bytes" => {
+                let ty = substs.type_at(0);
+                let ty_layout = this.layout_of(ty)?;
+                let val_byte = this.read_scalar(args[1])?.to_u8()?;
+                let ptr = this.read_scalar(args[0])?.not_undef()?;
+                let count = this.read_scalar(args[2])?.to_machine_usize(this)?;
+                let byte_count = ty_layout.size.checked_mul(count, this)
+                    .ok_or_else(|| err_ub_format!("overflow computing total size of `write_bytes`"))?;
+                this.memory
+                    .write_bytes(ptr, iter::repeat(val_byte).take(byte_count.bytes() as usize))?;
+            }
+
+            // Pointer arithmetic
+            "arith_offset" => {
+                let offset = this.read_scalar(args[1])?.to_machine_isize(this)?;
+                let ptr = this.read_scalar(args[0])?.not_undef()?;
+
+                let pointee_ty = substs.type_at(0);
+                let pointee_size = i64::try_from(this.layout_of(pointee_ty)?.size.bytes()).unwrap();
+                let offset = offset.overflowing_mul(pointee_size).0;
+                let result_ptr = ptr.ptr_wrapping_signed_offset(offset, this);
+                this.write_scalar(result_ptr, dest)?;
+            }
+            "offset" => {
+                let offset = this.read_scalar(args[1])?.to_machine_isize(this)?;
+                let ptr = this.read_scalar(args[0])?.not_undef()?;
+                let result_ptr = this.pointer_offset_inbounds(ptr, substs.type_at(0), offset)?;
+                this.write_scalar(result_ptr, dest)?;
+            }
+
+            // Floating-point operations
             #[rustfmt::skip]
             | "sinf32"
             | "fabsf32"
@@ -363,58 +234,7 @@ fn call_intrinsic(
                 };
                 this.write_scalar(Scalar::from_f64(res), dest)?;
             }
-
-            "exact_div" =>
-                this.exact_div(this.read_immediate(args[0])?, this.read_immediate(args[1])?, dest)?,
-
-            "forget" => {}
-
-            #[rustfmt::skip]
-            | "likely"
-            | "unlikely"
-            => {
-                // These just return their argument
-                let b = this.read_immediate(args[0])?;
-                this.write_immediate(*b, dest)?;
-            }
-
-            "pref_align_of" => {
-                let ty = substs.type_at(0);
-                let layout = this.layout_of(ty)?;
-                let align = layout.align.pref.bytes();
-                let align_val = Scalar::from_machine_usize(align, this);
-                this.write_scalar(align_val, dest)?;
-            }
-
-            "move_val_init" => {
-                let place = this.deref_operand(args[0])?;
-                this.copy_op(args[1], place.into())?;
-            }
-
-            "offset" => {
-                let offset = this.read_scalar(args[1])?.to_machine_isize(this)?;
-                let ptr = this.read_scalar(args[0])?.not_undef()?;
-                let result_ptr = this.pointer_offset_inbounds(ptr, substs.type_at(0), offset)?;
-                this.write_scalar(result_ptr, dest)?;
-            }
-
-            "assert_inhabited" |
-            "assert_zero_valid" |
-            "assert_uninit_valid" => {
-                let ty = substs.type_at(0);
-                let layout = this.layout_of(ty)?;
-                // Abort here because the caller might not be panic safe.
-                if layout.abi.is_uninhabited() {
-                    throw_machine_stop!(TerminationInfo::Abort(Some(format!("attempted to instantiate uninhabited type `{}`", ty))))
-                }
-                if intrinsic_name == "assert_zero_valid" && !layout.might_permit_raw_init(this, /*zero:*/ true).unwrap() {
-                    throw_machine_stop!(TerminationInfo::Abort(Some(format!("attempted to zero-initialize type `{}`, which is invalid", ty))))
-                }
-                if intrinsic_name == "assert_uninit_valid" && !layout.might_permit_raw_init(this, /*zero:*/ false).unwrap() {
-                    throw_machine_stop!(TerminationInfo::Abort(Some(format!("attempted to leave type `{}` uninitialized, which is invalid", ty))))
-                }
-            }
-
+            
             "powf32" => {
                 // FIXME: Using host floats.
                 let f = f32::from_bits(this.read_scalar(args[0])?.to_u32()?);
@@ -459,12 +279,177 @@ fn call_intrinsic(
                 this.write_scalar(Scalar::from_u64(f.powi(i).to_bits()), dest)?;
             }
 
-            "size_of_val" => {
-                let mplace = this.deref_operand(args[0])?;
-                let (size, _) = this
-                    .size_and_align_of_mplace(mplace)?
-                    .expect("size_of_val called on extern type");
-                this.write_scalar(Scalar::from_machine_usize(size.bytes(), this), dest)?;
+            // Atomic operations
+            #[rustfmt::skip]
+            | "atomic_load"
+            | "atomic_load_relaxed"
+            | "atomic_load_acq"
+            => {
+                let place = this.deref_operand(args[0])?;
+                let val = this.read_scalar(place.into())?; // make sure it fits into a scalar; otherwise it cannot be atomic
+
+                // Check alignment requirements. Atomics must always be aligned to their size,
+                // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
+                // be 8-aligned).
+                let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
+                this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
+
+                this.write_scalar(val, dest)?;
+            }
+
+            #[rustfmt::skip]
+            | "atomic_store"
+            | "atomic_store_relaxed"
+            | "atomic_store_rel"
+            => {
+                let place = this.deref_operand(args[0])?;
+                let val = this.read_scalar(args[1])?; // make sure it fits into a scalar; otherwise it cannot be atomic
+
+                // Check alignment requirements. Atomics must always be aligned to their size,
+                // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
+                // be 8-aligned).
+                let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
+                this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
+
+                this.write_scalar(val, place.into())?;
+            }
+
+            #[rustfmt::skip]
+            | "atomic_fence_acq"
+            | "atomic_fence_rel"
+            | "atomic_fence_acqrel"
+            | "atomic_fence"
+            | "atomic_singlethreadfence_acq"
+            | "atomic_singlethreadfence_rel"
+            | "atomic_singlethreadfence_acqrel"
+            | "atomic_singlethreadfence"
+            => {
+                // we are inherently singlethreaded and singlecored, this is a nop
+            }
+
+            _ if intrinsic_name.starts_with("atomic_xchg") => {
+                let place = this.deref_operand(args[0])?;
+                let new = this.read_scalar(args[1])?;
+                let old = this.read_scalar(place.into())?;
+
+                // Check alignment requirements. Atomics must always be aligned to their size,
+                // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
+                // be 8-aligned).
+                let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
+                this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
+
+                this.write_scalar(old, dest)?; // old value is returned
+                this.write_scalar(new, place.into())?;
+            }
+
+            _ if intrinsic_name.starts_with("atomic_cxchg") => {
+                let place = this.deref_operand(args[0])?;
+                let expect_old = this.read_immediate(args[1])?; // read as immediate for the sake of `binary_op()`
+                let new = this.read_scalar(args[2])?;
+                let old = this.read_immediate(place.into())?; // read as immediate for the sake of `binary_op()`
+
+                // Check alignment requirements. Atomics must always be aligned to their size,
+                // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
+                // be 8-aligned).
+                let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
+                this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
+
+                // `binary_op` will bail if either of them is not a scalar.
+                let eq = this.overflowing_binary_op(mir::BinOp::Eq, old, expect_old)?.0;
+                let res = Immediate::ScalarPair(old.to_scalar_or_undef(), eq.into());
+                // Return old value.
+                this.write_immediate(res, dest)?;
+                // Update ptr depending on comparison.
+                if eq.to_bool()? {
+                    this.write_scalar(new, place.into())?;
+                }
+            }
+
+            #[rustfmt::skip]
+            | "atomic_or"
+            | "atomic_or_acq"
+            | "atomic_or_rel"
+            | "atomic_or_acqrel"
+            | "atomic_or_relaxed"
+            | "atomic_xor"
+            | "atomic_xor_acq"
+            | "atomic_xor_rel"
+            | "atomic_xor_acqrel"
+            | "atomic_xor_relaxed"
+            | "atomic_and"
+            | "atomic_and_acq"
+            | "atomic_and_rel"
+            | "atomic_and_acqrel"
+            | "atomic_and_relaxed"
+            | "atomic_nand"
+            | "atomic_nand_acq"
+            | "atomic_nand_rel"
+            | "atomic_nand_acqrel"
+            | "atomic_nand_relaxed"
+            | "atomic_xadd"
+            | "atomic_xadd_acq"
+            | "atomic_xadd_rel"
+            | "atomic_xadd_acqrel"
+            | "atomic_xadd_relaxed"
+            | "atomic_xsub"
+            | "atomic_xsub_acq"
+            | "atomic_xsub_rel"
+            | "atomic_xsub_acqrel"
+            | "atomic_xsub_relaxed"
+            => {
+                let place = this.deref_operand(args[0])?;
+                if !place.layout.ty.is_integral() {
+                    bug!("Atomic arithmetic operations only work on integer types");
+                }
+                let rhs = this.read_immediate(args[1])?;
+                let old = this.read_immediate(place.into())?;
+
+                // Check alignment requirements. Atomics must always be aligned to their size,
+                // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
+                // be 8-aligned).
+                let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
+                this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
+
+                this.write_immediate(*old, dest)?; // old value is returned
+                let (op, neg) = match intrinsic_name.split('_').nth(1).unwrap() {
+                    "or" => (mir::BinOp::BitOr, false),
+                    "xor" => (mir::BinOp::BitXor, false),
+                    "and" => (mir::BinOp::BitAnd, false),
+                    "xadd" => (mir::BinOp::Add, false),
+                    "xsub" => (mir::BinOp::Sub, false),
+                    "nand" => (mir::BinOp::BitAnd, true),
+                    _ => bug!(),
+                };
+                // Atomics wrap around on overflow.
+                let val = this.binary_op(op, old, rhs)?;
+                let val = if neg { this.unary_op(mir::UnOp::Not, val)? } else { val };
+                this.write_immediate(*val, place.into())?;
+            }
+
+            // Query type information
+            "assert_inhabited" |
+            "assert_zero_valid" |
+            "assert_uninit_valid" => {
+                let ty = substs.type_at(0);
+                let layout = this.layout_of(ty)?;
+                // Abort here because the caller might not be panic safe.
+                if layout.abi.is_uninhabited() {
+                    throw_machine_stop!(TerminationInfo::Abort(Some(format!("attempted to instantiate uninhabited type `{}`", ty))))
+                }
+                if intrinsic_name == "assert_zero_valid" && !layout.might_permit_raw_init(this, /*zero:*/ true).unwrap() {
+                    throw_machine_stop!(TerminationInfo::Abort(Some(format!("attempted to zero-initialize type `{}`, which is invalid", ty))))
+                }
+                if intrinsic_name == "assert_uninit_valid" && !layout.might_permit_raw_init(this, /*zero:*/ false).unwrap() {
+                    throw_machine_stop!(TerminationInfo::Abort(Some(format!("attempted to leave type `{}` uninitialized, which is invalid", ty))))
+                }
+            }
+
+            "pref_align_of" => {
+                let ty = substs.type_at(0);
+                let layout = this.layout_of(ty)?;
+                let align = layout.align.pref.bytes();
+                let align_val = Scalar::from_machine_usize(align, this);
+                this.write_scalar(align_val, dest)?;
             }
 
             #[rustfmt::skip]
@@ -478,18 +463,38 @@ fn call_intrinsic(
                 this.write_scalar(Scalar::from_machine_usize(align.bytes(), this), dest)?;
             }
 
-            "write_bytes" => {
-                let ty = substs.type_at(0);
-                let ty_layout = this.layout_of(ty)?;
-                let val_byte = this.read_scalar(args[1])?.to_u8()?;
-                let ptr = this.read_scalar(args[0])?.not_undef()?;
-                let count = this.read_scalar(args[2])?.to_machine_usize(this)?;
-                let byte_count = ty_layout.size.checked_mul(count, this)
-                    .ok_or_else(|| err_ub_format!("overflow computing total size of `write_bytes`"))?;
-                this.memory
-                    .write_bytes(ptr, iter::repeat(val_byte).take(byte_count.bytes() as usize))?;
+            "size_of_val" => {
+                let mplace = this.deref_operand(args[0])?;
+                let (size, _) = this
+                    .size_and_align_of_mplace(mplace)?
+                    .expect("size_of_val called on extern type");
+                this.write_scalar(Scalar::from_machine_usize(size.bytes(), this), dest)?;
+            }
+
+            // Other
+            "assume" => {
+                let cond = this.read_scalar(args[0])?.to_bool()?;
+                if !cond {
+                    throw_ub_format!("`assume` intrinsic called with `false`");
+                }
+            }
+
+            "exact_div" =>
+                this.exact_div(this.read_immediate(args[0])?, this.read_immediate(args[1])?, dest)?,
+
+            "forget" => {}
+
+            #[rustfmt::skip]
+            | "likely"
+            | "unlikely"
+            => {
+                // These just return their argument
+                let b = this.read_immediate(args[0])?;
+                this.write_immediate(*b, dest)?;
             }
 
+            "try" => return this.handle_try(args, dest, ret),
+
             name => throw_unsup_format!("unimplemented intrinsic: {}", name),
         }