X-Git-Url: https://git.lizzy.rs/?a=blobdiff_plain;f=src%2Fshims%2Fintrinsics.rs;h=5ef7fba7f59083b093902b40a094b4b2fe851d95;hb=cf9340113efcf5c7bea4a143f42c8ea47da4550e;hp=5c3ff139c0262f7ee62b3fc75e95717c1569e498;hpb=d9d6df93a98f6545b2757a802e440eb043f23945;p=rust.git

diff --git a/src/shims/intrinsics.rs b/src/shims/intrinsics.rs
index 5c3ff139c02..5ef7fba7f59 100644
--- a/src/shims/intrinsics.rs
+++ b/src/shims/intrinsics.rs
@@ -1,11 +1,13 @@
+use std::iter;
+
 use rustc_apfloat::Float;
 use rustc::mir;
 use rustc::mir::interpret::{InterpResult, PointerArithmetic};
-use rustc::ty::layout::{self, LayoutOf, Size};
+use rustc::ty::layout::{self, LayoutOf, Size, Align};
 use rustc::ty;
 
 use crate::{
-    PlaceTy, OpTy, ImmTy, Immediate, Scalar, Tag,
+    PlaceTy, OpTy, Immediate, Scalar, Tag,
     OperatorEvalContextExt
 };
 
@@ -28,8 +30,8 @@ fn call_intrinsic(
         // (as opposed to through a place), we have to remember to erase any tag
        // that might still hang around!
 
-        let intrinsic_name = this.tcx.item_name(instance.def_id()).as_str();
-        match intrinsic_name.get() {
+        let intrinsic_name = &*tcx.item_name(instance.def_id()).as_str();
+        match intrinsic_name {
             "arith_offset" => {
                 let offset = this.read_scalar(args[1])?.to_isize(this)?;
                 let ptr = this.read_scalar(args[0])?.not_undef()?;
@@ -44,60 +46,91 @@ fn call_intrinsic(
             "assume" => {
                 let cond = this.read_scalar(args[0])?.to_bool()?;
                 if !cond {
-                    throw_unsup!(AssumptionNotHeld);
+                    throw_ub_format!("`assume` intrinsic called with `false`");
                 }
             }
 
+            "volatile_load" => {
+                let place = this.deref_operand(args[0])?;
+                this.copy_op(place.into(), dest)?;
+            }
+
+            "volatile_store" => {
+                let place = this.deref_operand(args[0])?;
+                this.copy_op(args[1], place.into())?;
+            }
+
             "atomic_load" |
             "atomic_load_relaxed" |
             "atomic_load_acq" => {
-                let ptr = this.deref_operand(args[0])?;
-                let val = this.read_scalar(ptr.into())?; // make sure it fits into a scalar; otherwise it cannot be atomic
-                this.write_scalar(val, dest)?;
-            }
+                let place = this.deref_operand(args[0])?;
+                let val = this.read_scalar(place.into())?; // make sure it fits into a scalar; otherwise it cannot be atomic
 
-            "volatile_load" => {
-                let ptr = this.deref_operand(args[0])?;
-                this.copy_op(ptr.into(), dest)?;
+                // Check alignment requirements. Atomics must always be aligned to their size,
+                // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
+                // be 8-aligned).
+                let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
+                this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
+
+                this.write_scalar(val, dest)?;
             }
 
             "atomic_store" |
             "atomic_store_relaxed" |
             "atomic_store_rel" => {
-                let ptr = this.deref_operand(args[0])?;
+                let place = this.deref_operand(args[0])?;
                 let val = this.read_scalar(args[1])?; // make sure it fits into a scalar; otherwise it cannot be atomic
-                this.write_scalar(val, ptr.into())?;
-            }
 
-            "volatile_store" => {
-                let ptr = this.deref_operand(args[0])?;
-                this.copy_op(args[1], ptr.into())?;
+                // Check alignment requirements. Atomics must always be aligned to their size,
+                // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
+                // be 8-aligned).
+                let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
+                this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
+
+                this.write_scalar(val, place.into())?;
             }
 
-            "atomic_fence_acq" => {
+            "atomic_fence_acq" |
+            "atomic_fence_rel" |
+            "atomic_fence_acqrel" |
+            "atomic_fence" => {
                 // we are inherently singlethreaded and singlecored, this is a nop
             }
 
             _ if intrinsic_name.starts_with("atomic_xchg") => {
-                let ptr = this.deref_operand(args[0])?;
+                let place = this.deref_operand(args[0])?;
                 let new = this.read_scalar(args[1])?;
-                let old = this.read_scalar(ptr.into())?;
+                let old = this.read_scalar(place.into())?;
+
+                // Check alignment requirements. Atomics must always be aligned to their size,
+                // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
+                // be 8-aligned).
+                let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
+                this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
+
                 this.write_scalar(old, dest)?; // old value is returned
-                this.write_scalar(new, ptr.into())?;
+                this.write_scalar(new, place.into())?;
             }
 
             _ if intrinsic_name.starts_with("atomic_cxchg") => {
-                let ptr = this.deref_operand(args[0])?;
+                let place = this.deref_operand(args[0])?;
                 let expect_old = this.read_immediate(args[1])?; // read as immediate for the sake of `binary_op()`
                 let new = this.read_scalar(args[2])?;
-                let old = this.read_immediate(ptr.into())?; // read as immediate for the sake of `binary_op()`
+                let old = this.read_immediate(place.into())?; // read as immediate for the sake of `binary_op()`
+
+                // Check alignment requirements. Atomics must always be aligned to their size,
+                // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
+                // be 8-aligned).
+                let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
+                this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
+
                 // binary_op will bail if either of them is not a scalar
-                let (eq, _) = this.binary_op(mir::BinOp::Eq, old, expect_old)?;
+                let eq = this.overflowing_binary_op(mir::BinOp::Eq, old, expect_old)?.0;
                 let res = Immediate::ScalarPair(old.to_scalar_or_undef(), eq.into());
                 this.write_immediate(res, dest)?; // old value is returned
                 // update ptr depending on comparison
                 if eq.to_bool()? {
-                    this.write_scalar(new, ptr.into())?;
+                    this.write_scalar(new, place.into())?;
                 }
             }
 
@@ -131,12 +164,19 @@ fn call_intrinsic(
             "atomic_xsub_rel" |
             "atomic_xsub_acqrel" |
             "atomic_xsub_relaxed" => {
-                let ptr = this.deref_operand(args[0])?;
-                if !ptr.layout.ty.is_integral() {
+                let place = this.deref_operand(args[0])?;
+                if !place.layout.ty.is_integral() {
                     bug!("Atomic arithmetic operations only work on integer types");
                 }
                 let rhs = this.read_immediate(args[1])?;
-                let old = this.read_immediate(ptr.into())?;
+                let old = this.read_immediate(place.into())?;
+
+                // Check alignment requirements. Atomics must always be aligned to their size,
+                // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
+                // be 8-aligned).
+                let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
+                this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;
+
                 this.write_immediate(*old, dest)?; // old value is returned
                 let (op, neg) = match intrinsic_name.split('_').nth(1).unwrap() {
                     "or" => (mir::BinOp::BitOr, false),
@@ -148,13 +188,13 @@ fn call_intrinsic(
                     _ => bug!(),
                 };
                 // Atomics wrap around on overflow.
-                let (val, _overflowed) = this.binary_op(op, old, rhs)?;
+                let val = this.binary_op(op, old, rhs)?;
                 let val = if neg {
-                    this.unary_op(mir::UnOp::Not, ImmTy::from_scalar(val, old.layout))?
+                    this.unary_op(mir::UnOp::Not, val)?
                 } else {
                     val
                 };
-                this.write_scalar(val, ptr.into())?;
+                this.write_immediate(*val, place.into())?;
             }
 
             "breakpoint" => unimplemented!(), // halt miri
@@ -169,12 +209,12 @@ fn call_intrinsic(
                 let size = Size::from_bytes(count * elem_size);
 
                 let src = this.read_scalar(args[0])?.not_undef()?;
-                let src = this.memory().check_ptr_access(src, size, elem_align)?;
+                let src = this.memory.check_ptr_access(src, size, elem_align)?;
                 let dest = this.read_scalar(args[1])?.not_undef()?;
-                let dest = this.memory().check_ptr_access(dest, size, elem_align)?;
+                let dest = this.memory.check_ptr_access(dest, size, elem_align)?;
 
                 if let (Some(src), Some(dest)) = (src, dest) {
-                    this.memory_mut().copy(
+                    this.memory.copy(
                         src,
                         dest,
                         size,
@@ -190,10 +230,10 @@ fn call_intrinsic(
             }
 
             "sinf32" | "fabsf32" | "cosf32" | "sqrtf32" | "expf32" | "exp2f32" | "logf32" |
-            "log10f32" | "log2f32" | "floorf32" | "ceilf32" | "truncf32" => {
+            "log10f32" | "log2f32" | "floorf32" | "ceilf32" | "truncf32" | "roundf32" => {
                 // FIXME: Using host floats.
                 let f = f32::from_bits(this.read_scalar(args[0])?.to_u32()?);
-                let f = match intrinsic_name.get() {
+                let f = match intrinsic_name {
                     "sinf32" => f.sin(),
                     "fabsf32" => f.abs(),
                     "cosf32" => f.cos(),
@@ -206,16 +246,17 @@ fn call_intrinsic(
                     "floorf32" => f.floor(),
                     "ceilf32" => f.ceil(),
                     "truncf32" => f.trunc(),
+                    "roundf32" => f.round(),
                     _ => bug!(),
                 };
                 this.write_scalar(Scalar::from_u32(f.to_bits()), dest)?;
             }
 
             "sinf64" | "fabsf64" | "cosf64" | "sqrtf64" | "expf64" | "exp2f64" | "logf64" |
-            "log10f64" | "log2f64" | "floorf64" | "ceilf64" | "truncf64" => {
+            "log10f64" | "log2f64" | "floorf64" | "ceilf64" | "truncf64" | "roundf64" => {
                 // FIXME: Using host floats.
                 let f = f64::from_bits(this.read_scalar(args[0])?.to_u64()?);
-                let f = match intrinsic_name.get() {
+                let f = match intrinsic_name {
                     "sinf64" => f.sin(),
                     "fabsf64" => f.abs(),
                     "cosf64" => f.cos(),
@@ -228,6 +269,7 @@ fn call_intrinsic(
                     "floorf64" => f.floor(),
                     "ceilf64" => f.ceil(),
                     "truncf64" => f.trunc(),
+                    "roundf64" => f.round(),
                     _ => bug!(),
                 };
                 this.write_scalar(Scalar::from_u64(f.to_bits()), dest)?;
@@ -236,7 +278,7 @@ fn call_intrinsic(
             "fadd_fast" | "fsub_fast" | "fmul_fast" | "fdiv_fast" | "frem_fast" => {
                 let a = this.read_immediate(args[0])?;
                 let b = this.read_immediate(args[1])?;
-                let op = match intrinsic_name.get() {
+                let op = match intrinsic_name {
                     "fadd_fast" => mir::BinOp::Add,
                     "fsub_fast" => mir::BinOp::Sub,
                     "fmul_fast" => mir::BinOp::Mul,
@@ -250,7 +292,7 @@ fn call_intrinsic(
             "minnumf32" | "maxnumf32" => {
                 let a = this.read_scalar(args[0])?.to_f32()?;
                 let b = this.read_scalar(args[1])?.to_f32()?;
-                let res = if intrinsic_name.get().starts_with("min") {
+                let res = if intrinsic_name.starts_with("min") {
                     a.min(b)
                 } else {
                     a.max(b)
@@ -261,7 +303,7 @@ fn call_intrinsic(
             "minnumf64" | "maxnumf64" => {
                 let a = this.read_scalar(args[0])?.to_f64()?;
                 let b = this.read_scalar(args[1])?.to_f64()?;
-                let res = if intrinsic_name.get().starts_with("min") {
+                let res = if intrinsic_name.starts_with("min") {
                     a.min(b)
                 } else {
                     a.max(b)
@@ -275,13 +317,13 @@ fn call_intrinsic(
                 let a = this.read_immediate(args[0])?;
                 let b = this.read_immediate(args[1])?;
                 // check x % y != 0
-                if this.binary_op(mir::BinOp::Rem, a, b)?.0.to_bits(dest.layout.size)? != 0 {
+                if this.overflowing_binary_op(mir::BinOp::Rem, a, b)?.0.to_bits(dest.layout.size)? != 0 {
                     // Check if `b` is -1, which is the "min_value / -1" case.
                     let minus1 = Scalar::from_int(-1, dest.layout.size);
                     return Err(if b.to_scalar().unwrap() == minus1 {
-                        err_ub!(Ub(format!("exact_div: result of dividing MIN by -1 cannot be represented")))
+                        err_ub_format!("exact_div: result of dividing MIN by -1 cannot be represented")
                     } else {
-                        err_ub!(Ub(format!("exact_div: {:?} cannot be divided by {:?} without remainder", *a, *b)))
+                        err_ub_format!("exact_div: {:?} cannot be divided by {:?} without remainder", *a, *b)
                     }.into());
                 }
                 this.binop_ignore_overflow(mir::BinOp::Div, a, b, dest)?;
@@ -316,10 +358,8 @@ fn call_intrinsic(
                         _ => {
                             // Do it in memory
                             let mplace = this.force_allocation(dest)?;
-                            assert!(mplace.meta.is_none());
-                            // not a zst, must be valid pointer
-                            let ptr = mplace.ptr.to_ptr()?;
-                            this.memory_mut().get_mut(ptr.alloc_id)?.write_repeat(tcx, ptr, 0, dest.layout.size)?;
+                            mplace.meta.unwrap_none(); // must be sized
+                            this.memory.write_bytes(mplace.ptr, iter::repeat(0u8).take(dest.layout.size.bytes() as usize))?;
                         }
                     }
                 }
@@ -335,8 +375,8 @@ fn call_intrinsic(
             }
 
             "move_val_init" => {
-                let ptr = this.deref_operand(args[0])?;
-                this.copy_op(args[1], ptr.into())?;
+                let place = this.deref_operand(args[0])?;
+                this.copy_op(args[1], place.into())?;
             }
 
             "offset" => {
@@ -472,19 +512,51 @@ fn call_intrinsic(
             "unchecked_add" | "unchecked_sub" | "unchecked_mul" => {
                 let l = this.read_immediate(args[0])?;
                 let r = this.read_immediate(args[1])?;
-                let op = match intrinsic_name.get() {
+                let op = match intrinsic_name {
                     "unchecked_add" => mir::BinOp::Add,
                     "unchecked_sub" => mir::BinOp::Sub,
                     "unchecked_mul" => mir::BinOp::Mul,
                     _ => bug!(),
                 };
-                let (res, overflowed) = this.binary_op(op, l, r)?;
+                let (res, overflowed, _ty) = this.overflowing_binary_op(op, l, r)?;
                 if overflowed {
-                    throw_ub_format!("Overflowing arithmetic in {}", intrinsic_name.get());
+                    throw_ub_format!("Overflowing arithmetic in {}", intrinsic_name);
                 }
                 this.write_scalar(res, dest)?;
             }
 
+            "uninit" => {
+                // Check fast path: we don't want to force an allocation in case the destination is a simple value,
+                // but we also do not want to create a new allocation with 0s and then copy that over.
+                // FIXME: We do not properly validate in case of ZSTs and when doing it in memory!
+                // However, this only affects direct calls of the intrinsic; calls to the stable
+                // functions wrapping them do get their validation.
+                // FIXME: should we check alignment for ZSTs?
+                use crate::ScalarMaybeUndef;
+                if !dest.layout.is_zst() {
+                    match dest.layout.abi {
+                        layout::Abi::Scalar(..) => {
+                            let x = ScalarMaybeUndef::Undef;
+                            this.write_immediate(Immediate::Scalar(x), dest)?;
+                        }
+                        layout::Abi::ScalarPair(..) => {
+                            let x = ScalarMaybeUndef::Undef;
+                            this.write_immediate(Immediate::ScalarPair(x, x), dest)?;
+                        }
+                        _ => {
+                            // Do it in memory
+                            let mplace = this.force_allocation(dest)?;
+                            mplace.meta.unwrap_none();
+                            let ptr = mplace.ptr.to_ptr()?;
+                            // We know the return place is in-bounds
+                            this.memory
+                                .get_mut(ptr.alloc_id)?
+                                .mark_definedness(ptr, dest.layout.size, false);
+                        }
+                    }
+                }
+            }
+
             "write_bytes" => {
                 let ty = substs.type_at(0);
                 let ty_layout = this.layout_of(ty)?;
@@ -492,16 +564,7 @@ fn call_intrinsic(
                 let ptr = this.read_scalar(args[0])?.not_undef()?;
                 let count = this.read_scalar(args[2])?.to_usize(this)?;
                 let byte_count = ty_layout.size * count;
-                match this.memory().check_ptr_access(ptr, byte_count, ty_layout.align.abi)? {
-                    Some(ptr) => {
-                        this.memory_mut()
-                            .get_mut(ptr.alloc_id)?
-                            .write_repeat(tcx, ptr, val_byte, byte_count)?;
-                    }
-                    None => {
-                        // Size is 0, nothing to do.
-                    }
-                }
+                this.memory.write_bytes(ptr, iter::repeat(val_byte).take(byte_count.bytes() as usize))?;
             }
 
             name => throw_unsup_format!("unimplemented intrinsic: {}", name),
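
The comment repeated next to each new check_ptr_access call ("Atomics must always be aligned to their size, even if the type they wrap would be less aligned") can be observed directly from std's atomic wrappers. Below is a minimal standalone sketch, not part of this patch, assuming a target that provides AtomicU64; the exact align_of::<u64>() value printed depends on the target ABI.

    // Illustrates the invariant the added alignment checks enforce: an atomic
    // is aligned to its own size, even when the wrapped integer type's ABI
    // alignment is smaller (e.g. u64 on 32-bit x86 is typically 4-aligned).
    use std::mem::{align_of, size_of};
    use std::sync::atomic::AtomicU64;

    fn main() {
        println!("align_of::<u64>       = {}", align_of::<u64>());
        println!("align_of::<AtomicU64> = {}", align_of::<AtomicU64>());
        // The atomic wrapper always requires size-alignment (8 bytes here).
        assert_eq!(align_of::<AtomicU64>(), size_of::<AtomicU64>());
    }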