//! Intrinsics and other functions that the miri engine executes without
//! looking at their MIR. Intrinsics/functions supported here are shared by CTFE
//! and miri.

use std::convert::TryFrom;

use rustc_hir::def_id::DefId;
use rustc_middle::mir::{
    self,
    interpret::{uabs, ConstValue, GlobalId, InterpResult, Scalar},
    BinOp,
};
use rustc_middle::ty;
use rustc_middle::ty::subst::SubstsRef;
use rustc_middle::ty::{Ty, TyCtxt};
use rustc_span::symbol::{sym, Symbol};
use rustc_target::abi::{Abi, LayoutOf as _, Primitive, Size};

use super::{
    util::ensure_monomorphic_enough, CheckInAllocMsg, ImmTy, InterpCx, Machine, OpTy, PlaceTy,
};

mod caller_location;
mod type_name;
fn numeric_intrinsic<'tcx, Tag>(
    name: Symbol,
    bits: u128,
    kind: Primitive,
) -> InterpResult<'tcx, Scalar<Tag>> {
    let size = match kind {
        Primitive::Int(integer, _) => integer.size(),
        _ => bug!("invalid `{}` argument: {:?}", name, bits),
    };
    // `bits` holds a `size.bits()`-bit value zero-extended to 128 bits.
    let extra = 128 - u128::from(size.bits());
    let bits_out = match name {
        sym::ctpop => u128::from(bits.count_ones()),
        sym::ctlz => u128::from(bits.leading_zeros()) - extra,
        sym::cttz => u128::from((bits << extra).trailing_zeros()) - extra,
        sym::bswap => (bits << extra).swap_bytes(),
        sym::bitreverse => (bits << extra).reverse_bits(),
        _ => bug!("not a numeric intrinsic: {}", name),
    };
    Ok(Scalar::from_uint(bits_out, size))
}
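
// A minimal sketch (hypothetical test, not part of the original source) of the
// width-adjustment trick above: the operand of e.g. `ctlz::<u8>` arrives
// zero-extended in a `u128`, so the 128-bit helpers would miscount by
// `extra = 128 - 8 = 120` bits without the shift/subtract.
#[cfg(test)]
mod numeric_intrinsic_width_sketch {
    #[test]
    fn width_adjustment_for_u8() {
        let bits: u128 = 0xf0; // the value `0xf0_u8`, zero-extended to 128 bits
        let extra: u128 = 128 - 8;
        // ctlz: drop the 120 leading zeros contributed by zero-extension.
        assert_eq!(u128::from(bits.leading_zeros()) - extra, 0);
        // cttz: shift up first so a zero value saturates at the type's width.
        assert_eq!(u128::from((bits << extra).trailing_zeros()) - extra, 4);
        // bswap/bitreverse: shift up so the swapped value lands in the low bits.
        assert_eq!((bits << extra).swap_bytes(), 0xf0); // bswap of a `u8` is a no-op
        assert_eq!((bits << extra).reverse_bits(), 0x0f);
    }
}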
/// The logic for all nullary intrinsics is implemented here. These intrinsics don't get evaluated
/// inside an `InterpCx` and instead have their value computed directly from rustc internal info.
crate fn eval_nullary_intrinsic<'tcx>(
    tcx: TyCtxt<'tcx>,
    param_env: ty::ParamEnv<'tcx>,
    def_id: DefId,
    substs: SubstsRef<'tcx>,
) -> InterpResult<'tcx, ConstValue<'tcx>> {
    let tp_ty = substs.type_at(0);
    let name = tcx.item_name(def_id);
    Ok(match name {
        sym::type_name => {
            ensure_monomorphic_enough(tcx, tp_ty)?;
            let alloc = type_name::alloc_type_name(tcx, tp_ty);
            ConstValue::Slice { data: alloc, start: 0, end: alloc.len() }
        }
        sym::needs_drop => ConstValue::from_bool(tp_ty.needs_drop(tcx, param_env)),
        sym::size_of | sym::min_align_of | sym::pref_align_of => {
            let layout = tcx.layout_of(param_env.and(tp_ty)).map_err(|e| err_inval!(Layout(e)))?;
            let n = match name {
                sym::pref_align_of => layout.align.pref.bytes(),
                sym::min_align_of => layout.align.abi.bytes(),
                sym::size_of => layout.size.bytes(),
                _ => bug!(),
            };
            ConstValue::from_machine_usize(n, &tcx)
        }
        sym::type_id => {
            ensure_monomorphic_enough(tcx, tp_ty)?;
            ConstValue::from_u64(tcx.type_id_hash(tp_ty))
        }
        sym::variant_count => match tp_ty.kind() {
            ty::Adt(ref adt, _) => ConstValue::from_machine_usize(adt.variants.len() as u64, &tcx),
            ty::Projection(_)
            | ty::Opaque(_, _)
            | ty::Param(_)
            | ty::Bound(_, _)
            | ty::Placeholder(_)
            | ty::Infer(_) => throw_inval!(TooGeneric),
            ty::Bool
            | ty::Char
            | ty::Int(_)
            | ty::Uint(_)
            | ty::Float(_)
            | ty::Foreign(_)
            | ty::Str
            | ty::Array(_, _)
            | ty::Slice(_)
            | ty::RawPtr(_)
            | ty::Ref(_, _, _)
            | ty::FnDef(_, _)
            | ty::FnPtr(_)
            | ty::Dynamic(_, _)
            | ty::Closure(_, _)
            | ty::Generator(_, _, _)
            | ty::GeneratorWitness(_)
            | ty::Never
            | ty::Tuple(_)
            | ty::Error(_) => ConstValue::from_machine_usize(0u64, &tcx),
        },
        other => bug!("`{}` is not a zero arg intrinsic", other),
    })
}
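
// An illustrative sketch (hypothetical test, not from the original source) of
// what the `size_of` arm above computes: the answer comes straight from
// `layout.size.bytes()`, with no interpreter execution involved.
#[cfg(test)]
mod nullary_intrinsic_sketch {
    #[test]
    fn size_of_comes_from_layout() {
        // `std::mem::size_of::<u64>()` bottoms out in the `size_of` arm above:
        // the layout of `u64` reports 8 bytes, packaged as a machine-usize
        // `ConstValue`.
        assert_eq!(std::mem::size_of::<u64>(), 8);
    }
}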
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    /// Returns `true` if emulation happened.
    /// Here we implement the intrinsics that are common to all Miri instances; individual machines
    /// can add their own intrinsic handling.
    pub fn emulate_intrinsic(
        &mut self,
        instance: ty::Instance<'tcx>,
        args: &[OpTy<'tcx, M::PointerTag>],
        ret: Option<(PlaceTy<'tcx, M::PointerTag>, mir::BasicBlock)>,
    ) -> InterpResult<'tcx, bool> {
        let substs = instance.substs;
        let intrinsic_name = self.tcx.item_name(instance.def_id());

        // First handle intrinsics without return place.
        let (dest, ret) = match ret {
            None => match intrinsic_name {
                sym::transmute => throw_ub_format!("transmuting to uninhabited type"),
                sym::unreachable => throw_ub!(Unreachable),
                sym::abort => M::abort(self)?,
                // Unsupported diverging intrinsic.
                _ => return Ok(false),
            },
            Some(p) => p,
        };

        // Keep the patterns in this match ordered the same as the list in
        // `src/librustc_middle/ty/constness.rs`
        match intrinsic_name {
            sym::caller_location => {
                let span = self.find_closest_untracked_caller_location();
                let location = self.alloc_caller_location_for_span(span);
                self.write_scalar(location.ptr, dest)?;
            }
            sym::min_align_of_val | sym::size_of_val => {
                let place = self.deref_operand(args[0])?;
                let (size, align) = self
                    .size_and_align_of(place.meta, place.layout)?
                    .ok_or_else(|| err_unsup_format!("`extern type` does not have known layout"))?;

                let result = match intrinsic_name {
                    sym::min_align_of_val => align.bytes(),
                    sym::size_of_val => size.bytes(),
                    _ => bug!(),
                };

                self.write_scalar(Scalar::from_machine_usize(result, self), dest)?;
            }
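            // For example (illustrative): `size_of_val(&[1u8, 2, 3][..])` dereferences the
            // wide pointer and computes the dynamic size `3` from the slice length stored in
            // the pointer's metadata, which the static `size_of` could not provide.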
            sym::min_align_of
            | sym::pref_align_of
            | sym::size_of
            | sym::type_id
            | sym::type_name
            | sym::needs_drop
            | sym::variant_count => {
                let gid = GlobalId { instance, promoted: None };
                let ty = match intrinsic_name {
                    sym::min_align_of | sym::pref_align_of | sym::size_of | sym::variant_count => {
                        self.tcx.types.usize
                    }
                    sym::needs_drop => self.tcx.types.bool,
                    sym::type_id => self.tcx.types.u64,
                    sym::type_name => self.tcx.mk_static_str(),
                    _ => bug!("already checked for nullary intrinsics"),
                };
                let val =
                    self.tcx.const_eval_global_id(self.param_env, gid, Some(self.tcx.span))?;
                let const_ = ty::Const { val: ty::ConstKind::Value(val), ty };
                let val = self.const_to_op(&const_, None)?;
                self.copy_op(val, dest)?;
            }
            sym::ctpop
            | sym::cttz
            | sym::cttz_nonzero
            | sym::ctlz
            | sym::ctlz_nonzero
            | sym::bswap
            | sym::bitreverse => {
                let ty = substs.type_at(0);
                let layout_of = self.layout_of(ty)?;
                let val = self.read_scalar(args[0])?.check_init()?;
                let bits = self.force_bits(val, layout_of.size)?;
                let kind = match layout_of.abi {
                    Abi::Scalar(ref scalar) => scalar.value,
                    _ => span_bug!(
                        self.cur_span(),
                        "{} called on invalid type {:?}",
                        intrinsic_name,
                        ty
                    ),
                };
                let (nonzero, intrinsic_name) = match intrinsic_name {
                    sym::cttz_nonzero => (true, sym::cttz),
                    sym::ctlz_nonzero => (true, sym::ctlz),
                    other => (false, other),
                };
                if nonzero && bits == 0 {
                    throw_ub_format!("`{}_nonzero` called on 0", intrinsic_name);
                }
                let out_val = numeric_intrinsic(intrinsic_name, bits, kind)?;
                self.write_scalar(out_val, dest)?;
            }
            sym::wrapping_add
            | sym::wrapping_sub
            | sym::wrapping_mul
            | sym::add_with_overflow
            | sym::sub_with_overflow
            | sym::mul_with_overflow => {
                let lhs = self.read_immediate(args[0])?;
                let rhs = self.read_immediate(args[1])?;
                let (bin_op, ignore_overflow) = match intrinsic_name {
                    sym::wrapping_add => (BinOp::Add, true),
                    sym::wrapping_sub => (BinOp::Sub, true),
                    sym::wrapping_mul => (BinOp::Mul, true),
                    sym::add_with_overflow => (BinOp::Add, false),
                    sym::sub_with_overflow => (BinOp::Sub, false),
                    sym::mul_with_overflow => (BinOp::Mul, false),
                    _ => bug!("already checked for int ops"),
                };
                if ignore_overflow {
                    self.binop_ignore_overflow(bin_op, lhs, rhs, dest)?;
                } else {
                    self.binop_with_overflow(bin_op, lhs, rhs, dest)?;
                }
            }
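            // A worked example (illustrative, not from the original source): with `u8`
            // operands, `wrapping_add(200, 100)` evaluates `200 + 100 = 300` and discards
            // the overflow, storing `300 - 256 = 44`; `add_with_overflow(200, 100)` stores
            // the pair `(44, true)` instead.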
            sym::saturating_add | sym::saturating_sub => {
                let l = self.read_immediate(args[0])?;
                let r = self.read_immediate(args[1])?;
                let is_add = intrinsic_name == sym::saturating_add;
                let (val, overflowed, _ty) =
                    self.overflowing_binary_op(if is_add { BinOp::Add } else { BinOp::Sub }, l, r)?;
                let val = if overflowed {
                    let num_bits = l.layout.size.bits();
                    if l.layout.abi.is_signed() {
                        // For signed ints, the saturated value depends on the sign of the first
                        // term, since the sign of the second term can be inferred from it together
                        // with the fact that the operation overflowed (if either term were 0, no
                        // overflow could occur).
                        let first_term: u128 = self.force_bits(l.to_scalar()?, l.layout.size)?;
                        let first_term_positive = first_term & (1 << (num_bits - 1)) == 0;
                        if first_term_positive {
                            // Negative overflow is not possible, since the positive first term
                            // can only increase an (in-range) negative second term for addition,
                            // or the correspondingly negated positive term for subtraction.
                            Scalar::from_uint(
                                (1u128 << (num_bits - 1)) - 1, // max positive
                                Size::from_bits(num_bits),
                            )
                        } else {
                            // Positive overflow is not possible, for the symmetric reason.
                            // max negative
                            Scalar::from_uint(1u128 << (num_bits - 1), Size::from_bits(num_bits))
                        }
                    } else if is_add {
                        // Unsigned addition saturates at the maximum.
                        Scalar::from_uint(
                            u128::MAX >> (128 - num_bits),
                            Size::from_bits(num_bits),
                        )
                    } else {
                        // Unsigned subtraction saturates at 0.
                        Scalar::from_uint(0u128, Size::from_bits(num_bits))
                    }
                } else {
                    val
                };
                self.write_scalar(val, dest)?;
            }
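            // A worked example of the sign analysis above (illustrative): for `i8`,
            // `saturating_add(100, 100)` overflows; the first term `100` has its sign
            // bit clear (`100 & 0x80 == 0`), so the result saturates to the maximum
            // positive value `(1 << 7) - 1 = 127`. For `saturating_add(-100, -100)`
            // the first term is negative, so it saturates to `-(1 << 7) = -128`.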
            sym::discriminant_value => {
                let place = self.deref_operand(args[0])?;
                let discr_val = self.read_discriminant(place.into())?.0;
                self.write_scalar(discr_val, dest)?;
            }
            sym::unchecked_shl
            | sym::unchecked_shr
            | sym::unchecked_add
            | sym::unchecked_sub
            | sym::unchecked_mul
            | sym::unchecked_div
            | sym::unchecked_rem => {
                let l = self.read_immediate(args[0])?;
                let r = self.read_immediate(args[1])?;
                let bin_op = match intrinsic_name {
                    sym::unchecked_shl => BinOp::Shl,
                    sym::unchecked_shr => BinOp::Shr,
                    sym::unchecked_add => BinOp::Add,
                    sym::unchecked_sub => BinOp::Sub,
                    sym::unchecked_mul => BinOp::Mul,
                    sym::unchecked_div => BinOp::Div,
                    sym::unchecked_rem => BinOp::Rem,
                    _ => bug!("already checked for int ops"),
                };
                let (val, overflowed, _ty) = self.overflowing_binary_op(bin_op, l, r)?;
                if overflowed {
                    let layout = self.layout_of(substs.type_at(0))?;
                    let r_val = self.force_bits(r.to_scalar()?, layout.size)?;
                    if let sym::unchecked_shl | sym::unchecked_shr = intrinsic_name {
                        throw_ub_format!("overflowing shift by {} in `{}`", r_val, intrinsic_name);
                    } else {
                        throw_ub_format!("overflow executing `{}`", intrinsic_name);
                    }
                }
                self.write_scalar(val, dest)?;
            }
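            // Illustrative UB cases for the check above: `unchecked_shl::<u32>(1, 32)`
            // is an overflowing shift (the shift amount equals the bit width), and
            // `unchecked_add::<u8>(200, 100)` overflows; both are reported as UB here
            // instead of wrapping.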
            sym::rotate_left | sym::rotate_right => {
                // rotate_left:  (X << (S % BW)) | (X >> ((BW - S) % BW))
                // rotate_right: (X << ((BW - S) % BW)) | (X >> (S % BW))
                let layout = self.layout_of(substs.type_at(0))?;
                let val = self.read_scalar(args[0])?.check_init()?;
                let val_bits = self.force_bits(val, layout.size)?;
                let raw_shift = self.read_scalar(args[1])?.check_init()?;
                let raw_shift_bits = self.force_bits(raw_shift, layout.size)?;
                let width_bits = u128::from(layout.size.bits());
                let shift_bits = raw_shift_bits % width_bits;
                let inv_shift_bits = (width_bits - shift_bits) % width_bits;
                let result_bits = if intrinsic_name == sym::rotate_left {
                    (val_bits << shift_bits) | (val_bits >> inv_shift_bits)
                } else {
                    (val_bits >> shift_bits) | (val_bits << inv_shift_bits)
                };
                let truncated_bits = self.truncate(result_bits, layout);
                let result = Scalar::from_uint(truncated_bits, layout.size);
                self.write_scalar(result, dest)?;
            }
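            // A worked example of the formulas above (illustrative): rotating
            // `0b1000_0001_u8` left by 1 gives `BW = 8`, `S % BW = 1`, and
            // `(0x81 << 1) | (0x81 >> 7) = 0x102 | 0x01 = 0x103`, which truncates to
            // the 8-bit result `0b0000_0011`. The `% BW` on both shift amounts keeps
            // the 128-bit shifts in range even when `S >= BW` or `S == 0`.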
            sym::offset => {
                let ptr = self.read_scalar(args[0])?.check_init()?;
                let offset_count = self.read_scalar(args[1])?.to_machine_isize(self)?;
                let pointee_ty = substs.type_at(0);

                let offset_ptr = self.ptr_offset_inbounds(ptr, pointee_ty, offset_count)?;
                self.write_scalar(offset_ptr, dest)?;
            }
            sym::arith_offset => {
                let ptr = self.read_scalar(args[0])?.check_init()?;
                let offset_count = self.read_scalar(args[1])?.to_machine_isize(self)?;
                let pointee_ty = substs.type_at(0);

                let pointee_size = i64::try_from(self.layout_of(pointee_ty)?.size.bytes()).unwrap();
                let offset_bytes = offset_count.wrapping_mul(pointee_size);
                let offset_ptr = ptr.ptr_wrapping_signed_offset(offset_bytes, self);
                self.write_scalar(offset_ptr, dest)?;
            }
            sym::ptr_offset_from => {
                let a = self.read_immediate(args[0])?.to_scalar()?;
                let b = self.read_immediate(args[1])?.to_scalar()?;

                // Special case: if both scalars are *equal integers*
                // and not NULL, we pretend there is an allocation of size 0 right there,
                // and their offset is 0. (There's never a valid object at NULL, making it an
                // exception from the exception.)
                // This is the dual to the special exception for offset-by-0
                // in the inbounds pointer offset operation (see the Miri code, `src/operator.rs`).

                // Control flow is weird because we cannot early-return (to reach the
                // `go_to_block` at the end).
                let done = if a.is_bits() && b.is_bits() {
                    let a = a.to_machine_usize(self)?;
                    let b = b.to_machine_usize(self)?;
                    if a == b && a != 0 {
                        self.write_scalar(Scalar::from_machine_isize(0, self), dest)?;
                        true
                    } else {
                        false
                    }
                } else {
                    false
                };

                if !done {
                    // General case: we need two pointers.
                    let a = self.force_ptr(a)?;
                    let b = self.force_ptr(b)?;
                    if a.alloc_id != b.alloc_id {
                        throw_ub_format!(
                            "ptr_offset_from cannot compute offset of pointers into different \
                            allocations",
                        );
                    }
                    let usize_layout = self.layout_of(self.tcx.types.usize)?;
                    let isize_layout = self.layout_of(self.tcx.types.isize)?;
                    let a_offset = ImmTy::from_uint(a.offset.bytes(), usize_layout);
                    let b_offset = ImmTy::from_uint(b.offset.bytes(), usize_layout);
                    let (val, _overflowed, _ty) =
                        self.overflowing_binary_op(BinOp::Sub, a_offset, b_offset)?;
                    let pointee_layout = self.layout_of(substs.type_at(0))?;
                    let val = ImmTy::from_scalar(val, isize_layout);
                    let size = ImmTy::from_int(pointee_layout.size.bytes(), isize_layout);
                    self.exact_div(val, size, dest)?;
                }
            }
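            // An illustrative example of what this arm computes: for `let a = [0u32; 4];`,
            // `a.as_ptr().add(3).offset_from(a.as_ptr())` takes the byte difference `12`,
            // divides it exactly by `size_of::<u32>() == 4`, and yields `3`. The
            // `exact_div` makes a non-multiple byte difference UB instead of rounding.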
            sym::transmute => {
                self.copy_op_transmute(args[0], dest)?;
            }
            sym::simd_insert => {
                let index = u64::from(self.read_scalar(args[1])?.to_u32()?);
                let elem = args[2];
                let input = args[0];
                let (len, e_ty) = input.layout.ty.simd_size_and_type(*self.tcx);
                assert!(
                    index < len,
                    "index `{}` must be in bounds of vector type `{}`: `[0, {})`",
                    index,
                    e_ty,
                    len
                );
                assert_eq!(
                    input.layout, dest.layout,
                    "return type `{}` must match vector type `{}`",
                    dest.layout.ty, input.layout.ty
                );
                assert_eq!(
                    elem.layout.ty, e_ty,
                    "scalar element type `{}` must match vector element type `{}`",
                    elem.layout.ty, e_ty
                );

                for i in 0..len {
                    let place = self.place_index(dest, i)?;
                    let value = if i == index { elem } else { self.operand_index(input, i)? };
                    self.copy_op(value, place)?;
                }
            }
            sym::simd_extract => {
                let index = u64::from(self.read_scalar(args[1])?.to_u32()?);
                let (len, e_ty) = args[0].layout.ty.simd_size_and_type(*self.tcx);
                assert!(
                    index < len,
                    "index `{}` is out-of-bounds of vector type `{}` with length `{}`",
                    index,
                    e_ty,
                    len
                );
                assert_eq!(
                    e_ty, dest.layout.ty,
                    "return type `{}` must match vector element type `{}`",
                    dest.layout.ty, e_ty
                );
                self.copy_op(self.operand_index(args[0], index)?, dest)?;
            }
            sym::likely | sym::unlikely => {
                // These just return their argument.
                self.copy_op(args[0], dest)?;
            }
            sym::assume => {
                let cond = self.read_scalar(args[0])?.check_init()?.to_bool()?;
                if !cond {
                    throw_ub_format!("`assume` intrinsic called with `false`");
                }
            }
            _ => return Ok(false),
        }

        trace!("{:?}", self.dump_place(*dest));
        self.go_to_block(ret);
        Ok(true)
    }

    pub fn exact_div(
        &mut self,
        a: ImmTy<'tcx, M::PointerTag>,
        b: ImmTy<'tcx, M::PointerTag>,
        dest: PlaceTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx> {
        // Performs an exact division, resulting in undefined behavior when
        // `x % y != 0`, `y == 0`, or `x == T::MIN && y == -1`.
        // First, check `x % y != 0` (or if that computation overflows).
        let (res, overflow, _ty) = self.overflowing_binary_op(BinOp::Rem, a, b)?;
        if overflow || res.assert_bits(a.layout.size) != 0 {
            // Then, check if `b` is -1, which is the "MIN / -1" case.
            let minus1 = Scalar::from_int(-1, dest.layout.size);
            let b_scalar = b.to_scalar().unwrap();
            if b_scalar == minus1 {
                throw_ub_format!("exact_div: result of dividing MIN by -1 cannot be represented")
            } else {
                throw_ub_format!("exact_div: {} cannot be divided by {} without remainder", a, b)
            }
        }
        // `Rem` says this is all right, so we can let `Div` do its job.
        self.binop_ignore_overflow(BinOp::Div, a, b, dest)
    }
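
    // Worked examples of the checks above (illustrative): `exact_div(19, 3)` has
    // remainder `1`, so it is UB and reported by the second message;
    // `exact_div(i8::MIN, -1)` makes the `Rem` computation overflow and hits the
    // "MIN / -1" case; `exact_div(20, 4)` passes the remainder check and `Div`
    // produces `5`.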
    /// Offsets a pointer by some multiple of its type, returning an error if the pointer leaves its
    /// allocation. For integer pointers, we consider each of them their own tiny allocation of size
    /// 0, so offset-by-0 (and only 0) is okay -- except that NULL cannot be offset by _any_ value.
    pub fn ptr_offset_inbounds(
        &mut self,
        ptr: Scalar<M::PointerTag>,
        pointee_ty: Ty<'tcx>,
        offset_count: i64,
    ) -> InterpResult<'tcx, Scalar<M::PointerTag>> {
        // We cannot overflow `i64`, as a type's size in bytes must be `<= isize::MAX`.
        let pointee_size = i64::try_from(self.layout_of(pointee_ty)?.size.bytes()).unwrap();
        // The computed offset, in bytes, cannot overflow an isize.
        let offset_bytes =
            offset_count.checked_mul(pointee_size).ok_or(err_ub!(PointerArithOverflow))?;
        // The offset being in bounds cannot rely on "wrapping around" the address space.
        // So, first rule out overflows in the pointer arithmetic.
        let offset_ptr = ptr.ptr_signed_offset(offset_bytes, self)?;
        // ptr and offset_ptr must be in bounds of the same allocated object. This means all of the
        // memory between these pointers must be accessible. Note that we do not require the
        // pointers to be properly aligned (unlike a read/write operation).
        let min_ptr = if offset_bytes >= 0 { ptr } else { offset_ptr };
        let size: u64 = uabs(offset_bytes);
        // This call handles checking for integer/NULL pointers.
        self.memory.check_ptr_access_align(
            min_ptr,
            Size::from_bytes(size),
            None, // not aligned
            CheckInAllocMsg::InboundsTest,
        )?;
        Ok(offset_ptr)
    }
}
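
// A minimal sketch (hypothetical test, not from the original source) of the
// byte-offset arithmetic checked in `ptr_offset_inbounds`: the element count is
// scaled by the pointee size with `checked_mul`, so an offset whose byte count
// does not fit in an `i64` is rejected before any pointer math happens.
#[cfg(test)]
mod ptr_offset_inbounds_sketch {
    #[test]
    fn offset_count_is_scaled_and_checked() {
        let pointee_size: i64 = 4; // e.g. offsetting a `*const u32`
        assert_eq!(3i64.checked_mul(pointee_size), Some(12));
        assert_eq!((-2i64).checked_mul(pointee_size), Some(-8)); // negative offsets are fine
        // `i64::MAX` elements of 4 bytes overflow the byte count: PointerArithOverflow.
        assert_eq!(i64::MAX.checked_mul(pointee_size), None);
    }
}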