//! Intrinsics and other functions that the miri engine executes without
//! looking at their MIR. Intrinsics/functions supported here are shared by CTFE
//! and miri.

use std::convert::TryFrom;

use rustc_hir::def_id::DefId;
use rustc_middle::mir::{
    self,
    interpret::{uabs, ConstValue, GlobalId, InterpResult, Scalar},
    BinOp,
};
use rustc_middle::ty;
use rustc_middle::ty::subst::SubstsRef;
use rustc_middle::ty::{Ty, TyCtxt, TypeFoldable};
use rustc_span::symbol::{sym, Symbol};
use rustc_target::abi::{Abi, LayoutOf as _, Primitive, Size};

use super::{CheckInAllocMsg, ImmTy, InterpCx, Machine, OpTy, PlaceTy};

mod caller_location;
mod type_name;
fn numeric_intrinsic<'tcx, Tag>(
    name: Symbol,
    bits: u128,
    kind: Primitive,
) -> InterpResult<'tcx, Scalar<Tag>> {
    let size = match kind {
        Primitive::Int(integer, _) => integer.size(),
        _ => bug!("invalid `{}` argument: {:?}", name, bits),
    };
    let extra = 128 - u128::from(size.bits());
    let bits_out = match name {
        sym::ctpop => u128::from(bits.count_ones()),
        sym::ctlz => u128::from(bits.leading_zeros()) - extra,
        sym::cttz => u128::from((bits << extra).trailing_zeros()) - extra,
        sym::bswap => (bits << extra).swap_bytes(),
        sym::bitreverse => (bits << extra).reverse_bits(),
        _ => bug!("not a numeric intrinsic: {}", name),
    };
    Ok(Scalar::from_uint(bits_out, size))
}
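
// A minimal, self-contained sketch (not part of the original source; the module
// name is ours) checking the bit-width adjustment above against the standard
// library on `u8`: the value is widened to `u128`, so `ctlz`/`cttz` must
// compensate for the `extra` high bits, and `bswap`/`bitreverse` first shift the
// value into the top of the `u128`.
#[cfg(test)]
mod numeric_intrinsic_sketch {
    #[test]
    fn width_adjustment_matches_std() {
        let extra: u128 = 128 - 8; // 120 padding bits when widening u8 -> u128
        for x in 0..=u8::MAX {
            let bits = u128::from(x);
            assert_eq!(u128::from(bits.count_ones()), u128::from(x.count_ones()));
            assert_eq!(u128::from(bits.leading_zeros()) - extra, u128::from(x.leading_zeros()));
            assert_eq!(
                u128::from((bits << extra).trailing_zeros()) - extra,
                u128::from(x.trailing_zeros())
            );
            assert_eq!((bits << extra).swap_bytes(), u128::from(x.swap_bytes()));
            assert_eq!((bits << extra).reverse_bits(), u128::from(x.reverse_bits()));
        }
    }
}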
/// The logic for all nullary intrinsics is implemented here. These intrinsics don't get evaluated
/// inside an `InterpCx` and instead have their value computed directly from rustc internal info.
crate fn eval_nullary_intrinsic<'tcx>(
    tcx: TyCtxt<'tcx>,
    param_env: ty::ParamEnv<'tcx>,
    def_id: DefId,
    substs: SubstsRef<'tcx>,
) -> InterpResult<'tcx, ConstValue<'tcx>> {
    let tp_ty = substs.type_at(0);
    let name = tcx.item_name(def_id);
    Ok(match name {
        sym::type_name => {
            if tp_ty.needs_subst() {
                throw_inval!(TooGeneric);
            }
            let alloc = type_name::alloc_type_name(tcx, tp_ty);
            ConstValue::Slice { data: alloc, start: 0, end: alloc.len() }
        }
        sym::needs_drop => ConstValue::from_bool(tp_ty.needs_drop(tcx, param_env)),
        sym::size_of | sym::min_align_of | sym::pref_align_of => {
            let layout = tcx.layout_of(param_env.and(tp_ty)).map_err(|e| err_inval!(Layout(e)))?;
            let n = match name {
                sym::pref_align_of => layout.align.pref.bytes(),
                sym::min_align_of => layout.align.abi.bytes(),
                sym::size_of => layout.size.bytes(),
                _ => bug!(),
            };
            ConstValue::from_machine_usize(n, &tcx)
        }
        sym::type_id => {
            if tp_ty.needs_subst() {
                throw_inval!(TooGeneric);
            }
            ConstValue::from_u64(tcx.type_id_hash(tp_ty))
        }
        sym::variant_count => {
            if let ty::Adt(ref adt, _) = tp_ty.kind {
                ConstValue::from_machine_usize(adt.variants.len() as u64, &tcx)
            } else {
                ConstValue::from_machine_usize(0u64, &tcx)
            }
        }
        other => bug!("`{}` is not a zero arg intrinsic", other),
    })
}
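
// For instance (illustrative, not from the original source): `size_of` on `u32`
// yields `ConstValue::from_machine_usize(4, &tcx)`, `needs_drop` on `String`
// yields `ConstValue::from_bool(true)`, and `variant_count` on a non-ADT type
// such as `u32` falls through to the `0` arm above.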
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    /// Returns `true` if emulation happened.
    pub fn emulate_intrinsic(
        &mut self,
        instance: ty::Instance<'tcx>,
        args: &[OpTy<'tcx, M::PointerTag>],
        ret: Option<(PlaceTy<'tcx, M::PointerTag>, mir::BasicBlock)>,
    ) -> InterpResult<'tcx, bool> {
        let substs = instance.substs;
        let intrinsic_name = self.tcx.item_name(instance.def_id());

        // First handle intrinsics without return place.
        let (dest, ret) = match ret {
            None => match intrinsic_name {
                sym::transmute => throw_ub_format!("transmuting to uninhabited type"),
                sym::unreachable => throw_ub!(Unreachable),
                sym::abort => M::abort(self)?,
                // Unsupported diverging intrinsic.
                _ => return Ok(false),
            },
            Some(p) => p,
        };

        // Keep the patterns in this match ordered the same as the list in
        // `src/librustc_middle/ty/constness.rs`
        match intrinsic_name {
            sym::caller_location => {
                let span = self.find_closest_untracked_caller_location();
                let location = self.alloc_caller_location_for_span(span);
                self.write_scalar(location.ptr, dest)?;
            }

            sym::min_align_of
            | sym::pref_align_of
            | sym::needs_drop
            | sym::size_of
            | sym::type_id
            | sym::type_name
            | sym::variant_count => {
                let gid = GlobalId { instance, promoted: None };
                let ty = match intrinsic_name {
                    sym::min_align_of | sym::pref_align_of | sym::size_of | sym::variant_count => {
                        self.tcx.types.usize
                    }
                    sym::needs_drop => self.tcx.types.bool,
                    sym::type_id => self.tcx.types.u64,
                    sym::type_name => self.tcx.mk_static_str(),
                    _ => bug!("already checked for nullary intrinsics"),
                };
                let val = self.const_eval(gid, ty)?;
                self.copy_op(val, dest)?;
            }

            sym::ctpop
            | sym::cttz
            | sym::cttz_nonzero
            | sym::ctlz
            | sym::ctlz_nonzero
            | sym::bswap
            | sym::bitreverse => {
                let ty = substs.type_at(0);
                let layout_of = self.layout_of(ty)?;
                let val = self.read_scalar(args[0])?.not_undef()?;
                let bits = self.force_bits(val, layout_of.size)?;
                let kind = match layout_of.abi {
                    Abi::Scalar(ref scalar) => scalar.value,
                    _ => bug!(
                        "{} called on invalid type {:?}",
                        intrinsic_name,
                        ty
                    ),
                };
                let (nonzero, intrinsic_name) = match intrinsic_name {
                    sym::cttz_nonzero => (true, sym::cttz),
                    sym::ctlz_nonzero => (true, sym::ctlz),
                    other => (false, other),
                };
                if nonzero && bits == 0 {
                    throw_ub_format!("`{}_nonzero` called on 0", intrinsic_name);
                }
                let out_val = numeric_intrinsic(intrinsic_name, bits, kind)?;
                self.write_scalar(out_val, dest)?;
            }
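
            // E.g. (illustrative, not from the original source): `cttz(8u8)`
            // evaluates to 3 and `ctpop(0xFFu8)` to 8, while `cttz_nonzero(0u8)`
            // is rejected above as undefined behavior.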
            sym::wrapping_add
            | sym::wrapping_sub
            | sym::wrapping_mul
            | sym::add_with_overflow
            | sym::sub_with_overflow
            | sym::mul_with_overflow => {
                let lhs = self.read_immediate(args[0])?;
                let rhs = self.read_immediate(args[1])?;
                let (bin_op, ignore_overflow) = match intrinsic_name {
                    sym::wrapping_add => (BinOp::Add, true),
                    sym::wrapping_sub => (BinOp::Sub, true),
                    sym::wrapping_mul => (BinOp::Mul, true),
                    sym::add_with_overflow => (BinOp::Add, false),
                    sym::sub_with_overflow => (BinOp::Sub, false),
                    sym::mul_with_overflow => (BinOp::Mul, false),
                    _ => bug!("Already checked for int ops"),
                };
                if ignore_overflow {
                    self.binop_ignore_overflow(bin_op, lhs, rhs, dest)?;
                } else {
                    self.binop_with_overflow(bin_op, lhs, rhs, dest)?;
                }
            }
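
            // E.g. (illustrative, not from the original source):
            // `wrapping_add(200u8, 100u8)` writes the wrapped value 44, while
            // `add_with_overflow(200u8, 100u8)` writes the pair `(44, true)`
            // into its two-field destination.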
            sym::saturating_add | sym::saturating_sub => {
                let l = self.read_immediate(args[0])?;
                let r = self.read_immediate(args[1])?;
                let is_add = intrinsic_name == sym::saturating_add;
                let (val, overflowed, _ty) =
                    self.overflowing_binary_op(if is_add { BinOp::Add } else { BinOp::Sub }, l, r)?;
                let val = if overflowed {
                    let num_bits = l.layout.size.bits();
                    if l.layout.abi.is_signed() {
                        // For signed ints the saturated value depends on the sign of the first
                        // term since the sign of the second term can be inferred from this and
                        // the fact that the operation has overflowed (if either is 0 no
                        // overflow can occur)
                        let first_term: u128 = self.force_bits(l.to_scalar()?, l.layout.size)?;
                        let first_term_positive = first_term & (1 << (num_bits - 1)) == 0;
                        if first_term_positive {
                            // Negative overflow not possible since the positive first term
                            // can only increase an (in range) negative term for addition
                            // or corresponding negated positive term for subtraction
                            Scalar::from_uint(
                                (1u128 << (num_bits - 1)) - 1, // max positive
                                Size::from_bits(num_bits),
                            )
                        } else {
                            // Positive overflow not possible for similar reason
                            // max negative
                            Scalar::from_uint(1u128 << (num_bits - 1), Size::from_bits(num_bits))
                        }
                    } else {
                        // unsigned
                        if is_add {
                            // max unsigned
                            Scalar::from_uint(
                                u128::MAX >> (128 - num_bits),
                                Size::from_bits(num_bits),
                            )
                        } else {
                            // underflow to 0
                            Scalar::from_uint(0u128, Size::from_bits(num_bits))
                        }
                    }
                } else {
                    val
                };
                self.write_scalar(val, dest)?;
            }
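
            // Worked example (illustrative, not from the original source):
            // `saturating_add(100i8, 100i8)` overflows with a positive first term,
            // so it saturates to `(1 << 7) - 1 = 127`; `saturating_sub(-100i8, 100i8)`
            // has a negative first term and saturates to the bit pattern `1 << 7`,
            // i.e. `-128`.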
            sym::discriminant_value => {
                let place = self.deref_operand(args[0])?;
                let discr_val = self.read_discriminant(place.into())?.0;
                self.write_scalar(discr_val, dest)?;
            }
            sym::unchecked_shl
            | sym::unchecked_shr
            | sym::unchecked_add
            | sym::unchecked_sub
            | sym::unchecked_mul
            | sym::unchecked_div
            | sym::unchecked_rem => {
                let l = self.read_immediate(args[0])?;
                let r = self.read_immediate(args[1])?;
                let bin_op = match intrinsic_name {
                    sym::unchecked_shl => BinOp::Shl,
                    sym::unchecked_shr => BinOp::Shr,
                    sym::unchecked_add => BinOp::Add,
                    sym::unchecked_sub => BinOp::Sub,
                    sym::unchecked_mul => BinOp::Mul,
                    sym::unchecked_div => BinOp::Div,
                    sym::unchecked_rem => BinOp::Rem,
                    _ => bug!("Already checked for int ops"),
                };
                let (val, overflowed, _ty) = self.overflowing_binary_op(bin_op, l, r)?;
                if overflowed {
                    let layout = self.layout_of(substs.type_at(0))?;
                    let r_val = self.force_bits(r.to_scalar()?, layout.size)?;
                    if let sym::unchecked_shl | sym::unchecked_shr = intrinsic_name {
                        throw_ub_format!("overflowing shift by {} in `{}`", r_val, intrinsic_name);
                    } else {
                        throw_ub_format!("overflow executing `{}`", intrinsic_name);
                    }
                }
                self.write_scalar(val, dest)?;
            }
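
            // E.g. (illustrative, not from the original source):
            // `unchecked_shl(1u32, 32)` reports "overflowing shift by 32 in
            // `unchecked_shl`", and `unchecked_add(i32::MAX, 1)` reports
            // "overflow executing `unchecked_add`".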
            sym::rotate_left | sym::rotate_right => {
                // rotate_left: (X << (S % BW)) | (X >> ((BW - S) % BW))
                // rotate_right: (X << ((BW - S) % BW)) | (X >> (S % BW))
                let layout = self.layout_of(substs.type_at(0))?;
                let val = self.read_scalar(args[0])?.not_undef()?;
                let val_bits = self.force_bits(val, layout.size)?;
                let raw_shift = self.read_scalar(args[1])?.not_undef()?;
                let raw_shift_bits = self.force_bits(raw_shift, layout.size)?;
                let width_bits = u128::from(layout.size.bits());
                let shift_bits = raw_shift_bits % width_bits;
                let inv_shift_bits = (width_bits - shift_bits) % width_bits;
                let result_bits = if intrinsic_name == sym::rotate_left {
                    (val_bits << shift_bits) | (val_bits >> inv_shift_bits)
                } else {
                    (val_bits >> shift_bits) | (val_bits << inv_shift_bits)
                };
                let truncated_bits = self.truncate(result_bits, layout);
                let result = Scalar::from_uint(truncated_bits, layout.size);
                self.write_scalar(result, dest)?;
            }
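
            // Worked example (illustrative, not from the original source):
            // `rotate_left(0x90u8, 3)` computes `(0x90 << 3) | (0x90 >> 5)
            // = 0x480 | 0x04 = 0x484` in the wide `u128`; truncation to 8 bits
            // yields `0x84`, matching `0x90u8.rotate_left(3)`.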
            sym::offset => {
                let ptr = self.read_scalar(args[0])?.not_undef()?;
                let offset_count = self.read_scalar(args[1])?.to_machine_isize(self)?;
                let pointee_ty = substs.type_at(0);

                let offset_ptr = self.ptr_offset_inbounds(ptr, pointee_ty, offset_count)?;
                self.write_scalar(offset_ptr, dest)?;
            }
            sym::arith_offset => {
                let ptr = self.read_scalar(args[0])?.not_undef()?;
                let offset_count = self.read_scalar(args[1])?.to_machine_isize(self)?;
                let pointee_ty = substs.type_at(0);

                let pointee_size = i64::try_from(self.layout_of(pointee_ty)?.size.bytes()).unwrap();
                let offset_bytes = offset_count.wrapping_mul(pointee_size);
                let offset_ptr = ptr.ptr_wrapping_signed_offset(offset_bytes, self);
                self.write_scalar(offset_ptr, dest)?;
            }
            sym::ptr_guaranteed_eq | sym::ptr_guaranteed_ne => {
                // FIXME: return `true` for at least some comparisons where we can reliably
                // determine the result of runtime (in)equality tests at compile-time.
                self.write_scalar(Scalar::from_bool(false), dest)?;
            }
            sym::ptr_offset_from => {
                let a = self.read_immediate(args[0])?.to_scalar()?;
                let b = self.read_immediate(args[1])?.to_scalar()?;

                // Special case: if both scalars are *equal integers*
                // and not NULL, we pretend there is an allocation of size 0 right there,
                // and their offset is 0. (There's never a valid object at NULL, making it an
                // exception from the exception.)
                // This is the dual to the special exception for offset-by-0
                // in the inbounds pointer offset operation (see the Miri code, `src/operator.rs`).

                // Control flow is weird because we cannot early-return (to reach the
                // `go_to_block` at the end).
                let done = if a.is_bits() && b.is_bits() {
                    let a = a.to_machine_usize(self)?;
                    let b = b.to_machine_usize(self)?;
                    if a == b && a != 0 {
                        self.write_scalar(Scalar::from_machine_isize(0, self), dest)?;
                        true
                    } else {
                        false
                    }
                } else {
                    false
                };

                if !done {
                    // General case: we need two pointers.
                    let a = self.force_ptr(a)?;
                    let b = self.force_ptr(b)?;
                    if a.alloc_id != b.alloc_id {
                        throw_ub_format!(
                            "ptr_offset_from cannot compute offset of pointers into different \
                            allocations.",
                        );
                    }
                    let usize_layout = self.layout_of(self.tcx.types.usize)?;
                    let isize_layout = self.layout_of(self.tcx.types.isize)?;
                    let a_offset = ImmTy::from_uint(a.offset.bytes(), usize_layout);
                    let b_offset = ImmTy::from_uint(b.offset.bytes(), usize_layout);
                    let (val, _overflowed, _ty) =
                        self.overflowing_binary_op(BinOp::Sub, a_offset, b_offset)?;
                    let pointee_layout = self.layout_of(substs.type_at(0))?;
                    let val = ImmTy::from_scalar(val, isize_layout);
                    let size = ImmTy::from_int(pointee_layout.size.bytes(), isize_layout);
                    self.exact_div(val, size, dest)?;
                }
            }
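
            // E.g. (illustrative, not from the original source): for two pointers
            // 12 and 4 bytes into the same `[i32; N]` allocation, the byte
            // difference 8 divides exactly by `size_of::<i32>() = 4`, so
            // `ptr_offset_from` yields 2 elements.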
            sym::transmute => {
                self.copy_op_transmute(args[0], dest)?;
            }
            sym::simd_insert => {
                let index = u64::from(self.read_scalar(args[1])?.to_u32()?);
                let elem = args[2];
                let input = args[0];
                let (len, e_ty) = input.layout.ty.simd_size_and_type(*self.tcx);
                assert!(
                    index < len,
                    "Index `{}` must be in bounds of vector type `{}`: `[0, {})`",
                    index,
                    e_ty,
                    len
                );
                assert_eq!(
                    input.layout, dest.layout,
                    "Return type `{}` must match vector type `{}`",
                    dest.layout.ty, input.layout.ty
                );
                assert_eq!(
                    elem.layout.ty, e_ty,
                    "Scalar element type `{}` must match vector element type `{}`",
                    elem.layout.ty, e_ty
                );

                for i in 0..len {
                    let place = self.place_index(dest, i)?;
                    let value = if i == index { elem } else { self.operand_index(input, i)? };
                    self.copy_op(value, place)?;
                }
            }
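
            // E.g. (illustrative, not from the original source):
            // `simd_insert([1, 2, 3, 4], 1, 5)` copies every lane of the input
            // except lane 1, which receives the new element, producing `[1, 5, 3, 4]`.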
            sym::simd_extract => {
                let index = u64::from(self.read_scalar(args[1])?.to_u32()?);
                let (len, e_ty) = args[0].layout.ty.simd_size_and_type(*self.tcx);
                assert!(
                    index < len,
                    "index `{}` is out-of-bounds of vector type `{}` with length `{}`",
                    index,
                    e_ty,
                    len
                );
                assert_eq!(
                    e_ty, dest.layout.ty,
                    "Return type `{}` must match vector element type `{}`",
                    dest.layout.ty, e_ty
                );
                self.copy_op(self.operand_index(args[0], index)?, dest)?;
            }
            sym::likely | sym::unlikely => {
                // These just return their argument
                self.copy_op(args[0], dest)?;
            }
            // FIXME(#73156): Handle source code coverage in const eval
            sym::count_code_region
            | sym::coverage_counter_add
            | sym::coverage_counter_subtract
            | sym::coverage_unreachable => (),
            _ => return Ok(false),
        }

        self.dump_place(*dest);
        self.go_to_block(ret);
        Ok(true)
    }

    pub fn exact_div(
        &mut self,
        a: ImmTy<'tcx, M::PointerTag>,
        b: ImmTy<'tcx, M::PointerTag>,
        dest: PlaceTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx> {
        // Performs an exact division, resulting in undefined behavior where
        // `x % y != 0` or `y == 0` or `x == T::MIN && y == -1`.
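        // E.g. (illustrative, not from the original source): `exact_div(8, 4)` is
        // fine, `exact_div(9, 4)` leaves a remainder and is UB, and
        // `exact_div(i64::MIN, -1)` is the one case where the remainder
        // computation itself overflows.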
        // First, check x % y != 0 (or if that computation overflows).
        let (res, overflow, _ty) = self.overflowing_binary_op(BinOp::Rem, a, b)?;
        if overflow || res.assert_bits(a.layout.size) != 0 {
            // Then, check if `b` is -1, which is the "MIN / -1" case.
            let minus1 = Scalar::from_int(-1, dest.layout.size);
            let b_scalar = b.to_scalar().unwrap();
            if b_scalar == minus1 {
                throw_ub_format!("exact_div: result of dividing MIN by -1 cannot be represented")
            } else {
                throw_ub_format!("exact_div: {} cannot be divided by {} without remainder", a, b)
            }
        }
        // `Rem` says this is all right, so we can let `Div` do its job.
        self.binop_ignore_overflow(BinOp::Div, a, b, dest)
    }
    /// Offsets a pointer by some multiple of its type, returning an error if the pointer leaves its
    /// allocation. For integer pointers, we consider each of them their own tiny allocation of size
    /// 0, so offset-by-0 (and only 0) is okay -- except that NULL cannot be offset by _any_ value.
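    ///
    /// For instance (illustrative, not from the original docs): offsetting a
    /// `*const u32` by `offset_count = 3` requires all `3 * 4 = 12` bytes between
    /// the old and the new pointer to lie within one allocated object.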
    pub fn ptr_offset_inbounds(
        &self,
        ptr: Scalar<M::PointerTag>,
        pointee_ty: Ty<'tcx>,
        offset_count: i64,
    ) -> InterpResult<'tcx, Scalar<M::PointerTag>> {
        // We cannot overflow i64 as a type's size must be <= isize::MAX.
        let pointee_size = i64::try_from(self.layout_of(pointee_ty)?.size.bytes()).unwrap();
        // The computed offset, in bytes, cannot overflow an isize.
        let offset_bytes =
            offset_count.checked_mul(pointee_size).ok_or(err_ub!(PointerArithOverflow))?;
        // The offset being in bounds cannot rely on "wrapping around" the address space.
        // So, first rule out overflows in the pointer arithmetic.
        let offset_ptr = ptr.ptr_signed_offset(offset_bytes, self)?;
        // ptr and offset_ptr must be in bounds of the same allocated object. This means all of the
        // memory between these pointers must be accessible. Note that we do not require the
        // pointers to be properly aligned (unlike a read/write operation).
        let min_ptr = if offset_bytes >= 0 { ptr } else { offset_ptr };
        let size: u64 = uabs(offset_bytes);
        // This call handles checking for integer/NULL pointers.
        self.memory.check_ptr_access_align(
            min_ptr,
            Size::from_bytes(size),
            None, // no alignment requirement
            CheckInAllocMsg::InboundsTest,
        )?;
        Ok(offset_ptr)
    }
}