//! Intrinsics and other functions that the miri engine executes without
//! looking at their MIR. Intrinsics/functions supported here are shared by CTFE
//! and miri.

use std::convert::TryFrom;

use rustc_hir::def_id::DefId;
use rustc_middle::mir::{
    self,
    interpret::{ConstValue, GlobalId, InterpResult, Scalar},
    BinOp,
};
use rustc_middle::ty;
use rustc_middle::ty::layout::LayoutOf as _;
use rustc_middle::ty::subst::SubstsRef;
use rustc_middle::ty::{Ty, TyCtxt};
use rustc_span::symbol::{sym, Symbol};
use rustc_target::abi::{Abi, Align, Primitive, Size};

use super::{
    util::ensure_monomorphic_enough, CheckInAllocMsg, ImmTy, InterpCx, Machine, OpTy, PlaceTy,
    Pointer,
};

mod caller_location;
mod type_name;
fn numeric_intrinsic<Tag>(name: Symbol, bits: u128, kind: Primitive) -> Scalar<Tag> {
    let size = match kind {
        Primitive::Int(integer, _) => integer.size(),
        _ => bug!("invalid `{}` argument: {:?}", name, bits),
    };
    let extra = 128 - u128::from(size.bits());
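    // Worked example (illustrative): for a `u8` operand, `size.bits()` is 8 and
    // `extra` is 120. `ctlz(0b0000_0100u8)`: the `u128`-widened value has 125
    // leading zeros, and `125 - extra == 5`, the correct count for the 8-bit type.
    // For `cttz`, `bswap`, and `bitreverse`, shifting left by `extra` first moves
    // the operand into the high bits so the 128-bit operation acts on it at the
    // right position (and `cttz(0)` still yields the type's bit width).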
    let bits_out = match name {
        sym::ctpop => u128::from(bits.count_ones()),
        sym::ctlz => u128::from(bits.leading_zeros()) - extra,
        sym::cttz => u128::from((bits << extra).trailing_zeros()) - extra,
        sym::bswap => (bits << extra).swap_bytes(),
        sym::bitreverse => (bits << extra).reverse_bits(),
        _ => bug!("not a numeric intrinsic: {}", name),
    };
    Scalar::from_uint(bits_out, size)
}
/// The logic for all nullary intrinsics is implemented here. These intrinsics don't get evaluated
/// inside an `InterpCx` and instead have their value computed directly from rustc internal info.
crate fn eval_nullary_intrinsic<'tcx>(
    tcx: TyCtxt<'tcx>,
    param_env: ty::ParamEnv<'tcx>,
    def_id: DefId,
    substs: SubstsRef<'tcx>,
) -> InterpResult<'tcx, ConstValue<'tcx>> {
    let tp_ty = substs.type_at(0);
    let name = tcx.item_name(def_id);
    Ok(match name {
        sym::type_name => {
            ensure_monomorphic_enough(tcx, tp_ty)?;
            let alloc = type_name::alloc_type_name(tcx, tp_ty);
            ConstValue::Slice { data: alloc, start: 0, end: alloc.len() }
        }
        sym::needs_drop => {
            ensure_monomorphic_enough(tcx, tp_ty)?;
            ConstValue::from_bool(tp_ty.needs_drop(tcx, param_env))
        }
        sym::pref_align_of => {
            // Correctly handles non-monomorphic calls, so there is no need for `ensure_monomorphic_enough`.
            let layout = tcx.layout_of(param_env.and(tp_ty)).map_err(|e| err_inval!(Layout(e)))?;
            ConstValue::from_machine_usize(layout.align.pref.bytes(), &tcx)
        }
        sym::type_id => {
            ensure_monomorphic_enough(tcx, tp_ty)?;
            ConstValue::from_u64(tcx.type_id_hash(tp_ty))
        }
        sym::variant_count => match tp_ty.kind() {
            // Correctly handles non-monomorphic calls, so there is no need for `ensure_monomorphic_enough`.
            ty::Adt(ref adt, _) => ConstValue::from_machine_usize(adt.variants.len() as u64, &tcx),
            ty::Projection(_) | ty::Opaque(_, _) | ty::Param(_) | ty::Bound(_, _)
            | ty::Placeholder(_) | ty::Infer(_) => throw_inval!(TooGeneric),
            ty::Bool | ty::Char | ty::Int(_) | ty::Uint(_) | ty::Float(_)
            | ty::Foreign(_) | ty::Str | ty::Array(_, _) | ty::Slice(_)
            | ty::RawPtr(_) | ty::Ref(_, _, _) | ty::FnDef(_, _) | ty::FnPtr(_)
            | ty::Dynamic(_, _) | ty::Closure(_, _) | ty::Generator(_, _, _)
            | ty::GeneratorWitness(_) | ty::Never | ty::Tuple(_)
            | ty::Error(_) => ConstValue::from_machine_usize(0u64, &tcx),
        },
        other => bug!("`{}` is not a zero arg intrinsic", other),
    })
}
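// Illustrative note (not part of the engine logic): `variant_count` reports
// `adt.variants.len()` for ADTs, e.g. 2 for `Option<u8>`; it raises `TooGeneric`
// for still-generic types like a bare type parameter, and is defined here to be 0
// for all non-ADT types such as `u32`, slices, or function pointers.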
impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    /// Returns `true` if emulation happened.
    /// Here we implement the intrinsics that are common to all Miri instances; individual machines can add their own
    /// intrinsic handling.
    pub fn emulate_intrinsic(
        &mut self,
        instance: ty::Instance<'tcx>,
        args: &[OpTy<'tcx, M::PointerTag>],
        ret: Option<(&PlaceTy<'tcx, M::PointerTag>, mir::BasicBlock)>,
    ) -> InterpResult<'tcx, bool> {
        let substs = instance.substs;
        let intrinsic_name = self.tcx.item_name(instance.def_id());

        // First handle intrinsics without return place.
        let (dest, ret) = match ret {
            None => match intrinsic_name {
                sym::transmute => throw_ub_format!("transmuting to uninhabited type"),
                sym::abort => M::abort(self, "the program aborted execution".to_owned())?,
                // Unsupported diverging intrinsic.
                _ => return Ok(false),
            },
            Some(p) => p,
        };
        // Keep the patterns in this match ordered the same as the list in
        // `src/librustc_middle/ty/constness.rs`.
        match intrinsic_name {
            sym::caller_location => {
                let span = self.find_closest_untracked_caller_location();
                let location = self.alloc_caller_location_for_span(span);
                self.write_immediate(location.to_ref(self), dest)?;
            }
            sym::min_align_of_val | sym::size_of_val => {
                // Avoid `deref_operand` -- this is not a deref, the ptr does not have to be
                // dereferenceable!
                let place = self.ref_to_mplace(&self.read_immediate(&args[0])?)?;
                let (size, align) = self
                    .size_and_align_of_mplace(&place)?
                    .ok_or_else(|| err_unsup_format!("`extern type` does not have known layout"))?;

                let result = match intrinsic_name {
                    sym::min_align_of_val => align.bytes(),
                    sym::size_of_val => size.bytes(),
                    _ => bug!(),
                };

                self.write_scalar(Scalar::from_machine_usize(result, self), dest)?;
            }
            sym::pref_align_of
            | sym::needs_drop
            | sym::type_id
            | sym::type_name
            | sym::variant_count => {
                let gid = GlobalId { instance, promoted: None };
                let ty = match intrinsic_name {
                    sym::pref_align_of | sym::variant_count => self.tcx.types.usize,
                    sym::needs_drop => self.tcx.types.bool,
                    sym::type_id => self.tcx.types.u64,
                    sym::type_name => self.tcx.mk_static_str(),
                    _ => bug!("already checked for nullary intrinsics"),
                };
                let val =
                    self.tcx.const_eval_global_id(self.param_env, gid, Some(self.tcx.span))?;
                let val = self.const_val_to_op(val, ty, Some(dest.layout))?;
                self.copy_op(&val, dest)?;
            }
            sym::ctpop
            | sym::cttz
            | sym::cttz_nonzero
            | sym::ctlz
            | sym::ctlz_nonzero
            | sym::bswap
            | sym::bitreverse => {
                let ty = substs.type_at(0);
                let layout_of = self.layout_of(ty)?;
                let val = self.read_scalar(&args[0])?.check_init()?;
                let bits = val.to_bits(layout_of.size)?;
                let kind = match layout_of.abi {
                    Abi::Scalar(scalar) => scalar.value,
                    _ => span_bug!(
                        self.cur_span(),
                        "{} called on invalid type {:?}",
                        intrinsic_name,
                        ty
                    ),
                };
                let (nonzero, intrinsic_name) = match intrinsic_name {
                    sym::cttz_nonzero => (true, sym::cttz),
                    sym::ctlz_nonzero => (true, sym::ctlz),
                    other => (false, other),
                };
                if nonzero && bits == 0 {
                    throw_ub_format!("`{}_nonzero` called on 0", intrinsic_name);
                }
                let out_val = numeric_intrinsic(intrinsic_name, bits, kind);
                self.write_scalar(out_val, dest)?;
            }
            sym::add_with_overflow | sym::sub_with_overflow | sym::mul_with_overflow => {
                let lhs = self.read_immediate(&args[0])?;
                let rhs = self.read_immediate(&args[1])?;
                let bin_op = match intrinsic_name {
                    sym::add_with_overflow => BinOp::Add,
                    sym::sub_with_overflow => BinOp::Sub,
                    sym::mul_with_overflow => BinOp::Mul,
                    _ => bug!("already checked for int ops"),
                };
                self.binop_with_overflow(bin_op, &lhs, &rhs, dest)?;
            }
            sym::saturating_add | sym::saturating_sub => {
                let l = self.read_immediate(&args[0])?;
                let r = self.read_immediate(&args[1])?;
                let is_add = intrinsic_name == sym::saturating_add;
                let (val, overflowed, _ty) = self.overflowing_binary_op(
                    if is_add { BinOp::Add } else { BinOp::Sub },
                    &l,
                    &r,
                )?;
                let val = if overflowed {
                    let size = l.layout.size;
                    let num_bits = size.bits();
                    if l.layout.abi.is_signed() {
                        // For signed ints the saturated value depends on the sign of the first
                        // term, since the sign of the second term can be inferred from it and
                        // from the fact that the operation overflowed (if either term is 0, no
                        // overflow can occur).
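                        // Worked example (illustrative): `i8::saturating_add(100, 100)`
                        // overflows with a positive first term, so it saturates to
                        // `(1 << 7) - 1 == 127 == i8::MAX`, while
                        // `i8::saturating_sub(-100, 100)` overflows with a negative
                        // first term and saturates to the bit pattern `1 << 7`,
                        // i.e. `-128 == i8::MIN`.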
                        let first_term: u128 = l.to_scalar()?.to_bits(l.layout.size)?;
                        let first_term_positive = first_term & (1 << (num_bits - 1)) == 0;
                        if first_term_positive {
                            // Negative overflow is not possible, since the positive first term
                            // can only increase an (in-range) negative term for addition,
                            // or a correspondingly negated positive term for subtraction.
                            Scalar::from_uint(
                                (1u128 << (num_bits - 1)) - 1, // max positive
                                Size::from_bits(num_bits),
                            )
                        } else {
                            // Positive overflow is not possible, for the same reason;
                            // saturate to the max negative value.
                            Scalar::from_uint(1u128 << (num_bits - 1), Size::from_bits(num_bits))
                        }
                    } else if is_add {
                        // Unsigned addition saturates to the max unsigned value.
                        Scalar::from_uint(size.unsigned_int_max(), Size::from_bits(num_bits))
                    } else {
                        // Unsigned subtraction saturates to zero.
                        Scalar::from_uint(0u128, Size::from_bits(num_bits))
                    }
                } else {
                    val
                };
                self.write_scalar(val, dest)?;
            }
            sym::discriminant_value => {
                let place = self.deref_operand(&args[0])?;
                let discr_val = self.read_discriminant(&place.into())?.0;
                self.write_scalar(discr_val, dest)?;
            }
            sym::unchecked_shl
            | sym::unchecked_shr
            | sym::unchecked_add
            | sym::unchecked_sub
            | sym::unchecked_mul
            | sym::unchecked_div
            | sym::unchecked_rem => {
                let l = self.read_immediate(&args[0])?;
                let r = self.read_immediate(&args[1])?;
                let bin_op = match intrinsic_name {
                    sym::unchecked_shl => BinOp::Shl,
                    sym::unchecked_shr => BinOp::Shr,
                    sym::unchecked_add => BinOp::Add,
                    sym::unchecked_sub => BinOp::Sub,
                    sym::unchecked_mul => BinOp::Mul,
                    sym::unchecked_div => BinOp::Div,
                    sym::unchecked_rem => BinOp::Rem,
                    _ => bug!("already checked for int ops"),
                };
                let (val, overflowed, _ty) = self.overflowing_binary_op(bin_op, &l, &r)?;
                if overflowed {
                    let layout = self.layout_of(substs.type_at(0))?;
                    let r_val = r.to_scalar()?.to_bits(layout.size)?;
                    if let sym::unchecked_shl | sym::unchecked_shr = intrinsic_name {
                        throw_ub_format!("overflowing shift by {} in `{}`", r_val, intrinsic_name);
                    } else {
                        throw_ub_format!("overflow executing `{}`", intrinsic_name);
                    }
                }
                self.write_scalar(val, dest)?;
            }
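            // Illustrative note for the arm above: `unchecked_div(i32::MIN, -1)`
            // reports overflow and is rejected as UB, and `unchecked_shl(1u32, 32)`
            // is rejected as an overflowing shift; at runtime both would be plain UB.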
            sym::rotate_left | sym::rotate_right => {
                // rotate_left:  (X << (S % BW)) | (X >> ((BW - S) % BW))
                // rotate_right: (X << ((BW - S) % BW)) | (X >> (S % BW))
                let layout = self.layout_of(substs.type_at(0))?;
                let val = self.read_scalar(&args[0])?.check_init()?;
                let val_bits = val.to_bits(layout.size)?;
                let raw_shift = self.read_scalar(&args[1])?.check_init()?;
                let raw_shift_bits = raw_shift.to_bits(layout.size)?;
                let width_bits = u128::from(layout.size.bits());
                let shift_bits = raw_shift_bits % width_bits;
                let inv_shift_bits = (width_bits - shift_bits) % width_bits;
                let result_bits = if intrinsic_name == sym::rotate_left {
                    (val_bits << shift_bits) | (val_bits >> inv_shift_bits)
                } else {
                    (val_bits >> shift_bits) | (val_bits << inv_shift_bits)
                };
                let truncated_bits = self.truncate(result_bits, layout);
                let result = Scalar::from_uint(truncated_bits, layout.size);
                self.write_scalar(result, dest)?;
            }
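            // Worked example for the formulas above (illustrative): rotating the
            // `u8` value `0b1001_0110` left by `S = 3` gives
            // `(0b1001_0110 << 3) | (0b1001_0110 >> 5) == 0b1011_0100` after
            // truncation to 8 bits. Reducing `S % BW` first also keeps the inverse
            // shift below 128, so the `u128` arithmetic above can never shift by its
            // full width.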
            sym::copy => {
                self.copy_intrinsic(&args[0], &args[1], &args[2], /*nonoverlapping*/ false)?;
            }
            sym::offset => {
                let ptr = self.read_pointer(&args[0])?;
                let offset_count = self.read_scalar(&args[1])?.to_machine_isize(self)?;
                let pointee_ty = substs.type_at(0);

                let offset_ptr = self.ptr_offset_inbounds(ptr, pointee_ty, offset_count)?;
                self.write_pointer(offset_ptr, dest)?;
            }
            sym::arith_offset => {
                let ptr = self.read_pointer(&args[0])?;
                let offset_count = self.read_scalar(&args[1])?.to_machine_isize(self)?;
                let pointee_ty = substs.type_at(0);

                let pointee_size = i64::try_from(self.layout_of(pointee_ty)?.size.bytes()).unwrap();
                let offset_bytes = offset_count.wrapping_mul(pointee_size);
                let offset_ptr = ptr.wrapping_signed_offset(offset_bytes, self);
                self.write_pointer(offset_ptr, dest)?;
            }
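            // Illustrative note: unlike `offset` above, `arith_offset` backs
            // `ptr::wrapping_offset`, so both the byte-count multiplication and the
            // pointer arithmetic wrap around instead of being checked to stay in
            // bounds of an allocation.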
            sym::ptr_offset_from => {
                let a = self.read_immediate(&args[0])?.to_scalar()?;
                let b = self.read_immediate(&args[1])?.to_scalar()?;

                // Special case: if both scalars are *equal integers* and not null, we pretend
                // there is an allocation of size 0 right there, and their offset is 0.
                // (There's never a valid object at null, making it an exception from the
                // exception.) This is the dual to the special exception for offset-by-0 in the
                // inbounds pointer offset operation (see the Miri code, `src/operator.rs`).

                // Control flow is weird because we cannot early-return (to reach the
                // `go_to_block` at the end).
                let done = if let (Ok(a), Ok(b)) = (a.try_to_int(), b.try_to_int()) {
                    let a = a.try_to_machine_usize(*self.tcx).unwrap();
                    let b = b.try_to_machine_usize(*self.tcx).unwrap();
                    if a == b && a != 0 {
                        self.write_scalar(Scalar::from_machine_isize(0, self), dest)?;
                        true
                    } else {
                        false
                    }
                } else {
                    false
                };
                if !done {
                    // General case: we need two pointers.
                    let a = self.scalar_to_ptr(a);
                    let b = self.scalar_to_ptr(b);
                    let (a_alloc_id, a_offset, _) = self.memory.ptr_get_alloc(a)?;
                    let (b_alloc_id, b_offset, _) = self.memory.ptr_get_alloc(b)?;
                    if a_alloc_id != b_alloc_id {
                        throw_ub_format!(
                            "ptr_offset_from cannot compute offset of pointers into different \
                            allocations",
                        );
                    }
                    let usize_layout = self.layout_of(self.tcx.types.usize)?;
                    let isize_layout = self.layout_of(self.tcx.types.isize)?;
                    let a_offset = ImmTy::from_uint(a_offset.bytes(), usize_layout);
                    let b_offset = ImmTy::from_uint(b_offset.bytes(), usize_layout);
                    let (val, _overflowed, _ty) =
                        self.overflowing_binary_op(BinOp::Sub, &a_offset, &b_offset)?;
                    let pointee_layout = self.layout_of(substs.type_at(0))?;
                    let val = ImmTy::from_scalar(val, isize_layout);
                    let size = ImmTy::from_int(pointee_layout.size.bytes(), isize_layout);
                    self.exact_div(&val, &size, dest)?;
                }
            }
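            // Illustrative note for the special case above: two equal, non-null
            // integer "pointers" (say, both `0x4`) yield a distance of 0 without any
            // allocation existing there; unequal integers or a pair of null pointers
            // fall through to `ptr_get_alloc`, which rejects them.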
            sym::transmute => {
                self.copy_op_transmute(&args[0], dest)?;
            }
            sym::assert_inhabited => {
                let ty = instance.substs.type_at(0);
                let layout = self.layout_of(ty)?;

                if layout.abi.is_uninhabited() {
                    // The run-time intrinsic panics just to get a good backtrace; here we abort
                    // since there is no problem showing a backtrace even for aborts.
                    let msg = format!(
                        "aborted execution: attempted to instantiate uninhabited type `{}`",
                        ty
                    );
                    M::abort(self, msg)?;
                }
            }
            sym::simd_insert => {
                let index = u64::from(self.read_scalar(&args[1])?.to_u32()?);
                let elem = &args[2];
                let input = &args[0];
                let (len, e_ty) = input.layout.ty.simd_size_and_type(*self.tcx);
                assert!(
                    index < len,
                    "index `{}` must be in bounds of vector type `{}`: `[0, {})`",
                    index, e_ty, len
                );
                assert_eq!(
                    input.layout, dest.layout,
                    "return type `{}` must match vector type `{}`",
                    dest.layout.ty, input.layout.ty
                );
                assert_eq!(
                    elem.layout.ty, e_ty,
                    "scalar element type `{}` must match vector element type `{}`",
                    elem.layout.ty, e_ty
                );
                for i in 0..len {
                    let place = self.place_index(dest, i)?;
                    let value = if i == index { *elem } else { self.operand_index(input, i)? };
                    self.copy_op(&value, &place)?;
                }
            }
            sym::simd_extract => {
                let index = u64::from(self.read_scalar(&args[1])?.to_u32()?);
                let (len, e_ty) = args[0].layout.ty.simd_size_and_type(*self.tcx);
                assert!(
                    index < len,
                    "index `{}` is out-of-bounds of vector type `{}` with length `{}`",
                    index, e_ty, len
                );
                assert_eq!(
                    e_ty, dest.layout.ty,
                    "return type `{}` must match vector element type `{}`",
                    dest.layout.ty, e_ty
                );
                self.copy_op(&self.operand_index(&args[0], index)?, dest)?;
            }
            sym::likely | sym::unlikely | sym::black_box => {
                // These just return their argument.
                self.copy_op(&args[0], dest)?;
            }
            sym::assume => {
                let cond = self.read_scalar(&args[0])?.check_init()?.to_bool()?;
                if !cond {
                    throw_ub_format!("`assume` intrinsic called with `false`");
                }
            }
            sym::raw_eq => {
                let result = self.raw_eq_intrinsic(&args[0], &args[1])?;
                self.write_scalar(result, dest)?;
            }
            _ => return Ok(false),
        }

        trace!("{:?}", self.dump_place(**dest));
        self.go_to_block(ret);
        Ok(true)
    }
    pub fn exact_div(
        &mut self,
        a: &ImmTy<'tcx, M::PointerTag>,
        b: &ImmTy<'tcx, M::PointerTag>,
        dest: &PlaceTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx> {
        // Performs an exact division, resulting in undefined behavior when
        // `x % y != 0`, `y == 0`, or `x == T::MIN && y == -1`.
        // First, check `x % y != 0` (or whether that computation overflows).
        let (res, overflow, _ty) = self.overflowing_binary_op(BinOp::Rem, &a, &b)?;
        if overflow || res.assert_bits(a.layout.size) != 0 {
            // Then, check if `b` is -1, which is the "MIN / -1" case.
            let minus1 = Scalar::from_int(-1, dest.layout.size);
            let b_scalar = b.to_scalar().unwrap();
            if b_scalar == minus1 {
                throw_ub_format!("exact_div: result of dividing MIN by -1 cannot be represented")
            } else {
                throw_ub_format!("exact_div: {} cannot be divided by {} without remainder", a, b)
            }
        }
        // `Rem` says this is all right, so we can let `Div` do its job.
        self.binop_ignore_overflow(BinOp::Div, &a, &b, dest)
    }
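    // Illustrative note: `ptr_offset_from` above funnels through this helper to
    // divide the byte distance by the pointee size, so a 16-byte distance between
    // `u32` pointers yields exactly 4, while a 14-byte distance is UB because
    // `14 % 4 != 0`.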
    /// Offsets a pointer by some multiple of its type, returning an error if the pointer leaves its
    /// allocation. For integer pointers, we consider each of them their own tiny allocation of size
    /// 0, so offset-by-0 (and only 0) is okay -- except that null cannot be offset by _any_ value.
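    ///
    /// Illustrative sketch (not normative): with `pointee_ty = u32` and a 16-byte
    /// allocation holding 4 `u32`s, `offset_count = 4` yields the one-past-the-end
    /// pointer and is accepted, because the access check covers exactly the 16 bytes
    /// in between; `offset_count = 5` leaves the allocation and is an error.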
    pub fn ptr_offset_inbounds(
        &self,
        ptr: Pointer<Option<M::PointerTag>>,
        pointee_ty: Ty<'tcx>,
        offset_count: i64,
    ) -> InterpResult<'tcx, Pointer<Option<M::PointerTag>>> {
        // We cannot overflow i64 as a type's size must be <= isize::MAX.
        let pointee_size = i64::try_from(self.layout_of(pointee_ty)?.size.bytes()).unwrap();
        // The computed offset, in bytes, cannot overflow an isize.
        let offset_bytes =
            offset_count.checked_mul(pointee_size).ok_or(err_ub!(PointerArithOverflow))?;
        // The offset being in bounds cannot rely on "wrapping around" the address space.
        // So, first rule out overflows in the pointer arithmetic.
        let offset_ptr = ptr.signed_offset(offset_bytes, self)?;
        // `ptr` and `offset_ptr` must be in bounds of the same allocated object. This means all of
        // the memory between these pointers must be accessible. Note that we do not require the
        // pointers to be properly aligned (unlike a read/write operation).
        let min_ptr = if offset_bytes >= 0 { ptr } else { offset_ptr };
        let size = offset_bytes.unsigned_abs();
        // This call handles checking for integer/null pointers.
        self.memory.check_ptr_access_align(
            min_ptr,
            Size::from_bytes(size),
            Align::ONE,
            CheckInAllocMsg::PointerArithmeticTest,
        )?;
        Ok(offset_ptr)
    }
    /// Copy `count * size_of::<T>()` many bytes from `*src` to `*dst`.
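    ///
    /// For example (illustrative): copying `count = 3` values of type `u16` moves
    /// `3 * 2 == 6` bytes, and a `count` so large that this multiplication overflows
    /// the target's `usize` is reported as UB rather than silently wrapping.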
    pub(crate) fn copy_intrinsic(
        &mut self,
        src: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::PointerTag>,
        dst: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::PointerTag>,
        count: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::PointerTag>,
        nonoverlapping: bool,
    ) -> InterpResult<'tcx> {
        let count = self.read_scalar(&count)?.to_machine_usize(self)?;
        let layout = self.layout_of(src.layout.ty.builtin_deref(true).unwrap().ty)?;
        let (size, align) = (layout.size, layout.align.abi);
        let size = size.checked_mul(count, self).ok_or_else(|| {
            err_ub_format!(
                "overflow computing total size of `{}`",
                if nonoverlapping { "copy_nonoverlapping" } else { "copy" }
            )
        })?;

        let src = self.read_pointer(&src)?;
        let dst = self.read_pointer(&dst)?;

        self.memory.copy(src, align, dst, align, size, nonoverlapping)
    }
    pub(crate) fn raw_eq_intrinsic(
        &mut self,
        lhs: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::PointerTag>,
        rhs: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::PointerTag>,
    ) -> InterpResult<'tcx, Scalar<M::PointerTag>> {
        let layout = self.layout_of(lhs.layout.ty.builtin_deref(true).unwrap().ty)?;
        assert!(!layout.is_unsized());
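        // Note (a sketch of the assumed semantics): `raw_eq` compares the full
        // in-memory representation of the two values, so padding bytes participate
        // in the comparison; if any of those bytes are uninitialized, the
        // `read_bytes` calls below surface an error instead of guessing a value.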
        let lhs = self.read_pointer(lhs)?;
        let rhs = self.read_pointer(rhs)?;
        let lhs_bytes = self.memory.read_bytes(lhs, layout.size)?;
        let rhs_bytes = self.memory.read_bytes(rhs, layout.size)?;
        Ok(Scalar::from_bool(lhs_bytes == rhs_bytes))
    }
}