// compiler/rustc_const_eval/src/interpret/intrinsics.rs
//! Intrinsics and other functions that the miri engine executes without
//! looking at their MIR. Intrinsics/functions supported here are shared by CTFE
//! and miri.

use std::convert::TryFrom;

use rustc_hir::def_id::DefId;
use rustc_middle::mir::{
    self,
    interpret::{ConstValue, GlobalId, InterpResult, Scalar},
    BinOp,
};
use rustc_middle::ty;
use rustc_middle::ty::layout::LayoutOf as _;
use rustc_middle::ty::subst::SubstsRef;
use rustc_middle::ty::{Ty, TyCtxt};
use rustc_span::symbol::{sym, Symbol};
use rustc_target::abi::{Abi, Align, Primitive, Size};

use super::{
    util::ensure_monomorphic_enough, CheckInAllocMsg, ImmTy, InterpCx, Machine, OpTy, PlaceTy,
    Pointer,
};

mod caller_location;
mod type_name;
fn numeric_intrinsic<Tag>(name: Symbol, bits: u128, kind: Primitive) -> Scalar<Tag> {
    let size = match kind {
        Primitive::Int(integer, _) => integer.size(),
        _ => bug!("invalid `{}` argument: {:?}", name, bits),
    };
    let extra = 128 - u128::from(size.bits());
    let bits_out = match name {
        sym::ctpop => u128::from(bits.count_ones()),
        sym::ctlz => u128::from(bits.leading_zeros()) - extra,
        sym::cttz => u128::from((bits << extra).trailing_zeros()) - extra,
        sym::bswap => (bits << extra).swap_bytes(),
        sym::bitreverse => (bits << extra).reverse_bits(),
        _ => bug!("not a numeric intrinsic: {}", name),
    };
    Scalar::from_uint(bits_out, size)
}

/// The logic for all nullary intrinsics is implemented here. These intrinsics don't get evaluated
/// inside an `InterpCx` and instead have their value computed directly from rustc internal info.
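/// For example, `type_name::<T>()` is answered by allocating the type's name as
/// a string slice, and `needs_drop::<T>()` by consulting `Ty::needs_drop`; no
/// MIR is executed for these.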
crate fn eval_nullary_intrinsic<'tcx>(
    tcx: TyCtxt<'tcx>,
    param_env: ty::ParamEnv<'tcx>,
    def_id: DefId,
    substs: SubstsRef<'tcx>,
) -> InterpResult<'tcx, ConstValue<'tcx>> {
    let tp_ty = substs.type_at(0);
    let name = tcx.item_name(def_id);
    Ok(match name {
        sym::type_name => {
            ensure_monomorphic_enough(tcx, tp_ty)?;
            let alloc = type_name::alloc_type_name(tcx, tp_ty);
            ConstValue::Slice { data: alloc, start: 0, end: alloc.len() }
        }
        sym::needs_drop => {
            ensure_monomorphic_enough(tcx, tp_ty)?;
            ConstValue::from_bool(tp_ty.needs_drop(tcx, param_env))
        }
        sym::pref_align_of => {
            // Correctly handles non-monomorphic calls, so there is no need for ensure_monomorphic_enough.
            let layout = tcx.layout_of(param_env.and(tp_ty)).map_err(|e| err_inval!(Layout(e)))?;
            ConstValue::from_machine_usize(layout.align.pref.bytes(), &tcx)
        }
        sym::type_id => {
            ensure_monomorphic_enough(tcx, tp_ty)?;
            ConstValue::from_u64(tcx.type_id_hash(tp_ty))
        }
        sym::variant_count => match tp_ty.kind() {
            // Correctly handles non-monomorphic calls, so there is no need for ensure_monomorphic_enough.
            ty::Adt(ref adt, _) => ConstValue::from_machine_usize(adt.variants.len() as u64, &tcx),
            ty::Projection(_)
            | ty::Opaque(_, _)
            | ty::Param(_)
            | ty::Bound(_, _)
            | ty::Placeholder(_)
            | ty::Infer(_) => throw_inval!(TooGeneric),
            ty::Bool
            | ty::Char
            | ty::Int(_)
            | ty::Uint(_)
            | ty::Float(_)
            | ty::Foreign(_)
            | ty::Str
            | ty::Array(_, _)
            | ty::Slice(_)
            | ty::RawPtr(_)
            | ty::Ref(_, _, _)
            | ty::FnDef(_, _)
            | ty::FnPtr(_)
            | ty::Dynamic(_, _)
            | ty::Closure(_, _)
            | ty::Generator(_, _, _)
            | ty::GeneratorWitness(_)
            | ty::Never
            | ty::Tuple(_)
            | ty::Error(_) => ConstValue::from_machine_usize(0u64, &tcx),
        },
        other => bug!("`{}` is not a zero arg intrinsic", other),
    })
}

impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    /// Returns `true` if emulation happened.
    /// Here we implement the intrinsics that are common to all Miri instances; individual machines can add their own
    /// intrinsic handling.
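    ///
    /// Returning `Ok(false)` means this intrinsic was not handled here; the caller is then
    /// responsible for it (e.g. a machine-specific intrinsic). Returning `Ok(true)` means the
    /// result has been written to `dest` and execution has moved on to the `ret` block.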
    pub fn emulate_intrinsic(
        &mut self,
        instance: ty::Instance<'tcx>,
        args: &[OpTy<'tcx, M::PointerTag>],
        ret: Option<(&PlaceTy<'tcx, M::PointerTag>, mir::BasicBlock)>,
    ) -> InterpResult<'tcx, bool> {
        let substs = instance.substs;
        let intrinsic_name = self.tcx.item_name(instance.def_id());

        // First handle intrinsics without return place.
        let (dest, ret) = match ret {
            None => match intrinsic_name {
                sym::transmute => throw_ub_format!("transmuting to uninhabited type"),
                sym::abort => M::abort(self, "the program aborted execution".to_owned())?,
                // Unsupported diverging intrinsic.
                _ => return Ok(false),
            },
            Some(p) => p,
        };

        // Keep the patterns in this match ordered the same as the list in
        // `src/librustc_middle/ty/constness.rs`
        match intrinsic_name {
            sym::caller_location => {
                let span = self.find_closest_untracked_caller_location();
                let location = self.alloc_caller_location_for_span(span);
                self.write_immediate(location.to_ref(self), dest)?;
            }

            sym::min_align_of_val | sym::size_of_val => {
                // Avoid `deref_operand` -- this is not a deref; the pointer does not have to be
                // dereferenceable!
                let place = self.ref_to_mplace(&self.read_immediate(&args[0])?)?;
                let (size, align) = self
                    .size_and_align_of_mplace(&place)?
                    .ok_or_else(|| err_unsup_format!("`extern type` does not have known layout"))?;

                let result = match intrinsic_name {
                    sym::min_align_of_val => align.bytes(),
                    sym::size_of_val => size.bytes(),
                    _ => bug!(),
                };

                self.write_scalar(Scalar::from_machine_usize(result, self), dest)?;
            }

            sym::pref_align_of
            | sym::needs_drop
            | sym::type_id
            | sym::type_name
            | sym::variant_count => {
                let gid = GlobalId { instance, promoted: None };
                let ty = match intrinsic_name {
                    sym::pref_align_of | sym::variant_count => self.tcx.types.usize,
                    sym::needs_drop => self.tcx.types.bool,
                    sym::type_id => self.tcx.types.u64,
                    sym::type_name => self.tcx.mk_static_str(),
                    _ => bug!("already checked for nullary intrinsics"),
                };
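                // Const-evaluating the intrinsic as a `GlobalId` goes through the regular
                // const-eval query, which dispatches these nullary intrinsics to
                // `eval_nullary_intrinsic` above and caches the result.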
                let val =
                    self.tcx.const_eval_global_id(self.param_env, gid, Some(self.tcx.span))?;
                let val = self.const_val_to_op(val, ty, Some(dest.layout))?;
                self.copy_op(&val, dest)?;
            }

            sym::ctpop
            | sym::cttz
            | sym::cttz_nonzero
            | sym::ctlz
            | sym::ctlz_nonzero
            | sym::bswap
            | sym::bitreverse => {
                let ty = substs.type_at(0);
                let layout_of = self.layout_of(ty)?;
                let val = self.read_scalar(&args[0])?.check_init()?;
                let bits = val.to_bits(layout_of.size)?;
                let kind = match layout_of.abi {
                    Abi::Scalar(scalar) => scalar.value,
                    _ => span_bug!(
                        self.cur_span(),
                        "{} called on invalid type {:?}",
                        intrinsic_name,
                        ty
                    ),
                };
                let (nonzero, intrinsic_name) = match intrinsic_name {
                    sym::cttz_nonzero => (true, sym::cttz),
                    sym::ctlz_nonzero => (true, sym::ctlz),
                    other => (false, other),
                };
                if nonzero && bits == 0 {
                    throw_ub_format!("`{}_nonzero` called on 0", intrinsic_name);
                }
                let out_val = numeric_intrinsic(intrinsic_name, bits, kind);
                self.write_scalar(out_val, dest)?;
            }
            sym::add_with_overflow | sym::sub_with_overflow | sym::mul_with_overflow => {
                let lhs = self.read_immediate(&args[0])?;
                let rhs = self.read_immediate(&args[1])?;
                let bin_op = match intrinsic_name {
                    sym::add_with_overflow => BinOp::Add,
                    sym::sub_with_overflow => BinOp::Sub,
                    sym::mul_with_overflow => BinOp::Mul,
                    _ => bug!("Already checked for int ops"),
                };
                self.binop_with_overflow(bin_op, &lhs, &rhs, dest)?;
            }
            sym::saturating_add | sym::saturating_sub => {
                let l = self.read_immediate(&args[0])?;
                let r = self.read_immediate(&args[1])?;
                let is_add = intrinsic_name == sym::saturating_add;
                let (val, overflowed, _ty) = self.overflowing_binary_op(
                    if is_add { BinOp::Add } else { BinOp::Sub },
                    &l,
                    &r,
                )?;
                let val = if overflowed {
                    let size = l.layout.size;
                    let num_bits = size.bits();
                    if l.layout.abi.is_signed() {
                        // For signed ints, the saturated value depends on the sign of the first
                        // term, since the sign of the second term can be inferred from it together
                        // with the fact that the operation overflowed. (If either term were 0,
                        // no overflow could occur.)
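                        // E.g. for `i8`: `100 + 100` overflows with a positive first
                        // term and saturates to `(1 << 7) - 1 == 127`, while
                        // `-100 - 100` overflows with a negative first term and
                        // saturates to `-(1 << 7) == -128`.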
                        let first_term: u128 = l.to_scalar()?.to_bits(l.layout.size)?;
                        let first_term_positive = first_term & (1 << (num_bits - 1)) == 0;
                        if first_term_positive {
                            // Negative overflow not possible since the positive first term
                            // can only increase an (in range) negative term for addition
                            // or corresponding negated positive term for subtraction
                            Scalar::from_uint(
                                (1u128 << (num_bits - 1)) - 1, // max positive
                                Size::from_bits(num_bits),
                            )
                        } else {
                            // Positive overflow not possible for similar reason
                            // max negative
                            Scalar::from_uint(1u128 << (num_bits - 1), Size::from_bits(num_bits))
                        }
                    } else {
                        // unsigned
                        if is_add {
                            // max unsigned
                            Scalar::from_uint(size.unsigned_int_max(), Size::from_bits(num_bits))
                        } else {
                            // underflow to 0
                            Scalar::from_uint(0u128, Size::from_bits(num_bits))
                        }
                    }
                } else {
                    val
                };
                self.write_scalar(val, dest)?;
            }
            sym::discriminant_value => {
                let place = self.deref_operand(&args[0])?;
                let discr_val = self.read_discriminant(&place.into())?.0;
                self.write_scalar(discr_val, dest)?;
            }
            sym::unchecked_shl
            | sym::unchecked_shr
            | sym::unchecked_add
            | sym::unchecked_sub
            | sym::unchecked_mul
            | sym::unchecked_div
            | sym::unchecked_rem => {
                let l = self.read_immediate(&args[0])?;
                let r = self.read_immediate(&args[1])?;
                let bin_op = match intrinsic_name {
                    sym::unchecked_shl => BinOp::Shl,
                    sym::unchecked_shr => BinOp::Shr,
                    sym::unchecked_add => BinOp::Add,
                    sym::unchecked_sub => BinOp::Sub,
                    sym::unchecked_mul => BinOp::Mul,
                    sym::unchecked_div => BinOp::Div,
                    sym::unchecked_rem => BinOp::Rem,
                    _ => bug!("Already checked for int ops"),
                };
                let (val, overflowed, _ty) = self.overflowing_binary_op(bin_op, &l, &r)?;
                if overflowed {
                    let layout = self.layout_of(substs.type_at(0))?;
                    let r_val = r.to_scalar()?.to_bits(layout.size)?;
                    if let sym::unchecked_shl | sym::unchecked_shr = intrinsic_name {
                        throw_ub_format!("overflowing shift by {} in `{}`", r_val, intrinsic_name);
                    } else {
                        throw_ub_format!("overflow executing `{}`", intrinsic_name);
                    }
                }
                self.write_scalar(val, dest)?;
            }
            sym::rotate_left | sym::rotate_right => {
                // rotate_left: (X << (S % BW)) | (X >> ((BW - S) % BW))
                // rotate_right: (X << ((BW - S) % BW)) | (X >> (S % BW))
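                // E.g. rotating the `u8` value `0b1001_0000` left by 3 gives
                // `(0x90 << 3) | (0x90 >> 5) == 0x484`, which truncates to the
                // 8-bit result `0x84 == 0b1000_0100`.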
                let layout = self.layout_of(substs.type_at(0))?;
                let val = self.read_scalar(&args[0])?.check_init()?;
                let val_bits = val.to_bits(layout.size)?;
                let raw_shift = self.read_scalar(&args[1])?.check_init()?;
                let raw_shift_bits = raw_shift.to_bits(layout.size)?;
                let width_bits = u128::from(layout.size.bits());
                let shift_bits = raw_shift_bits % width_bits;
                let inv_shift_bits = (width_bits - shift_bits) % width_bits;
                let result_bits = if intrinsic_name == sym::rotate_left {
                    (val_bits << shift_bits) | (val_bits >> inv_shift_bits)
                } else {
                    (val_bits >> shift_bits) | (val_bits << inv_shift_bits)
                };
                let truncated_bits = self.truncate(result_bits, layout);
                let result = Scalar::from_uint(truncated_bits, layout.size);
                self.write_scalar(result, dest)?;
            }
            sym::copy => {
                self.copy_intrinsic(&args[0], &args[1], &args[2], /*nonoverlapping*/ false)?;
            }
            sym::offset => {
                let ptr = self.read_pointer(&args[0])?;
                let offset_count = self.read_scalar(&args[1])?.to_machine_isize(self)?;
                let pointee_ty = substs.type_at(0);

                let offset_ptr = self.ptr_offset_inbounds(ptr, pointee_ty, offset_count)?;
                self.write_pointer(offset_ptr, dest)?;
            }
            sym::arith_offset => {
                let ptr = self.read_pointer(&args[0])?;
                let offset_count = self.read_scalar(&args[1])?.to_machine_isize(self)?;
                let pointee_ty = substs.type_at(0);

                let pointee_size = i64::try_from(self.layout_of(pointee_ty)?.size.bytes()).unwrap();
                let offset_bytes = offset_count.wrapping_mul(pointee_size);
                let offset_ptr = ptr.wrapping_signed_offset(offset_bytes, self);
                self.write_pointer(offset_ptr, dest)?;
            }
            sym::ptr_offset_from => {
                let a = self.read_immediate(&args[0])?.to_scalar()?;
                let b = self.read_immediate(&args[1])?.to_scalar()?;

                // Special case: if both scalars are *equal integers*
                // and not null, we pretend there is an allocation of size 0 right there,
                // and their offset is 0. (There's never a valid object at null, making it an
                // exception from the exception.)
                // This is the dual to the special exception for offset-by-0
                // in the inbounds pointer offset operation (see the Miri code, `src/operator.rs`).
                //
                // Control flow is weird because we cannot early-return (to reach the
                // `go_to_block` at the end).
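                // E.g. two copies of `4 as *const u8` have distance 0 under this
                // exception, while two *different* integer addresses fall through
                // to the general case below and fail in `ptr_get_alloc`.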
                let done = if let (Ok(a), Ok(b)) = (a.try_to_int(), b.try_to_int()) {
                    let a = a.try_to_machine_usize(*self.tcx).unwrap();
                    let b = b.try_to_machine_usize(*self.tcx).unwrap();
                    if a == b && a != 0 {
                        self.write_scalar(Scalar::from_machine_isize(0, self), dest)?;
                        true
                    } else {
                        false
                    }
                } else {
                    false
                };

                if !done {
                    // General case: we need two pointers.
                    let a = self.scalar_to_ptr(a);
                    let b = self.scalar_to_ptr(b);
                    let (a_alloc_id, a_offset, _) = self.memory.ptr_get_alloc(a)?;
                    let (b_alloc_id, b_offset, _) = self.memory.ptr_get_alloc(b)?;
                    if a_alloc_id != b_alloc_id {
                        throw_ub_format!(
                            "ptr_offset_from cannot compute offset of pointers into different \
                            allocations.",
                        );
                    }
                    let usize_layout = self.layout_of(self.tcx.types.usize)?;
                    let isize_layout = self.layout_of(self.tcx.types.isize)?;
                    let a_offset = ImmTy::from_uint(a_offset.bytes(), usize_layout);
                    let b_offset = ImmTy::from_uint(b_offset.bytes(), usize_layout);
                    let (val, _overflowed, _ty) =
                        self.overflowing_binary_op(BinOp::Sub, &a_offset, &b_offset)?;
                    let pointee_layout = self.layout_of(substs.type_at(0))?;
                    let val = ImmTy::from_scalar(val, isize_layout);
                    let size = ImmTy::from_int(pointee_layout.size.bytes(), isize_layout);
                    self.exact_div(&val, &size, dest)?;
                }
            }

            sym::transmute => {
                self.copy_op_transmute(&args[0], dest)?;
            }
            sym::assert_inhabited => {
                let ty = instance.substs.type_at(0);
                let layout = self.layout_of(ty)?;

                if layout.abi.is_uninhabited() {
                    // The run-time intrinsic panics just to get a good backtrace; here we abort
                    // since there is no problem showing a backtrace even for aborts.
                    M::abort(
                        self,
                        format!(
                            "aborted execution: attempted to instantiate uninhabited type `{}`",
                            ty
                        ),
                    )?;
                }
            }
            sym::simd_insert => {
                let index = u64::from(self.read_scalar(&args[1])?.to_u32()?);
                let elem = &args[2];
                let input = &args[0];
                let (len, e_ty) = input.layout.ty.simd_size_and_type(*self.tcx);
                assert!(
                    index < len,
                    "Index `{}` must be in bounds of vector type `{}`: `[0, {})`",
                    index,
                    e_ty,
                    len
                );
                assert_eq!(
                    input.layout, dest.layout,
                    "Return type `{}` must match vector type `{}`",
                    dest.layout.ty, input.layout.ty
                );
                assert_eq!(
                    elem.layout.ty, e_ty,
                    "Scalar element type `{}` must match vector element type `{}`",
                    elem.layout.ty, e_ty
                );

                for i in 0..len {
                    let place = self.place_index(dest, i)?;
                    let value = if i == index { *elem } else { self.operand_index(input, i)? };
                    self.copy_op(&value, &place)?;
                }
            }
            sym::simd_extract => {
                let index = u64::from(self.read_scalar(&args[1])?.to_u32()?);
                let (len, e_ty) = args[0].layout.ty.simd_size_and_type(*self.tcx);
                assert!(
                    index < len,
                    "index `{}` is out-of-bounds of vector type `{}` with length `{}`",
                    index,
                    e_ty,
                    len
                );
                assert_eq!(
                    e_ty, dest.layout.ty,
                    "Return type `{}` must match vector element type `{}`",
                    dest.layout.ty, e_ty
                );
                self.copy_op(&self.operand_index(&args[0], index)?, dest)?;
            }
            sym::likely | sym::unlikely | sym::black_box => {
                // These just return their argument
                self.copy_op(&args[0], dest)?;
            }
            sym::assume => {
                let cond = self.read_scalar(&args[0])?.check_init()?.to_bool()?;
                if !cond {
                    throw_ub_format!("`assume` intrinsic called with `false`");
                }
            }
            sym::raw_eq => {
                let result = self.raw_eq_intrinsic(&args[0], &args[1])?;
                self.write_scalar(result, dest)?;
            }
            _ => return Ok(false),
        }

        trace!("{:?}", self.dump_place(**dest));
        self.go_to_block(ret);
        Ok(true)
    }

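    /// Performs an exact division, reporting UB when the preconditions listed in the body are
    /// violated: `exact_div(6, 3)` is `2`, but `exact_div(7, 3)`, division by zero, and
    /// `T::MIN / -1` are all undefined behavior.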
    pub fn exact_div(
        &mut self,
        a: &ImmTy<'tcx, M::PointerTag>,
        b: &ImmTy<'tcx, M::PointerTag>,
        dest: &PlaceTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx> {
        // Performs an exact division, resulting in undefined behavior where
        // `x % y != 0` or `y == 0` or `x == T::MIN && y == -1`.
        // First, check x % y != 0 (or if that computation overflows).
        let (res, overflow, _ty) = self.overflowing_binary_op(BinOp::Rem, &a, &b)?;
        if overflow || res.assert_bits(a.layout.size) != 0 {
            // Then, check if `b` is -1, which is the "MIN / -1" case.
            let minus1 = Scalar::from_int(-1, dest.layout.size);
            let b_scalar = b.to_scalar().unwrap();
            if b_scalar == minus1 {
                throw_ub_format!("exact_div: result of dividing MIN by -1 cannot be represented")
            } else {
                throw_ub_format!("exact_div: {} cannot be divided by {} without remainder", a, b,)
            }
        }
        // `Rem` says this is all right, so we can let `Div` do its job.
        self.binop_ignore_overflow(BinOp::Div, &a, &b, dest)
    }

    /// Offsets a pointer by some multiple of its type, returning an error if the pointer leaves its
    /// allocation. For integer pointers, we consider each of them their own tiny allocation of size
    /// 0, so offset-by-0 (and only 0) is okay -- except that null cannot be offset by _any_ value.
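    /// For example, offsetting a pointer to the start of a `[u32; 4]` by 4 elements lands
    /// one-past-the-end and passes the access check below (all 16 bytes in between are in
    /// bounds), while offsetting it by 5 is UB.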
    pub fn ptr_offset_inbounds(
        &self,
        ptr: Pointer<Option<M::PointerTag>>,
        pointee_ty: Ty<'tcx>,
        offset_count: i64,
    ) -> InterpResult<'tcx, Pointer<Option<M::PointerTag>>> {
        // We cannot overflow i64 as a type's size must be <= isize::MAX.
        let pointee_size = i64::try_from(self.layout_of(pointee_ty)?.size.bytes()).unwrap();
        // The computed offset, in bytes, cannot overflow an isize.
        let offset_bytes =
            offset_count.checked_mul(pointee_size).ok_or(err_ub!(PointerArithOverflow))?;
        // The offset being in bounds cannot rely on "wrapping around" the address space.
        // So, first rule out overflows in the pointer arithmetic.
        let offset_ptr = ptr.signed_offset(offset_bytes, self)?;
        // ptr and offset_ptr must be in bounds of the same allocated object. This means all of the
        // memory between these pointers must be accessible. Note that we do not require the
        // pointers to be properly aligned (unlike a read/write operation).
        let min_ptr = if offset_bytes >= 0 { ptr } else { offset_ptr };
        let size = offset_bytes.unsigned_abs();
        // This call handles checking for integer/null pointers.
        self.memory.check_ptr_access_align(
            min_ptr,
            Size::from_bytes(size),
            Align::ONE,
            CheckInAllocMsg::PointerArithmeticTest,
        )?;
        Ok(offset_ptr)
    }

    /// Copy `count*size_of::<T>()` many bytes from `*src` to `*dst`.
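    /// E.g. `copy::<u32>` with `count == 3` moves 12 bytes; the multiplication is checked, so an
    /// absurdly large `count` reports overflow instead of silently wrapping to a small size.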
    pub(crate) fn copy_intrinsic(
        &mut self,
        src: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::PointerTag>,
        dst: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::PointerTag>,
        count: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::PointerTag>,
        nonoverlapping: bool,
    ) -> InterpResult<'tcx> {
        let count = self.read_scalar(&count)?.to_machine_usize(self)?;
        let layout = self.layout_of(src.layout.ty.builtin_deref(true).unwrap().ty)?;
        let (size, align) = (layout.size, layout.align.abi);
        let size = size.checked_mul(count, self).ok_or_else(|| {
            err_ub_format!(
                "overflow computing total size of `{}`",
                if nonoverlapping { "copy_nonoverlapping" } else { "copy" }
            )
        })?;

        let src = self.read_pointer(&src)?;
        let dst = self.read_pointer(&dst)?;

        self.memory.copy(src, align, dst, align, size, nonoverlapping)
    }

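    /// Implements `raw_eq`: compares two values of the same (sized) type for byte-wise equality
    /// by reading `layout.size` bytes through each pointer.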
    pub(crate) fn raw_eq_intrinsic(
        &mut self,
        lhs: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::PointerTag>,
        rhs: &OpTy<'tcx, <M as Machine<'mir, 'tcx>>::PointerTag>,
    ) -> InterpResult<'tcx, Scalar<M::PointerTag>> {
        let layout = self.layout_of(lhs.layout.ty.builtin_deref(true).unwrap().ty)?;
        assert!(!layout.is_unsized());

        let lhs = self.read_pointer(lhs)?;
        let rhs = self.read_pointer(rhs)?;
        let lhs_bytes = self.memory.read_bytes(lhs, layout.size)?;
        let rhs_bytes = self.memory.read_bytes(rhs, layout.size)?;
        Ok(Scalar::from_bool(lhs_bytes == rhs_bytes))
    }
}