2 convert::{TryFrom, TryInto},
8 use rustc_apfloat::Float;
9 use rustc_hir::def_id::DefId;
10 use rustc_middle::mir;
12 use rustc_span::symbol::sym;
15 spec::{abi::Abi, PanicStrategy},
18 use super::backtrace::EvalContextExt as _;
20 use helpers::{check_abi, check_arg_count};
// Empty blanket impl: `MiriEvalContext` picks up every default method of the
// `EvalContextExt` shim trait defined below.
22 impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
// Extension trait holding the foreign-item ("shim") machinery; implemented for
// `MiriEvalContext` via the blanket impl, so `this.min_align(..)` etc. work anywhere.
23 pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
24 /// Returns the minimum alignment for the target architecture for allocations of the given size.
25 fn min_align(&self, size: u64, kind: MiriMemoryKind) -> Align {
26 let this = self.eval_context_ref();
27 // List taken from `libstd/sys_common/alloc.rs`.
// Guaranteed allocator alignment, in bytes, keyed on the target architecture.
28 let min_align = match this.tcx.sess.target.arch.as_str() {
29 "x86" | "arm" | "mips" | "powerpc" | "powerpc64" | "asmjs" | "wasm32" => 8,
30 "x86_64" | "aarch64" | "mips64" | "s390x" | "sparc64" => 16,
// An unknown architecture is a Miri bug (missing table entry), not a user error.
31 arch => bug!("Unsupported target architecture: {}", arch),
33 // Windows always aligns, even small allocations.
34 // Source: <https://support.microsoft.com/en-us/help/286470/how-to-use-pageheap-exe-in-windows-xp-windows-2000-and-windows-server>
35 // But jemalloc does not, so for the C heap we only align if the allocation is sufficiently big.
36 if kind == MiriMemoryKind::WinHeap || size >= min_align {
37 return Align::from_bytes(min_align).unwrap();
39 // We have `size < min_align`. Round `size` *down* to the next power of two and use that.
40 fn prev_power_of_two(x: u64) -> u64 {
41 let next_pow2 = x.next_power_of_two();
43 // x *is* a power of two, just use that.
46 // x is between two powers, so next = 2*prev.
// `prev_power_of_two` yields a power of two, so `from_bytes` cannot fail here.
50 Align::from_bytes(prev_power_of_two(size)).unwrap()
/// Emulates a C-style allocation. A null pointer is produced on one path (visible
/// `Scalar::null_ptr` below — presumably the `size == 0` case; elided branch, confirm),
/// otherwise memory is allocated with the target's `min_align` and, when `zero_init`
/// is set, filled with zero bytes.
53 fn malloc(&mut self, size: u64, zero_init: bool, kind: MiriMemoryKind) -> Scalar<Tag> {
54 let this = self.eval_context_mut();
56 Scalar::null_ptr(this)
58 let align = this.min_align(size, kind);
59 let ptr = this.memory.allocate(Size::from_bytes(size), align, kind.into());
61 // We just allocated this, the access is definitely in-bounds.
62 this.memory.write_bytes(ptr.into(), iter::repeat(0u8).take(size as usize)).unwrap();
/// Emulates C `free`: freeing a null pointer is a no-op; otherwise the
/// allocation is deallocated (Miri checks it belongs to the matching heap `kind`).
68 fn free(&mut self, ptr: Scalar<Tag>, kind: MiriMemoryKind) -> InterpResult<'tcx> {
69 let this = self.eval_context_mut();
70 if !this.is_null(ptr)? {
71 let ptr = this.force_ptr(ptr)?;
// `None` size/align: let the memory subsystem validate against the allocation itself.
72 this.memory.deallocate(ptr, None, kind.into())?;
82 ) -> InterpResult<'tcx, Scalar<Tag>> {
// Emulates C `realloc`. The visible branch bodies suggest the usual mapping:
// null old pointer -> fresh allocation (or null for size 0); new size 0 ->
// deallocate and return null; otherwise resize/move via `reallocate`.
// NOTE(review): the branch *conditions* are elided in this excerpt — confirm
// against the full file before relying on the exact case split.
83 let this = self.eval_context_mut();
84 let new_align = this.min_align(new_size, kind);
85 if this.is_null(old_ptr)? {
87 Ok(Scalar::null_ptr(this))
90 this.memory.allocate(Size::from_bytes(new_size), new_align, kind.into());
91 Ok(Scalar::Ptr(new_ptr))
94 let old_ptr = this.force_ptr(old_ptr)?;
// `None`: old size/align are validated against the allocation by the memory subsystem.
96 this.memory.deallocate(old_ptr, None, kind.into())?;
97 Ok(Scalar::null_ptr(this))
99 let new_ptr = this.memory.reallocate(
102 Size::from_bytes(new_size),
106 Ok(Scalar::Ptr(new_ptr))
111 /// Emulates calling a foreign item, failing if the item is not supported.
112 /// This function will handle `goto_block` if needed.
113 /// Returns Ok(None) if the foreign item was completely handled
114 /// by this function.
115 /// Returns Ok(Some(body)) if processing the foreign item
116 /// is delegated to another function.
117 fn emulate_foreign_item(
121 args: &[OpTy<'tcx, Tag>],
122 ret: Option<(&PlaceTy<'tcx, Tag>, mir::BasicBlock)>,
123 unwind: StackPopUnwind,
124 ) -> InterpResult<'tcx, Option<&'mir mir::Body<'tcx>>> {
125 let this = self.eval_context_mut();
126 let attrs = this.tcx.get_attrs(def_id);
// Shims are keyed on the `#[link_name = "..."]` attribute if present,
// falling back to the item's own name.
127 let link_name = match this.tcx.sess.first_attr_value_str_by_name(&attrs, sym::link_name) {
128 Some(name) => name.as_str(),
129 None => this.tcx.item_name(def_id).as_str(),
131 // Strip linker suffixes (seen on 32-bit macOS).
132 let link_name = link_name.trim_end_matches("$UNIX2003");
133 let tcx = this.tcx.tcx;
135 // First: functions that diverge.
// `ret == None` means the caller has no return block, i.e. the callee must diverge.
136 let (dest, ret) = match ret {
137 None => match link_name {
138 "miri_start_panic" => {
139 check_abi(this, abi, Abi::Rust)?;
140 this.handle_miri_start_panic(args, unwind)?;
143 // This matches calls to the foreign item `panic_impl`.
144 // The implementation is provided by the function with the `#[panic_handler]` attribute.
146 check_abi(this, abi, Abi::Rust)?;
147 let panic_impl_id = tcx.lang_items().panic_impl().unwrap();
148 let panic_impl_instance = ty::Instance::mono(tcx, panic_impl_id);
// Delegate: hand the panic handler's MIR body back to the interpreter.
149 return Ok(Some(&*this.load_mir(panic_impl_instance.def, None)?));
// Process-exit shims: `exit` uses the C ABI, the other (presumably
// `ExitProcess`; arm header elided) uses the System ABI.
155 check_abi(this, abi, if link_name == "exit" { Abi::C { unwind: false } } else { Abi::System { unwind: false } })?;
156 let &[ref code] = check_arg_count(args)?;
157 // it's really u32 for ExitProcess, but we have to put it into the `Exit` variant anyway
158 let code = this.read_scalar(code)?.to_i32()?;
159 throw_machine_stop!(TerminationInfo::Exit(code.into()));
// `abort`-style shim: stop the whole machine with an abort message.
162 check_abi(this, abi, Abi::C { unwind: false })?;
163 throw_machine_stop!(TerminationInfo::Abort(
164 "the program aborted execution".to_owned()
167 _ => throw_unsup_format!("can't call (diverging) foreign function: {}", link_name),
172 // Second: some functions that we forward to MIR implementations.
174 // This matches calls to the foreign item `__rust_start_panic`, that is,
175 // calls to `extern "Rust" { fn __rust_start_panic(...) }`
176 // (and `__rust_panic_cleanup`, respectively).
177 // We forward this to the underlying *implementation* in the panic runtime crate.
178 // Normally, this will be either `libpanic_unwind` or `libpanic_abort`, but it could
179 // also be a custom user-provided implementation via `#![feature(panic_runtime)]`
181 "__rust_start_panic" |
182 "__rust_panic_cleanup" => {
183 check_abi(this, abi, Abi::C { unwind: false })?;
184 // This replicates some of the logic in `inject_panic_runtime`.
185 // FIXME: is there a way to reuse that logic?
186 let panic_runtime = match this.tcx.sess.panic_strategy() {
187 PanicStrategy::Unwind => sym::panic_unwind,
188 PanicStrategy::Abort => sym::panic_abort,
190 let start_panic_instance =
191 this.resolve_path(&[&*panic_runtime.as_str(), link_name]);
192 return Ok(Some(&*this.load_mir(start_panic_instance.def, None)?));
197 // Third: functions that return.
// `true` means the shim wants the interpreter to jump to the return block.
198 if this.emulate_foreign_item_by_name(link_name, abi, args, dest, ret)? {
199 trace!("{:?}", this.dump_place(**dest));
200 this.go_to_block(ret);
206 /// Emulates calling a foreign item using its name, failing if the item is not supported.
207 /// Returns `true` if the caller is expected to jump to the return block, and `false` if
208 /// jumping has already been taken care of.
209 fn emulate_foreign_item_by_name(
213 args: &[OpTy<'tcx, Tag>],
214 dest: &PlaceTy<'tcx, Tag>,
215 ret: mir::BasicBlock,
216 ) -> InterpResult<'tcx, bool> {
217 let this = self.eval_context_mut();
219 // Here we dispatch all the shims for foreign functions. If you have a platform specific
220 // shim, add it to the corresponding submodule.
222 // Miri-specific extern functions
// Registers an allocation as a GC/leak-check root; the pointer must be the
// start of its allocation.
223 "miri_static_root" => {
224 check_abi(this, abi, Abi::Rust)?;
225 let &[ref ptr] = check_arg_count(args)?;
226 let ptr = this.read_scalar(ptr)?.check_init()?;
227 let ptr = this.force_ptr(ptr)?;
228 if ptr.offset != Size::ZERO {
229 throw_unsup_format!("pointer passed to miri_static_root must point to beginning of an allocated block");
231 this.machine.static_roots.push(ptr.alloc_id);
234 // Obtains a Miri backtrace. See the README for details.
235 "miri_get_backtrace" => {
236 check_abi(this, abi, Abi::Rust)?;
237 this.handle_miri_get_backtrace(args, dest)?;
240 // Resolves a Miri backtrace frame. See the README for details.
241 "miri_resolve_frame" => {
242 check_abi(this, abi, Abi::Rust)?;
243 this.handle_miri_resolve_frame(args, dest)?;
247 // Standard C allocation
249 check_abi(this, abi, Abi::C { unwind: false })?;
250 let &[ref size] = check_arg_count(args)?;
251 let size = this.read_scalar(size)?.to_machine_usize(this)?;
252 let res = this.malloc(size, /*zero_init:*/ false, MiriMemoryKind::C);
253 this.write_scalar(res, dest)?;
// `calloc`-style shim: the total size is `items * len`, and the
// multiplication is overflow-checked (overflow is reported as UB).
256 check_abi(this, abi, Abi::C { unwind: false })?;
257 let &[ref items, ref len] = check_arg_count(args)?;
258 let items = this.read_scalar(items)?.to_machine_usize(this)?;
259 let len = this.read_scalar(len)?.to_machine_usize(this)?;
261 items.checked_mul(len).ok_or_else(|| err_ub_format!("overflow during calloc size computation"))?;
262 let res = this.malloc(size, /*zero_init:*/ true, MiriMemoryKind::C);
263 this.write_scalar(res, dest)?;
266 check_abi(this, abi, Abi::C { unwind: false })?;
267 let &[ref ptr] = check_arg_count(args)?;
268 let ptr = this.read_scalar(ptr)?.check_init()?;
269 this.free(ptr, MiriMemoryKind::C)?;
272 check_abi(this, abi, Abi::C { unwind: false })?;
273 let &[ref old_ptr, ref new_size] = check_arg_count(args)?;
274 let old_ptr = this.read_scalar(old_ptr)?.check_init()?;
275 let new_size = this.read_scalar(new_size)?.to_machine_usize(this)?;
276 let res = this.realloc(old_ptr, new_size, MiriMemoryKind::C)?;
277 this.write_scalar(res, dest)?;
281 // (Usually these would be forwarded to `#[global_allocator]`; we instead implement a generic
282 // allocation that also checks that all conditions are met, such as not permitting zero-sized allocations.)
284 check_abi(this, abi, Abi::Rust)?;
285 let &[ref size, ref align] = check_arg_count(args)?;
286 let size = this.read_scalar(size)?.to_machine_usize(this)?;
287 let align = this.read_scalar(align)?.to_machine_usize(this)?;
288 Self::check_alloc_request(size, align)?;
289 let ptr = this.memory.allocate(
290 Size::from_bytes(size),
// `check_alloc_request` already verified `align` is a power of two.
291 Align::from_bytes(align).unwrap(),
292 MiriMemoryKind::Rust.into(),
294 this.write_scalar(ptr, dest)?;
296 "__rust_alloc_zeroed" => {
297 check_abi(this, abi, Abi::Rust)?;
298 let &[ref size, ref align] = check_arg_count(args)?;
299 let size = this.read_scalar(size)?.to_machine_usize(this)?;
300 let align = this.read_scalar(align)?.to_machine_usize(this)?;
301 Self::check_alloc_request(size, align)?;
302 let ptr = this.memory.allocate(
303 Size::from_bytes(size),
304 Align::from_bytes(align).unwrap(),
305 MiriMemoryKind::Rust.into(),
307 // We just allocated this, the access is definitely in-bounds.
308 this.memory.write_bytes(ptr.into(), iter::repeat(0u8).take(usize::try_from(size).unwrap())).unwrap();
309 this.write_scalar(ptr, dest)?;
311 "__rust_dealloc" => {
312 check_abi(this, abi, Abi::Rust)?;
313 let &[ref ptr, ref old_size, ref align] = check_arg_count(args)?;
314 let ptr = this.read_scalar(ptr)?.check_init()?;
315 let old_size = this.read_scalar(old_size)?.to_machine_usize(this)?;
316 let align = this.read_scalar(align)?.to_machine_usize(this)?;
317 // No need to check old_size/align; we anyway check that they match the allocation.
318 let ptr = this.force_ptr(ptr)?;
319 this.memory.deallocate(
// Passing the caller's layout makes Miri verify size/align against the allocation.
321 Some((Size::from_bytes(old_size), Align::from_bytes(align).unwrap())),
322 MiriMemoryKind::Rust.into(),
325 "__rust_realloc" => {
326 check_abi(this, abi, Abi::Rust)?;
327 let &[ref ptr, ref old_size, ref align, ref new_size] = check_arg_count(args)?;
328 let ptr = this.force_ptr(this.read_scalar(ptr)?.check_init()?)?;
329 let old_size = this.read_scalar(old_size)?.to_machine_usize(this)?;
330 let align = this.read_scalar(align)?.to_machine_usize(this)?;
331 let new_size = this.read_scalar(new_size)?.to_machine_usize(this)?;
332 Self::check_alloc_request(new_size, align)?;
333 // No need to check old_size; we anyway check that they match the allocation.
334 let align = Align::from_bytes(align).unwrap();
335 let new_ptr = this.memory.reallocate(
337 Some((Size::from_bytes(old_size), align)),
338 Size::from_bytes(new_size),
340 MiriMemoryKind::Rust.into(),
342 this.write_scalar(new_ptr, dest)?;
345 // C memory handling functions
// Byte-wise comparison (`memcmp`-style): reads both ranges and compares them
// lexicographically via `Ord::cmp`; the result is written as an i32.
347 check_abi(this, abi, Abi::C { unwind: false })?;
348 let &[ref left, ref right, ref n] = check_arg_count(args)?;
349 let left = this.read_scalar(left)?.check_init()?;
350 let right = this.read_scalar(right)?.check_init()?;
351 let n = Size::from_bytes(this.read_scalar(n)?.to_machine_usize(this)?);
354 let left_bytes = this.memory.read_bytes(left, n)?;
355 let right_bytes = this.memory.read_bytes(right, n)?;
357 use std::cmp::Ordering::*;
358 match left_bytes.cmp(right_bytes) {
365 this.write_scalar(Scalar::from_i32(result), dest)?;
// Byte search from the end (`memrchr`-style): `idx` is the position in a
// (presumably reversed — iterator adaptors elided here, confirm) byte stream,
// so `num - idx - 1` converts it back to a forward offset.
368 check_abi(this, abi, Abi::C { unwind: false })?;
369 let &[ref ptr, ref val, ref num] = check_arg_count(args)?;
370 let ptr = this.read_scalar(ptr)?.check_init()?;
// Like the C prototype: the value argument is an `int`, truncated to a byte.
371 let val = this.read_scalar(val)?.to_i32()? as u8;
372 let num = this.read_scalar(num)?.to_machine_usize(this)?;
373 if let Some(idx) = this
375 .read_bytes(ptr, Size::from_bytes(num))?
378 .position(|&c| c == val)
380 let new_ptr = ptr.ptr_offset(Size::from_bytes(num - idx as u64 - 1), this)?;
381 this.write_scalar(new_ptr, dest)?;
// Not found: return a null pointer.
383 this.write_null(dest)?;
// Forward byte search (`memchr`-style): offset is the position itself.
387 check_abi(this, abi, Abi::C { unwind: false })?;
388 let &[ref ptr, ref val, ref num] = check_arg_count(args)?;
389 let ptr = this.read_scalar(ptr)?.check_init()?;
390 let val = this.read_scalar(val)?.to_i32()? as u8;
391 let num = this.read_scalar(num)?.to_machine_usize(this)?;
394 .read_bytes(ptr, Size::from_bytes(num))?
396 .position(|&c| c == val);
397 if let Some(idx) = idx {
398 let new_ptr = ptr.ptr_offset(Size::from_bytes(idx as u64), this)?;
399 this.write_scalar(new_ptr, dest)?;
401 this.write_null(dest)?;
// `strlen`-style shim: length of the NUL-terminated string at `ptr`.
405 check_abi(this, abi, Abi::C { unwind: false })?;
406 let &[ref ptr] = check_arg_count(args)?;
407 let ptr = this.read_scalar(ptr)?.check_init()?;
408 let n = this.read_c_str(ptr)?.len();
409 this.write_scalar(Scalar::from_machine_usize(u64::try_from(n).unwrap(), this), dest)?;
// Unary f32 math functions (cbrtf/expf/...; arm list elided), dispatched on
// `link_name` and evaluated with host floats.
422 check_abi(this, abi, Abi::C { unwind: false })?;
423 let &[ref f] = check_arg_count(args)?;
424 // FIXME: Using host floats.
425 let f = f32::from_bits(this.read_scalar(f)?.to_u32()?);
426 let f = match link_name {
436 this.write_scalar(Scalar::from_u32(f.to_bits()), dest)?;
// Binary f32 math functions (hypotf/atan2f), host-float based.
443 check_abi(this, abi, Abi::C { unwind: false })?;
444 let &[ref f1, ref f2] = check_arg_count(args)?;
445 // underscore case for windows, here and below
446 // (see https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/floating-point-primitives?view=vs-2019)
447 // FIXME: Using host floats.
448 let f1 = f32::from_bits(this.read_scalar(f1)?.to_u32()?);
449 let f2 = f32::from_bits(this.read_scalar(f2)?.to_u32()?);
450 let n = match link_name {
451 "_hypotf" | "hypotf" => f1.hypot(f2),
452 "atan2f" => f1.atan2(f2),
455 this.write_scalar(Scalar::from_u32(n.to_bits()), dest)?;
// Unary f64 math functions, host-float based (same pattern as the f32 group).
466 check_abi(this, abi, Abi::C { unwind: false })?;
467 let &[ref f] = check_arg_count(args)?;
468 // FIXME: Using host floats.
469 let f = f64::from_bits(this.read_scalar(f)?.to_u64()?);
470 let f = match link_name {
480 this.write_scalar(Scalar::from_u64(f.to_bits()), dest)?;
// Binary f64 math functions (hypot/atan2), host-float based.
487 check_abi(this, abi, Abi::C { unwind: false })?;
488 let &[ref f1, ref f2] = check_arg_count(args)?;
489 // FIXME: Using host floats.
490 let f1 = f64::from_bits(this.read_scalar(f1)?.to_u64()?);
491 let f2 = f64::from_bits(this.read_scalar(f2)?.to_u64()?);
492 let n = match link_name {
493 "_hypot" | "hypot" => f1.hypot(f2),
494 "atan2" => f1.atan2(f2),
497 this.write_scalar(Scalar::from_u64(n.to_bits()), dest)?;
// `ldexp`/`scalbn` shim — implemented via the soft-float `Float::scalbn`,
// so no host floats are involved here.
504 check_abi(this, abi, Abi::C { unwind: false })?;
505 let &[ref x, ref exp] = check_arg_count(args)?;
506 // For radix-2 (binary) systems, `ldexp` and `scalbn` are the same.
507 let x = this.read_scalar(x)?.to_f64()?;
508 let exp = this.read_scalar(exp)?.to_i32()?;
510 // Saturating cast to i16. Even if those are outside the valid exponent range,
511 // `scalbn` below will do its over/underflow handling.
512 let exp = if exp > i32::from(i16::MAX) {
514 } else if exp < i32::from(i16::MIN) {
517 exp.try_into().unwrap()
520 let res = x.scalbn(exp);
521 this.write_scalar(Scalar::from_f64(res), dest)?;
524 // Architecture-specific shims
// x86 spin-loop hint: Miri models it as a scheduling point by yielding the
// active thread.
525 "llvm.x86.sse2.pause" if this.tcx.sess.target.arch == "x86" || this.tcx.sess.target.arch == "x86_64" => {
526 check_abi(this, abi, Abi::C { unwind: false })?;
527 let &[] = check_arg_count(args)?;
528 this.yield_active_thread();
530 "llvm.aarch64.isb" if this.tcx.sess.target.arch == "aarch64" => {
531 check_abi(this, abi, Abi::C { unwind: false })?;
532 let &[ref arg] = check_arg_count(args)?;
533 let arg = this.read_scalar(arg)?.to_i32()?;
535 15 => { // SY ("full system scope")
536 this.yield_active_thread();
539 throw_unsup_format!("unsupported llvm.aarch64.isb argument {}", arg);
544 // Platform-specific shims
// Fallback: dispatch anything not handled above to the OS-specific shim module.
545 _ => match this.tcx.sess.target.os.as_str() {
546 "linux" | "macos" => return shims::posix::foreign_items::EvalContextExt::emulate_foreign_item_by_name(this, link_name, abi, args, dest, ret),
547 "windows" => return shims::windows::foreign_items::EvalContextExt::emulate_foreign_item_by_name(this, link_name, abi, args, dest, ret),
548 target => throw_unsup_format!("the target `{}` is not supported", target),
555 /// Check some basic requirements for this allocation request:
556 /// non-zero size, power-of-two alignment.
557 fn check_alloc_request(size: u64, align: u64) -> InterpResult<'tcx> {
// Violations are reported as undefined behavior (matching the contract of
// the Rust allocator shims that call this).
559 throw_ub_format!("creating allocation with size 0");
561 if !align.is_power_of_two() {
562 throw_ub_format!("creating allocation with non-power-of-two alignment {}", align);