1 use std::{convert::{TryInto, TryFrom}, iter};
3 use rustc_hir::def_id::DefId;
4 use rustc_middle::{mir, ty};
5 use rustc_target::abi::{Align, Size};
6 use rustc_apfloat::Float;
7 use rustc_span::symbol::sym;
11 use helpers::check_arg_count;
13 impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
14 pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
// NOTE(review): this excerpt is elided — the original line numbers (left column) jump,
// so the match's closing brace and the branches of `prev_power_of_two` are not visible here.
15 /// Returns the minimum alignment for the target architecture for allocations of the given size.
16 fn min_align(&self, size: u64, kind: MiriMemoryKind) -> Align {
17 let this = self.eval_context_ref();
18 // List taken from `libstd/sys_common/alloc.rs`.
// Guaranteed minimum alignment of the platform allocator, keyed on target arch:
// 8 bytes for the listed 32-bit-ish targets, 16 for the listed 64-bit targets.
19 let min_align = match this.tcx.sess.target.target.arch.as_str() {
20 "x86" | "arm" | "mips" | "powerpc" | "powerpc64" | "asmjs" | "wasm32" => 8,
21 "x86_64" | "aarch64" | "mips64" | "s390x" | "sparc64" => 16,
// An arch missing from the table is a Miri bug (unsupported target), not user error.
22 arch => bug!("Unsupported target architecture: {}", arch),
24 // Windows always aligns, even small allocations.
25 // Source: <https://support.microsoft.com/en-us/help/286470/how-to-use-pageheap-exe-in-windows-xp-windows-2000-and-windows-server>
26 // But jemalloc does not, so for the C heap we only align if the allocation is sufficiently big.
27 if kind == MiriMemoryKind::WinHeap || size >= min_align {
28 return Align::from_bytes(min_align).unwrap();
30 // We have `size < min_align`. Round `size` *down* to the next power of two and use that.
31 fn prev_power_of_two(x: u64) -> u64 {
32 let next_pow2 = x.next_power_of_two();
34 // x *is* a power of two, just use that.
37 // x is between two powers, so next = 2*prev.
// NOTE(review): presumably returns `next_pow2` or `next_pow2 / 2` respectively — the
// branch bodies are elided in this excerpt; confirm against the full file.
41 Align::from_bytes(prev_power_of_two(size)).unwrap()
// NOTE(review): elided excerpt — the zero-size early return and the `zero_init` guard
// around the byte-write below are not visible (original lines 46-48, 51 missing).
// Emulates C `malloc`/`calloc`: returns a null scalar or a fresh allocation of `kind`.
44 fn malloc(&mut self, size: u64, zero_init: bool, kind: MiriMemoryKind) -> Scalar<Tag> {
45 let this = self.eval_context_mut();
// Presumably returned when `size == 0` (C allows returning NULL then) — TODO confirm,
// the surrounding condition is elided.
47 Scalar::null_ptr(this)
49 let align = this.min_align(size, kind);
50 let ptr = this.memory.allocate(Size::from_bytes(size), align, kind.into());
52 // We just allocated this, the access is definitely in-bounds.
// NOTE(review): `size as usize` is a truncating cast; `__rust_alloc_zeroed` below uses
// `usize::try_from(size).unwrap()` for the same thing — worth unifying on the checked form.
53 this.memory.write_bytes(ptr.into(), iter::repeat(0u8).take(size as usize)).unwrap();
// Emulates C `free`: a null pointer is a no-op; otherwise deallocate, passing `None`
// for size/align so only the allocation kind is checked. (Closing braces / `Ok(())`
// are elided in this excerpt.)
59 fn free(&mut self, ptr: Scalar<Tag>, kind: MiriMemoryKind) -> InterpResult<'tcx> {
60 let this = self.eval_context_mut();
61 if !this.is_null(ptr)? {
62 let ptr = this.force_ptr(ptr)?;
63 this.memory.deallocate(ptr, None, kind.into())?;
// NOTE(review): the signature of this function is elided — only the closing paren of its
// parameter list is visible. From the body it takes (at least) `old_ptr`, `new_size`, and
// `kind`, emulating C `realloc`: null old pointer acts like malloc; new size 0 acts like
// free and returns null; otherwise the allocation is resized in place via `reallocate`.
// TODO confirm signature against the full file.
73 ) -> InterpResult<'tcx, Scalar<Tag>> {
74 let this = self.eval_context_mut();
75 let new_align = this.min_align(new_size, kind);
76 if this.is_null(old_ptr)? {
// Presumably the `new_size == 0` sub-case of the null-old-pointer branch (condition elided).
78 Ok(Scalar::null_ptr(this))
81 this.memory.allocate(Size::from_bytes(new_size), new_align, kind.into());
82 Ok(Scalar::Ptr(new_ptr))
85 let old_ptr = this.force_ptr(old_ptr)?;
// Non-null old pointer, new size 0: free and return null (condition elided).
87 this.memory.deallocate(old_ptr, None, kind.into())?;
88 Ok(Scalar::null_ptr(this))
// General case: grow/shrink the existing allocation. The old-size/align argument and
// the new-align argument lines are elided here.
90 let new_ptr = this.memory.reallocate(
93 Size::from_bytes(new_size),
97 Ok(Scalar::Ptr(new_ptr))
102 /// Emulates calling a foreign item, failing if the item is not supported.
103 /// This function will handle `goto_block` if needed.
104 /// Returns Ok(None) if the foreign item was completely handled
105 /// by this function.
106 /// Returns Ok(Some(body)) if processing the foreign item
107 /// is delegated to another function.
// NOTE(review): elided excerpt — the `def_id` parameter line and several match-arm
// labels (e.g. the arms introducing the panic-impl and exit cases) are not visible.
109 fn emulate_foreign_item(
112 args: &[OpTy<'tcx, Tag>],
113 ret: Option<(PlaceTy<'tcx, Tag>, mir::BasicBlock)>,
114 unwind: Option<mir::BasicBlock>,
115 ) -> InterpResult<'tcx, Option<&'mir mir::Body<'tcx>>> {
116 let this = self.eval_context_mut();
// Resolve the symbol name: prefer an explicit `#[link_name = "..."]`, else the item name.
117 let attrs = this.tcx.get_attrs(def_id);
118 let link_name = match attr::first_attr_value_str_by_name(&attrs, sym::link_name) {
119 Some(name) => name.as_str(),
120 None => this.tcx.item_name(def_id).as_str(),
122 // Strip linker suffixes (seen on 32-bit macOS).
123 let link_name = link_name.trim_end_matches("$UNIX2003");
124 let tcx = this.tcx.tcx;
126 // First: functions that diverge.
// `ret == None` means the call has no return block, i.e. the callee diverges.
127 let (dest, ret) = match ret {
128 None => match link_name {
129 "miri_start_panic" => {
130 this.handle_miri_start_panic(args, unwind)?;
133 // This matches calls to the foreign item `panic_impl`.
134 // The implementation is provided by the function with the `#[panic_handler]` attribute.
136 let panic_impl_id = tcx.lang_items().panic_impl().unwrap();
137 let panic_impl_instance = ty::Instance::mono(tcx, panic_impl_id);
138 return Ok(Some(&*this.load_mir(panic_impl_instance.def, None)?));
143 let &[code] = check_arg_count(args)?;
144 // it's really u32 for ExitProcess, but we have to put it into the `Exit` variant anyway
145 let code = this.read_scalar(code)?.to_i32()?;
146 throw_machine_stop!(TerminationInfo::Exit(code.into()));
148 _ => throw_unsup_format!("can't call (diverging) foreign function: {}", link_name),
153 // Second: some functions that we forward to MIR implementations.
155 // This matches calls to the foreign item `__rust_start_panic`, that is,
156 // calls to `extern "Rust" { fn __rust_start_panic(...) }`
157 // (and `__rust_panic_cleanup`, respectively).
158 // We forward this to the underlying *implementation* in the panic runtime crate.
159 // Normally, this will be either `libpanic_unwind` or `libpanic_abort`, but it could
160 // also be a custom user-provided implementation via `#![feature(panic_runtime)]`
161 "__rust_start_panic" | "__rust_panic_cleanup"=> {
162 // FIXME we might want to cache this... but it's not really performance-critical.
// Scan loaded crates for the one marked as the panic runtime (iterator source elided).
163 let panic_runtime = tcx
166 .find(|cnum| tcx.is_panic_runtime(**cnum))
167 .expect("No panic runtime found!");
168 let panic_runtime = tcx.crate_name(*panic_runtime);
169 let start_panic_instance =
170 this.resolve_path(&[&*panic_runtime.as_str(), link_name]);
171 return Ok(Some(&*this.load_mir(start_panic_instance.def, None)?));
176 // Third: functions that return.
// Per the `_by_name` contract below: `true` means we still owe the jump to `ret`.
177 if this.emulate_foreign_item_by_name(link_name, args, dest, ret)? {
178 this.dump_place(*dest);
179 this.go_to_block(ret);
185 /// Emulates calling a foreign item using its name, failing if the item is not supported.
186 /// Returns `true` if the caller is expected to jump to the return block, and `false` if
187 /// jumping has already been taken care of.
// NOTE(review): elided excerpt — the `link_name` parameter line, the `match link_name`
// opener, and several arm labels (presumably "malloc", "calloc", "free", "realloc") are
// not visible; only the arm bodies remain.
188 fn emulate_foreign_item_by_name(
191 args: &[OpTy<'tcx, Tag>],
192 dest: PlaceTy<'tcx, Tag>,
193 ret: mir::BasicBlock,
194 ) -> InterpResult<'tcx, bool> {
195 let this = self.eval_context_mut();
197 // Here we dispatch all the shims for foreign functions. If you have a platform specific
198 // shim, add it to the corresponding submodule.
200 // Miri-specific extern functions
// Registers an allocation as a GC/static root so leak checking ignores it.
201 "miri_static_root" => {
202 let &[ptr] = check_arg_count(args)?;
// NOTE(review): `not_undef()` here vs `check_init()` everywhere below — looks like a
// mix of old and new names for the same check; worth unifying. TODO confirm.
203 let ptr = this.read_scalar(ptr)?.not_undef()?;
204 let ptr = this.force_ptr(ptr)?;
// Only whole allocations can be static roots, not interior pointers.
205 if ptr.offset != Size::ZERO {
206 throw_unsup_format!("pointer passed to miri_static_root must point to beginning of an allocated block");
208 this.machine.static_roots.push(ptr.alloc_id);
211 // Standard C allocation
// malloc(size): delegate to the shared `malloc` helper, no zero-init.
213 let &[size] = check_arg_count(args)?;
214 let size = this.read_scalar(size)?.to_machine_usize(this)?;
215 let res = this.malloc(size, /*zero_init:*/ false, MiriMemoryKind::C);
216 this.write_scalar(res, dest)?;
// calloc(items, len): checked multiply, then zero-initialized allocation.
219 let &[items, len] = check_arg_count(args)?;
220 let items = this.read_scalar(items)?.to_machine_usize(this)?;
221 let len = this.read_scalar(len)?.to_machine_usize(this)?;
// Overflow in `items * len` is UB to request, so report it rather than wrap.
223 items.checked_mul(len).ok_or_else(|| err_ub_format!("overflow during calloc size computation"))?;
224 let res = this.malloc(size, /*zero_init:*/ true, MiriMemoryKind::C);
225 this.write_scalar(res, dest)?;
// free(ptr)
228 let &[ptr] = check_arg_count(args)?;
229 let ptr = this.read_scalar(ptr)?.check_init()?;
230 this.free(ptr, MiriMemoryKind::C)?;
// realloc(old_ptr, new_size)
233 let &[old_ptr, new_size] = check_arg_count(args)?;
234 let old_ptr = this.read_scalar(old_ptr)?.check_init()?;
235 let new_size = this.read_scalar(new_size)?.to_machine_usize(this)?;
236 let res = this.realloc(old_ptr, new_size, MiriMemoryKind::C)?;
237 this.write_scalar(res, dest)?;
// Rust allocator shims (arm label for `__rust_alloc` elided in this excerpt).
241 // (Usually these would be forwarded to `#[global_allocator]`; we instead implement a generic
242 // allocation that also checks that all conditions are met, such as not permitting zero-sized allocations.)
244 let &[size, align] = check_arg_count(args)?;
245 let size = this.read_scalar(size)?.to_machine_usize(this)?;
246 let align = this.read_scalar(align)?.to_machine_usize(this)?;
// Reject size 0 / non-power-of-two align up front (see `check_alloc_request` below).
247 Self::check_alloc_request(size, align)?;
248 let ptr = this.memory.allocate(
249 Size::from_bytes(size),
250 Align::from_bytes(align).unwrap(),
251 MiriMemoryKind::Rust.into(),
253 this.write_scalar(ptr, dest)?;
// Same as `__rust_alloc`, plus zero-filling the fresh allocation.
255 "__rust_alloc_zeroed" => {
256 let &[size, align] = check_arg_count(args)?;
257 let size = this.read_scalar(size)?.to_machine_usize(this)?;
258 let align = this.read_scalar(align)?.to_machine_usize(this)?;
259 Self::check_alloc_request(size, align)?;
260 let ptr = this.memory.allocate(
261 Size::from_bytes(size),
262 Align::from_bytes(align).unwrap(),
263 MiriMemoryKind::Rust.into(),
265 // We just allocated this, the access is definitely in-bounds.
266 this.memory.write_bytes(ptr.into(), iter::repeat(0u8).take(usize::try_from(size).unwrap())).unwrap();
267 this.write_scalar(ptr, dest)?;
269 "__rust_dealloc" => {
270 let &[ptr, old_size, align] = check_arg_count(args)?;
271 let ptr = this.read_scalar(ptr)?.check_init()?;
272 let old_size = this.read_scalar(old_size)?.to_machine_usize(this)?;
273 let align = this.read_scalar(align)?.to_machine_usize(this)?;
274 // No need to check old_size/align; we anyway check that they match the allocation.
275 let ptr = this.force_ptr(ptr)?;
// Passing Some((size, align)) makes `deallocate` verify the caller's layout claim.
276 this.memory.deallocate(
278 Some((Size::from_bytes(old_size), Align::from_bytes(align).unwrap())),
279 MiriMemoryKind::Rust.into(),
282 "__rust_realloc" => {
283 let &[ptr, old_size, align, new_size] = check_arg_count(args)?;
284 let ptr = this.force_ptr(this.read_scalar(ptr)?.check_init()?)?;
285 let old_size = this.read_scalar(old_size)?.to_machine_usize(this)?;
286 let align = this.read_scalar(align)?.to_machine_usize(this)?;
287 let new_size = this.read_scalar(new_size)?.to_machine_usize(this)?;
// Note: the *new* size/align are validated; the old ones are checked by `reallocate`.
288 Self::check_alloc_request(new_size, align)?;
289 // No need to check old_size; we anyway check that they match the allocation.
290 let align = Align::from_bytes(align).unwrap();
291 let new_ptr = this.memory.reallocate(
293 Some((Size::from_bytes(old_size), align)),
294 Size::from_bytes(new_size),
296 MiriMemoryKind::Rust.into(),
298 this.write_scalar(new_ptr, dest)?;
301 // C memory handling functions
// memcmp(left, right, n): lexicographic byte comparison; result is written as i32.
// (Arm labels and the Ordering-to-int mapping are elided in this excerpt.)
303 let &[left, right, n] = check_arg_count(args)?;
304 let left = this.read_scalar(left)?.check_init()?;
305 let right = this.read_scalar(right)?.check_init()?;
306 let n = Size::from_bytes(this.read_scalar(n)?.to_machine_usize(this)?);
309 let left_bytes = this.memory.read_bytes(left, n)?;
310 let right_bytes = this.memory.read_bytes(right, n)?;
312 use std::cmp::Ordering::*;
313 match left_bytes.cmp(right_bytes) {
320 this.write_scalar(Scalar::from_i32(result), dest)?;
// memrchr-style search (last occurrence of `val` in the first `num` bytes).
323 let &[ptr, val, num] = check_arg_count(args)?;
324 let ptr = this.read_scalar(ptr)?.check_init()?;
// Only the low byte of the int argument is compared, matching C semantics.
325 let val = this.read_scalar(val)?.to_i32()? as u8;
326 let num = this.read_scalar(num)?.to_machine_usize(this)?;
327 if let Some(idx) = this
329 .read_bytes(ptr, Size::from_bytes(num))?
332 .position(|&c| c == val)
// `num - idx - 1` converts a position back to a forward offset — presumably the elided
// adaptor between read_bytes and position reverses the iterator. TODO confirm.
334 let new_ptr = ptr.ptr_offset(Size::from_bytes(num - idx as u64 - 1), this)?;
335 this.write_scalar(new_ptr, dest)?;
// Not found: return NULL.
337 this.write_null(dest)?;
// memchr(ptr, val, num): first occurrence, forward scan.
341 let &[ptr, val, num] = check_arg_count(args)?;
342 let ptr = this.read_scalar(ptr)?.check_init()?;
343 let val = this.read_scalar(val)?.to_i32()? as u8;
344 let num = this.read_scalar(num)?.to_machine_usize(this)?;
347 .read_bytes(ptr, Size::from_bytes(num))?
349 .position(|&c| c == val);
350 if let Some(idx) = idx {
351 let new_ptr = ptr.ptr_offset(Size::from_bytes(idx as u64), this)?;
352 this.write_scalar(new_ptr, dest)?;
354 this.write_null(dest)?;
// strlen(ptr): length of the NUL-terminated string (read_c_str validates the memory).
358 let &[ptr] = check_arg_count(args)?;
359 let ptr = this.read_scalar(ptr)?.check_init()?;
360 let n = this.memory.read_c_str(ptr)?.len();
361 this.write_scalar(Scalar::from_machine_usize(u64::try_from(n).unwrap(), this), dest)?;
// Unary f32 math shims; the arm label and the per-function match arms (original lines
// 377-385) are elided in this excerpt. Bit-pattern round-trip keeps NaN payloads intact.
373 let &[f] = check_arg_count(args)?;
374 // FIXME: Using host floats.
375 let f = f32::from_bits(this.read_scalar(f)?.to_u32()?);
376 let f = match link_name {
386 this.write_scalar(Scalar::from_u32(f.to_bits()), dest)?;
// Binary f32 math shims (hypotf / atan2f, plus Windows-underscore aliases).
392 let &[f1, f2] = check_arg_count(args)?;
393 // underscore case for windows, here and below
394 // (see https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/floating-point-primitives?view=vs-2019)
395 // FIXME: Using host floats.
396 let f1 = f32::from_bits(this.read_scalar(f1)?.to_u32()?);
397 let f2 = f32::from_bits(this.read_scalar(f2)?.to_u32()?);
398 let n = match link_name {
399 "_hypotf" | "hypotf" => f1.hypot(f2),
400 "atan2f" => f1.atan2(f2),
403 this.write_scalar(Scalar::from_u32(n.to_bits()), dest)?;
// Unary f64 math shims (per-function arms elided, original lines 417-425).
413 let &[f] = check_arg_count(args)?;
414 // FIXME: Using host floats.
415 let f = f64::from_bits(this.read_scalar(f)?.to_u64()?);
416 let f = match link_name {
426 this.write_scalar(Scalar::from_u64(f.to_bits()), dest)?;
// Binary f64 math shims (hypot / atan2).
432 let &[f1, f2] = check_arg_count(args)?;
433 // FIXME: Using host floats.
434 let f1 = f64::from_bits(this.read_scalar(f1)?.to_u64()?);
435 let f2 = f64::from_bits(this.read_scalar(f2)?.to_u64()?);
436 let n = match link_name {
437 "_hypot" | "hypot" => f1.hypot(f2),
438 "atan2" => f1.atan2(f2),
441 this.write_scalar(Scalar::from_u64(n.to_bits()), dest)?;
// ldexp/scalbn shim (arm label elided). Uses soft-float `rustc_apfloat` via `to_f64`,
// so this one does NOT depend on host float behavior.
447 let &[x, exp] = check_arg_count(args)?;
448 // For radix-2 (binary) systems, `ldexp` and `scalbn` are the same.
449 let x = this.read_scalar(x)?.to_f64()?;
450 let exp = this.read_scalar(exp)?.to_i32()?;
452 // Saturating cast to i16. Even those are outside the valid exponent range, so
453 // `scalbn` below will do its over/underflow handling.
454 let exp = if exp > i32::from(i16::MAX) {
// The saturated i16::MAX / i16::MIN branch bodies are elided in this excerpt.
456 } else if exp < i32::from(i16::MIN) {
// In-range: the conversion cannot fail after the two guards above.
459 exp.try_into().unwrap()
462 let res = x.scalbn(exp);
463 this.write_scalar(Scalar::from_f64(res), dest)?;
466 // Architecture-specific shims
// The `pause` intrinsic is a spin-loop hint; under Miri we model it as a thread yield.
467 "llvm.x86.sse2.pause" if this.tcx.sess.target.target.arch == "x86" || this.tcx.sess.target.target.arch == "x86_64" => {
468 let &[] = check_arg_count(args)?;
469 this.yield_active_thread();
472 // Platform-specific shims
// Anything not handled above is delegated to the per-OS submodule (same signature),
// so the `return` forwards its true/false "caller must jump" result unchanged.
473 _ => match this.tcx.sess.target.target.target_os.as_str() {
474 "linux" | "macos" => return shims::posix::foreign_items::EvalContextExt::emulate_foreign_item_by_name(this, link_name, args, dest, ret),
475 "windows" => return shims::windows::foreign_items::EvalContextExt::emulate_foreign_item_by_name(this, link_name, args, dest, ret),
476 target => throw_unsup_format!("the target `{}` is not supported", target),
483 /// Check some basic requirements for this allocation request:
484 /// non-zero size, power-of-two alignment.
// NOTE(review): the `if size == 0 {` guard line (original line 486) and the closing
// braces / `Ok(())` are elided in this excerpt.
485 fn check_alloc_request(size: u64, align: u64) -> InterpResult<'tcx> {
// Zero-sized allocations are UB to request through the Rust allocator shims.
487 throw_ub_format!("creating allocation with size 0");
489 if !align.is_power_of_two() {
490 throw_ub_format!("creating allocation with non-power-of-two alignment {}", align);