4 use std::{convert::TryInto, iter};
6 use rustc_hir::def_id::DefId;
9 use rustc::ty::layout::{Align, LayoutOf, Size};
10 use rustc_apfloat::Float;
11 use rustc_span::symbol::sym;
// Blanket impl: wires the shim methods of `EvalContextExt` (below) onto
// Miri's concrete evaluation context so they are callable as inherent-style
// methods everywhere a `MiriEvalContext` is in scope.
16 impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
// Extension trait carrying the foreign-item ("shim") emulation logic.
// NOTE(review): this extract elides lines of the original file (the embedded
// numbers at the start of each line are original line numbers and show gaps),
// so several bodies below are incomplete as shown.
17 pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
18 /// Returns the minimum alignment for the target architecture for allocations of the given size.
19 fn min_align(&self, size: u64, kind: MiriMemoryKind) -> Align {
20 let this = self.eval_context_ref();
21 // List taken from `libstd/sys_common/alloc.rs`.
// Guaranteed minimum alignment (in bytes) of the platform allocator,
// keyed on the target architecture string.
22 let min_align = match this.tcx.tcx.sess.target.target.arch.as_str() {
23 "x86" | "arm" | "mips" | "powerpc" | "powerpc64" | "asmjs" | "wasm32" => 8,
24 "x86_64" | "aarch64" | "mips64" | "s390x" | "sparc64" => 16,
// Any architecture not in the two lists above is a Miri implementation gap.
25 arch => bug!("Unsupported target architecture: {}", arch),
27 // Windows always aligns, even small allocations.
28 // Source: <https://support.microsoft.com/en-us/help/286470/how-to-use-pageheap-exe-in-windows-xp-windows-2000-and-windows-server>
29 // But jemalloc does not, so for the C heap we only align if the allocation is sufficiently big.
30 if kind == MiriMemoryKind::WinHeap || size >= min_align {
// `min_align` is 8 or 16 here — both powers of two — so `from_bytes` cannot fail.
31 return Align::from_bytes(min_align).unwrap();
33 // We have `size < min_align`. Round `size` *down* to the next power of two and use that.
34 fn prev_power_of_two(x: u64) -> u64 {
35 let next_pow2 = x.next_power_of_two();
// NOTE(review): the lines that choose between `next_pow2` (when `x` is already
// a power of two) and `next_pow2 / 2` are elided in this extract — confirm the
// helper's exact body against the full file.
37 // x *is* a power of two, just use that.
40 // x is between two powers, so next = 2*prev.
// `prev_power_of_two(size)` is a power of two by construction, so this cannot fail.
44 Align::from_bytes(prev_power_of_two(size)).unwrap()
/// Allocates `size` bytes of `kind` memory, optionally zero-initialized,
/// and returns the resulting pointer as a `Scalar`.
47 fn malloc(&mut self, size: u64, zero_init: bool, kind: MiriMemoryKind) -> Scalar<Tag> {
48 let this = self.eval_context_mut();
// Zero-sized request: produce a null pointer scalar.
// NOTE(review): the guarding `if` (presumably `size == 0`) is elided in this
// extract — confirm against the full file.
50 Scalar::from_int(0, this.pointer_size())
// Non-zero size: allocate with the platform's minimum alignment for this size/kind.
52 let align = this.min_align(size, kind);
53 let ptr = this.memory.allocate(Size::from_bytes(size), align, kind.into());
// NOTE(review): the `if zero_init` guard around the zero-fill below is elided
// in this extract.
55 // We just allocated this, the access is definitely in-bounds.
56 this.memory.write_bytes(ptr.into(), iter::repeat(0u8).take(size as usize)).unwrap();
/// Frees `ptr` in `kind` memory. A null pointer is silently ignored
/// (the `is_null` guard makes freeing null a no-op).
62 fn free(&mut self, ptr: Scalar<Tag>, kind: MiriMemoryKind) -> InterpResult<'tcx> {
63 let this = self.eval_context_mut();
64 if !this.is_null(ptr)? {
65 let ptr = this.force_ptr(ptr)?;
// `None` size/alignment: C-style free carries no layout information,
// so deallocation is unchecked against the original layout.
66 this.memory.deallocate(ptr, None, kind.into())?;
// NOTE(review): the `fn realloc(old_ptr, new_size, kind)` signature lines that
// precede this return type are elided in this extract.
76 ) -> InterpResult<'tcx, Scalar<Tag>> {
77 let this = self.eval_context_mut();
78 let new_align = this.min_align(new_size, kind);
// Case 1: old pointer is null.
79 if this.is_null(old_ptr)? {
// NOTE(review): a `new_size == 0` guard appears to be elided here; this arm
// returns a null pointer scalar.
81 Ok(Scalar::from_int(0, this.pointer_size()))
// Null old pointer with nonzero size behaves like an allocation.
// NOTE(review): the `let new_ptr =` binding line is elided in this extract.
84 this.memory.allocate(Size::from_bytes(new_size), new_align, kind.into());
85 Ok(Scalar::Ptr(new_ptr))
// Case 2: old pointer is non-null.
88 let old_ptr = this.force_ptr(old_ptr)?;
// Zero new size: deallocate and return null (the guarding condition is
// elided in this extract).
90 this.memory.deallocate(old_ptr, None, kind.into())?;
91 Ok(Scalar::from_int(0, this.pointer_size()))
// Nonzero new size: delegate to the memory subsystem's reallocate,
// which may move the allocation.
93 let new_ptr = this.memory.reallocate(
96 Size::from_bytes(new_size),
100 Ok(Scalar::Ptr(new_ptr))
105 /// Emulates calling a foreign item, failing if the item is not supported.
106 /// This function will handle `goto_block` if needed.
107 /// Returns Ok(None) if the foreign item was completely handled
108 /// by this function.
109 /// Returns Ok(Some(body)) if processing the foreign item
110 /// is delegated to another function.
112 fn emulate_foreign_item(
115 args: &[OpTy<'tcx, Tag>],
116 ret: Option<(PlaceTy<'tcx, Tag>, mir::BasicBlock)>,
117 _unwind: Option<mir::BasicBlock>,
118 ) -> InterpResult<'tcx, Option<&'mir mir::Body<'tcx>>> {
119 let this = self.eval_context_mut();
// Determine the symbol to dispatch on: an explicit `#[link_name]` attribute
// wins; otherwise fall back to the item's own name.
120 let attrs = this.tcx.get_attrs(def_id);
121 let link_name = match attr::first_attr_value_str_by_name(&attrs, sym::link_name) {
122 Some(name) => name.as_str(),
123 None => this.tcx.item_name(def_id).as_str(),
125 // Strip linker suffixes (seen on 32-bit macOS).
126 let link_name = link_name.trim_end_matches("$UNIX2003");
127 let tcx = &{ this.tcx.tcx };
129 // First: functions that diverge.
130 let (dest, ret) = match link_name {
131 // Note that this matches calls to the *foreign* item `__rust_start_panic* -
132 // that is, calls to `extern "Rust" { fn __rust_start_panic(...) }`.
133 // We forward this to the underlying *implementation* in the panic runtime crate.
134 // Normally, this will be either `libpanic_unwind` or `libpanic_abort`, but it could
135 // also be a custom user-provided implementation via `#![feature(panic_runtime)]`
136 "__rust_start_panic" => {
137 // FIXME we might want to cache this... but it's not really performance-critical.
// Scan loaded crates for the one registered as the panic runtime.
138 let panic_runtime = tcx
141 .find(|cnum| tcx.is_panic_runtime(**cnum))
142 .expect("No panic runtime found!");
143 let panic_runtime = tcx.crate_name(*panic_runtime);
144 let start_panic_instance =
145 this.resolve_path(&[&*panic_runtime.as_str(), "__rust_start_panic"])?;
// Delegate: hand the panic runtime's MIR body back to the interpreter.
146 return Ok(Some(&*this.load_mir(start_panic_instance.def, None)?));
148 // Similarly, we forward calls to the `panic_impl` foreign item to its implementation.
149 // The implementation is provided by the function with the `#[panic_handler]` attribute.
151 let panic_impl_id = this.tcx.lang_items().panic_impl().unwrap();
152 let panic_impl_instance = ty::Instance::mono(*this.tcx, panic_impl_id);
153 return Ok(Some(&*this.load_mir(panic_impl_instance.def, None)?));
// Process-exit shims (match-arm labels elided in this extract).
159 // it's really u32 for ExitProcess, but we have to put it into the `Exit` variant anyway
160 let code = this.read_scalar(args[0])?.to_i32()?;
161 throw_machine_stop!(TerminationInfo::Exit(code.into()));
// Fall-through for everything else: a return place must exist, since the
// remaining shims all return normally.
164 if let Some(p) = ret {
167 throw_unsup_format!("can't call (diverging) foreign function: {}", link_name);
172 // Next: functions that return.
174 "__rust_maybe_catch_panic" => {
175 this.handle_catch_panic(args, dest, ret)?;
// Everything else is dispatched by name to the big shim table below.
179 _ => this.emulate_foreign_item_by_name(link_name, args, dest)?,
// Shim handled the call: log the written destination and continue at the
// caller's return block.
182 this.dump_place(*dest);
183 this.go_to_block(ret);
/// Emulates a non-diverging foreign item identified by `link_name`, writing
/// its result (if any) to `dest`. Unrecognized names fall through to the
/// OS-specific shim tables at the bottom.
/// NOTE(review): most match-arm label lines (the `"name" => {` lines) are
/// elided in this extract; the groupings noted below are inferred from the
/// visible bodies and should be confirmed against the full file.
188 fn emulate_foreign_item_by_name(
191 args: &[OpTy<'tcx, Tag>],
192 dest: PlaceTy<'tcx, Tag>,
193 ) -> InterpResult<'tcx> {
194 let this = self.eval_context_mut();
195 let tcx = &{ this.tcx.tcx };
// --- C heap: malloc ---
199 let size = this.read_scalar(args[0])?.to_machine_usize(this)?;
200 let res = this.malloc(size, /*zero_init:*/ false, MiriMemoryKind::C);
201 this.write_scalar(res, dest)?;
// --- C heap: calloc (items * len, with overflow detection) ---
204 let items = this.read_scalar(args[0])?.to_machine_usize(this)?;
205 let len = this.read_scalar(args[1])?.to_machine_usize(this)?;
207 items.checked_mul(len).ok_or_else(|| err_ub_format!("overflow during calloc size computation"))?;
208 let res = this.malloc(size, /*zero_init:*/ true, MiriMemoryKind::C);
209 this.write_scalar(res, dest)?;
// --- C heap: free ---
212 let ptr = this.read_scalar(args[0])?.not_undef()?;
213 this.free(ptr, MiriMemoryKind::C)?;
// --- C heap: realloc ---
216 let old_ptr = this.read_scalar(args[0])?.not_undef()?;
217 let new_size = this.read_scalar(args[1])?.to_machine_usize(this)?;
218 let res = this.realloc(old_ptr, new_size, MiriMemoryKind::C)?;
219 this.write_scalar(res, dest)?;
// --- Rust allocator API: __rust_alloc ---
// Zero-sized allocations and non-power-of-two alignments are rejected,
// mirroring the `GlobalAlloc` contract.
223 let size = this.read_scalar(args[0])?.to_machine_usize(this)?;
224 let align = this.read_scalar(args[1])?.to_machine_usize(this)?;
226 throw_unsup!(HeapAllocZeroBytes);
228 if !align.is_power_of_two() {
229 throw_unsup!(HeapAllocNonPowerOfTwoAlignment(align));
231 let ptr = this.memory.allocate(
232 Size::from_bytes(size),
233 Align::from_bytes(align).unwrap(),
234 MiriMemoryKind::Rust.into(),
236 this.write_scalar(ptr, dest)?;
238 "__rust_alloc_zeroed" => {
// Same checks as __rust_alloc, then zero-fill the fresh allocation.
239 let size = this.read_scalar(args[0])?.to_machine_usize(this)?;
240 let align = this.read_scalar(args[1])?.to_machine_usize(this)?;
242 throw_unsup!(HeapAllocZeroBytes);
244 if !align.is_power_of_two() {
245 throw_unsup!(HeapAllocNonPowerOfTwoAlignment(align));
247 let ptr = this.memory.allocate(
248 Size::from_bytes(size),
249 Align::from_bytes(align).unwrap(),
250 MiriMemoryKind::Rust.into(),
252 // We just allocated this, the access is definitely in-bounds.
253 this.memory.write_bytes(ptr.into(), iter::repeat(0u8).take(size as usize)).unwrap();
254 this.write_scalar(ptr, dest)?;
256 "__rust_dealloc" => {
// Unlike C free, the Rust API supplies size and alignment, so the
// deallocation is checked against them (the `Some((size, align))` below).
257 let ptr = this.read_scalar(args[0])?.not_undef()?;
258 let old_size = this.read_scalar(args[1])?.to_machine_usize(this)?;
259 let align = this.read_scalar(args[2])?.to_machine_usize(this)?;
261 throw_unsup!(HeapAllocZeroBytes);
263 if !align.is_power_of_two() {
264 throw_unsup!(HeapAllocNonPowerOfTwoAlignment(align));
266 let ptr = this.force_ptr(ptr)?;
267 this.memory.deallocate(
269 Some((Size::from_bytes(old_size), Align::from_bytes(align).unwrap())),
270 MiriMemoryKind::Rust.into(),
273 "__rust_realloc" => {
274 let old_size = this.read_scalar(args[1])?.to_machine_usize(this)?;
275 let align = this.read_scalar(args[2])?.to_machine_usize(this)?;
276 let new_size = this.read_scalar(args[3])?.to_machine_usize(this)?;
277 if old_size == 0 || new_size == 0 {
278 throw_unsup!(HeapAllocZeroBytes);
280 if !align.is_power_of_two() {
281 throw_unsup!(HeapAllocNonPowerOfTwoAlignment(align));
283 let ptr = this.force_ptr(this.read_scalar(args[0])?.not_undef()?)?;
284 let align = Align::from_bytes(align).unwrap();
// Checked reallocation: old layout is validated, new block gets `align`.
285 let new_ptr = this.memory.reallocate(
287 Some((Size::from_bytes(old_size), align)),
288 Size::from_bytes(new_size),
290 MiriMemoryKind::Rust.into(),
292 this.write_scalar(new_ptr, dest)?;
// --- memcmp: lexicographic comparison of `n` bytes, result in {-1, 0, 1}
// (the mapping from Ordering to the integer result is elided here) ---
296 let left = this.read_scalar(args[0])?.not_undef()?;
297 let right = this.read_scalar(args[1])?.not_undef()?;
298 let n = Size::from_bytes(this.read_scalar(args[2])?.to_machine_usize(this)?);
301 let left_bytes = this.memory.read_bytes(left, n)?;
302 let right_bytes = this.memory.read_bytes(right, n)?;
304 use std::cmp::Ordering::*;
305 match left_bytes.cmp(right_bytes) {
// memcmp returns a C `int`, hence the explicit 32-bit scalar.
312 this.write_scalar(Scalar::from_int(result, Size::from_bits(32)), dest)?;
// --- byte search (memchr-family; exact arm label elided): scan `num`
// bytes for `val`, returning a pointer to the match or null ---
316 let ptr = this.read_scalar(args[0])?.not_undef()?;
317 let val = this.read_scalar(args[1])?.to_i32()? as u8;
318 let num = this.read_scalar(args[2])?.to_machine_usize(this)?;
321 .read_bytes(ptr, Size::from_bytes(num))?
323 .position(|&c| c == val);
324 if let Some(idx) = idx {
325 let new_ptr = ptr.ptr_offset(Size::from_bytes(idx as u64), this)?;
326 this.write_scalar(new_ptr, dest)?;
328 this.write_null(dest)?;
// --- rename(2): delegated to the file-system shim layer ---
334 let result = this.rename(args[0], args[1])?;
335 this.write_scalar(Scalar::from_int(result, dest.layout.size), dest)?;
// --- strlen: length of the NUL-terminated string at `ptr` ---
339 let ptr = this.read_scalar(args[0])?.not_undef()?;
340 let n = this.memory.read_c_str(ptr)?.len();
341 this.write_scalar(Scalar::from_uint(n as u64, dest.layout.size), dest)?;
// --- unary f32 math shims ---
353 // FIXME: Using host floats.
354 let f = f32::from_bits(this.read_scalar(args[0])?.to_u32()?);
355 let f = match link_name {
365 this.write_scalar(Scalar::from_u32(f.to_bits()), dest)?;
367 // underscore case for windows
// --- binary f32 math shims ---
372 // FIXME: Using host floats.
373 let f1 = f32::from_bits(this.read_scalar(args[0])?.to_u32()?);
374 let f2 = f32::from_bits(this.read_scalar(args[1])?.to_u32()?);
375 let n = match link_name {
376 "_hypotf" | "hypotf" => f1.hypot(f2),
377 "atan2f" => f1.atan2(f2),
380 this.write_scalar(Scalar::from_u32(n.to_bits()), dest)?;
// --- unary f64 math shims ---
391 // FIXME: Using host floats.
392 let f = f64::from_bits(this.read_scalar(args[0])?.to_u64()?);
393 let f = match link_name {
403 this.write_scalar(Scalar::from_u64(f.to_bits()), dest)?;
405 // underscore case for windows, here and below
406 // (see https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/floating-point-primitives?view=vs-2019)
// --- binary f64 math shims ---
411 // FIXME: Using host floats.
412 let f1 = f64::from_bits(this.read_scalar(args[0])?.to_u64()?);
413 let f2 = f64::from_bits(this.read_scalar(args[1])?.to_u64()?);
414 let n = match link_name {
415 "_hypot" | "hypot" => f1.hypot(f2),
416 "atan2" => f1.atan2(f2),
419 this.write_scalar(Scalar::from_u64(n.to_bits()), dest)?;
421 // For radix-2 (binary) systems, `ldexp` and `scalbn` are the same.
// Uses soft-float `scalbn` (rustc_apfloat), not host floats, so the
// exponent must be clamped into i16 first.
426 let x = this.read_scalar(args[0])?.to_f64()?;
427 let exp = this.read_scalar(args[1])?.to_i32()?;
429 // Saturating cast to i16. Even those are outside the valid exponent range to
430 // `scalbn` below will do its over/underflow handling.
431 let exp = if exp > i16::max_value() as i32 {
433 } else if exp < i16::min_value() as i32 {
436 exp.try_into().unwrap()
439 let res = x.scalbn(exp);
440 this.write_scalar(Scalar::from_f64(res), dest)?;
443 // Some things needed for `sys::thread` initialization to go through.
448 this.write_scalar(Scalar::from_int(0, dest.layout.size), dest)?;
// --- sysconf: resolve the requested name against a small table of
// supported libc constants ---
452 let name = this.read_scalar(args[0])?.to_i32()?;
454 trace!("sysconf() called with name {}", name);
455 // TODO: Cache the sysconf integers via Miri's global cache.
457 (&["libc", "_SC_PAGESIZE"], Scalar::from_int(PAGE_SIZE, dest.layout.size)),
458 (&["libc", "_SC_GETPW_R_SIZE_MAX"], Scalar::from_int(-1, dest.layout.size)),
460 &["libc", "_SC_NPROCESSORS_ONLN"],
461 Scalar::from_int(NUM_CPUS, dest.layout.size),
// Compare the runtime `name` against each known constant's evaluated value.
464 let mut result = None;
465 for &(path, path_value) in paths {
466 if let Some(val) = this.eval_path_scalar(path)? {
467 let val = val.to_i32()?;
469 result = Some(path_value);
474 if let Some(result) = result {
475 this.write_scalar(result, dest)?;
477 throw_unsup_format!("Unimplemented sysconf name: {}", name)
481 "sched_getaffinity" => {
482 // Return an error; `num_cpus` then falls back to `sysconf`.
483 this.write_scalar(Scalar::from_int(-1, dest.layout.size), dest)?;
487 this.write_null(dest)?;
490 // Hook pthread calls that go to the thread-local storage memory subsystem.
491 "pthread_key_create" => {
492 let key_place = this.deref_operand(args[0])?;
494 // Extract the function type out of the signature (that seems easier than constructing it ourselves).
// A null destructor pointer means "no destructor" (test_null -> None).
495 let dtor = match this.test_null(this.read_scalar(args[1])?.not_undef()?)? {
496 Some(dtor_ptr) => Some(this.memory.get_fn(dtor_ptr)?.as_instance()?),
500 // Figure out how large a pthread TLS key actually is.
501 // This is `libc::pthread_key_t`.
502 let key_type = args[0].layout.ty
504 .ok_or_else(|| err_ub_format!(
505 "wrong signature used for `pthread_key_create`: first argument must be a raw pointer."
508 let key_layout = this.layout_of(key_type)?;
510 // Create key and write it into the memory where `key_ptr` wants it.
511 let key = this.machine.tls.create_tls_key(dtor) as u128;
// Reject keys that would not fit in the target's `pthread_key_t`.
512 if key_layout.size.bits() < 128 && key >= (1u128 << key_layout.size.bits() as u128)
514 throw_unsup!(OutOfTls);
517 this.write_scalar(Scalar::from_uint(key, key_layout.size), key_place.into())?;
519 // Return success (`0`).
520 this.write_null(dest)?;
522 "pthread_key_delete" => {
523 let key = this.read_scalar(args[0])?.to_bits(args[0].layout.size)?;
524 this.machine.tls.delete_tls_key(key)?;
525 // Return success (0)
526 this.write_null(dest)?;
528 "pthread_getspecific" => {
529 let key = this.read_scalar(args[0])?.to_bits(args[0].layout.size)?;
530 let ptr = this.machine.tls.load_tls(key, tcx)?;
531 this.write_scalar(ptr, dest)?;
533 "pthread_setspecific" => {
534 let key = this.read_scalar(args[0])?.to_bits(args[0].layout.size)?;
535 let new_ptr = this.read_scalar(args[1])?.not_undef()?;
536 this.machine.tls.store_tls(key, this.test_null(new_ptr)?)?;
538 // Return success (`0`).
539 this.write_null(dest)?;
542 // Stack size/address stuff.
543 | "pthread_attr_init"
544 | "pthread_attr_destroy"
546 | "pthread_attr_setstacksize" => {
// These attr calls are no-ops that just report success.
547 this.write_null(dest)?;
549 "pthread_attr_getstack" => {
// Report Miri's fixed emulated stack address and size.
550 let addr_place = this.deref_operand(args[1])?;
551 let size_place = this.deref_operand(args[2])?;
554 Scalar::from_uint(STACK_ADDR, addr_place.layout.size),
558 Scalar::from_uint(STACK_SIZE, size_place.layout.size),
562 // Return success (`0`).
563 this.write_null(dest)?;
566 // We don't support threading. (Also for Windows.)
570 throw_unsup_format!("Miri does not support threading");
573 // Stub out calls for condvar, mutex and rwlock, to just return `0`.
574 | "pthread_mutexattr_init"
575 | "pthread_mutexattr_settype"
576 | "pthread_mutex_init"
577 | "pthread_mutexattr_destroy"
578 | "pthread_mutex_lock"
579 | "pthread_mutex_unlock"
580 | "pthread_mutex_destroy"
581 | "pthread_rwlock_rdlock"
582 | "pthread_rwlock_unlock"
583 | "pthread_rwlock_wrlock"
584 | "pthread_rwlock_destroy"
585 | "pthread_condattr_init"
586 | "pthread_condattr_setclock"
587 | "pthread_cond_init"
588 | "pthread_condattr_destroy"
589 | "pthread_cond_destroy"
591 this.write_null(dest)?;
594 // We don't support fork so we don't have to do anything for atfork.
595 "pthread_atfork" => {
596 this.write_null(dest)?;
600 // fadvise is only informational, we can ignore it.
601 this.write_null(dest)?;
605 // This is a horrible hack, but since the guard page mechanism calls mmap and expects a particular return value, we just give it that value.
606 let addr = this.read_scalar(args[0])?.not_undef()?;
607 this.write_scalar(addr, dest)?;
610 this.write_null(dest)?;
// Anything not handled above is dispatched to the OS-specific shim tables.
613 _ => match this.tcx.sess.target.target.target_os.to_lowercase().as_str() {
614 "linux" | "macos" => posix::EvalContextExt::emulate_foreign_item_by_name(this, link_name, args, dest)?,
615 "windows" => windows::EvalContextExt::emulate_foreign_item_by_name(this, link_name, args, dest)?,
616 target => throw_unsup_format!("The {} target platform is not supported", target),
623 /// Evaluates the scalar at the specified path. Returns Some(val)
624 /// if the path could be resolved, and None otherwise
// NOTE(review): the `fn eval_path_scalar(...)` signature lines are elided in
// this extract, and the function body continues past the end of the visible
// region (the `Ok(None)` fallback is presumably below).
628 ) -> InterpResult<'tcx, Option<ScalarMaybeUndef<Tag>>> {
629 let this = self.eval_context_mut();
// Only paths that resolve to a constant are evaluated; resolution failure
// falls through (no error).
630 if let Ok(instance) = this.resolve_path(path) {
631 let cid = GlobalId { instance, promoted: None };
// Evaluate the constant, then read its single scalar value.
632 let const_val = this.const_eval_raw(cid)?;
633 let const_val = this.read_scalar(const_val.into())?;
634 return Ok(Some(const_val));