2 convert::{TryFrom, TryInto},
8 use rustc_apfloat::Float;
11 def_id::{CrateNum, DefId, LOCAL_CRATE},
13 use rustc_middle::middle::{
14 codegen_fn_attrs::CodegenFnAttrFlags, dependency_format::Linkage,
15 exported_symbols::ExportedSymbol,
17 use rustc_middle::mir;
19 use rustc_session::config::CrateType;
20 use rustc_span::{symbol::sym, Symbol};
26 use super::backtrace::EvalContextExt as _;
28 use helpers::strip_linker_suffix;
30 /// Returned by `emulate_foreign_item_by_name`.
31 pub enum EmulateByNameResult {
32 /// The caller is expected to jump to the return block.
34 /// Jumping has already been taken care of.
36 /// The item is not supported.
// NOTE(review): the variant identifier lines are not visible in this excerpt;
// the dispatch in `emulate_foreign_item` matches on `NeedsJumping`,
// `AlreadyJumped`, and `NotSupported`.
// Blanket impl: `MiriEvalContext` picks up every shim method below through the
// `EvalContextExt` extension trait (all trait methods have default bodies).
40 impl<'mir, 'tcx: 'mir> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
41 pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
42 /// Returns the minimum alignment for the target architecture for allocations of the given size.
43 fn min_align(&self, size: u64, kind: MiriMemoryKind) -> Align {
44 let this = self.eval_context_ref();
45 // List taken from `libstd/sys_common/alloc.rs`.
// Guaranteed allocator alignment, keyed on the target architecture.
46 let min_align = match this.tcx.sess.target.arch.as_str() {
47 "x86" | "arm" | "mips" | "powerpc" | "powerpc64" | "asmjs" | "wasm32" => 8,
48 "x86_64" | "aarch64" | "mips64" | "s390x" | "sparc64" => 16,
49 arch => bug!("Unsupported target architecture: {}", arch),
51 // Windows always aligns, even small allocations.
52 // Source: <https://support.microsoft.com/en-us/help/286470/how-to-use-pageheap-exe-in-windows-xp-windows-2000-and-windows-server>
53 // But jemalloc does not, so for the C heap we only align if the allocation is sufficiently big.
54 if kind == MiriMemoryKind::WinHeap || size >= min_align {
55 return Align::from_bytes(min_align).unwrap();
57 // We have `size < min_align`. Round `size` *down* to the next power of two and use that.
// NOTE(review): several interior lines of this helper (the power-of-two
// check and the division of `next_pow2`) are not visible in this excerpt.
58 fn prev_power_of_two(x: u64) -> u64 {
59 let next_pow2 = x.next_power_of_two();
61 // x *is* a power of two, just use that.
64 // x is between two powers, so next = 2*prev.
68 Align::from_bytes(prev_power_of_two(size)).unwrap()
/// Emulates a C-style heap allocation of `size` bytes for memory kind `kind`,
/// optionally zero-initializing the new block (used by `calloc`).
/// NOTE(review): the branch structure (the condition guarding the
/// `Scalar::null_ptr` return and the `zero_init` check guarding the
/// `write_bytes` call) falls on lines not visible in this excerpt.
71 fn malloc(&mut self, size: u64, zero_init: bool, kind: MiriMemoryKind) -> Scalar<Tag> {
72 let this = self.eval_context_mut();
74 Scalar::null_ptr(this)
76 let align = this.min_align(size, kind);
77 let ptr = this.memory.allocate(Size::from_bytes(size), align, kind.into());
79 // We just allocated this, the access is definitely in-bounds.
80 this.memory.write_bytes(ptr.into(), iter::repeat(0u8).take(size as usize)).unwrap();
/// Emulates C `free`: deallocates `ptr` from the heap of the given `kind`.
86 fn free(&mut self, ptr: Scalar<Tag>, kind: MiriMemoryKind) -> InterpResult<'tcx> {
87 let this = self.eval_context_mut();
// `free(NULL)` is a no-op: only deallocate for non-null pointers.
88 if !this.is_null(ptr)? {
89 let ptr = this.force_ptr(ptr)?;
90 this.memory.deallocate(ptr, None, kind.into())?;
// (Continuation of `realloc`: the `fn` line and parameter list
// (`old_ptr`, `new_size`, `kind`) fall on lines not visible in this excerpt.)
100 ) -> InterpResult<'tcx, Scalar<Tag>> {
101 let this = self.eval_context_mut();
102 let new_align = this.min_align(new_size, kind);
// Null `old_ptr`: either return null or behave like a fresh allocation.
// NOTE(review): the branch conditions (presumably on `new_size == 0`) fall on
// lines not visible here — confirm against the full source.
103 if this.is_null(old_ptr)? {
105 Ok(Scalar::null_ptr(this))
108 this.memory.allocate(Size::from_bytes(new_size), new_align, kind.into());
109 Ok(Scalar::Ptr(new_ptr))
112 let old_ptr = this.force_ptr(old_ptr)?;
// Non-null `old_ptr`: deallocate-and-return-null on one path, or grow/shrink
// in place via `reallocate` on the other.
114 this.memory.deallocate(old_ptr, None, kind.into())?;
115 Ok(Scalar::null_ptr(this))
117 let new_ptr = this.memory.reallocate(
120 Size::from_bytes(new_size),
124 Ok(Scalar::Ptr(new_ptr))
129 /// Lookup the body of a function that has `link_name` as the symbol name.
130 fn lookup_exported_symbol(
133 ) -> InterpResult<'tcx, Option<&'mir mir::Body<'tcx>>> {
134 let this = self.eval_context_mut();
135 let tcx = this.tcx.tcx;
137 // If the result was cached, just return it.
// (The cache stores `Option<Instance>`, so "looked up before but not found"
// is also cached and returns `None` here without re-scanning.)
138 if let Some(instance) = this.machine.exported_symbols_cache.get(&link_name) {
139 return instance.map(|instance| this.load_mir(instance.def, None)).transpose();
142 // Find it if it was not cached.
143 let mut instance_and_crate: Option<(ty::Instance<'_>, CrateNum)> = None;
144 // `dependency_formats` includes all the transitive informations needed to link a crate,
145 // which is what we need here since we need to dig out `exported_symbols` from all transitive
147 let dependency_formats = tcx.dependency_formats(());
148 let dependency_format = dependency_formats
150 .find(|(crate_type, _)| *crate_type == CrateType::Executable)
151 .expect("interpreting a non-executable crate");
// Crate 0 (LOCAL_CRATE) is injected via `iter::once`; the dependency list is
// indexed from 0, hence the `num + 1` shift when building each `CrateNum`.
153 iter::once(LOCAL_CRATE).chain(dependency_format.1.iter().enumerate().filter_map(
154 |(num, &linkage)| (linkage != Linkage::NotLinked).then_some(CrateNum::new(num + 1)),
157 // We can ignore `_export_level` here: we are a Rust crate, and everything is exported
158 // from a Rust crate.
159 for &(symbol, _export_level) in tcx.exported_symbols(cnum) {
160 if let ExportedSymbol::NonGeneric(def_id) = symbol {
161 let attrs = tcx.codegen_fn_attrs(def_id);
// A symbol name only counts if it is explicit: `#[export_name]` first,
// then `#[no_mangle]` (item name); everything else is skipped.
162 let symbol_name = if let Some(export_name) = attrs.export_name {
164 } else if attrs.flags.contains(CodegenFnAttrFlags::NO_MANGLE) {
165 tcx.item_name(def_id)
167 // Skip over items without an explicitly defined symbol name.
170 if symbol_name == link_name {
// Two definitions of the same symbol is a hard interpreter stop,
// reporting both definition sites and their crates.
171 if let Some((instance, original_cnum)) = instance_and_crate {
172 throw_machine_stop!(TerminationInfo::MultipleSymbolDefinitions {
174 first: tcx.def_span(instance.def_id()).data(),
175 first_crate: tcx.crate_name(original_cnum),
176 second: tcx.def_span(def_id).data(),
177 second_crate: tcx.crate_name(cnum),
180 if tcx.def_kind(def_id) != DefKind::Fn {
182 "attempt to call an exported symbol that is not defined as a function"
185 instance_and_crate = Some((ty::Instance::mono(tcx, def_id), cnum));
191 let instance = instance_and_crate.map(|ic| ic.0);
192 // Cache it and load its MIR, if found.
193 this.machine.exported_symbols_cache.insert(link_name, instance);
194 instance.map(|instance| this.load_mir(instance.def, None)).transpose()
197 /// Emulates calling a foreign item, failing if the item is not supported.
198 /// This function will handle `goto_block` if needed.
199 /// Returns Ok(None) if the foreign item was completely handled
200 /// by this function.
201 /// Returns Ok(Some(body)) if processing the foreign item
202 /// is delegated to another function.
203 fn emulate_foreign_item(
207 args: &[OpTy<'tcx, Tag>],
208 ret: Option<(&PlaceTy<'tcx, Tag>, mir::BasicBlock)>,
209 unwind: StackPopUnwind,
210 ) -> InterpResult<'tcx, Option<&'mir mir::Body<'tcx>>> {
211 let this = self.eval_context_mut();
212 let attrs = this.tcx.get_attrs(def_id);
// Resolve the symbol name: an explicit `#[link_name = "..."]` attribute wins,
// otherwise fall back to the item's own name; then strip linker suffixes.
213 let link_name_sym = this
216 .first_attr_value_str_by_name(&attrs, sym::link_name)
217 .unwrap_or_else(|| this.tcx.item_name(def_id));
218 let link_name = link_name_sym.as_str();
219 let link_name = strip_linker_suffix(&link_name);
220 let tcx = this.tcx.tcx;
222 // First: functions that diverge.
// `ret == None` means the call has no return block, i.e. the callee diverges.
223 let (dest, ret) = match ret {
224 None => match link_name {
225 "miri_start_panic" => {
226 // `check_shim` happens inside `handle_miri_start_panic`.
227 this.handle_miri_start_panic(abi, link_name_sym, args, unwind)?;
230 // This matches calls to the foreign item `panic_impl`.
231 // The implementation is provided by the function with the `#[panic_handler]` attribute.
233 // We don't use `check_shim` here because we are just forwarding to the lang
234 // item. Argument count checking will be performed when the returned `Body` is
236 this.check_abi_and_shim_symbol_clash(abi, Abi::Rust, link_name_sym)?;
237 let panic_impl_id = tcx.lang_items().panic_impl().unwrap();
238 let panic_impl_instance = ty::Instance::mono(tcx, panic_impl_id);
239 return Ok(Some(&*this.load_mir(panic_impl_instance.def, None)?));
// Process-exit shims: `exit` uses the C ABI, the other arm (presumably
// `ExitProcess` — its match label is not visible here) uses the system ABI.
245 let exp_abi = if link_name == "exit" {
246 Abi::C { unwind: false }
248 Abi::System { unwind: false }
250 let &[ref code] = this.check_shim(abi, exp_abi, link_name_sym, args)?;
251 // it's really u32 for ExitProcess, but we have to put it into the `Exit` variant anyway
252 let code = this.read_scalar(code)?.to_i32()?;
253 throw_machine_stop!(TerminationInfo::Exit(code.into()));
257 this.check_shim(abi, Abi::C { unwind: false }, link_name_sym, args)?;
258 throw_machine_stop!(TerminationInfo::Abort(
259 "the program aborted execution".to_owned()
// Unknown diverging foreign function: try a Rust-defined exported symbol
// before reporting it as unsupported.
263 if let Some(body) = this.lookup_exported_symbol(link_name_sym)? {
264 return Ok(Some(body));
266 this.handle_unsupported(format!(
267 "can't call (diverging) foreign function: {}",
276 // Second: functions that return.
277 match this.emulate_foreign_item_by_name(link_name_sym, abi, args, dest, ret)? {
278 EmulateByNameResult::NeedsJumping => {
279 trace!("{:?}", this.dump_place(**dest));
280 this.go_to_block(ret);
282 EmulateByNameResult::AlreadyJumped => (),
283 EmulateByNameResult::NotSupported => {
// Same fallback as the diverging case: an exported Rust symbol may provide
// the implementation.
284 if let Some(body) = this.lookup_exported_symbol(link_name_sym)? {
285 return Ok(Some(body));
288 this.handle_unsupported(format!("can't call foreign function: {}", link_name))?;
296 /// Emulates calling a foreign item using its name.
/// Large shim dispatch on the (suffix-stripped) symbol name. Many match-arm
/// labels fall on lines not visible in this excerpt; each visible arm body is
/// annotated below where its intent is not obvious.
297 fn emulate_foreign_item_by_name(
301 args: &[OpTy<'tcx, Tag>],
302 dest: &PlaceTy<'tcx, Tag>,
303 ret: mir::BasicBlock,
304 ) -> InterpResult<'tcx, EmulateByNameResult> {
305 let this = self.eval_context_mut();
307 // Here we dispatch all the shims for foreign functions. If you have a platform specific
308 // shim, add it to the corresponding submodule.
309 let shim_name = link_name.as_str();
310 let shim_name = strip_linker_suffix(&shim_name);
312 // Miri-specific extern functions
313 "miri_static_root" => {
314 let &[ref ptr] = this.check_shim(abi, Abi::Rust, link_name, args)?;
315 let ptr = this.read_scalar(ptr)?.check_init()?;
316 let ptr = this.force_ptr(ptr)?;
// Only whole allocations can be registered as static roots, so the pointer
// must have offset 0 within its allocation.
317 if ptr.offset != Size::ZERO {
318 throw_unsup_format!("pointer passed to miri_static_root must point to beginning of an allocated block");
320 this.machine.static_roots.push(ptr.alloc_id);
323 // Obtains a Miri backtrace. See the README for details.
324 "miri_get_backtrace" => {
325 // `check_shim` happens inside `handle_miri_get_backtrace`.
326 this.handle_miri_get_backtrace(abi, link_name, args, dest)?;
329 // Resolves a Miri backtrace frame. See the README for details.
330 "miri_resolve_frame" => {
331 // `check_shim` happens inside `handle_miri_resolve_frame`.
332 this.handle_miri_resolve_frame(abi, link_name, args, dest)?;
336 // Standard C allocation
338 let &[ref size] = this.check_shim(abi, Abi::C { unwind: false }, link_name, args)?;
339 let size = this.read_scalar(size)?.to_machine_usize(this)?;
340 let res = this.malloc(size, /*zero_init:*/ false, MiriMemoryKind::C);
341 this.write_scalar(res, dest)?;
// calloc: the total size is `items * len`, with overflow reported as UB.
344 let &[ref items, ref len] = this.check_shim(abi, Abi::C { unwind: false }, link_name, args)?;
345 let items = this.read_scalar(items)?.to_machine_usize(this)?;
346 let len = this.read_scalar(len)?.to_machine_usize(this)?;
348 items.checked_mul(len).ok_or_else(|| err_ub_format!("overflow during calloc size computation"))?;
349 let res = this.malloc(size, /*zero_init:*/ true, MiriMemoryKind::C);
350 this.write_scalar(res, dest)?;
353 let &[ref ptr] = this.check_shim(abi, Abi::C { unwind: false }, link_name, args)?;
354 let ptr = this.read_scalar(ptr)?.check_init()?;
355 this.free(ptr, MiriMemoryKind::C)?;
358 let &[ref old_ptr, ref new_size] = this.check_shim(abi, Abi::C { unwind: false }, link_name, args)?;
359 let old_ptr = this.read_scalar(old_ptr)?.check_init()?;
360 let new_size = this.read_scalar(new_size)?.to_machine_usize(this)?;
361 let res = this.realloc(old_ptr, new_size, MiriMemoryKind::C)?;
362 this.write_scalar(res, dest)?;
366 // (Usually these would be forwarded to to `#[global_allocator]`; we instead implement a generic
367 // allocation that also checks that all conditions are met, such as not permitting zero-sized allocations.)
369 let &[ref size, ref align] = this.check_shim(abi, Abi::Rust, link_name, args)?;
370 let size = this.read_scalar(size)?.to_machine_usize(this)?;
371 let align = this.read_scalar(align)?.to_machine_usize(this)?;
// Enforce the `GlobalAlloc` contract: non-zero size, power-of-two alignment.
372 Self::check_alloc_request(size, align)?;
373 let ptr = this.memory.allocate(
374 Size::from_bytes(size),
375 Align::from_bytes(align).unwrap(),
376 MiriMemoryKind::Rust.into(),
378 this.write_scalar(ptr, dest)?;
380 "__rust_alloc_zeroed" => {
381 let &[ref size, ref align] = this.check_shim(abi, Abi::Rust, link_name, args)?;
382 let size = this.read_scalar(size)?.to_machine_usize(this)?;
383 let align = this.read_scalar(align)?.to_machine_usize(this)?;
384 Self::check_alloc_request(size, align)?;
385 let ptr = this.memory.allocate(
386 Size::from_bytes(size),
387 Align::from_bytes(align).unwrap(),
388 MiriMemoryKind::Rust.into(),
390 // We just allocated this, the access is definitely in-bounds.
391 this.memory.write_bytes(ptr.into(), iter::repeat(0u8).take(usize::try_from(size).unwrap())).unwrap();
392 this.write_scalar(ptr, dest)?;
394 "__rust_dealloc" => {
395 let &[ref ptr, ref old_size, ref align] = this.check_shim(abi, Abi::Rust, link_name, args)?;
396 let ptr = this.read_scalar(ptr)?.check_init()?;
397 let old_size = this.read_scalar(old_size)?.to_machine_usize(this)?;
398 let align = this.read_scalar(align)?.to_machine_usize(this)?;
399 // No need to check old_size/align; we anyway check that they match the allocation.
400 let ptr = this.force_ptr(ptr)?;
// Passing the caller-claimed size/align makes `deallocate` verify they match
// the actual allocation.
401 this.memory.deallocate(
403 Some((Size::from_bytes(old_size), Align::from_bytes(align).unwrap())),
404 MiriMemoryKind::Rust.into(),
407 "__rust_realloc" => {
408 let &[ref ptr, ref old_size, ref align, ref new_size] = this.check_shim(abi, Abi::Rust, link_name, args)?;
409 let ptr = this.force_ptr(this.read_scalar(ptr)?.check_init()?)?;
410 let old_size = this.read_scalar(old_size)?.to_machine_usize(this)?;
411 let align = this.read_scalar(align)?.to_machine_usize(this)?;
412 let new_size = this.read_scalar(new_size)?.to_machine_usize(this)?;
413 Self::check_alloc_request(new_size, align)?;
414 // No need to check old_size; we anyway check that they match the allocation.
415 let align = Align::from_bytes(align).unwrap();
416 let new_ptr = this.memory.reallocate(
418 Some((Size::from_bytes(old_size), align)),
419 Size::from_bytes(new_size),
421 MiriMemoryKind::Rust.into(),
423 this.write_scalar(new_ptr, dest)?;
426 // C memory handling functions
// memcmp-style comparison: read both ranges and map their byte-wise ordering
// to a negative/zero/positive i32 (the Ordering arms are not visible here).
428 let &[ref left, ref right, ref n] = this.check_shim(abi, Abi::C { unwind: false }, link_name, args)?;
429 let left = this.read_scalar(left)?.check_init()?;
430 let right = this.read_scalar(right)?.check_init()?;
431 let n = Size::from_bytes(this.read_scalar(n)?.to_machine_usize(this)?);
434 let left_bytes = this.memory.read_bytes(left, n)?;
435 let right_bytes = this.memory.read_bytes(right, n)?;
437 use std::cmp::Ordering::*;
438 match left_bytes.cmp(right_bytes) {
445 this.write_scalar(Scalar::from_i32(result), dest)?;
// NOTE(review): this arm searches from the end (offset computed as
// `num - idx - 1`), which matches `memrchr`; the match label and the `.rev()`
// adaptor fall on lines not visible in this excerpt — confirm in full source.
448 let &[ref ptr, ref val, ref num] = this.check_shim(abi, Abi::C { unwind: false }, link_name, args)?;
449 let ptr = this.read_scalar(ptr)?.check_init()?;
450 let val = this.read_scalar(val)?.to_i32()? as u8;
451 let num = this.read_scalar(num)?.to_machine_usize(this)?;
452 if let Some(idx) = this
454 .read_bytes(ptr, Size::from_bytes(num))?
457 .position(|&c| c == val)
459 let new_ptr = ptr.ptr_offset(Size::from_bytes(num - idx as u64 - 1), this)?;
460 this.write_scalar(new_ptr, dest)?;
462 this.write_null(dest)?;
// Forward search (`memchr`-style): pointer to the first matching byte, or
// NULL if `val` does not occur in the first `num` bytes.
466 let &[ref ptr, ref val, ref num] = this.check_shim(abi, Abi::C { unwind: false }, link_name, args)?;
467 let ptr = this.read_scalar(ptr)?.check_init()?;
468 let val = this.read_scalar(val)?.to_i32()? as u8;
469 let num = this.read_scalar(num)?.to_machine_usize(this)?;
472 .read_bytes(ptr, Size::from_bytes(num))?
474 .position(|&c| c == val);
475 if let Some(idx) = idx {
476 let new_ptr = ptr.ptr_offset(Size::from_bytes(idx as u64), this)?;
477 this.write_scalar(new_ptr, dest)?;
479 this.write_null(dest)?;
// strlen: length of the NUL-terminated string, written as a machine usize.
483 let &[ref ptr] = this.check_shim(abi, Abi::C { unwind: false }, link_name, args)?;
484 let ptr = this.read_scalar(ptr)?.check_init()?;
485 let n = this.read_c_str(ptr)?.len();
486 this.write_scalar(Scalar::from_machine_usize(u64::try_from(n).unwrap(), this), dest)?;
// Unary f32 math shims (the match labels and per-function arms are not
// visible here); computed with host floats, see the FIXME.
499 let &[ref f] = this.check_shim(abi, Abi::C { unwind: false }, link_name, args)?;
500 // FIXME: Using host floats.
501 let f = f32::from_bits(this.read_scalar(f)?.to_u32()?);
502 let f = match shim_name {
512 this.write_scalar(Scalar::from_u32(f.to_bits()), dest)?;
// Binary f32 math shims (hypotf/atan2f and Windows underscore aliases).
519 let &[ref f1, ref f2] = this.check_shim(abi, Abi::C { unwind: false }, link_name, args)?;
520 // underscore case for windows, here and below
521 // (see https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/floating-point-primitives?view=vs-2019)
522 // FIXME: Using host floats.
523 let f1 = f32::from_bits(this.read_scalar(f1)?.to_u32()?);
524 let f2 = f32::from_bits(this.read_scalar(f2)?.to_u32()?);
525 let n = match shim_name {
526 "_hypotf" | "hypotf" => f1.hypot(f2),
527 "atan2f" => f1.atan2(f2),
530 this.write_scalar(Scalar::from_u32(n.to_bits()), dest)?;
// Unary f64 math shims, same host-float caveat as above.
541 let &[ref f] = this.check_shim(abi, Abi::C { unwind: false }, link_name, args)?;
542 // FIXME: Using host floats.
543 let f = f64::from_bits(this.read_scalar(f)?.to_u64()?);
544 let f = match shim_name {
554 this.write_scalar(Scalar::from_u64(f.to_bits()), dest)?;
// Binary f64 math shims (hypot/atan2 and Windows underscore aliases).
561 let &[ref f1, ref f2] = this.check_shim(abi, Abi::C { unwind: false }, link_name, args)?;
562 // FIXME: Using host floats.
563 let f1 = f64::from_bits(this.read_scalar(f1)?.to_u64()?);
564 let f2 = f64::from_bits(this.read_scalar(f2)?.to_u64()?);
565 let n = match shim_name {
566 "_hypot" | "hypot" => f1.hypot(f2),
567 "atan2" => f1.atan2(f2),
570 this.write_scalar(Scalar::from_u64(n.to_bits()), dest)?;
// ldexp/scalbn shim: uses Miri's soft-float `scalbn` (via `rustc_apfloat`),
// so no host floats are involved here.
577 let &[ref x, ref exp] = this.check_shim(abi, Abi::C { unwind: false }, link_name, args)?;
578 // For radix-2 (binary) systems, `ldexp` and `scalbn` are the same.
579 let x = this.read_scalar(x)?.to_f64()?;
580 let exp = this.read_scalar(exp)?.to_i32()?;
582 // Saturating cast to i16. Even those are outside the valid exponent range to
583 // `scalbn` below will do its over/underflow handling.
584 let exp = if exp > i32::from(i16::MAX) {
586 } else if exp < i32::from(i16::MIN) {
589 exp.try_into().unwrap()
592 let res = x.scalbn(exp);
593 this.write_scalar(Scalar::from_f64(res), dest)?;
596 // Architecture-specific shims
// `pause` is modeled as yielding the active thread (spin-loop hint).
597 "llvm.x86.sse2.pause" if this.tcx.sess.target.arch == "x86" || this.tcx.sess.target.arch == "x86_64" => {
598 let &[] = this.check_shim(abi, Abi::C { unwind: false }, link_name, args)?;
599 this.yield_active_thread();
601 "llvm.aarch64.isb" if this.tcx.sess.target.arch == "aarch64" => {
602 let &[ref arg] = this.check_shim(abi, Abi::C { unwind: false }, link_name, args)?;
603 let arg = this.read_scalar(arg)?.to_i32()?;
605 15 => { // SY ("full system scope")
606 this.yield_active_thread();
609 throw_unsup_format!("unsupported llvm.aarch64.isb argument {}", arg);
614 // Platform-specific shims
// Anything not handled above is forwarded to the OS-specific shim module;
// note these arms `return` directly, skipping the NeedsJumping fallthrough.
615 _ => match this.tcx.sess.target.os.as_str() {
616 "linux" | "macos" => return shims::posix::foreign_items::EvalContextExt::emulate_foreign_item_by_name(this, link_name, abi, args, dest, ret),
617 "windows" => return shims::windows::foreign_items::EvalContextExt::emulate_foreign_item_by_name(this, link_name, abi, args, dest, ret),
618 target => throw_unsup_format!("the target `{}` is not supported", target),
622 // We only fall through to here if we did *not* hit the `_` arm above,
623 // i.e., if we actually emulated the function.
624 Ok(EmulateByNameResult::NeedsJumping)
627 /// Check some basic requirements for this allocation request:
628 /// non-zero size, power-of-two alignment.
629 fn check_alloc_request(size: u64, align: u64) -> InterpResult<'tcx> {
631 throw_ub_format!("creating allocation with size 0");
633 if !align.is_power_of_two() {
634 throw_ub_format!("creating allocation with non-power-of-two alignment {}", align);