use std::{convert::{TryInto, TryFrom}, iter};

use rustc_hir::def_id::DefId;
use rustc::mir;
use rustc::ty;
use rustc::ty::layout::{Align, Size};
use rustc_apfloat::Float;
use rustc_span::symbol::sym;
use rustc_ast::attr;

use crate::*;

impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
    /// Returns the minimum alignment for the target architecture for allocations of the given size.
    fn min_align(&self, size: u64, kind: MiriMemoryKind) -> Align {
        let this = self.eval_context_ref();
        // List taken from `libstd/sys_common/alloc.rs`.
        let min_align = match this.tcx.tcx.sess.target.target.arch.as_str() {
            "x86" | "arm" | "mips" | "powerpc" | "powerpc64" | "asmjs" | "wasm32" => 8,
            "x86_64" | "aarch64" | "mips64" | "s390x" | "sparc64" => 16,
            arch => bug!("Unsupported target architecture: {}", arch),
        };
        // Windows always aligns, even small allocations.
        // Source: <https://support.microsoft.com/en-us/help/286470/how-to-use-pageheap-exe-in-windows-xp-windows-2000-and-windows-server>
        // But jemalloc does not, so for the C heap we only align if the allocation is sufficiently big.
        if kind == MiriMemoryKind::WinHeap || size >= min_align {
            return Align::from_bytes(min_align).unwrap();
        }
        // We have `size < min_align`. Round `size` *down* to the previous power of two and use that.
        fn prev_power_of_two(x: u64) -> u64 {
            let next_pow2 = x.next_power_of_two();
            if next_pow2 == x {
                // `x` *is* a power of two, just use that.
                x
            } else {
                // `x` is between two powers, so `next == 2 * prev`.
                next_pow2 / 2
            }
        }
        Align::from_bytes(prev_power_of_two(size)).unwrap()
    }
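    // A worked example of the rounding above (added for illustration): a C-heap
    // allocation of size 5 on x86_64 has `min_align == 16`, so we fall through to
    // `prev_power_of_two(5)`: `5.next_power_of_two() == 8`, and since `8 != 5` the
    // result is `8 / 2 == 4`, i.e. the allocation gets 4-byte alignment.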
    fn malloc(&mut self, size: u64, zero_init: bool, kind: MiriMemoryKind) -> Scalar<Tag> {
        let this = self.eval_context_mut();
        if size == 0 {
            Scalar::from_int(0, this.pointer_size())
        } else {
            let align = this.min_align(size, kind);
            let ptr = this.memory.allocate(Size::from_bytes(size), align, kind.into());
            if zero_init {
                // We just allocated this, the access is definitely in-bounds.
                this.memory
                    .write_bytes(ptr.into(), iter::repeat(0u8).take(usize::try_from(size).unwrap()))
                    .unwrap();
            }
            Scalar::Ptr(ptr)
        }
    }
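    // Note that C allows `malloc(0)` to return either NULL or a unique pointer;
    // this shim implements the NULL choice, so callers get a null `Scalar` back
    // for zero-sized requests.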
    fn free(&mut self, ptr: Scalar<Tag>, kind: MiriMemoryKind) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        if !this.is_null(ptr)? {
            let ptr = this.force_ptr(ptr)?;
            this.memory.deallocate(ptr, None, kind.into())?;
        }
        Ok(())
    }
    fn realloc(
        &mut self,
        old_ptr: Scalar<Tag>,
        new_size: u64,
        kind: MiriMemoryKind,
    ) -> InterpResult<'tcx, Scalar<Tag>> {
        let this = self.eval_context_mut();
        let new_align = this.min_align(new_size, kind);
        if this.is_null(old_ptr)? {
            if new_size == 0 {
                Ok(Scalar::from_int(0, this.pointer_size()))
            } else {
                let new_ptr =
                    this.memory.allocate(Size::from_bytes(new_size), new_align, kind.into());
                Ok(Scalar::Ptr(new_ptr))
            }
        } else {
            let old_ptr = this.force_ptr(old_ptr)?;
            if new_size == 0 {
                this.memory.deallocate(old_ptr, None, kind.into())?;
                Ok(Scalar::from_int(0, this.pointer_size()))
            } else {
                let new_ptr = this.memory.reallocate(
                    old_ptr,
                    None,
                    Size::from_bytes(new_size),
                    new_align,
                    kind.into(),
                )?;
                Ok(Scalar::Ptr(new_ptr))
            }
        }
    }
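    // To summarize the case analysis above: `realloc(NULL, n)` acts like `malloc(n)`,
    // `realloc(ptr, 0)` acts like `free(ptr)` and returns NULL, and only the remaining
    // case performs an actual reallocation.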
    /// Emulates calling a foreign item, failing if the item is not supported.
    /// This function will handle `goto_block` if needed.
    /// Returns `Ok(None)` if the foreign item was completely handled
    /// by this function.
    /// Returns `Ok(Some(body))` if processing the foreign item
    /// is delegated to another function.
    fn emulate_foreign_item(
        &mut self,
        def_id: DefId,
        args: &[OpTy<'tcx, Tag>],
        ret: Option<(PlaceTy<'tcx, Tag>, mir::BasicBlock)>,
        _unwind: Option<mir::BasicBlock>,
    ) -> InterpResult<'tcx, Option<&'mir mir::Body<'tcx>>> {
        let this = self.eval_context_mut();
        let attrs = this.tcx.get_attrs(def_id);
        let link_name = match attr::first_attr_value_str_by_name(&attrs, sym::link_name) {
            Some(name) => name.as_str(),
            None => this.tcx.item_name(def_id).as_str(),
        };
        // Strip linker suffixes (seen on 32-bit macOS).
        let link_name = link_name.trim_end_matches("$UNIX2003");
        let tcx = &{ this.tcx.tcx };

        // First: functions that diverge.
        let (dest, ret) = match ret {
            None => match link_name {
                // This matches calls to the foreign item `panic_impl`.
                // The implementation is provided by the function with the `#[panic_handler]` attribute.
                "panic_impl" => {
                    this.check_panic_supported()?;
                    let panic_impl_id = this.tcx.lang_items().panic_impl().unwrap();
                    let panic_impl_instance = ty::Instance::mono(*this.tcx, panic_impl_id);
                    return Ok(Some(&*this.load_mir(panic_impl_instance.def, None)?));
                }
                "exit" | "ExitProcess" => {
                    // It's really `u32` for `ExitProcess`, but we have to put it into the `Exit` variant anyway.
                    let code = this.read_scalar(args[0])?.to_i32()?;
                    throw_machine_stop!(TerminationInfo::Exit(code.into()));
                }
                _ => throw_unsup_format!("can't call (diverging) foreign function: {}", link_name),
            },
            Some(p) => p,
        };
        // Second: some functions that we forward to MIR implementations.
        match link_name {
            // This matches calls to the foreign item `__rust_start_panic`, that is,
            // calls to `extern "Rust" { fn __rust_start_panic(...) }`
            // (and `__rust_panic_cleanup`, respectively).
            // We forward this to the underlying *implementation* in the panic runtime crate.
            // Normally, this will be either `libpanic_unwind` or `libpanic_abort`, but it could
            // also be a custom user-provided implementation via `#![feature(panic_runtime)]`.
            "__rust_start_panic" | "__rust_panic_cleanup" => {
                // FIXME: we might want to cache this... but it's not really performance-critical.
                let panic_runtime = tcx
                    .crates()
                    .iter()
                    .find(|cnum| tcx.is_panic_runtime(**cnum))
                    .expect("No panic runtime found!");
                let panic_runtime = tcx.crate_name(*panic_runtime);
                let start_panic_instance =
                    this.resolve_path(&[&*panic_runtime.as_str(), link_name]);
                return Ok(Some(&*this.load_mir(start_panic_instance.def, None)?));
            }
            _ => {}
        }
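        // (Example of the forwarding above: with the default panic runtime, `crate_name`
        // yields `panic_unwind`, so `resolve_path` resolves `panic_unwind::__rust_start_panic`
        // and we execute the panic runtime's own MIR instead of a shim.)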
        // Third: functions that return.
        if this.emulate_foreign_item_by_name(link_name, args, dest, ret)? {
            this.dump_place(*dest);
            this.go_to_block(ret);
        }

        Ok(None)
    }
    /// Emulates calling a foreign item using its name, failing if the item is not supported.
    /// Returns `true` if the caller is expected to jump to the return block, and `false` if
    /// jumping has already been taken care of.
    fn emulate_foreign_item_by_name(
        &mut self,
        link_name: &str,
        args: &[OpTy<'tcx, Tag>],
        dest: PlaceTy<'tcx, Tag>,
        ret: mir::BasicBlock,
    ) -> InterpResult<'tcx, bool> {
        let this = self.eval_context_mut();
        // Here we dispatch all the shims for foreign functions. If you have a platform-specific
        // shim, add it to the corresponding submodule.
        match link_name {
            "malloc" => {
                let size = this.read_scalar(args[0])?.to_machine_usize(this)?;
                let res = this.malloc(size, /*zero_init:*/ false, MiriMemoryKind::C);
                this.write_scalar(res, dest)?;
            }
            "calloc" => {
                let items = this.read_scalar(args[0])?.to_machine_usize(this)?;
                let len = this.read_scalar(args[1])?.to_machine_usize(this)?;
                let size = items
                    .checked_mul(len)
                    .ok_or_else(|| err_ub_format!("overflow during calloc size computation"))?;
                let res = this.malloc(size, /*zero_init:*/ true, MiriMemoryKind::C);
                this.write_scalar(res, dest)?;
            }
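            // (Example for the overflow check above: `calloc(usize::MAX, 2)` would wrap
            // around in `items * len`; `checked_mul` catches this and the shim raises an
            // error instead of allocating a too-small buffer, the classic `calloc` pitfall.)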
            "free" => {
                let ptr = this.read_scalar(args[0])?.not_undef()?;
                this.free(ptr, MiriMemoryKind::C)?;
            }
            "realloc" => {
                let old_ptr = this.read_scalar(args[0])?.not_undef()?;
                let new_size = this.read_scalar(args[1])?.to_machine_usize(this)?;
                let res = this.realloc(old_ptr, new_size, MiriMemoryKind::C)?;
                this.write_scalar(res, dest)?;
            }
            "__rust_alloc" => {
                let size = this.read_scalar(args[0])?.to_machine_usize(this)?;
                let align = this.read_scalar(args[1])?.to_machine_usize(this)?;
                Self::check_alloc_request(size, align)?;
                let ptr = this.memory.allocate(
                    Size::from_bytes(size),
                    Align::from_bytes(align).unwrap(),
                    MiriMemoryKind::Rust.into(),
                );
                this.write_scalar(ptr, dest)?;
            }
234 "__rust_alloc_zeroed" => {
235 let size = this.read_scalar(args[0])?.to_machine_usize(this)?;
236 let align = this.read_scalar(args[1])?.to_machine_usize(this)?;
237 Self::check_alloc_request(size, align)?;
238 let ptr = this.memory.allocate(
239 Size::from_bytes(size),
240 Align::from_bytes(align).unwrap(),
241 MiriMemoryKind::Rust.into(),
243 // We just allocated this, the access is definitely in-bounds.
244 this.memory.write_bytes(ptr.into(), iter::repeat(0u8).take(usize::try_from(size).unwrap())).unwrap();
245 this.write_scalar(ptr, dest)?;
247 "__rust_dealloc" => {
248 let ptr = this.read_scalar(args[0])?.not_undef()?;
249 let old_size = this.read_scalar(args[1])?.to_machine_usize(this)?;
250 let align = this.read_scalar(args[2])?.to_machine_usize(this)?;
251 // No need to check old_size/align; we anyway check that they match the allocation.
252 let ptr = this.force_ptr(ptr)?;
253 this.memory.deallocate(
255 Some((Size::from_bytes(old_size), Align::from_bytes(align).unwrap())),
256 MiriMemoryKind::Rust.into(),
259 "__rust_realloc" => {
260 let old_size = this.read_scalar(args[1])?.to_machine_usize(this)?;
261 let align = this.read_scalar(args[2])?.to_machine_usize(this)?;
262 let new_size = this.read_scalar(args[3])?.to_machine_usize(this)?;
263 Self::check_alloc_request(new_size, align)?;
264 // No need to check old_size; we anyway check that they match the allocation.
265 let ptr = this.force_ptr(this.read_scalar(args[0])?.not_undef()?)?;
266 let align = Align::from_bytes(align).unwrap();
267 let new_ptr = this.memory.reallocate(
269 Some((Size::from_bytes(old_size), align)),
270 Size::from_bytes(new_size),
272 MiriMemoryKind::Rust.into(),
274 this.write_scalar(new_ptr, dest)?;
            "memcmp" => {
                let left = this.read_scalar(args[0])?.not_undef()?;
                let right = this.read_scalar(args[1])?.not_undef()?;
                let n = Size::from_bytes(this.read_scalar(args[2])?.to_machine_usize(this)?);

                let result = {
                    let left_bytes = this.memory.read_bytes(left, n)?;
                    let right_bytes = this.memory.read_bytes(right, n)?;

                    use std::cmp::Ordering::*;
                    match left_bytes.cmp(right_bytes) {
                        Less => -1i32,
                        Equal => 0,
                        Greater => 1,
                    }
                };

                this.write_scalar(Scalar::from_int(result, Size::from_bits(32)), dest)?;
            }
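            // (Note on the arm above: C only requires `memcmp` to return a negative, zero,
            // or positive value; this shim normalizes to exactly -1/0/1, e.g. comparing
            // b"abc" against b"abd" yields -1.)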
            "memrchr" => {
                let ptr = this.read_scalar(args[0])?.not_undef()?;
                let val = this.read_scalar(args[1])?.to_i32()? as u8;
                let num = this.read_scalar(args[2])?.to_machine_usize(this)?;
                if let Some(idx) = this
                    .memory
                    .read_bytes(ptr, Size::from_bytes(num))?
                    .iter()
                    .rev()
                    .position(|&c| c == val)
                {
                    let new_ptr = ptr.ptr_offset(Size::from_bytes(num - idx as u64 - 1), this)?;
                    this.write_scalar(new_ptr, dest)?;
                } else {
                    this.write_null(dest)?;
                }
            }
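            // (Worked example for the index arithmetic above: for bytes [7, 2, 9, 2] and
            // `val == 2`, the reversed search yields `idx == 0`, so the result points at
            // offset `4 - 0 - 1 == 3`, the *last* occurrence, as `memrchr` requires.)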
            "memchr" => {
                let ptr = this.read_scalar(args[0])?.not_undef()?;
                let val = this.read_scalar(args[1])?.to_i32()? as u8;
                let num = this.read_scalar(args[2])?.to_machine_usize(this)?;
                let idx = this
                    .memory
                    .read_bytes(ptr, Size::from_bytes(num))?
                    .iter()
                    .position(|&c| c == val);
                if let Some(idx) = idx {
                    let new_ptr = ptr.ptr_offset(Size::from_bytes(idx as u64), this)?;
                    this.write_scalar(new_ptr, dest)?;
                } else {
                    this.write_null(dest)?;
                }
            }
            "strlen" => {
                let ptr = this.read_scalar(args[0])?.not_undef()?;
                let n = this.memory.read_c_str(ptr)?.len();
                this.write_scalar(Scalar::from_uint(u64::try_from(n).unwrap(), dest.layout.size), dest)?;
            }
            // math functions
            "cbrtf" | "coshf" | "sinhf" | "tanf" | "acosf" | "asinf" | "atanf" => {
                // FIXME: Using host floats.
                let f = f32::from_bits(this.read_scalar(args[0])?.to_u32()?);
                let f = match link_name {
                    "cbrtf" => f.cbrt(),
                    "coshf" => f.cosh(),
                    "sinhf" => f.sinh(),
                    "tanf" => f.tan(),
                    "acosf" => f.acos(),
                    "asinf" => f.asin(),
                    "atanf" => f.atan(),
                    _ => bug!(),
                };
                this.write_scalar(Scalar::from_u32(f.to_bits()), dest)?;
            }
            // underscore case for Windows
            "_hypotf" | "hypotf" | "atan2f" => {
                // FIXME: Using host floats.
                let f1 = f32::from_bits(this.read_scalar(args[0])?.to_u32()?);
                let f2 = f32::from_bits(this.read_scalar(args[1])?.to_u32()?);
                let n = match link_name {
                    "_hypotf" | "hypotf" => f1.hypot(f2),
                    "atan2f" => f1.atan2(f2),
                    _ => bug!(),
                };
                this.write_scalar(Scalar::from_u32(n.to_bits()), dest)?;
            }
            "cbrt" | "cosh" | "sinh" | "tan" | "acos" | "asin" | "atan" => {
                // FIXME: Using host floats.
                let f = f64::from_bits(this.read_scalar(args[0])?.to_u64()?);
                let f = match link_name {
                    "cbrt" => f.cbrt(),
                    "cosh" => f.cosh(),
                    "sinh" => f.sinh(),
                    "tan" => f.tan(),
                    "acos" => f.acos(),
                    "asin" => f.asin(),
                    "atan" => f.atan(),
                    _ => bug!(),
                };
                this.write_scalar(Scalar::from_u64(f.to_bits()), dest)?;
            }
            // underscore case for Windows, here and below
            // (see <https://docs.microsoft.com/en-us/cpp/c-runtime-library/reference/floating-point-primitives?view=vs-2019>)
            "_hypot" | "hypot" | "atan2" => {
                // FIXME: Using host floats.
                let f1 = f64::from_bits(this.read_scalar(args[0])?.to_u64()?);
                let f2 = f64::from_bits(this.read_scalar(args[1])?.to_u64()?);
                let n = match link_name {
                    "_hypot" | "hypot" => f1.hypot(f2),
                    "atan2" => f1.atan2(f2),
                    _ => bug!(),
                };
                this.write_scalar(Scalar::from_u64(n.to_bits()), dest)?;
            }
            // For radix-2 (binary) systems, `ldexp` and `scalbn` are the same.
            "_ldexp" | "ldexp" | "scalbn" => {
                let x = this.read_scalar(args[0])?.to_f64()?;
                let exp = this.read_scalar(args[1])?.to_i32()?;

                // Saturating cast to i16. Even the saturated values are outside the valid
                // exponent range, so `scalbn` below will do its over/underflow handling either way.
                let exp = if exp > i32::from(i16::MAX) {
                    i16::MAX
                } else if exp < i32::from(i16::MIN) {
                    i16::MIN
                } else {
                    exp.try_into().unwrap()
                };

                let res = x.scalbn(exp);
                this.write_scalar(Scalar::from_f64(res), dest)?;
            }
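            // (Background for the saturation above: `rustc_apfloat::Float::scalbn` computes
            // `x * 2^exp` with overflow-to-infinity and underflow-to-zero handling, and it
            // takes the exponent as an `i16` (`ExpInt`), which is why the `i32` argument is
            // clamped first.)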
            _ => match this.tcx.sess.target.target.target_os.as_str() {
                "linux" | "macos" => return posix::EvalContextExt::emulate_foreign_item_by_name(this, link_name, args, dest, ret),
                "windows" => return windows::EvalContextExt::emulate_foreign_item_by_name(this, link_name, args, dest, ret),
                target => throw_unsup_format!("the target `{}` is not supported", target),
            },
        }

        Ok(true)
    }
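    // (Illustrative sketch, not part of the actual shims: a platform-specific shim in the
    // `posix`/`windows` submodules dispatched to above has the same shape as the arms in
    // this function, e.g. a hypothetical POSIX-only arm:
    //     "getpid" => {
    //         let result = this.getpid()?; // hypothetical helper
    //         this.write_scalar(Scalar::from_int(result, Size::from_bits(32)), dest)?;
    //     }
    // New shims should go into whichever submodule matches their platform.)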
    /// Check some basic requirements for this allocation request:
    /// non-zero size, power-of-two alignment.
    fn check_alloc_request(size: u64, align: u64) -> InterpResult<'tcx> {
        if size == 0 {
            throw_ub_format!("creating allocation with size 0");
        }
        if !align.is_power_of_two() {
            throw_ub_format!("creating allocation with non-power-of-two alignment {}", align);
        }
        Ok(())
    }
}
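// (Note on the checks above: they mirror the safety contract of Rust's `GlobalAlloc`.
// `Layout` already guarantees a power-of-two alignment, so a program can only trip these
// errors by calling `__rust_alloc` and friends directly with a hand-made, invalid layout.)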