1 use rustc::ty::{self, Ty};
2 use rustc::ty::layout::{self, Align, LayoutOf, Size};
3 use rustc::hir::def_id::{DefId, CRATE_DEF_INDEX};
5 use rustc_data_structures::indexed_vec::Idx;
7 use syntax::codemap::Span;
15 use super::memory::MemoryKind;
// Writes the discriminant for `variant_index` into the enum value stored at
// `dest` (whose type is `dest_ty`), covering all three enum-layout strategies
// rustc produces (`Single`, `Tagged`, `NicheFilling`).
// NOTE(review): this chunk is elided — the parameter list below is incomplete
// (`dest_ty`, `dest` and `variant_index` are used in the body but their
// declarations are not visible here), as are several closing braces.
17 fn write_discriminant_value<'a, 'mir, 'tcx: 'a + 'mir>(
18 ecx: &mut EvalContext<'a, 'mir, 'tcx, super::Evaluator<'tcx>>,
22 ) -> EvalResult<'tcx> {
23 let layout = ecx.layout_of(dest_ty)?;
25 match layout.variants {
        // `Single`: the layout has exactly one inhabited variant, so there is
        // no discriminant to store. If a *different* variant was requested we
        // only assert (below) that it is uninhabited — writing it is a no-op.
26 layout::Variants::Single { index } => {
27 if index != variant_index {
28 // If the layout of an enum is `Single`, all
29 // other variants are necessarily uninhabited.
30 assert_eq!(layout.for_variant(&ecx, variant_index).abi,
31 layout::Abi::Uninhabited);
        // `Tagged`: look up the variant's discriminant value on the ADT
        // definition and store it into field 0, which is the tag field for
        // tagged layouts.
34 layout::Variants::Tagged { .. } => {
35 let discr_val = dest_ty.ty_adt_def().unwrap()
36 .discriminant_for_variant(*ecx.tcx, variant_index)
39 let (discr_dest, discr) = ecx.place_field(dest, mir::Field::new(0), layout)?;
40 ecx.write_scalar(discr_dest, Scalar::from_uint(discr_val, discr.size), discr.ty)?;
        // `NicheFilling`: only non-dataful variants are encoded; the dataful
        // variant is implied by any value outside the niche range, so nothing
        // is written for it. The encoding is the variant's offset from the
        // start of `niche_variants`, shifted by `niche_start`, written into
        // the niche field (field 0).
42 layout::Variants::NicheFilling {
48 if variant_index != dataful_variant {
49 let (niche_dest, niche) =
50 ecx.place_field(dest, mir::Field::new(0), layout)?;
51 let niche_value = ((variant_index - niche_variants.start()) as u128)
52 .wrapping_add(niche_start);
53 ecx.write_scalar(niche_dest, Scalar::from_uint(niche_value, niche.size), niche.ty)?;
// Extension trait adding miri-specific shim entry points onto `EvalContext`
// (implemented below for `EvalContext<_, _, _, super::Evaluator>`).
// NOTE(review): this chunk is elided — several method names and parameter
// lists are missing; the fragments below are what remains of four method
// signatures plus `resolve_path` and `write_null`.
61 pub trait EvalContextExt<'tcx> {
68 dest_block: mir::BasicBlock,
69 ) -> EvalResult<'tcx>;
    /// Resolves a `::`-style path (given as segments) to a monomorphic
    /// instance, e.g. for looking up libc constants by path.
71 fn resolve_path(&self, path: &[&str]) -> EvalResult<'tcx, ty::Instance<'tcx>>;
    // Elided signature — presumably `call_missing_fn` (matches the impl
    // below); takes the instance plus an optional (place, block) destination.
75 instance: ty::Instance<'tcx>,
76 destination: Option<(Place, mir::BasicBlock)>,
80 ) -> EvalResult<'tcx>;
    // Elided signature — presumably `eval_fn_call` (matches the impl below);
    // returns `bool` indicating whether the call was fully handled here.
84 instance: ty::Instance<'tcx>,
85 destination: Option<(Place, mir::BasicBlock)>,
89 ) -> EvalResult<'tcx, bool>;
    /// Writes a zero scalar of the destination's size into `dest`.
91 fn write_null(&mut self, dest: Place, dest_layout: TyLayout<'tcx>) -> EvalResult<'tcx>;
94 impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx> for EvalContext<'a, 'mir, 'tcx, super::Evaluator<'tcx>> {
    // Intercepts function calls before MIR execution: a few libstd items are
    // emulated directly by absolute item path, the `align_offset` lang item is
    // stubbed, and only otherwise is the callee's MIR loaded and a stack frame
    // pushed. NOTE(review): the `fn eval_fn_call(` line and parts of the body
    // are elided from this chunk.
97 instance: ty::Instance<'tcx>,
98 destination: Option<(Place, mir::BasicBlock)>,
101 sig: ty::FnSig<'tcx>,
102 ) -> EvalResult<'tcx, bool> {
103 trace!("eval_fn_call: {:#?}, {:#?}", instance, destination);
105 let def_id = instance.def_id();
106 let item_path = self.tcx.absolute_item_path_str(def_id);
        // Stack-guard queries: miri detects stack overflow itself, so fake a
        // `None` return by writing the `None` variant's discriminant into the
        // return place.
108 "std::sys::unix::thread::guard::init" | "std::sys::unix::thread::guard::current" => {
109 // Return None, as it doesn't make sense to return Some, because miri detects stack overflow itself.
110 let ret_ty = sig.output();
112 ty::TyAdt(ref adt_def, _) => {
113 assert!(adt_def.is_enum(), "Unexpected return type for {}", item_path);
114 let none_variant_index = adt_def.variants.iter().position(|def| {
115 def.name.as_str() == "None"
116 }).expect("No None variant");
117 let (return_place, return_to_block) = destination.unwrap();
118 write_discriminant_value(self, ret_ty, return_place, none_variant_index)?;
119 self.goto_block(return_to_block);
122 _ => panic!("Unexpected return type for {}", item_path)
        // TLS destructor registration is ignored (only the control-flow jump
        // to the return block is performed).
125 "std::sys::unix::fast_thread_local::register_dtor" => {
126 // TODO: register the dtor
127 let (_return_place, return_to_block) = destination.unwrap();
128 self.goto_block(return_to_block);
        // `align_offset` lang item: return usize::MAX (u128::MAX truncated to
        // pointer width via the shift pair), i.e. "cannot align".
134 if self.tcx.lang_items().align_offset_fn() == Some(instance.def.def_id()) {
135 // FIXME: return a real value in case the target allocation has an
136 // alignment bigger than the one requested
137 let n = u128::max_value();
138 let amt = 128 - self.memory.pointer_size().bytes() * 8;
139 let (dest, return_to_block) = destination.unwrap();
140 let ty = self.tcx.types.usize;
141 let ptr_size = self.memory.pointer_size();
142 self.write_scalar(dest, Scalar::from_uint((n << amt) >> amt, ptr_size), ty)?;
143 self.goto_block(return_to_block);
        // Normal path: load the callee's MIR; if none is available, fall back
        // to `call_missing_fn` (emulation of foreign/missing functions).
147 let mir = match self.load_mir(instance.def) {
149 Err(EvalError { kind: EvalErrorKind::NoMirFor(path), .. }) => {
150 self.call_missing_fn(
159 Err(other) => return Err(other),
        // Set up the return place and the cleanup action for when the new
        // frame is popped (jump to `block`, or nothing for diverging calls).
162 let (return_place, return_to_block) = match destination {
163 Some((place, block)) => (place, StackPopCleanup::Goto(block)),
164 None => (Place::undef(), StackPopCleanup::None),
167 self.push_stack_frame(
    // Emulates a foreign (non-Rust-MIR) function identified by its link name
    // (`#[link_name]` attribute, falling back to the item name). Each match
    // arm below is one shim; unknown names error out. NOTE(review): many
    // match-arm header lines (e.g. `"malloc" =>`) and closing braces are
    // elided from this chunk — the shim attributions below are inferred from
    // the visible bodies and should be confirmed against the full file.
178 fn call_foreign_item(
181 args: &[ValTy<'tcx>],
184 dest_block: mir::BasicBlock,
185 ) -> EvalResult<'tcx> {
186 let attrs = self.tcx.get_attrs(def_id);
187 let link_name = match attr::first_attr_value_str_by_name(&attrs, "link_name") {
188 Some(name) => name.as_str(),
189 None => self.tcx.item_name(def_id).as_str(),
191 let dest_layout = self.layout_of(dest_ty)?;
193 match &link_name[..] {
            // Presumably `malloc`: size 0 returns NULL; otherwise allocate
            // pointer-aligned C memory and return the pointer.
195 let size = self.value_to_scalar(args[0])?.to_usize(self)?;
197 self.write_null(dest, dest_layout)?;
199 let align = self.tcx.data_layout.pointer_align;
200 let ptr = self.memory.allocate(Size::from_bytes(size), align, MemoryKind::C.into())?;
201 self.write_scalar(dest, Scalar::Ptr(ptr), dest_ty)?;
            // Presumably `free`: deallocate C-kind memory.
206 let ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?;
208 self.memory.deallocate(
211 MemoryKind::C.into(),
            // Presumably `__rust_alloc`: validates size != 0 and power-of-two
            // alignment, then allocates Rust-kind memory.
217 let size = self.value_to_scalar(args[0])?.to_usize(self)?;
218 let align = self.value_to_scalar(args[1])?.to_usize(self)?;
220 return err!(HeapAllocZeroBytes);
222 if !align.is_power_of_two() {
223 return err!(HeapAllocNonPowerOfTwoAlignment(align));
225 let ptr = self.memory.allocate(Size::from_bytes(size),
226 Align::from_bytes(align, align).unwrap(),
227 MemoryKind::Rust.into())?;
228 self.write_scalar(dest, Scalar::Ptr(ptr), dest_ty)?;
            // Same as __rust_alloc, plus zero-filling the new allocation.
230 "__rust_alloc_zeroed" => {
231 let size = self.value_to_scalar(args[0])?.to_usize(self)?;
232 let align = self.value_to_scalar(args[1])?.to_usize(self)?;
234 return err!(HeapAllocZeroBytes);
236 if !align.is_power_of_two() {
237 return err!(HeapAllocNonPowerOfTwoAlignment(align));
239 let ptr = self.memory.allocate(Size::from_bytes(size),
240 Align::from_bytes(align, align).unwrap(),
241 MemoryKind::Rust.into())?;
242 self.memory.write_repeat(ptr.into(), 0, Size::from_bytes(size))?;
243 self.write_scalar(dest, Scalar::Ptr(ptr), dest_ty)?;
            // Validates the stated old size/alignment, then deallocates.
245 "__rust_dealloc" => {
246 let ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?.to_ptr()?;
247 let old_size = self.value_to_scalar(args[1])?.to_usize(self)?;
248 let align = self.value_to_scalar(args[2])?.to_usize(self)?;
250 return err!(HeapAllocZeroBytes);
252 if !align.is_power_of_two() {
253 return err!(HeapAllocNonPowerOfTwoAlignment(align));
255 self.memory.deallocate(
257 Some((Size::from_bytes(old_size), Align::from_bytes(align, align).unwrap())),
258 MemoryKind::Rust.into(),
            // Reallocation keeps the same alignment for old and new blocks.
261 "__rust_realloc" => {
262 let ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?.to_ptr()?;
263 let old_size = self.value_to_scalar(args[1])?.to_usize(self)?;
264 let align = self.value_to_scalar(args[2])?.to_usize(self)?;
265 let new_size = self.value_to_scalar(args[3])?.to_usize(self)?;
266 if old_size == 0 || new_size == 0 {
267 return err!(HeapAllocZeroBytes);
269 if !align.is_power_of_two() {
270 return err!(HeapAllocNonPowerOfTwoAlignment(align));
272 let new_ptr = self.memory.reallocate(
274 Size::from_bytes(old_size),
275 Align::from_bytes(align, align).unwrap(),
276 Size::from_bytes(new_size),
277 Align::from_bytes(align, align).unwrap(),
278 MemoryKind::Rust.into(),
280 self.write_scalar(dest, Scalar::Ptr(new_ptr), dest_ty)?;
            // Presumably the `syscall` shim: only specific ids are recognized
            // (getrandom is rejected explicitly; all others are unimplemented).
284 // TODO: read `syscall` ids like `sysconf` ids and
285 // figure out some way to actually process some of them
287 // libc::syscall(NR_GETRANDOM, buf.as_mut_ptr(), buf.len(), GRND_NONBLOCK)
288 // is called if a `HashMap` is created the regular way.
289 match self.value_to_scalar(args[0])?.to_usize(self)? {
291 return err!(Unimplemented(
292 "miri does not support random number generators".to_owned(),
296 return err!(Unimplemented(
297 format!("miri does not support syscall id {}", id),
            // Presumably `dlsym`: read the symbol name (for the error message
            // only) and reject — dynamic loading is unsupported.
304 let _handle = self.into_ptr(args[0].value)?;
305 let symbol = self.into_ptr(args[1].value)?.unwrap_or_err()?.to_ptr()?;
306 let symbol_name = self.memory.read_c_str(symbol)?;
307 let err = format!("bad c unicode symbol: {:?}", symbol_name);
308 let symbol_name = ::std::str::from_utf8(symbol_name).unwrap_or(&err);
309 return err!(Unimplemented(format!(
310 "miri does not support dynamically loading libraries (requested symbol: {})",
            // Since miri aborts on panic, "catching" reduces to calling the
            // closure `f(data)` via a manually pushed stack frame and
            // returning 0 ("no panic").
315 "__rust_maybe_catch_panic" => {
316 // fn __rust_maybe_catch_panic(f: fn(*mut u8), data: *mut u8, data_ptr: *mut usize, vtable_ptr: *mut usize) -> u32
317 // We abort on panic, so not much is going on here, but we still have to call the closure
318 let u8_ptr_ty = self.tcx.mk_mut_ptr(self.tcx.types.u8);
319 let f = self.into_ptr(args[0].value)?.unwrap_or_err()?.to_ptr()?;
320 let data = self.into_ptr(args[1].value)?.unwrap_or_err()?;
321 let f_instance = self.memory.get_fn(f)?;
322 self.write_null(dest, dest_layout)?;
324 // Now we make a function call. TODO: Consider making this re-usable? EvalContext::step does sth. similar for the TLS dtors,
325 // and of course eval_main.
326 let mir = self.load_mir(f_instance.def)?;
327 self.push_stack_frame(
332 StackPopCleanup::Goto(dest_block),
            // Pass `data` as the closure's single argument by writing it into
            // the new frame's first (and only) argument local.
334 let mut args = self.frame().mir.args_iter();
336 let arg_local = args.next().ok_or_else(||
337 EvalErrorKind::AbiViolation(
338 "Argument to __rust_maybe_catch_panic does not take enough arguments."
342 let arg_dest = self.eval_place(&mir::Place::Local(arg_local))?;
343 self.write_ptr(arg_dest, data, u8_ptr_ty)?;
345 assert!(args.next().is_none(), "__rust_maybe_catch_panic argument has more arguments than expected");
347 // We ourselves return 0
348 self.write_null(dest, dest_layout)?;
350 // Don't fall through
354 "__rust_start_panic" => {
            // Presumably `memcmp`: byte-compare two `n`-byte regions and
            // write the i32 ordering result.
359 let left = self.into_ptr(args[0].value)?.unwrap_or_err()?;
360 let right = self.into_ptr(args[1].value)?.unwrap_or_err()?;
361 let n = Size::from_bytes(self.value_to_scalar(args[2])?.to_usize(self)?);
364 let left_bytes = self.memory.read_bytes(left, n)?;
365 let right_bytes = self.memory.read_bytes(right, n)?;
367 use std::cmp::Ordering::*;
368 match left_bytes.cmp(right_bytes) {
377 Scalar::from_i32(result),
            // Presumably `memrchr`: scan backwards (via `rev()`) for `val`,
            // returning a pointer to the last occurrence or NULL.
383 let ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?;
384 let val = self.value_to_scalar(args[1])?.to_bytes()? as u8;
385 let num = self.value_to_scalar(args[2])?.to_usize(self)?;
386 if let Some(idx) = self.memory.read_bytes(ptr, Size::from_bytes(num))?.iter().rev().position(
390 let new_ptr = ptr.ptr_offset(Size::from_bytes(num - idx as u64 - 1), &self)?;
391 self.write_ptr(dest, new_ptr, dest_ty)?;
393 self.write_null(dest, dest_layout)?;
            // Presumably `memchr`: forward scan for `val`, pointer to first
            // occurrence or NULL.
398 let ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?;
399 let val = self.value_to_scalar(args[1])?.to_bytes()? as u8;
400 let num = self.value_to_scalar(args[2])?.to_usize(self)?;
401 if let Some(idx) = self.memory.read_bytes(ptr, Size::from_bytes(num))?.iter().position(
405 let new_ptr = ptr.ptr_offset(Size::from_bytes(idx as u64), &self)?;
406 self.write_ptr(dest, new_ptr, dest_ty)?;
408 self.write_null(dest, dest_layout)?;
            // Presumably `getenv`: look the name up in miri's emulated
            // environment map; NULL if unset.
414 let name_ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?.to_ptr()?;
415 let name = self.memory.read_c_str(name_ptr)?;
416 match self.machine.env_vars.get(name) {
417 Some(&var) => Scalar::Ptr(var),
418 None => Scalar::null(self.memory.pointer_size()),
421 self.write_scalar(dest, result, dest_ty)?;
            // Presumably `unsetenv`: remove from the emulated environment,
            // freeing the stored value; -1 on an invalid name.
425 let mut success = None;
427 let name_ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?;
428 if !name_ptr.is_null() {
429 let name = self.memory.read_c_str(name_ptr.to_ptr()?)?;
430 if !name.is_empty() && !name.contains(&b'=') {
431 success = Some(self.machine.env_vars.remove(name));
435 if let Some(old) = success {
436 if let Some(var) = old {
437 self.memory.deallocate(var, None, MemoryKind::Env.into())?;
439 self.write_null(dest, dest_layout)?;
441 self.write_scalar(dest, Scalar::from_int(-1, dest_layout.size), dest_ty)?;
            // Presumably `setenv`: copy the value (plus NUL terminator) into
            // Env-kind memory, insert it, and free any previous value.
448 let name_ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?;
449 let value_ptr = self.into_ptr(args[1].value)?.unwrap_or_err()?.to_ptr()?;
450 let value = self.memory.read_c_str(value_ptr)?;
451 if !name_ptr.is_null() {
452 let name = self.memory.read_c_str(name_ptr.to_ptr()?)?;
453 if !name.is_empty() && !name.contains(&b'=') {
454 new = Some((name.to_owned(), value.to_owned()));
458 if let Some((name, value)) = new {
459 // +1 for the null terminator
460 let value_copy = self.memory.allocate(
461 Size::from_bytes((value.len() + 1) as u64),
462 Align::from_bytes(1, 1).unwrap(),
463 MemoryKind::Env.into(),
465 self.memory.write_bytes(value_copy.into(), &value)?;
466 let trailing_zero_ptr = value_copy.offset(Size::from_bytes(value.len() as u64), &self)?.into();
467 self.memory.write_bytes(trailing_zero_ptr, &[0])?;
468 if let Some(var) = self.machine.env_vars.insert(
473 self.memory.deallocate(var, None, MemoryKind::Env.into())?;
475 self.write_null(dest, dest_layout)?;
477 self.write_scalar(dest, Scalar::from_int(-1, dest_layout.size), dest_ty)?;
            // Presumably `write`: fds 1/2 go to the host's stdout/stderr;
            // other fds are ignored but reported as fully written.
482 let fd = self.value_to_scalar(args[0])?.to_bytes()?;
483 let buf = self.into_ptr(args[1].value)?.unwrap_or_err()?;
484 let n = self.value_to_scalar(args[2])?.to_bytes()? as u64;
485 trace!("Called write({:?}, {:?}, {:?})", fd, buf, n);
486 let result = if fd == 1 || fd == 2 {
488 use std::io::{self, Write};
490 let buf_cont = self.memory.read_bytes(buf, Size::from_bytes(n))?;
491 let res = if fd == 1 {
492 io::stdout().write(buf_cont)
494 io::stderr().write(buf_cont)
501 warn!("Ignored output to FD {}", fd);
502 n as i64 // pretend it all went well
503 }; // now result is the value we return back to the program
504 let ptr_size = self.memory.pointer_size();
507 Scalar::from_int(result, ptr_size),
            // Presumably `strlen`: length of the NUL-terminated string.
513 let ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?.to_ptr()?;
514 let n = self.memory.read_c_str(ptr)?.len();
515 let ptr_size = self.memory.pointer_size();
516 self.write_scalar(dest, Scalar::from_uint(n as u64, ptr_size), dest_ty)?;
519 // Some things needed for sys::thread initialization to go through
520 "signal" | "sigaction" | "sigaltstack" => {
521 let ptr_size = self.memory.pointer_size();
522 self.write_scalar(dest, Scalar::null(ptr_size), dest_ty)?;
            // Presumably `sysconf`: resolve known `_SC_*` libc constants by
            // path through const-eval and map them to canned answers.
526 let name = self.value_to_scalar(args[0])?.to_usize(self)?;
527 let ptr_size = self.memory.pointer_size();
529 trace!("sysconf() called with name {}", name);
530 // cache the sysconf integers via miri's global cache
532 (&["libc", "_SC_PAGESIZE"], Scalar::from_int(4096, ptr_size)),
533 (&["libc", "_SC_GETPW_R_SIZE_MAX"], Scalar::from_int(-1, ptr_size)),
535 let mut result = None;
536 for &(path, path_value) in paths {
537 if let Ok(instance) = self.resolve_path(path) {
542 let const_val = self.const_eval(cid)?;
543 let value = const_val.unwrap_usize(self.tcx.tcx);
545 result = Some(path_value);
550 if let Some(result) = result {
551 self.write_scalar(dest, result, dest_ty)?;
553 return err!(Unimplemented(
554 format!("Unimplemented sysconf name: {}", name),
559 // Hook pthread calls that go to the thread-local storage memory subsystem
560 "pthread_key_create" => {
561 let key_ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?;
563 // Extract the function type out of the signature (that seems easier than constructing it ourselves...)
                // A NULL destructor pointer (Bits 0 of pointer width) means
                // "no dtor"; any other raw-bits value is not a valid fn ptr.
564 let dtor = match self.into_ptr(args[1].value)?.unwrap_or_err()? {
565 Scalar::Ptr(dtor_ptr) => Some(self.memory.get_fn(dtor_ptr)?),
566 Scalar::Bits { bits: 0, size } => {
567 assert_eq!(size as u64, self.memory.pointer_size().bytes());
570 Scalar::Bits { .. } => return err!(ReadBytesAsPointer),
573 // Figure out how large a pthread TLS key actually is. This is libc::pthread_key_t.
574 let key_type = args[0].ty.builtin_deref(true)
575 .ok_or_else(|| EvalErrorKind::AbiViolation("Wrong signature used for pthread_key_create: First argument must be a raw pointer.".to_owned()))?.ty;
576 let key_layout = self.layout_of(key_type)?;
578 // Create key and write it into the memory where key_ptr wants it
579 let key = self.memory.create_tls_key(dtor) as u128;
                // Reject keys that would not fit in pthread_key_t.
580 if key_layout.size.bits() < 128 && key >= (1u128 << key_layout.size.bits() as u128) {
581 return err!(OutOfTls);
583 self.memory.write_scalar(
586 Scalar::from_uint(key, key_layout.size).into(),
592 // Return success (0)
593 self.write_null(dest, dest_layout)?;
595 "pthread_key_delete" => {
596 let key = self.value_to_scalar(args[0])?.to_bytes()?;
597 self.memory.delete_tls_key(key)?;
598 // Return success (0)
599 self.write_null(dest, dest_layout)?;
601 "pthread_getspecific" => {
602 let key = self.value_to_scalar(args[0])?.to_bytes()?;
603 let ptr = self.memory.load_tls(key)?;
604 self.write_ptr(dest, ptr, dest_ty)?;
606 "pthread_setspecific" => {
607 let key = self.value_to_scalar(args[0])?.to_bytes()?;
608 let new_ptr = self.into_ptr(args[1].value)?.unwrap_or_err()?;
609 self.memory.store_tls(key, new_ptr)?;
611 // Return success (0)
612 self.write_null(dest, dest_layout)?;
            // macOS-flavored TLS entry points are rejected outright.
616 return err!(Unimplemented("Thread-local store is not fully supported on macOS".to_owned()));
619 // Stub out all the other pthread calls to just return 0
620 link_name if link_name.starts_with("pthread_") => {
621 debug!("ignoring C ABI call: {}", link_name);
622 self.write_null(dest, dest_layout)?;
            // Presumably `mmap`: echo the requested address back (see the
            // comment — the guard-page code depends on this exact value).
626 // This is a horrible hack, but well... the guard page mechanism calls mmap and expects a particular return value, so we give it that value
627 let addr = self.into_ptr(args[0].value)?.unwrap_or_err()?;
628 self.write_ptr(dest, addr, dest_ty)?;
            // Windows shims below: minimal stubs that keep libstd startup
            // happy (nonzero handle, no-op critical sections, TLS via miri's
            // own key machinery).
632 "AddVectoredExceptionHandler" => {
633 // any non zero value works for the stdlib. This is just used for stackoverflows anyway
634 let ptr_size = self.memory.pointer_size();
635 self.write_scalar(dest, Scalar::from_int(1, ptr_size), dest_ty)?;
637 "InitializeCriticalSection" |
638 "EnterCriticalSection" |
639 "LeaveCriticalSection" |
640 "DeleteCriticalSection" |
642 // Function does not return anything, nothing to do
646 "TryEnterCriticalSection" => {
647 // pretend these do not exist/nothing happened, by returning zero
648 self.write_scalar(dest, Scalar::from_int(0, dest_layout.size), dest_ty)?;
651 // this is c::ERROR_CALL_NOT_IMPLEMENTED
652 self.write_scalar(dest, Scalar::from_int(120, dest_layout.size), dest_ty)?;
            // Presumably `TlsAlloc` and friends: Windows has no native TLS
            // destructors, so the key is created with `None` dtor.
657 // This just creates a key; Windows does not natively support TLS dtors.
659 // Create key and return it
660 let key = self.memory.create_tls_key(None) as u128;
662 // Figure out how large a TLS key actually is. This is c::DWORD.
663 if dest_layout.size.bits() < 128 && key >= (1u128 << dest_layout.size.bits() as u128) {
664 return err!(OutOfTls);
666 self.write_scalar(dest, Scalar::from_uint(key, dest_layout.size), dest_layout.ty)?;
669 let key = self.value_to_scalar(args[0])?.to_bytes()?;
670 let ptr = self.memory.load_tls(key)?;
671 self.write_ptr(dest, ptr, dest_ty)?;
674 let key = self.value_to_scalar(args[0])?.to_bytes()?;
675 let new_ptr = self.into_ptr(args[1].value)?.unwrap_or_err()?;
676 self.memory.store_tls(key, new_ptr)?;
678 // Return success (1)
679 self.write_scalar(dest, Scalar::from_int(1, dest_layout.size), dest_ty)?;
682 // We can't execute anything else
684 return err!(Unimplemented(
685 format!("can't call foreign function: {}", link_name),
690 // Since we pushed no stack frame, the main loop will act
691 // as if the call just completed and it's returning to the
        // Epilogue: no frame was pushed for any shim above, so emulate a
        // completed call — dump the result local and jump to the return block.
693 self.dump_local(dest);
694 self.goto_block(dest_block);
698 /// Get an instance for a path.
    ///
    /// The first segment names a crate (matched against original crate
    /// names); remaining segments are walked through `item_children`. Returns
    /// a monomorphic instance for the final item, or `PathNotFound` if any
    /// segment does not resolve. NOTE(review): lines are elided here — the
    /// closure/option plumbing between the `find` and the final error is not
    /// fully visible.
699 fn resolve_path(&self, path: &[&str]) -> EvalResult<'tcx, ty::Instance<'tcx>> {
703 .find(|&&krate| self.tcx.original_crate_name(krate) == path[0])
707 index: CRATE_DEF_INDEX,
709 let mut items = self.tcx.item_children(krate);
710 let mut path_it = path.iter().skip(1).peekable();
712 while let Some(segment) = path_it.next() {
            // `mem::replace` takes the current children list so we can
            // reassign `items` to the next level inside the loop.
713 for item in mem::replace(&mut items, Default::default()).iter() {
714 if item.ident.name == *segment {
                    // Last segment: this is the item we were looking for.
715 if path_it.peek().is_none() {
716 return Some(ty::Instance::mono(self.tcx.tcx, item.def.def_id()));
719 items = self.tcx.item_children(item.def.def_id());
        // Fell through every level without resolving: report the full path.
727 let path = path.iter().map(|&s| s.to_owned()).collect();
728 EvalErrorKind::PathNotFound(path).into()
    // Handles calls whose MIR is unavailable (running against a libstd
    // without full MIR): foreign items are forwarded to `call_foreign_item`;
    // a short list of known libstd functions is emulated or rejected with a
    // specific error; everything else is `NoMirFor`. NOTE(review): the
    // `fn call_missing_fn(` line and several match-arm headers are elided
    // from this chunk.
734 instance: ty::Instance<'tcx>,
735 destination: Option<(Place, mir::BasicBlock)>,
736 args: &[ValTy<'tcx>],
737 sig: ty::FnSig<'tcx>,
739 ) -> EvalResult<'tcx> {
740 // In some cases in non-MIR libstd-mode, not having a destination is legit. Handle these early.
        // Panic entry points: miri aborts on panic, surfaced as `Panic`.
742 "std::panicking::rust_panic_with_hook" |
743 "core::panicking::panic_fmt::::panic_impl" |
744 "std::rt::begin_panic_fmt" => return err!(Panic),
        // Everything below needs a return destination; a missing one is
        // reported as the original NoMirFor error.
748 let dest_ty = sig.output();
749 let (dest, dest_block) = destination.ok_or_else(
750 || EvalErrorKind::NoMirFor(path.clone()),
753 if self.tcx.is_foreign_item(instance.def_id()) {
754 // An external function
755 // TODO: That functions actually has a similar preamble to what follows here. May make sense to
756 // unify these two mechanisms for "hooking into missing functions".
757 self.call_foreign_item(
768 // A Rust function is missing, which means we are running with MIR missing for libstd (or other dependencies).
769 // Still, we can make many things mostly work by "emulating" or ignoring some functions.
        // `_eprint` output is dropped with a warning (elided lines presumably
        // emit it).
771 "std::io::_eprint" => {
773 "Ignoring output. To run programs that print, make sure you have a libstd with full MIR."
776 "std::thread::Builder::new" => {
777 return err!(Unimplemented("miri does not support threading".to_owned()))
779 "std::env::args" => {
780 return err!(Unimplemented(
781 "miri does not support program arguments".to_owned(),
784 "std::panicking::panicking" |
785 "std::rt::panicking" => {
786 // we abort on panic -> `std::rt::panicking` always returns false
787 let bool = self.tcx.types.bool;
788 self.write_scalar(dest, Scalar::from_bool(false), bool)?;
791 _ => return err!(NoMirFor(path)),
794 // Since we pushed no stack frame, the main loop will act
795 // as if the call just completed and it's returning to the
        // Epilogue mirrors call_foreign_item: pretend the call returned.
797 self.dump_local(dest);
798 self.goto_block(dest_block);
    /// Writes a zero scalar of `dest_layout.size` (i.e. a NULL pointer /
    /// integer 0 of the destination's width) into `dest`.
802 fn write_null(&mut self, dest: Place, dest_layout: TyLayout<'tcx>) -> EvalResult<'tcx> {
803 self.write_scalar(dest, Scalar::null(dest_layout.size), dest_layout.ty)