// src/fn_call.rs — Miri: emulation of foreign items and functions without MIR.
// (commit subject: "stacked borrows: track refs and derefs")
1 use rustc::ty;
2 use rustc::ty::layout::{Align, LayoutOf, Size};
3 use rustc::hir::def_id::{DefId, CRATE_DEF_INDEX};
4 use rustc::mir;
5 use syntax::attr;
6
7 use std::mem;
8
9 use super::*;
10
pub trait EvalContextExt<'tcx, 'mir> {
    /// Emulate calling a foreign item, fail if the item is not supported.
    /// This function will handle `goto_block` if needed.
    fn emulate_foreign_item(
        &mut self,
        def_id: DefId,
        args: &[OpTy<'tcx, Borrow>],
        dest: PlaceTy<'tcx, Borrow>,
        ret: mir::BasicBlock,
    ) -> EvalResult<'tcx>;

    /// Resolve a path like `["libc", "_SC_PAGESIZE"]` to a monomorphic
    /// instance of the named item, or fail with `PathNotFound`.
    fn resolve_path(&self, path: &[&str]) -> EvalResult<'tcx, ty::Instance<'tcx>>;

    /// Emulate a function that should have MIR but does not.
    /// This is solely to support execution without full MIR.
    /// Fail if emulating this function is not supported.
    /// This function will handle `goto_block` if needed.
    fn emulate_missing_fn(
        &mut self,
        path: String,
        args: &[OpTy<'tcx, Borrow>],
        dest: Option<PlaceTy<'tcx, Borrow>>,
        ret: Option<mir::BasicBlock>,
    ) -> EvalResult<'tcx>;

    /// Entry point for dispatching a function call: runs the CTFE hooks,
    /// extra lang-item hooks, and foreign-item / missing-MIR emulation.
    /// Returns `Ok(None)` when the call was fully handled (including
    /// `goto_block`), or `Ok(Some(mir))` when the caller should execute
    /// the returned MIR itself.
    fn find_fn(
        &mut self,
        instance: ty::Instance<'tcx>,
        args: &[OpTy<'tcx, Borrow>],
        dest: Option<PlaceTy<'tcx, Borrow>>,
        ret: Option<mir::BasicBlock>,
    ) -> EvalResult<'tcx, Option<&'mir mir::Mir<'tcx>>>;

    /// Write the integer 0, sized to `dest`'s layout, into `dest`.
    fn write_null(&mut self, dest: PlaceTy<'tcx, Borrow>) -> EvalResult<'tcx>;
}
46
47 impl<'a, 'mir, 'tcx: 'mir + 'a> EvalContextExt<'tcx, 'mir> for super::MiriEvalContext<'a, 'mir, 'tcx> {
48     fn find_fn(
49         &mut self,
50         instance: ty::Instance<'tcx>,
51         args: &[OpTy<'tcx, Borrow>],
52         dest: Option<PlaceTy<'tcx, Borrow>>,
53         ret: Option<mir::BasicBlock>,
54     ) -> EvalResult<'tcx, Option<&'mir mir::Mir<'tcx>>> {
55         trace!("eval_fn_call: {:#?}, {:?}", instance, dest.map(|place| *place));
56
57         // first run the common hooks also supported by CTFE
58         if self.hook_fn(instance, args, dest)? {
59             self.goto_block(ret)?;
60             return Ok(None);
61         }
62         // there are some more lang items we want to hook that CTFE does not hook (yet)
63         if self.tcx.lang_items().align_offset_fn() == Some(instance.def.def_id()) {
64             // FIXME: return a real value in case the target allocation has an
65             // alignment bigger than the one requested
66             let n = u128::max_value();
67             let dest = dest.unwrap();
68             let n = self.truncate(n, dest.layout);
69             self.write_scalar(Scalar::from_uint(n, dest.layout.size), dest)?;
70             self.goto_block(ret)?;
71             return Ok(None);
72         }
73
74         // Try to see if we can do something about foreign items
75         if self.tcx.is_foreign_item(instance.def_id()) {
76             // An external function that we cannot find MIR for, but we can still run enough
77             // of them to make miri viable.
78             self.emulate_foreign_item(
79                 instance.def_id(),
80                 args,
81                 dest.unwrap(),
82                 ret.unwrap(),
83             )?;
84             // `goto_block` already handled
85             return Ok(None);
86         }
87
88         // Otherwise we really want to see the MIR -- but if we do not have it, maybe we can
89         // emulate something. This is a HACK to support running without a full-MIR libstd.
90         let mir = match self.load_mir(instance.def) {
91             Ok(mir) => mir,
92             Err(EvalError { kind: EvalErrorKind::NoMirFor(path), .. }) => {
93                 self.emulate_missing_fn(
94                     path,
95                     args,
96                     dest,
97                     ret,
98                 )?;
99                 // `goto_block` already handled
100                 return Ok(None);
101             }
102             Err(other) => return Err(other),
103         };
104
105         Ok(Some(mir))
106     }
107
    /// Emulate a call to the foreign item `def_id` with arguments `args`,
    /// writing any result into `dest` and then continuing at `ret`.
    ///
    /// The shim is selected by the item's `#[link_name]` attribute when
    /// present, otherwise by its item name; unknown names fail with
    /// `Unimplemented`. `goto_block` is handled at the bottom, except for
    /// arms that return early.
    fn emulate_foreign_item(
        &mut self,
        def_id: DefId,
        args: &[OpTy<'tcx, Borrow>],
        dest: PlaceTy<'tcx, Borrow>,
        ret: mir::BasicBlock,
    ) -> EvalResult<'tcx> {
        let attrs = self.tcx.get_attrs(def_id);
        // Prefer the linker-visible name; that is what an actual call resolves against.
        let link_name = match attr::first_attr_value_str_by_name(&attrs, "link_name") {
            Some(name) => name.as_str(),
            None => self.tcx.item_name(def_id).as_str(),
        };

        match &link_name[..] {
            // C heap: `malloc(0)` returns NULL; otherwise allocate with pointer alignment.
            "malloc" => {
                let size = self.read_scalar(args[0])?.to_usize(&self)?;
                if size == 0 {
                    self.write_null(dest)?;
                } else {
                    let align = self.tcx.data_layout.pointer_align;
                    let ptr = self.memory.allocate(Size::from_bytes(size), align, MiriMemoryKind::C.into())?;
                    self.write_scalar(Scalar::Ptr(ptr), dest)?;
                }
            }

            // C heap: `free(NULL)` is a no-op.
            "free" => {
                let ptr = self.read_scalar(args[0])?.not_undef()?;
                if !ptr.is_null() {
                    self.memory.deallocate(
                        ptr.to_ptr()?,
                        None,
                        MiriMemoryKind::C.into(),
                    )?;
                }
            }

            // Rust allocator entry point: reject zero size and non-power-of-two
            // alignment, then allocate in the `Rust` memory kind.
            "__rust_alloc" => {
                let size = self.read_scalar(args[0])?.to_usize(&self)?;
                let align = self.read_scalar(args[1])?.to_usize(&self)?;
                if size == 0 {
                    return err!(HeapAllocZeroBytes);
                }
                if !align.is_power_of_two() {
                    return err!(HeapAllocNonPowerOfTwoAlignment(align));
                }
                let ptr = self.memory.allocate(Size::from_bytes(size),
                                               Align::from_bytes(align, align).unwrap(),
                                               MiriMemoryKind::Rust.into())?;
                self.write_scalar(Scalar::Ptr(ptr), dest)?;
            }
            // Like `__rust_alloc`, but additionally zero-fills the new block.
            "__rust_alloc_zeroed" => {
                let size = self.read_scalar(args[0])?.to_usize(&self)?;
                let align = self.read_scalar(args[1])?.to_usize(&self)?;
                if size == 0 {
                    return err!(HeapAllocZeroBytes);
                }
                if !align.is_power_of_two() {
                    return err!(HeapAllocNonPowerOfTwoAlignment(align));
                }
                let ptr = self.memory.allocate(Size::from_bytes(size),
                                               Align::from_bytes(align, align).unwrap(),
                                               MiriMemoryKind::Rust.into())?;
                self.memory.write_repeat(ptr.into(), 0, Size::from_bytes(size))?;
                self.write_scalar(Scalar::Ptr(ptr), dest)?;
            }
            // Deallocate a `Rust`-kind block; the caller-supplied size/align are
            // checked against the allocation by `deallocate`.
            "__rust_dealloc" => {
                let ptr = self.read_scalar(args[0])?.to_ptr()?;
                let old_size = self.read_scalar(args[1])?.to_usize(&self)?;
                let align = self.read_scalar(args[2])?.to_usize(&self)?;
                if old_size == 0 {
                    return err!(HeapAllocZeroBytes);
                }
                if !align.is_power_of_two() {
                    return err!(HeapAllocNonPowerOfTwoAlignment(align));
                }
                self.memory.deallocate(
                    ptr,
                    Some((Size::from_bytes(old_size), Align::from_bytes(align, align).unwrap())),
                    MiriMemoryKind::Rust.into(),
                )?;
            }
            // Resize a `Rust`-kind block; old and new alignment are the same here.
            "__rust_realloc" => {
                let ptr = self.read_scalar(args[0])?.to_ptr()?;
                let old_size = self.read_scalar(args[1])?.to_usize(&self)?;
                let align = self.read_scalar(args[2])?.to_usize(&self)?;
                let new_size = self.read_scalar(args[3])?.to_usize(&self)?;
                if old_size == 0 || new_size == 0 {
                    return err!(HeapAllocZeroBytes);
                }
                if !align.is_power_of_two() {
                    return err!(HeapAllocNonPowerOfTwoAlignment(align));
                }
                let new_ptr = self.memory.reallocate(
                    ptr,
                    Size::from_bytes(old_size),
                    Align::from_bytes(align, align).unwrap(),
                    Size::from_bytes(new_size),
                    Align::from_bytes(align, align).unwrap(),
                    MiriMemoryKind::Rust.into(),
                )?;
                self.write_scalar(Scalar::Ptr(new_ptr), dest)?;
            }

            "syscall" => {
                // TODO: read `syscall` ids like `sysconf` ids and
                // figure out some way to actually process some of them
                //
                // libc::syscall(NR_GETRANDOM, buf.as_mut_ptr(), buf.len(), GRND_NONBLOCK)
                // is called if a `HashMap` is created the regular way.
                // NOTE(review): 318/511 presumably correspond to getrandom on
                // the supported targets — confirm against the target's syscall table.
                match self.read_scalar(args[0])?.to_usize(&self)? {
                    318 | 511 => {
                        return err!(Unimplemented(
                            "miri does not support random number generators".to_owned(),
                        ))
                    }
                    id => {
                        return err!(Unimplemented(
                            format!("miri does not support syscall id {}", id),
                        ))
                    }
                }
            }

            // Dynamic symbol lookup is never supported; read the requested
            // symbol name only to produce a helpful error message.
            "dlsym" => {
                let _handle = self.read_scalar(args[0])?;
                let symbol = self.read_scalar(args[1])?.to_ptr()?;
                let symbol_name = self.memory.read_c_str(symbol)?;
                let err = format!("bad c unicode symbol: {:?}", symbol_name);
                let symbol_name = ::std::str::from_utf8(symbol_name).unwrap_or(&err);
                return err!(Unimplemented(format!(
                    "miri does not support dynamically loading libraries (requested symbol: {})",
                    symbol_name
                )));
            }

            "__rust_maybe_catch_panic" => {
                // fn __rust_maybe_catch_panic(f: fn(*mut u8), data: *mut u8, data_ptr: *mut usize, vtable_ptr: *mut usize) -> u32
                // We abort on panic, so not much is going on here, but we still have to call the closure
                let f = self.read_scalar(args[0])?.to_ptr()?;
                let data = self.read_scalar(args[1])?.not_undef()?;
                let f_instance = self.memory.get_fn(f)?;
                // NOTE(review): `dest` is written again near the end of this arm;
                // this early write looks redundant (harmless, same value).
                self.write_null(dest)?;
                trace!("__rust_maybe_catch_panic: {:?}", f_instance);

                // Now we make a function call.  TODO: Consider making this re-usable?  EvalContext::step does sth. similar for the TLS dtors,
                // and of course eval_main.
                let mir = self.load_mir(f_instance.def)?;
                // The closure returns `()`; give it a dangling place of unit type.
                let ret_place = MPlaceTy::dangling(self.layout_of(self.tcx.mk_unit())?, &self).into();
                self.push_stack_frame(
                    f_instance,
                    mir.span,
                    mir,
                    Some(ret_place),
                    StackPopCleanup::Goto(Some(ret)), // directly return to caller
                )?;
                let mut args = self.frame().mir.args_iter();

                let arg_local = args.next().ok_or_else(||
                    EvalErrorKind::AbiViolation(
                        "Argument to __rust_maybe_catch_panic does not take enough arguments."
                            .to_owned(),
                    ),
                )?;
                // Pass `data` as the closure's single argument.
                let arg_dest = self.eval_place(&mir::Place::Local(arg_local))?;
                self.write_scalar(data, arg_dest)?;

                assert!(args.next().is_none(), "__rust_maybe_catch_panic argument has more arguments than expected");

                // We ourselves will return 0, eventually (because we will not return if we paniced)
                self.write_null(dest)?;

                // Don't fall through, we do NOT want to `goto_block`!
                return Ok(());
            }

            "__rust_start_panic" =>
                return err!(MachineError("the evaluated program panicked".to_string())),

            // Byte-wise comparison of two memory ranges, C-style result sign.
            "memcmp" => {
                let left = self.read_scalar(args[0])?.not_undef()?;
                let right = self.read_scalar(args[1])?.not_undef()?;
                let n = Size::from_bytes(self.read_scalar(args[2])?.to_usize(&self)?);

                let result = {
                    let left_bytes = self.memory.read_bytes(left, n)?;
                    let right_bytes = self.memory.read_bytes(right, n)?;

                    use std::cmp::Ordering::*;
                    match left_bytes.cmp(right_bytes) {
                        Less => -1i32,
                        Equal => 0,
                        Greater => 1,
                    }
                };

                self.write_scalar(
                    Scalar::from_int(result, Size::from_bits(32)),
                    dest,
                )?;
            }

            // Find the LAST occurrence of a byte; returns a pointer into the
            // buffer, or NULL when not found.
            "memrchr" => {
                let ptr = self.read_scalar(args[0])?.not_undef()?;
                let val = self.read_scalar(args[1])?.to_bytes()? as u8;
                let num = self.read_scalar(args[2])?.to_usize(&self)?;
                if let Some(idx) = self.memory.read_bytes(ptr, Size::from_bytes(num))?.iter().rev().position(
                    |&c| c == val,
                )
                {
                    // `idx` counts from the end; convert back to a forward offset.
                    let new_ptr = ptr.ptr_offset(Size::from_bytes(num - idx as u64 - 1), &self)?;
                    self.write_scalar(new_ptr, dest)?;
                } else {
                    self.write_null(dest)?;
                }
            }

            // Find the FIRST occurrence of a byte; NULL when not found.
            "memchr" => {
                let ptr = self.read_scalar(args[0])?.not_undef()?;
                let val = self.read_scalar(args[1])?.to_bytes()? as u8;
                let num = self.read_scalar(args[2])?.to_usize(&self)?;
                if let Some(idx) = self.memory.read_bytes(ptr, Size::from_bytes(num))?.iter().position(
                    |&c| c == val,
                )
                {
                    let new_ptr = ptr.ptr_offset(Size::from_bytes(idx as u64), &self)?;
                    self.write_scalar(new_ptr, dest)?;
                } else {
                    self.write_null(dest)?;
                }
            }

            // Environment lookup against miri's own env-var map; NULL when unset.
            "getenv" => {
                let result = {
                    let name_ptr = self.read_scalar(args[0])?.to_ptr()?;
                    let name = self.memory.read_c_str(name_ptr)?;
                    match self.machine.env_vars.get(name) {
                        Some(&var) => Scalar::Ptr(var),
                        None => Scalar::ptr_null(*self.tcx),
                    }
                };
                self.write_scalar(result, dest)?;
            }

            // Remove a var from miri's env map; returns 0 on success, -1 for
            // an invalid name (NULL, empty, or containing '=').
            "unsetenv" => {
                let mut success = None;
                {
                    let name_ptr = self.read_scalar(args[0])?.not_undef()?;
                    if !name_ptr.is_null() {
                        let name = self.memory.read_c_str(name_ptr.to_ptr()?)?;
                        if !name.is_empty() && !name.contains(&b'=') {
                            success = Some(self.machine.env_vars.remove(name));
                        }
                    }
                }
                if let Some(old) = success {
                    if let Some(var) = old {
                        // Free the value allocation of the removed variable.
                        self.memory.deallocate(var, None, MiriMemoryKind::Env.into())?;
                    }
                    self.write_null(dest)?;
                } else {
                    self.write_scalar(Scalar::from_int(-1, dest.layout.size), dest)?;
                }
            }

            // Insert/overwrite a var: copy the value into a fresh `Env`
            // allocation (NUL-terminated) and free any previous value.
            "setenv" => {
                let mut new = None;
                {
                    let name_ptr = self.read_scalar(args[0])?.not_undef()?;
                    let value_ptr = self.read_scalar(args[1])?.to_ptr()?;
                    let value = self.memory.read_c_str(value_ptr)?;
                    if !name_ptr.is_null() {
                        let name = self.memory.read_c_str(name_ptr.to_ptr()?)?;
                        if !name.is_empty() && !name.contains(&b'=') {
                            new = Some((name.to_owned(), value.to_owned()));
                        }
                    }
                }
                if let Some((name, value)) = new {
                    // +1 for the null terminator
                    let value_copy = self.memory.allocate(
                        Size::from_bytes((value.len() + 1) as u64),
                        Align::from_bytes(1, 1).unwrap(),
                        MiriMemoryKind::Env.into(),
                    )?;
                    self.memory.write_bytes(value_copy.into(), &value)?;
                    let trailing_zero_ptr = value_copy.offset(Size::from_bytes(value.len() as u64), &self)?.into();
                    self.memory.write_bytes(trailing_zero_ptr, &[0])?;
                    if let Some(var) = self.machine.env_vars.insert(
                        name.to_owned(),
                        value_copy,
                    )
                    {
                        self.memory.deallocate(var, None, MiriMemoryKind::Env.into())?;
                    }
                    self.write_null(dest)?;
                } else {
                    self.write_scalar(Scalar::from_int(-1, dest.layout.size), dest)?;
                }
            }

            // Forward fd 1/2 to the host's stdout/stderr; other fds are ignored
            // but reported as fully written.
            "write" => {
                let fd = self.read_scalar(args[0])?.to_bytes()?;
                let buf = self.read_scalar(args[1])?.not_undef()?;
                let n = self.read_scalar(args[2])?.to_bytes()? as u64;
                trace!("Called write({:?}, {:?}, {:?})", fd, buf, n);
                let result = if fd == 1 || fd == 2 {
                    // stdout/stderr
                    use std::io::{self, Write};

                    let buf_cont = self.memory.read_bytes(buf, Size::from_bytes(n))?;
                    let res = if fd == 1 {
                        io::stdout().write(buf_cont)
                    } else {
                        io::stderr().write(buf_cont)
                    };
                    match res {
                        Ok(n) => n as i64,
                        Err(_) => -1,
                    }
                } else {
                    warn!("Ignored output to FD {}", fd);
                    n as i64 // pretend it all went well
                }; // now result is the value we return back to the program
                self.write_scalar(
                    Scalar::from_int(result, dest.layout.size),
                    dest,
                )?;
            }

            // Length of a NUL-terminated string in interpreter memory.
            "strlen" => {
                let ptr = self.read_scalar(args[0])?.to_ptr()?;
                let n = self.memory.read_c_str(ptr)?.len();
                self.write_scalar(Scalar::from_uint(n as u64, dest.layout.size), dest)?;
            }

            // Some things needed for sys::thread initialization to go through
            "signal" | "sigaction" | "sigaltstack" => {
                self.write_scalar(Scalar::from_int(0, dest.layout.size), dest)?;
            }

            "sysconf" => {
                let name = self.read_scalar(args[0])?.to_i32()?;

                trace!("sysconf() called with name {}", name);
                // cache the sysconf integers via miri's global cache
                let paths = &[
                    (&["libc", "_SC_PAGESIZE"], Scalar::from_int(4096, dest.layout.size)),
                    (&["libc", "_SC_GETPW_R_SIZE_MAX"], Scalar::from_int(-1, dest.layout.size)),
                ];
                let mut result = None;
                for &(path, path_value) in paths {
                    if let Ok(instance) = self.resolve_path(path) {
                        let cid = GlobalId {
                            instance,
                            promoted: None,
                        };
                        // Const-eval the libc constant to learn its numeric value,
                        // then compare it to the requested `name`.
                        let const_val = self.const_eval(cid)?;
                        let value = const_val.unwrap_bits(
                            self.tcx.tcx,
                            ty::ParamEnv::empty().and(self.tcx.types.i32)) as i32;
                        if value == name {
                            result = Some(path_value);
                            break;
                        }
                    }
                }
                if let Some(result) = result {
                    self.write_scalar(result, dest)?;
                } else {
                    return err!(Unimplemented(
                        format!("Unimplemented sysconf name: {}", name),
                    ));
                }
            }

            // Hook pthread calls that go to the thread-local storage memory subsystem
            "pthread_key_create" => {
                let key_ptr = self.read_scalar(args[0])?.to_ptr()?;

                // Extract the function type out of the signature (that seems easier than constructing it ourselves...)
                let dtor = match self.read_scalar(args[1])?.not_undef()? {
                    Scalar::Ptr(dtor_ptr) => Some(self.memory.get_fn(dtor_ptr)?),
                    Scalar::Bits { bits: 0, size } => {
                        // A NULL dtor pointer; must still be pointer-sized.
                        assert_eq!(size as u64, self.memory.pointer_size().bytes());
                        None
                    },
                    Scalar::Bits { .. } => return err!(ReadBytesAsPointer),
                };

                // Figure out how large a pthread TLS key actually is. This is libc::pthread_key_t.
                let key_type = args[0].layout.ty.builtin_deref(true)
                                   .ok_or_else(|| EvalErrorKind::AbiViolation("Wrong signature used for pthread_key_create: First argument must be a raw pointer.".to_owned()))?.ty;
                let key_layout = self.layout_of(key_type)?;

                // Create key and write it into the memory where key_ptr wants it
                let key = self.machine.tls.create_tls_key(dtor, *self.tcx) as u128;
                // Refuse keys that do not fit into `pthread_key_t`.
                if key_layout.size.bits() < 128 && key >= (1u128 << key_layout.size.bits() as u128) {
                    return err!(OutOfTls);
                }
                self.memory.write_scalar(
                    key_ptr,
                    key_layout.align,
                    Scalar::from_uint(key, key_layout.size).into(),
                    key_layout.size,
                )?;

                // Return success (0)
                self.write_null(dest)?;
            }
            "pthread_key_delete" => {
                let key = self.read_scalar(args[0])?.to_bytes()?;
                self.machine.tls.delete_tls_key(key)?;
                // Return success (0)
                self.write_null(dest)?;
            }
            "pthread_getspecific" => {
                let key = self.read_scalar(args[0])?.to_bytes()?;
                let ptr = self.machine.tls.load_tls(key)?;
                self.write_scalar(ptr, dest)?;
            }
            "pthread_setspecific" => {
                let key = self.read_scalar(args[0])?.to_bytes()?;
                let new_ptr = self.read_scalar(args[1])?.not_undef()?;
                self.machine.tls.store_tls(key, new_ptr)?;

                // Return success (0)
                self.write_null(dest)?;
            }

            "_tlv_atexit" => {
                // FIXME: Register the dtor
            },

            // Determining stack base address
            "pthread_attr_init" | "pthread_attr_destroy" | "pthread_attr_get_np" |
            "pthread_getattr_np" | "pthread_self" | "pthread_get_stacksize_np" => {
                self.write_null(dest)?;
            }
            "pthread_attr_getstack" => {
                // second argument is where we are supposed to write the stack size
                let ptr = self.ref_to_mplace(self.read_value(args[1])?)?;
                let stackaddr = Scalar::from_int(0x80000, args[1].layout.size); // just any address
                self.write_scalar(stackaddr, ptr.into())?;
                // return 0
                self.write_null(dest)?;
            }
            "pthread_get_stackaddr_np" => {
                let stackaddr = Scalar::from_int(0x80000, dest.layout.size); // just any address
                self.write_scalar(stackaddr, dest)?;
            }

            // Stub out calls for condvar, mutex and rwlock to just return 0
            "pthread_mutexattr_init" | "pthread_mutexattr_settype" | "pthread_mutex_init" |
            "pthread_mutexattr_destroy" | "pthread_mutex_lock" | "pthread_mutex_unlock" |
            "pthread_mutex_destroy" | "pthread_rwlock_rdlock" | "pthread_rwlock_unlock" |
            "pthread_rwlock_wrlock" | "pthread_rwlock_destroy" | "pthread_condattr_init" |
            "pthread_condattr_setclock" | "pthread_cond_init" | "pthread_condattr_destroy" |
            "pthread_cond_destroy" => {
                self.write_null(dest)?;
            }

            "mmap" => {
                // This is a horrible hack, but well... the guard page mechanism calls mmap and expects a particular return value, so we give it that value
                let addr = self.read_scalar(args[0])?.not_undef()?;
                self.write_scalar(addr, dest)?;
            }
            "mprotect" => {
                self.write_null(dest)?;
            }

            // Windows API subs
            "AddVectoredExceptionHandler" => {
                // any non zero value works for the stdlib. This is just used for stackoverflows anyway
                self.write_scalar(Scalar::from_int(1, dest.layout.size), dest)?;
            },
            "InitializeCriticalSection" |
            "EnterCriticalSection" |
            "LeaveCriticalSection" |
            "DeleteCriticalSection" |
            "SetLastError" => {
                // Function does not return anything, nothing to do
            },
            "GetModuleHandleW" |
            "GetProcAddress" |
            "TryEnterCriticalSection" => {
                // pretend these do not exist/nothing happened, by returning zero
                self.write_null(dest)?;
            },
            "GetLastError" => {
                // this is c::ERROR_CALL_NOT_IMPLEMENTED
                self.write_scalar(Scalar::from_int(120, dest.layout.size), dest)?;
            },

            // Windows TLS
            "TlsAlloc" => {
                // This just creates a key; Windows does not natively support TLS dtors.

                // Create key and return it
                let key = self.machine.tls.create_tls_key(None, *self.tcx) as u128;

                // Figure out how large a TLS key actually is. This is c::DWORD.
                if dest.layout.size.bits() < 128 && key >= (1u128 << dest.layout.size.bits() as u128) {
                    return err!(OutOfTls);
                }
                self.write_scalar(Scalar::from_uint(key, dest.layout.size), dest)?;
            }
            "TlsGetValue" => {
                let key = self.read_scalar(args[0])?.to_bytes()?;
                let ptr = self.machine.tls.load_tls(key)?;
                self.write_scalar(ptr, dest)?;
            }
            "TlsSetValue" => {
                let key = self.read_scalar(args[0])?.to_bytes()?;
                let new_ptr = self.read_scalar(args[1])?.not_undef()?;
                self.machine.tls.store_tls(key, new_ptr)?;

                // Return success (1)
                self.write_scalar(Scalar::from_int(1, dest.layout.size), dest)?;
            }

            // We can't execute anything else
            _ => {
                return err!(Unimplemented(
                    format!("can't call foreign function: {}", link_name),
                ));
            }
        }

        // All non-early-return arms fall through here: resume at `ret` and
        // dump the destination for tracing.
        self.goto_block(Some(ret))?;
        self.dump_place(*dest);
        Ok(())
    }
640
641     /// Get an instance for a path.
642     fn resolve_path(&self, path: &[&str]) -> EvalResult<'tcx, ty::Instance<'tcx>> {
643         self.tcx
644             .crates()
645             .iter()
646             .find(|&&krate| self.tcx.original_crate_name(krate) == path[0])
647             .and_then(|krate| {
648                 let krate = DefId {
649                     krate: *krate,
650                     index: CRATE_DEF_INDEX,
651                 };
652                 let mut items = self.tcx.item_children(krate);
653                 let mut path_it = path.iter().skip(1).peekable();
654
655                 while let Some(segment) = path_it.next() {
656                     for item in mem::replace(&mut items, Default::default()).iter() {
657                         if item.ident.name == *segment {
658                             if path_it.peek().is_none() {
659                                 return Some(ty::Instance::mono(self.tcx.tcx, item.def.def_id()));
660                             }
661
662                             items = self.tcx.item_children(item.def.def_id());
663                             break;
664                         }
665                     }
666                 }
667                 None
668             })
669             .ok_or_else(|| {
670                 let path = path.iter().map(|&s| s.to_owned()).collect();
671                 EvalErrorKind::PathNotFound(path).into()
672             })
673     }
674
675     fn emulate_missing_fn(
676         &mut self,
677         path: String,
678         _args: &[OpTy<'tcx, Borrow>],
679         dest: Option<PlaceTy<'tcx, Borrow>>,
680         ret: Option<mir::BasicBlock>,
681     ) -> EvalResult<'tcx> {
682         // In some cases in non-MIR libstd-mode, not having a destination is legit.  Handle these early.
683         match &path[..] {
684             "std::panicking::rust_panic_with_hook" |
685             "core::panicking::panic_fmt::::panic_impl" |
686             "std::rt::begin_panic_fmt" =>
687                 return err!(MachineError("the evaluated program panicked".to_string())),
688             _ => {}
689         }
690
691         let dest = dest.ok_or_else(
692             // Must be some function we do not support
693             || EvalErrorKind::NoMirFor(path.clone()),
694         )?;
695
696         match &path[..] {
697             // A Rust function is missing, which means we are running with MIR missing for libstd (or other dependencies).
698             // Still, we can make many things mostly work by "emulating" or ignoring some functions.
699             "std::io::_print" |
700             "std::io::_eprint" => {
701                 warn!(
702                     "Ignoring output.  To run programs that print, make sure you have a libstd with full MIR."
703                 );
704             }
705             "std::thread::Builder::new" => {
706                 return err!(Unimplemented("miri does not support threading".to_owned()))
707             }
708             "std::env::args" => {
709                 return err!(Unimplemented(
710                     "miri does not support program arguments".to_owned(),
711                 ))
712             }
713             "std::panicking::panicking" |
714             "std::rt::panicking" => {
715                 // we abort on panic -> `std::rt::panicking` always returns false
716                 self.write_scalar(Scalar::from_bool(false), dest)?;
717             }
718
719             _ => return err!(NoMirFor(path)),
720         }
721
722         self.goto_block(ret)?;
723         self.dump_place(*dest);
724         Ok(())
725     }
726
727     fn write_null(&mut self, dest: PlaceTy<'tcx, Borrow>) -> EvalResult<'tcx> {
728         self.write_scalar(Scalar::from_int(0, dest.layout.size), dest)
729     }
730 }