]> git.lizzy.rs Git - rust.git/blob - src/shims/intrinsics.rs
bde2dd4655bf7c852a66a2e0f4efae9635221b5b
[rust.git] / src / shims / intrinsics.rs
1 use std::iter;
2
3 use rustc::mir;
4 use rustc::mir::interpret::{InterpResult, PointerArithmetic};
5 use rustc::ty;
6 use rustc::ty::layout::{self, Align, LayoutOf, Size};
7 use rustc_apfloat::Float;
8 use syntax::source_map::Span;
9
10 use crate::*;
11
// Blanket impl: every `MiriEvalContext` gets the intrinsic shims below.
impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
/// Extension trait adding Miri's emulation of Rust compiler intrinsics to the
/// interpreter evaluation context.
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
    /// Emulates a call to the intrinsic `instance` with the given `args`.
    ///
    /// `ret` is the place to store the result plus the basic block to continue
    /// at afterwards; it is `None` for diverging intrinsics. `unwind` is the
    /// cleanup block to jump to if the intrinsic unwinds (used by
    /// `miri_start_panic`). Intrinsics that the generic engine already knows
    /// how to handle are delegated to `emulate_intrinsic` first.
    fn call_intrinsic(
        &mut self,
        span: Span,
        instance: ty::Instance<'tcx>,
        args: &[OpTy<'tcx, Tag>],
        ret: Option<(PlaceTy<'tcx, Tag>, mir::BasicBlock)>,
        unwind: Option<mir::BasicBlock>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        // First give the shared (rustc_mir) engine a chance to handle it.
        if this.emulate_intrinsic(span, instance, args, ret)? {
            return Ok(());
        }
        let tcx = &{ this.tcx.tcx };
        let substs = instance.substs;

        // All these intrinsics take raw pointers, so if we access memory directly
        // (as opposed to through a place), we have to remember to erase any tag
        // that might still hang around!
        let intrinsic_name = &*tcx.item_name(instance.def_id()).as_str();

        // Handle diverging intrinsics.
        // For all others, unpack the destination place and return block; a
        // missing `ret` for a non-diverging intrinsic is unsupported.
        let (dest, ret) = match intrinsic_name {
            "abort" => {
                throw_machine_stop!(TerminationInfo::Abort);
            }
            "miri_start_panic" => return this.handle_miri_start_panic(args, unwind),
            _ =>
                if let Some(p) = ret {
                    p
                } else {
                    throw_unsup_format!("unimplemented (diverging) intrinsic: {}", intrinsic_name);
                },
        };

        match intrinsic_name {
            // Pointer arithmetic with wrapping semantics: unlike `offset`, the
            // result is not required to stay in-bounds of any allocation.
            "arith_offset" => {
                let offset = this.read_scalar(args[1])?.to_machine_isize(this)?;
                let ptr = this.read_scalar(args[0])?.not_undef()?;

                let pointee_ty = substs.type_at(0);
                let pointee_size = this.layout_of(pointee_ty)?.size.bytes() as i64;
                // Wrapping multiply: byte offset may overflow by design.
                let offset = offset.overflowing_mul(pointee_size).0;
                let result_ptr = ptr.ptr_wrapping_signed_offset(offset, this);
                this.write_scalar(result_ptr, dest)?;
            }

            // `assume(false)` is UB; `assume(true)` is a no-op.
            "assume" => {
                let cond = this.read_scalar(args[0])?.to_bool()?;
                if !cond {
                    throw_ub_format!("`assume` intrinsic called with `false`");
                }
            }

            // Volatile accesses are emulated as plain loads/stores; Miri is
            // single-threaded so there is nothing extra to do.
            "volatile_load" => {
                let place = this.deref_operand(args[0])?;
                this.copy_op(place.into(), dest)?;
            }

            "volatile_store" => {
                let place = this.deref_operand(args[0])?;
                this.copy_op(args[1], place.into())?;
            }

            #[rustfmt::skip]
            | "atomic_load"
            | "atomic_load_relaxed"
            | "atomic_load_acq"
            => {
                let place = this.deref_operand(args[0])?;
                let val = this.read_scalar(place.into())?; // make sure it fits into a scalar; otherwise it cannot be atomic

                // Check alignment requirements. Atomics must always be aligned to their size,
                // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
                // be 8-aligned).
                let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
                this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;

                this.write_scalar(val, dest)?;
            }

            #[rustfmt::skip]
            | "atomic_store"
            | "atomic_store_relaxed"
            | "atomic_store_rel"
            => {
                let place = this.deref_operand(args[0])?;
                let val = this.read_scalar(args[1])?; // make sure it fits into a scalar; otherwise it cannot be atomic

                // Check alignment requirements. Atomics must always be aligned to their size,
                // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
                // be 8-aligned).
                let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
                this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;

                this.write_scalar(val, place.into())?;
            }

            #[rustfmt::skip]
            | "atomic_fence_acq"
            | "atomic_fence_rel"
            | "atomic_fence_acqrel"
            | "atomic_fence"
            => {
                // we are inherently singlethreaded and singlecored, this is a nop
            }

            // Atomic swap: store the new value, return the old one.
            _ if intrinsic_name.starts_with("atomic_xchg") => {
                let place = this.deref_operand(args[0])?;
                let new = this.read_scalar(args[1])?;
                let old = this.read_scalar(place.into())?;

                // Check alignment requirements. Atomics must always be aligned to their size,
                // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
                // be 8-aligned).
                let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
                this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;

                this.write_scalar(old, dest)?; // old value is returned
                this.write_scalar(new, place.into())?;
            }

            // Compare-and-exchange: returns a pair of (old value, success flag);
            // the store only happens when old == expected.
            _ if intrinsic_name.starts_with("atomic_cxchg") => {
                let place = this.deref_operand(args[0])?;
                let expect_old = this.read_immediate(args[1])?; // read as immediate for the sake of `binary_op()`
                let new = this.read_scalar(args[2])?;
                let old = this.read_immediate(place.into())?; // read as immediate for the sake of `binary_op()`

                // Check alignment requirements. Atomics must always be aligned to their size,
                // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
                // be 8-aligned).
                let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
                this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;

                // `binary_op` will bail if either of them is not a scalar.
                let eq = this.overflowing_binary_op(mir::BinOp::Eq, old, expect_old)?.0;
                let res = Immediate::ScalarPair(old.to_scalar_or_undef(), eq.into());
                // Return old value.
                this.write_immediate(res, dest)?;
                // Update ptr depending on comparison.
                if eq.to_bool()? {
                    this.write_scalar(new, place.into())?;
                }
            }

            // Atomic read-modify-write operations (fetch_or/xor/and/nand/add/sub,
            // in all memory orderings). The old value is returned; the new value
            // is `old OP rhs` (negated afterwards for `nand`).
            #[rustfmt::skip]
            | "atomic_or"
            | "atomic_or_acq"
            | "atomic_or_rel"
            | "atomic_or_acqrel"
            | "atomic_or_relaxed"
            | "atomic_xor"
            | "atomic_xor_acq"
            | "atomic_xor_rel"
            | "atomic_xor_acqrel"
            | "atomic_xor_relaxed"
            | "atomic_and"
            | "atomic_and_acq"
            | "atomic_and_rel"
            | "atomic_and_acqrel"
            | "atomic_and_relaxed"
            | "atomic_nand"
            | "atomic_nand_acq"
            | "atomic_nand_rel"
            | "atomic_nand_acqrel"
            | "atomic_nand_relaxed"
            | "atomic_xadd"
            | "atomic_xadd_acq"
            | "atomic_xadd_rel"
            | "atomic_xadd_acqrel"
            | "atomic_xadd_relaxed"
            | "atomic_xsub"
            | "atomic_xsub_acq"
            | "atomic_xsub_rel"
            | "atomic_xsub_acqrel"
            | "atomic_xsub_relaxed"
            => {
                let place = this.deref_operand(args[0])?;
                if !place.layout.ty.is_integral() {
                    bug!("Atomic arithmetic operations only work on integer types");
                }
                let rhs = this.read_immediate(args[1])?;
                let old = this.read_immediate(place.into())?;

                // Check alignment requirements. Atomics must always be aligned to their size,
                // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
                // be 8-aligned).
                let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
                this.memory.check_ptr_access(place.ptr, place.layout.size, align)?;

                this.write_immediate(*old, dest)?; // old value is returned
                // Pick the binop from the middle segment of the name, e.g.
                // "atomic_xadd_acq" -> "xadd". `neg` marks nand's extra negation.
                let (op, neg) = match intrinsic_name.split('_').nth(1).unwrap() {
                    "or" => (mir::BinOp::BitOr, false),
                    "xor" => (mir::BinOp::BitXor, false),
                    "and" => (mir::BinOp::BitAnd, false),
                    "xadd" => (mir::BinOp::Add, false),
                    "xsub" => (mir::BinOp::Sub, false),
                    "nand" => (mir::BinOp::BitAnd, true),
                    _ => bug!(),
                };
                // Atomics wrap around on overflow.
                let val = this.binary_op(op, old, rhs)?;
                let val = if neg { this.unary_op(mir::UnOp::Not, val)? } else { val };
                this.write_immediate(*val, place.into())?;
            }

            "breakpoint" => unimplemented!(), // halt miri

            // `copy` may overlap, `copy_nonoverlapping` may not; the flag below
            // is derived from the name suffix.
            #[rustfmt::skip]
            | "copy"
            | "copy_nonoverlapping"
            => {
                let elem_ty = substs.type_at(0);
                let elem_layout = this.layout_of(elem_ty)?;
                let elem_size = elem_layout.size.bytes();
                let count = this.read_scalar(args[2])?.to_machine_usize(this)?;
                let elem_align = elem_layout.align.abi;

                // NOTE(review): `count * elem_size` is an unchecked u64 multiply;
                // a huge count could wrap — confirm whether upstream validation rules this out.
                let size = Size::from_bytes(count * elem_size);
                let src = this.read_scalar(args[0])?.not_undef()?;
                let src = this.memory.check_ptr_access(src, size, elem_align)?;
                let dest = this.read_scalar(args[1])?.not_undef()?;
                let dest = this.memory.check_ptr_access(dest, size, elem_align)?;

                // `check_ptr_access` returns `None` for size-0 accesses, in which
                // case there is nothing to copy.
                if let (Some(src), Some(dest)) = (src, dest) {
                    this.memory.copy(
                        src,
                        dest,
                        size,
                        intrinsic_name.ends_with("_nonoverlapping"),
                    )?;
                }
            }

            // Read the enum discriminant of the pointed-to value.
            "discriminant_value" => {
                let place = this.deref_operand(args[0])?;
                let discr_val = this.read_discriminant(place.into())?.0;
                this.write_scalar(Scalar::from_uint(discr_val, dest.layout.size), dest)?;
            }

            // Unary f32 math intrinsics, evaluated via the host's f32 ops.
            #[rustfmt::skip]
            | "sinf32"
            | "fabsf32"
            | "cosf32"
            | "sqrtf32"
            | "expf32"
            | "exp2f32"
            | "logf32"
            | "log10f32"
            | "log2f32"
            | "floorf32"
            | "ceilf32"
            | "truncf32"
            | "roundf32"
            => {
                // FIXME: Using host floats.
                let f = f32::from_bits(this.read_scalar(args[0])?.to_u32()?);
                let f = match intrinsic_name {
                    "sinf32" => f.sin(),
                    "fabsf32" => f.abs(),
                    "cosf32" => f.cos(),
                    "sqrtf32" => f.sqrt(),
                    "expf32" => f.exp(),
                    "exp2f32" => f.exp2(),
                    "logf32" => f.ln(),
                    "log10f32" => f.log10(),
                    "log2f32" => f.log2(),
                    "floorf32" => f.floor(),
                    "ceilf32" => f.ceil(),
                    "truncf32" => f.trunc(),
                    "roundf32" => f.round(),
                    _ => bug!(),
                };
                this.write_scalar(Scalar::from_u32(f.to_bits()), dest)?;
            }

            // Unary f64 math intrinsics, evaluated via the host's f64 ops.
            #[rustfmt::skip]
            | "sinf64"
            | "fabsf64"
            | "cosf64"
            | "sqrtf64"
            | "expf64"
            | "exp2f64"
            | "logf64"
            | "log10f64"
            | "log2f64"
            | "floorf64"
            | "ceilf64"
            | "truncf64"
            | "roundf64"
            => {
                // FIXME: Using host floats.
                let f = f64::from_bits(this.read_scalar(args[0])?.to_u64()?);
                let f = match intrinsic_name {
                    "sinf64" => f.sin(),
                    "fabsf64" => f.abs(),
                    "cosf64" => f.cos(),
                    "sqrtf64" => f.sqrt(),
                    "expf64" => f.exp(),
                    "exp2f64" => f.exp2(),
                    "logf64" => f.ln(),
                    "log10f64" => f.log10(),
                    "log2f64" => f.log2(),
                    "floorf64" => f.floor(),
                    "ceilf64" => f.ceil(),
                    "truncf64" => f.trunc(),
                    "roundf64" => f.round(),
                    _ => bug!(),
                };
                this.write_scalar(Scalar::from_u64(f.to_bits()), dest)?;
            }

            // "Fast-math" float binops: emulated as the ordinary (precise) ops.
            #[rustfmt::skip]
            | "fadd_fast"
            | "fsub_fast"
            | "fmul_fast"
            | "fdiv_fast"
            | "frem_fast"
            => {
                let a = this.read_immediate(args[0])?;
                let b = this.read_immediate(args[1])?;
                let op = match intrinsic_name {
                    "fadd_fast" => mir::BinOp::Add,
                    "fsub_fast" => mir::BinOp::Sub,
                    "fmul_fast" => mir::BinOp::Mul,
                    "fdiv_fast" => mir::BinOp::Div,
                    "frem_fast" => mir::BinOp::Rem,
                    _ => bug!(),
                };
                this.binop_ignore_overflow(op, a, b, dest)?;
            }

            // f32 min/max/copysign via rustc_apfloat (soft-float, not host floats).
            #[rustfmt::skip]
            | "minnumf32"
            | "maxnumf32"
            | "copysignf32"
            => {
                let a = this.read_scalar(args[0])?.to_f32()?;
                let b = this.read_scalar(args[1])?.to_f32()?;
                let res = match intrinsic_name {
                    "minnumf32" => a.min(b),
                    "maxnumf32" => a.max(b),
                    "copysignf32" => a.copy_sign(b),
                    _ => bug!(),
                };
                this.write_scalar(Scalar::from_f32(res), dest)?;
            }

            // f64 min/max/copysign via rustc_apfloat (soft-float, not host floats).
            #[rustfmt::skip]
            | "minnumf64"
            | "maxnumf64"
            | "copysignf64"
            => {
                let a = this.read_scalar(args[0])?.to_f64()?;
                let b = this.read_scalar(args[1])?.to_f64()?;
                let res = match intrinsic_name {
                    "minnumf64" => a.min(b),
                    "maxnumf64" => a.max(b),
                    "copysignf64" => a.copy_sign(b),
                    _ => bug!(),
                };
                this.write_scalar(Scalar::from_f64(res), dest)?;
            }

            "exact_div" =>
                this.exact_div(this.read_immediate(args[0])?, this.read_immediate(args[1])?, dest)?,

            // `forget` just drops its argument without running its destructor;
            // nothing to do in the interpreter.
            "forget" => {}

            #[rustfmt::skip]
            | "likely"
            | "unlikely"
            => {
                // These just return their argument
                let b = this.read_immediate(args[0])?;
                this.write_immediate(*b, dest)?;
            }

            // Zero-initialize the destination.
            "init" => {
                // Check fast path: we don't want to force an allocation in case the destination is a simple value,
                // but we also do not want to create a new allocation with 0s and then copy that over.
                // FIXME: We do not properly validate in case of ZSTs and when doing it in memory!
                // However, this only affects direct calls of the intrinsic; calls to the stable
                // functions wrapping them do get their validation.
                // FIXME: should we check that the destination pointer is aligned even for ZSTs?
                if !dest.layout.is_zst() {
                    match dest.layout.abi {
                        layout::Abi::Scalar(ref s) => {
                            let x = Scalar::from_int(0, s.value.size(this));
                            this.write_scalar(x, dest)?;
                        }
                        layout::Abi::ScalarPair(ref s1, ref s2) => {
                            let x = Scalar::from_int(0, s1.value.size(this));
                            let y = Scalar::from_int(0, s2.value.size(this));
                            this.write_immediate(Immediate::ScalarPair(x.into(), y.into()), dest)?;
                        }
                        _ => {
                            // Do it in memory
                            let mplace = this.force_allocation(dest)?;
                            mplace.meta.unwrap_none(); // must be sized
                            this.memory.write_bytes(
                                mplace.ptr,
                                iter::repeat(0u8).take(dest.layout.size.bytes() as usize),
                            )?;
                        }
                    }
                }
            }

            // Preferred (not ABI-required) alignment of the type parameter.
            "pref_align_of" => {
                let ty = substs.type_at(0);
                let layout = this.layout_of(ty)?;
                let align = layout.align.pref.bytes();
                let ptr_size = this.pointer_size();
                let align_val = Scalar::from_uint(align as u128, ptr_size);
                this.write_scalar(align_val, dest)?;
            }

            // Write args[1] to the place pointed to by args[0], without
            // reading or dropping the old value.
            "move_val_init" => {
                let place = this.deref_operand(args[0])?;
                this.copy_op(args[1], place.into())?;
            }

            // In-bounds pointer offset (UB checking delegated to the helper).
            "offset" => {
                let offset = this.read_scalar(args[1])?.to_machine_isize(this)?;
                let ptr = this.read_scalar(args[0])?.not_undef()?;
                let result_ptr = this.pointer_offset_inbounds(ptr, substs.type_at(0), offset)?;
                this.write_scalar(result_ptr, dest)?;
            }

            "panic_if_uninhabited" => {
                let ty = substs.type_at(0);
                let layout = this.layout_of(ty)?;
                if layout.abi.is_uninhabited() {
                    // FIXME: This should throw a panic in the interpreted program instead.
                    throw_unsup_format!("Trying to instantiate uninhabited type {}", ty)
                }
            }

            "powf32" => {
                // FIXME: Using host floats.
                let f = f32::from_bits(this.read_scalar(args[0])?.to_u32()?);
                let f2 = f32::from_bits(this.read_scalar(args[1])?.to_u32()?);
                this.write_scalar(Scalar::from_u32(f.powf(f2).to_bits()), dest)?;
            }

            "powf64" => {
                // FIXME: Using host floats.
                let f = f64::from_bits(this.read_scalar(args[0])?.to_u64()?);
                let f2 = f64::from_bits(this.read_scalar(args[1])?.to_u64()?);
                this.write_scalar(Scalar::from_u64(f.powf(f2).to_bits()), dest)?;
            }

            // Fused multiply-add, f32, via rustc_apfloat (`.value` discards the
            // inexact/status flags of the soft-float result).
            "fmaf32" => {
                let a = this.read_scalar(args[0])?.to_f32()?;
                let b = this.read_scalar(args[1])?.to_f32()?;
                let c = this.read_scalar(args[2])?.to_f32()?;
                let res = a.mul_add(b, c).value;
                this.write_scalar(Scalar::from_f32(res), dest)?;
            }

            // Fused multiply-add, f64, via rustc_apfloat.
            "fmaf64" => {
                let a = this.read_scalar(args[0])?.to_f64()?;
                let b = this.read_scalar(args[1])?.to_f64()?;
                let c = this.read_scalar(args[2])?.to_f64()?;
                let res = a.mul_add(b, c).value;
                this.write_scalar(Scalar::from_f64(res), dest)?;
            }

            "powif32" => {
                // FIXME: Using host floats.
                let f = f32::from_bits(this.read_scalar(args[0])?.to_u32()?);
                let i = this.read_scalar(args[1])?.to_i32()?;
                this.write_scalar(Scalar::from_u32(f.powi(i).to_bits()), dest)?;
            }

            "powif64" => {
                // FIXME: Using host floats.
                let f = f64::from_bits(this.read_scalar(args[0])?.to_u64()?);
                let i = this.read_scalar(args[1])?.to_i32()?;
                this.write_scalar(Scalar::from_u64(f.powi(i).to_bits()), dest)?;
            }

            // Dynamic size of the pointed-to value (handles unsized types).
            "size_of_val" => {
                let mplace = this.deref_operand(args[0])?;
                let (size, _) = this
                    .size_and_align_of_mplace(mplace)?
                    .expect("size_of_val called on extern type");
                let ptr_size = this.pointer_size();
                this.write_scalar(Scalar::from_uint(size.bytes() as u128, ptr_size), dest)?;
            }

            // Dynamic alignment of the pointed-to value.
            #[rustfmt::skip]
            | "min_align_of_val"
            | "align_of_val"
            => {
                let mplace = this.deref_operand(args[0])?;
                // NOTE(review): the panic message below says `size_of_val` but this
                // arm handles the align intrinsics — likely a copy-paste leftover.
                let (_, align) = this
                    .size_and_align_of_mplace(mplace)?
                    .expect("size_of_val called on extern type");
                let ptr_size = this.pointer_size();
                this.write_scalar(Scalar::from_uint(align.bytes(), ptr_size), dest)?;
            }

            // Division with UB on divisor 0 (overflow is ignored by the binop helper).
            "unchecked_div" => {
                let l = this.read_immediate(args[0])?;
                let r = this.read_immediate(args[1])?;
                let rval = r.to_scalar()?.to_bits(args[1].layout.size)?;
                if rval == 0 {
                    throw_ub_format!("Division by 0 in unchecked_div");
                }
                this.binop_ignore_overflow(mir::BinOp::Div, l, r, dest)?;
            }

            // Remainder with UB on divisor 0.
            "unchecked_rem" => {
                let l = this.read_immediate(args[0])?;
                let r = this.read_immediate(args[1])?;
                let rval = r.to_scalar()?.to_bits(args[1].layout.size)?;
                if rval == 0 {
                    throw_ub_format!("Division by 0 in unchecked_rem");
                }
                this.binop_ignore_overflow(mir::BinOp::Rem, l, r, dest)?;
            }

            // Arithmetic where overflow is UB: detect it and report.
            #[rustfmt::skip]
            | "unchecked_add"
            | "unchecked_sub"
            | "unchecked_mul"
            => {
                let l = this.read_immediate(args[0])?;
                let r = this.read_immediate(args[1])?;
                let op = match intrinsic_name {
                    "unchecked_add" => mir::BinOp::Add,
                    "unchecked_sub" => mir::BinOp::Sub,
                    "unchecked_mul" => mir::BinOp::Mul,
                    _ => bug!(),
                };
                let (res, overflowed, _ty) = this.overflowing_binary_op(op, l, r)?;
                if overflowed {
                    throw_ub_format!("Overflowing arithmetic in {}", intrinsic_name);
                }
                this.write_scalar(res, dest)?;
            }

            // Fill the destination with undef (uninitialized) bytes.
            "uninit" => {
                // Check fast path: we don't want to force an allocation in case the destination is a simple value,
                // but we also do not want to create a new allocation with 0s and then copy that over.
                // FIXME: We do not properly validate in case of ZSTs and when doing it in memory!
                // However, this only affects direct calls of the intrinsic; calls to the stable
                // functions wrapping them do get their validation.
                // FIXME: should we check alignment for ZSTs?
                if !dest.layout.is_zst() {
                    match dest.layout.abi {
                        layout::Abi::Scalar(..) => {
                            let x = ScalarMaybeUndef::Undef;
                            this.write_immediate(Immediate::Scalar(x), dest)?;
                        }
                        layout::Abi::ScalarPair(..) => {
                            let x = ScalarMaybeUndef::Undef;
                            this.write_immediate(Immediate::ScalarPair(x, x), dest)?;
                        }
                        _ => {
                            // Do it in memory: mark the destination's bytes as undefined.
                            let mplace = this.force_allocation(dest)?;
                            mplace.meta.unwrap_none(); // must be sized
                            let ptr = mplace.ptr.to_ptr()?;
                            // We know the return place is in-bounds
                            this.memory.get_raw_mut(ptr.alloc_id)?.mark_definedness(
                                ptr,
                                dest.layout.size,
                                false,
                            );
                        }
                    }
                }
            }

            // Fill `count` values of type T starting at `ptr` with `val_byte`.
            "write_bytes" => {
                let ty = substs.type_at(0);
                let ty_layout = this.layout_of(ty)?;
                let val_byte = this.read_scalar(args[1])?.to_u8()?;
                let ptr = this.read_scalar(args[0])?.not_undef()?;
                let count = this.read_scalar(args[2])?.to_machine_usize(this)?;
                // `Size * u64` multiplication; byte count = size_of::<T>() * count.
                let byte_count = ty_layout.size * count;
                this.memory
                    .write_bytes(ptr, iter::repeat(val_byte).take(byte_count.bytes() as usize))?;
            }

            name => throw_unsup_format!("unimplemented intrinsic: {}", name),
        }

        // Log the result (under -Zmiri-... tracing) and jump to the return block.
        this.dump_place(*dest);
        this.go_to_block(ret);
        Ok(())
    }
}