use rustc_apfloat::Float;
use rustc::mir;
use rustc::mir::interpret::{InterpResult, PointerArithmetic};
use rustc::ty::layout::{self, LayoutOf, Size, Align};
use rustc::ty;

use crate::{
    PlaceTy, OpTy, ImmTy, Immediate, Scalar, Tag,
    OperatorEvalContextExt
};

impl<'mir, 'tcx> EvalContextExt<'mir, 'tcx> for crate::MiriEvalContext<'mir, 'tcx> {}
pub trait EvalContextExt<'mir, 'tcx: 'mir>: crate::MiriEvalContextExt<'mir, 'tcx> {
    fn call_intrinsic(
        &mut self,
        instance: ty::Instance<'tcx>,
        args: &[OpTy<'tcx, Tag>],
        dest: PlaceTy<'tcx, Tag>,
    ) -> InterpResult<'tcx> {
        let this = self.eval_context_mut();
        if this.emulate_intrinsic(instance, args, dest)? {
            return Ok(());
        }
        let tcx = &{this.tcx.tcx};
        let substs = instance.substs;

        // All these intrinsics take raw pointers, so if we access memory directly
        // (as opposed to through a place), we have to remember to erase any tag
        // that might still hang around!

        let intrinsic_name = this.tcx.item_name(instance.def_id()).as_str();
        match intrinsic_name.get() {
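            // `arith_offset` does wrapping pointer arithmetic: the offset is multiplied by the
            // pointee size (also wrapping) and added to the pointer without any inbounds check,
            // unlike the `offset` intrinsic further below.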
            "arith_offset" => {
                let offset = this.read_scalar(args[1])?.to_isize(this)?;
                let ptr = this.read_scalar(args[0])?.not_undef()?;

                let pointee_ty = substs.type_at(0);
                let pointee_size = this.layout_of(pointee_ty)?.size.bytes() as i64;
                let offset = offset.overflowing_mul(pointee_size).0;
                let result_ptr = ptr.ptr_wrapping_signed_offset(offset, this);
                this.write_scalar(result_ptr, dest)?;
            }

            "assume" => {
                let cond = this.read_scalar(args[0])?.to_bool()?;
                if !cond {
                    throw_ub_format!("`assume` intrinsic called with `false`");
                }
            }

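            // Miri has no notion of volatile accesses; volatile loads and stores are
            // performed as ordinary copies.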
            "volatile_load" => {
                let place = this.deref_operand(args[0])?;
                this.copy_op(place.into(), dest)?;
            }

            "volatile_store" => {
                let place = this.deref_operand(args[0])?;
                this.copy_op(args[1], place.into())?;
            }

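            // Execution in Miri is single-threaded, so atomic loads and stores can be
            // emulated as plain reads and writes; the only extra work is enforcing the
            // size-based alignment requirement of atomics.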
            "atomic_load" |
            "atomic_load_relaxed" |
            "atomic_load_acq" => {
                let place = this.deref_operand(args[0])?;
                let val = this.read_scalar(place.into())?; // make sure it fits into a scalar; otherwise it cannot be atomic

                // Check alignment requirements. Atomics must always be aligned to their size,
                // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
                // be 8-aligned).
                let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
                this.memory().check_ptr_access(place.ptr, place.layout.size, align)?;

                this.write_scalar(val, dest)?;
            }

            "atomic_store" |
            "atomic_store_relaxed" |
            "atomic_store_rel" => {
                let place = this.deref_operand(args[0])?;
                let val = this.read_scalar(args[1])?; // make sure it fits into a scalar; otherwise it cannot be atomic

                // Check alignment requirements. Atomics must always be aligned to their size,
                // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
                // be 8-aligned).
                let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
                this.memory().check_ptr_access(place.ptr, place.layout.size, align)?;

                this.write_scalar(val, place.into())?;
            }

            "atomic_fence_acq" => {
                // We are inherently single-threaded and single-core; this is a no-op.
            }

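            // Atomic exchange: return the old value and unconditionally store the new one.
            // Since execution is single-threaded, this does not need to be a real atomic operation.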
            _ if intrinsic_name.starts_with("atomic_xchg") => {
                let place = this.deref_operand(args[0])?;
                let new = this.read_scalar(args[1])?;
                let old = this.read_scalar(place.into())?;

                // Check alignment requirements. Atomics must always be aligned to their size,
                // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
                // be 8-aligned).
                let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
                this.memory().check_ptr_access(place.ptr, place.layout.size, align)?;

                this.write_scalar(old, dest)?; // old value is returned
                this.write_scalar(new, place.into())?;
            }

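            // Compare-and-exchange: return the pair (old value, success flag) and write
            // `new` into the place only if the old value equals the expected one.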
            _ if intrinsic_name.starts_with("atomic_cxchg") => {
                let place = this.deref_operand(args[0])?;
                let expect_old = this.read_immediate(args[1])?; // read as immediate for the sake of `binary_op()`
                let new = this.read_scalar(args[2])?;
                let old = this.read_immediate(place.into())?; // read as immediate for the sake of `binary_op()`

                // Check alignment requirements. Atomics must always be aligned to their size,
                // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
                // be 8-aligned).
                let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
                this.memory().check_ptr_access(place.ptr, place.layout.size, align)?;

                // binary_op will bail if either of them is not a scalar
                let (eq, _) = this.binary_op(mir::BinOp::Eq, old, expect_old)?;
                let res = Immediate::ScalarPair(old.to_scalar_or_undef(), eq.into());
                this.write_immediate(res, dest)?; // old value is returned
                // update ptr depending on comparison
                if eq.to_bool()? {
                    this.write_scalar(new, place.into())?;
                }
            }

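            // Atomic read-modify-write operations (or/xor/and/nand/xadd/xsub, in every memory
            // ordering): the old value is returned and the combined result is written back.
            // `nand` is emulated as `and` followed by a bitwise negation.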
            "atomic_or" |
            "atomic_or_acq" |
            "atomic_or_rel" |
            "atomic_or_acqrel" |
            "atomic_or_relaxed" |
            "atomic_xor" |
            "atomic_xor_acq" |
            "atomic_xor_rel" |
            "atomic_xor_acqrel" |
            "atomic_xor_relaxed" |
            "atomic_and" |
            "atomic_and_acq" |
            "atomic_and_rel" |
            "atomic_and_acqrel" |
            "atomic_and_relaxed" |
            "atomic_nand" |
            "atomic_nand_acq" |
            "atomic_nand_rel" |
            "atomic_nand_acqrel" |
            "atomic_nand_relaxed" |
            "atomic_xadd" |
            "atomic_xadd_acq" |
            "atomic_xadd_rel" |
            "atomic_xadd_acqrel" |
            "atomic_xadd_relaxed" |
            "atomic_xsub" |
            "atomic_xsub_acq" |
            "atomic_xsub_rel" |
            "atomic_xsub_acqrel" |
            "atomic_xsub_relaxed" => {
                let place = this.deref_operand(args[0])?;
                if !place.layout.ty.is_integral() {
                    bug!("Atomic arithmetic operations only work on integer types");
                }
                let rhs = this.read_immediate(args[1])?;
                let old = this.read_immediate(place.into())?;

                // Check alignment requirements. Atomics must always be aligned to their size,
                // even if the type they wrap would be less aligned (e.g. AtomicU64 on 32bit must
                // be 8-aligned).
                let align = Align::from_bytes(place.layout.size.bytes()).unwrap();
                this.memory().check_ptr_access(place.ptr, place.layout.size, align)?;

                this.write_immediate(*old, dest)?; // old value is returned
                let (op, neg) = match intrinsic_name.split('_').nth(1).unwrap() {
                    "or" => (mir::BinOp::BitOr, false),
                    "xor" => (mir::BinOp::BitXor, false),
                    "and" => (mir::BinOp::BitAnd, false),
                    "xadd" => (mir::BinOp::Add, false),
                    "xsub" => (mir::BinOp::Sub, false),
                    "nand" => (mir::BinOp::BitAnd, true),
                    _ => bug!(),
                };
                // Atomics wrap around on overflow.
                let (val, _overflowed) = this.binary_op(op, old, rhs)?;
                let val = if neg {
                    this.unary_op(mir::UnOp::Not, ImmTy::from_scalar(val, old.layout))?
                } else {
                    val
                };
                this.write_scalar(val, place.into())?;
            }

            "breakpoint" => unimplemented!(), // halt miri

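            // `copy`/`copy_nonoverlapping`: validate both pointer ranges for the total byte
            // count, then let the memory subsystem perform the copy, telling it whether the
            // ranges are required not to overlap.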
            "copy" |
            "copy_nonoverlapping" => {
                let elem_ty = substs.type_at(0);
                let elem_layout = this.layout_of(elem_ty)?;
                let elem_size = elem_layout.size.bytes();
                let count = this.read_scalar(args[2])?.to_usize(this)?;
                let elem_align = elem_layout.align.abi;

                let size = Size::from_bytes(count * elem_size);
                let src = this.read_scalar(args[0])?.not_undef()?;
                let src = this.memory().check_ptr_access(src, size, elem_align)?;
                let dest = this.read_scalar(args[1])?.not_undef()?;
                let dest = this.memory().check_ptr_access(dest, size, elem_align)?;

                if let (Some(src), Some(dest)) = (src, dest) {
                    this.memory_mut().copy(
                        src,
                        dest,
                        size,
                        intrinsic_name.ends_with("_nonoverlapping"),
                    )?;
                }
            }

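            // Read the discriminant of the value behind the pointer and return it as an
            // unsigned integer of the destination's size.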
            "discriminant_value" => {
                let place = this.deref_operand(args[0])?;
                let discr_val = this.read_discriminant(place.into())?.0;
                this.write_scalar(Scalar::from_uint(discr_val, dest.layout.size), dest)?;
            }

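            // These unary float operations round-trip through the host: reinterpret the
            // argument bits as a host float, apply the host operation, and write the
            // resulting bits back (hence the FIXME below).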
            "sinf32" | "fabsf32" | "cosf32" | "sqrtf32" | "expf32" | "exp2f32" | "logf32" |
            "log10f32" | "log2f32" | "floorf32" | "ceilf32" | "truncf32" | "roundf32" => {
                // FIXME: Using host floats.
                let f = f32::from_bits(this.read_scalar(args[0])?.to_u32()?);
                let f = match intrinsic_name.get() {
                    "sinf32" => f.sin(),
                    "fabsf32" => f.abs(),
                    "cosf32" => f.cos(),
                    "sqrtf32" => f.sqrt(),
                    "expf32" => f.exp(),
                    "exp2f32" => f.exp2(),
                    "logf32" => f.ln(),
                    "log10f32" => f.log10(),
                    "log2f32" => f.log2(),
                    "floorf32" => f.floor(),
                    "ceilf32" => f.ceil(),
                    "truncf32" => f.trunc(),
                    "roundf32" => f.round(),
                    _ => bug!(),
                };
                this.write_scalar(Scalar::from_u32(f.to_bits()), dest)?;
            }

            "sinf64" | "fabsf64" | "cosf64" | "sqrtf64" | "expf64" | "exp2f64" | "logf64" |
            "log10f64" | "log2f64" | "floorf64" | "ceilf64" | "truncf64" | "roundf64" => {
                // FIXME: Using host floats.
                let f = f64::from_bits(this.read_scalar(args[0])?.to_u64()?);
                let f = match intrinsic_name.get() {
                    "sinf64" => f.sin(),
                    "fabsf64" => f.abs(),
                    "cosf64" => f.cos(),
                    "sqrtf64" => f.sqrt(),
                    "expf64" => f.exp(),
                    "exp2f64" => f.exp2(),
                    "logf64" => f.ln(),
                    "log10f64" => f.log10(),
                    "log2f64" => f.log2(),
                    "floorf64" => f.floor(),
                    "ceilf64" => f.ceil(),
                    "truncf64" => f.trunc(),
                    "roundf64" => f.round(),
                    _ => bug!(),
                };
                this.write_scalar(Scalar::from_u64(f.to_bits()), dest)?;
            }

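            // The `*_fast` intrinsics carry fast-math flags; here they are evaluated as the
            // ordinary float operations, and the fast-math assumptions are not checked.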
            "fadd_fast" | "fsub_fast" | "fmul_fast" | "fdiv_fast" | "frem_fast" => {
                let a = this.read_immediate(args[0])?;
                let b = this.read_immediate(args[1])?;
                let op = match intrinsic_name.get() {
                    "fadd_fast" => mir::BinOp::Add,
                    "fsub_fast" => mir::BinOp::Sub,
                    "fmul_fast" => mir::BinOp::Mul,
                    "fdiv_fast" => mir::BinOp::Div,
                    "frem_fast" => mir::BinOp::Rem,
                    _ => bug!(),
                };
                this.binop_ignore_overflow(op, a, b, dest)?;
            }

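            // `minnumf*`/`maxnumf*` use the soft-float `rustc_apfloat` representation
            // (via `to_f32`/`to_f64`) rather than host floats.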
            "minnumf32" | "maxnumf32" => {
                let a = this.read_scalar(args[0])?.to_f32()?;
                let b = this.read_scalar(args[1])?.to_f32()?;
                let res = if intrinsic_name.get().starts_with("min") {
                    a.min(b)
                } else {
                    a.max(b)
                };
                this.write_scalar(Scalar::from_f32(res), dest)?;
            }

            "minnumf64" | "maxnumf64" => {
                let a = this.read_scalar(args[0])?.to_f64()?;
                let b = this.read_scalar(args[1])?.to_f64()?;
                let res = if intrinsic_name.get().starts_with("min") {
                    a.min(b)
                } else {
                    a.max(b)
                };
                this.write_scalar(Scalar::from_f64(res), dest)?;
            }

            "exact_div" => {
                // Performs an exact division, resulting in undefined behavior where
                // `x % y != 0` or `y == 0` or `x == T::min_value() && y == -1`
                let a = this.read_immediate(args[0])?;
                let b = this.read_immediate(args[1])?;
                // check x % y != 0
                if this.binary_op(mir::BinOp::Rem, a, b)?.0.to_bits(dest.layout.size)? != 0 {
                    // Check if `b` is -1, which is the "min_value / -1" case.
                    let minus1 = Scalar::from_int(-1, dest.layout.size);
                    return Err(if b.to_scalar().unwrap() == minus1 {
                        err_ub_format!("exact_div: result of dividing MIN by -1 cannot be represented")
                    } else {
                        err_ub_format!("exact_div: {:?} cannot be divided by {:?} without remainder", *a, *b)
                    }.into());
                }
                this.binop_ignore_overflow(mir::BinOp::Div, a, b, dest)?;
            },

            "forget" => {}

            "likely" | "unlikely" => {
                // These just return their argument
                let b = this.read_immediate(args[0])?;
                this.write_immediate(*b, dest)?;
            }

            "init" => {
                // Check fast path: we don't want to force an allocation in case the destination is a simple value,
                // but we also do not want to create a new allocation with 0s and then copy that over.
                // FIXME: We do not properly validate in case of ZSTs and when doing it in memory!
                // However, this only affects direct calls of the intrinsic; calls to the stable
                // functions wrapping them do get their validation.
                // FIXME: should we check that the destination pointer is aligned even for ZSTs?
                if !dest.layout.is_zst() {
                    match dest.layout.abi {
                        layout::Abi::Scalar(ref s) => {
                            let x = Scalar::from_int(0, s.value.size(this));
                            this.write_scalar(x, dest)?;
                        }
                        layout::Abi::ScalarPair(ref s1, ref s2) => {
                            let x = Scalar::from_int(0, s1.value.size(this));
                            let y = Scalar::from_int(0, s2.value.size(this));
                            this.write_immediate(Immediate::ScalarPair(x.into(), y.into()), dest)?;
                        }
                        _ => {
                            // Do it in memory
                            let mplace = this.force_allocation(dest)?;
                            assert!(mplace.meta.is_none());
                            // not a zst, must be valid pointer
                            let ptr = mplace.ptr.to_ptr()?;
                            this.memory_mut().get_mut(ptr.alloc_id)?.write_repeat(tcx, ptr, 0, dest.layout.size)?;
                        }
                    }
                }
            }

            "pref_align_of" => {
                let ty = substs.type_at(0);
                let layout = this.layout_of(ty)?;
                let align = layout.align.pref.bytes();
                let ptr_size = this.pointer_size();
                let align_val = Scalar::from_uint(align as u128, ptr_size);
                this.write_scalar(align_val, dest)?;
            }

            "move_val_init" => {
                let place = this.deref_operand(args[0])?;
                this.copy_op(args[1], place.into())?;
            }

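            // Unlike `arith_offset` above, `offset` requires the result to stay in bounds of
            // the allocation; `pointer_offset_inbounds` performs that check.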
            "offset" => {
                let offset = this.read_scalar(args[1])?.to_isize(this)?;
                let ptr = this.read_scalar(args[0])?.not_undef()?;
                let result_ptr = this.pointer_offset_inbounds(ptr, substs.type_at(0), offset)?;
                this.write_scalar(result_ptr, dest)?;
            }

            "panic_if_uninhabited" => {
                let ty = substs.type_at(0);
                let layout = this.layout_of(ty)?;
                if layout.abi.is_uninhabited() {
                    throw_ub_format!("Trying to instantiate uninhabited type {}", ty)
                }
            }

            "powf32" => {
                // FIXME: Using host floats.
                let f = f32::from_bits(this.read_scalar(args[0])?.to_u32()?);
                let f2 = f32::from_bits(this.read_scalar(args[1])?.to_u32()?);
                this.write_scalar(
                    Scalar::from_u32(f.powf(f2).to_bits()),
                    dest,
                )?;
            }

            "powf64" => {
                // FIXME: Using host floats.
                let f = f64::from_bits(this.read_scalar(args[0])?.to_u64()?);
                let f2 = f64::from_bits(this.read_scalar(args[1])?.to_u64()?);
                this.write_scalar(
                    Scalar::from_u64(f.powf(f2).to_bits()),
                    dest,
                )?;
            }

            "fmaf32" => {
                let a = this.read_scalar(args[0])?.to_f32()?;
                let b = this.read_scalar(args[1])?.to_f32()?;
                let c = this.read_scalar(args[2])?.to_f32()?;
                let res = a.mul_add(b, c).value;
                this.write_scalar(
                    Scalar::from_f32(res),
                    dest,
                )?;
            }

            "fmaf64" => {
                let a = this.read_scalar(args[0])?.to_f64()?;
                let b = this.read_scalar(args[1])?.to_f64()?;
                let c = this.read_scalar(args[2])?.to_f64()?;
                let res = a.mul_add(b, c).value;
                this.write_scalar(
                    Scalar::from_f64(res),
                    dest,
                )?;
            }

            "powif32" => {
                // FIXME: Using host floats.
                let f = f32::from_bits(this.read_scalar(args[0])?.to_u32()?);
                let i = this.read_scalar(args[1])?.to_i32()?;
                this.write_scalar(
                    Scalar::from_u32(f.powi(i).to_bits()),
                    dest,
                )?;
            }

            "powif64" => {
                // FIXME: Using host floats.
                let f = f64::from_bits(this.read_scalar(args[0])?.to_u64()?);
                let i = this.read_scalar(args[1])?.to_i32()?;
                this.write_scalar(
                    Scalar::from_u64(f.powi(i).to_bits()),
                    dest,
                )?;
            }

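            // `size_of_val` and `align_of_val` compute the dynamic layout of the pointee from
            // the place, taking the metadata (slice length, vtable) of unsized types into account.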
            "size_of_val" => {
                let mplace = this.deref_operand(args[0])?;
                let (size, _) = this.size_and_align_of_mplace(mplace)?
                    .expect("size_of_val called on extern type");
                let ptr_size = this.pointer_size();
                this.write_scalar(
                    Scalar::from_uint(size.bytes() as u128, ptr_size),
                    dest,
                )?;
            }

            "min_align_of_val" |
            "align_of_val" => {
                let mplace = this.deref_operand(args[0])?;
                let (_, align) = this.size_and_align_of_mplace(mplace)?
                    .expect("align_of_val called on extern type");
                let ptr_size = this.pointer_size();
                this.write_scalar(
                    Scalar::from_uint(align.bytes(), ptr_size),
                    dest,
                )?;
            }

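            // `unchecked_div`/`unchecked_rem` are UB on a zero divisor, which is the only case
            // checked explicitly here; the operation itself then goes through the regular
            // (overflow-ignoring) binop path.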
            "unchecked_div" => {
                let l = this.read_immediate(args[0])?;
                let r = this.read_immediate(args[1])?;
                let rval = r.to_scalar()?.to_bits(args[1].layout.size)?;
                if rval == 0 {
                    throw_ub_format!("Division by 0 in unchecked_div");
                }
                this.binop_ignore_overflow(
                    mir::BinOp::Div,
                    l,
                    r,
                    dest,
                )?;
            }

            "unchecked_rem" => {
                let l = this.read_immediate(args[0])?;
                let r = this.read_immediate(args[1])?;
                let rval = r.to_scalar()?.to_bits(args[1].layout.size)?;
                if rval == 0 {
                    throw_ub_format!("Division by 0 in unchecked_rem");
                }
                this.binop_ignore_overflow(
                    mir::BinOp::Rem,
                    l,
                    r,
                    dest,
                )?;
            }

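            // `unchecked_{add,sub,mul}`: evaluate the operation and report UB if the
            // overflow flag comes back set.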
            "unchecked_add" | "unchecked_sub" | "unchecked_mul" => {
                let l = this.read_immediate(args[0])?;
                let r = this.read_immediate(args[1])?;
                let op = match intrinsic_name.get() {
                    "unchecked_add" => mir::BinOp::Add,
                    "unchecked_sub" => mir::BinOp::Sub,
                    "unchecked_mul" => mir::BinOp::Mul,
                    _ => bug!(),
                };
                let (res, overflowed) = this.binary_op(op, l, r)?;
                if overflowed {
                    throw_ub_format!("Overflowing arithmetic in {}", intrinsic_name.get());
                }
                this.write_scalar(res, dest)?;
            }

            "uninit" => {
                // Check fast path: we don't want to force an allocation in case the destination is a simple value,
                // but we also do not want to create a new allocation with 0s and then copy that over.
                // FIXME: We do not properly validate in case of ZSTs and when doing it in memory!
                // However, this only affects direct calls of the intrinsic; calls to the stable
                // functions wrapping them do get their validation.
                // FIXME: should we check alignment for ZSTs?
                use crate::ScalarMaybeUndef;
                if !dest.layout.is_zst() {
                    match dest.layout.abi {
                        layout::Abi::Scalar(..) => {
                            let x = ScalarMaybeUndef::Undef;
                            this.write_immediate(Immediate::Scalar(x), dest)?;
                        }
                        layout::Abi::ScalarPair(..) => {
                            let x = ScalarMaybeUndef::Undef;
                            this.write_immediate(Immediate::ScalarPair(x, x), dest)?;
                        }
                        _ => {
                            // Do it in memory
                            let mplace = this.force_allocation(dest)?;
                            assert!(mplace.meta.is_none());
                            let ptr = mplace.ptr.to_ptr()?;
                            this.memory_mut()
                                .get_mut(ptr.alloc_id)?
                                .mark_definedness(ptr, dest.layout.size, false);
                        }
                    }
                }
            }

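            // `write_bytes` fills `count` values of type `T` with the given byte. The access
            // check returns `None` for a zero-sized write, in which case there is nothing to do.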
            "write_bytes" => {
                let ty = substs.type_at(0);
                let ty_layout = this.layout_of(ty)?;
                let val_byte = this.read_scalar(args[1])?.to_u8()?;
                let ptr = this.read_scalar(args[0])?.not_undef()?;
                let count = this.read_scalar(args[2])?.to_usize(this)?;
                let byte_count = ty_layout.size * count;
                match this.memory().check_ptr_access(ptr, byte_count, ty_layout.align.abi)? {
                    Some(ptr) => {
                        this.memory_mut()
                            .get_mut(ptr.alloc_id)?
                            .write_repeat(tcx, ptr, val_byte, byte_count)?;
                    }
                    None => {
                        // Size is 0, nothing to do.
                    }
                }
            }

            name => throw_unsup_format!("unimplemented intrinsic: {}", name),
        }

        Ok(())
    }
}