use rustc::mir;
use rustc::ty::layout::{self, LayoutOf, Size};
use rustc::ty;

use rustc::mir::interpret::{EvalResult, PointerArithmetic};

use super::{
    PlaceTy, OpTy, Value, Scalar, ScalarMaybeUndef, Borrow,
    FalibleScalarExt, OperatorEvalContextExt
};

pub trait EvalContextExt<'tcx> {
    fn call_intrinsic(
        &mut self,
        instance: ty::Instance<'tcx>,
        args: &[OpTy<'tcx, Borrow>],
        dest: PlaceTy<'tcx, Borrow>,
    ) -> EvalResult<'tcx>;
}

impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for super::MiriEvalContext<'a, 'mir, 'tcx> {
    fn call_intrinsic(
        &mut self,
        instance: ty::Instance<'tcx>,
        args: &[OpTy<'tcx, Borrow>],
        dest: PlaceTy<'tcx, Borrow>,
    ) -> EvalResult<'tcx> {
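        // Intrinsics that the core interpreter already knows how to emulate need no extra work here.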
        if self.emulate_intrinsic(instance, args, dest)? {
            return Ok(());
        }

        let substs = instance.substs;

        let intrinsic_name = &self.tcx.item_name(instance.def_id()).as_str()[..];
        match intrinsic_name {
            "arith_offset" => {
                let offset = self.read_scalar(args[1])?.to_isize(&self)?;
                let ptr = self.read_scalar(args[0])?.not_undef()?;

                let pointee_ty = substs.type_at(0);
                let pointee_size = self.layout_of(pointee_ty)?.size.bytes() as i64;
                let offset = offset.overflowing_mul(pointee_size).0;
                let result_ptr = ptr.ptr_wrapping_signed_offset(offset, &self);
                self.write_scalar(result_ptr, dest)?;
            }

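            // `assume(cond)`: it is UB for `cond` to be false, so a false condition is reported as an error.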
            "assume" => {
                let cond = self.read_scalar(args[0])?.to_bool()?;
                if !cond {
                    return err!(AssumptionNotHeld);
                }
            }

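            // Atomic and volatile loads are plain reads in Miri's single-threaded execution.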
            "atomic_load" |
            "atomic_load_relaxed" |
            "atomic_load_acq" |
            "volatile_load" => {
                let ptr = self.ref_to_mplace(self.read_value(args[0])?)?;
                let val = self.read_scalar(ptr.into())?; // make sure it fits into a scalar; otherwise it cannot be atomic
                self.write_scalar(val, dest)?;
            }

            "atomic_store" |
            "atomic_store_relaxed" |
            "atomic_store_rel" |
            "volatile_store" => {
                let ptr = self.ref_to_mplace(self.read_value(args[0])?)?;
                let val = self.read_scalar(args[1])?; // make sure it fits into a scalar; otherwise it cannot be atomic
                self.write_scalar(val, ptr.into())?;
            }

            "atomic_fence_acq" => {
                // we are inherently single-threaded and single-core, so this is a no-op
            }

            _ if intrinsic_name.starts_with("atomic_xchg") => {
                let ptr = self.ref_to_mplace(self.read_value(args[0])?)?;
                let new = self.read_scalar(args[1])?;
                let old = self.read_scalar(ptr.into())?;
                self.write_scalar(old, dest)?; // old value is returned
                self.write_scalar(new, ptr.into())?;
            }

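            // Compare-and-exchange: returns a pair of the old value and a flag telling whether the exchange happened.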
            _ if intrinsic_name.starts_with("atomic_cxchg") => {
                let ptr = self.ref_to_mplace(self.read_value(args[0])?)?;
                let expect_old = self.read_value(args[1])?; // read as value for the sake of `binary_op_val()`
                let new = self.read_scalar(args[2])?;
                let old = self.read_value(ptr.into())?; // read as value for the sake of `binary_op_val()`
                // binary_op_val will bail if either of them is not a scalar
                let (eq, _) = self.binary_op_val(mir::BinOp::Eq, old, expect_old)?;
                let res = Value::ScalarPair(old.to_scalar_or_undef(), eq.into());
                self.write_value(res, dest)?; // old value is returned
                // update ptr depending on comparison
                if eq.to_bool()? {
                    self.write_scalar(new, ptr.into())?;
                }
            }

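            // Atomic read-modify-write operations: the old value is returned, then the operation is applied in place.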
            "atomic_or" |
            "atomic_or_acq" |
            "atomic_or_rel" |
            "atomic_or_acqrel" |
            "atomic_or_relaxed" |
            "atomic_xor" |
            "atomic_xor_acq" |
            "atomic_xor_rel" |
            "atomic_xor_acqrel" |
            "atomic_xor_relaxed" |
            "atomic_and" |
            "atomic_and_acq" |
            "atomic_and_rel" |
            "atomic_and_acqrel" |
            "atomic_and_relaxed" |
            "atomic_xadd" |
            "atomic_xadd_acq" |
            "atomic_xadd_rel" |
            "atomic_xadd_acqrel" |
            "atomic_xadd_relaxed" |
            "atomic_xsub" |
            "atomic_xsub_acq" |
            "atomic_xsub_rel" |
            "atomic_xsub_acqrel" |
            "atomic_xsub_relaxed" => {
                let ptr = self.ref_to_mplace(self.read_value(args[0])?)?;
                let rhs = self.read_value(args[1])?;
                let old = self.read_value(ptr.into())?;
                self.write_value(*old, dest)?; // old value is returned
                let op = match intrinsic_name.split('_').nth(1).unwrap() {
                    "or" => mir::BinOp::BitOr,
                    "xor" => mir::BinOp::BitXor,
                    "and" => mir::BinOp::BitAnd,
                    "xadd" => mir::BinOp::Add,
                    "xsub" => mir::BinOp::Sub,
                    _ => bug!(),
                };
                // Atomics wrap around on overflow.
                self.binop_ignore_overflow(op, old, rhs, ptr.into())?;
            }

            "breakpoint" => unimplemented!(), // halt miri

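            // `copy`/`copy_nonoverlapping`: copy `count` elements of the element type; the nonoverlapping variant also checks that source and destination do not overlap.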
            "copy" |
            "copy_nonoverlapping" => {
                let elem_ty = substs.type_at(0);
                let elem_layout = self.layout_of(elem_ty)?;
                let elem_size = elem_layout.size.bytes();
                let count = self.read_scalar(args[2])?.to_usize(&self)?;
                let elem_align = elem_layout.align;
                let src = self.read_scalar(args[0])?.not_undef()?;
                let dest = self.read_scalar(args[1])?.not_undef()?;
                self.memory.copy(
                    src,
                    elem_align,
                    dest,
                    elem_align,
                    Size::from_bytes(count * elem_size),
                    intrinsic_name.ends_with("_nonoverlapping"),
                )?;
            }

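            // `discriminant_value`: read the enum discriminant of the value behind the reference.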
            "discriminant_value" => {
                let place = self.ref_to_mplace(self.read_value(args[0])?)?;
                let discr_val = self.read_discriminant(place.into())?.0;
                self.write_scalar(Scalar::from_uint(discr_val, dest.layout.size), dest)?;
            }

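            // Unary float intrinsics are implemented via the corresponding host float operations.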
            "sinf32" | "fabsf32" | "cosf32" | "sqrtf32" | "expf32" | "exp2f32" | "logf32" |
            "log10f32" | "log2f32" | "floorf32" | "ceilf32" | "truncf32" => {
                let f = self.read_scalar(args[0])?.to_f32()?;
                let f = match intrinsic_name {
                    "sinf32" => f.sin(),
                    "fabsf32" => f.abs(),
                    "cosf32" => f.cos(),
                    "sqrtf32" => f.sqrt(),
                    "expf32" => f.exp(),
                    "exp2f32" => f.exp2(),
                    "logf32" => f.ln(),
                    "log10f32" => f.log10(),
                    "log2f32" => f.log2(),
                    "floorf32" => f.floor(),
                    "ceilf32" => f.ceil(),
                    "truncf32" => f.trunc(),
                    _ => bug!(),
                };
                self.write_scalar(Scalar::from_f32(f), dest)?;
            }

            "sinf64" | "fabsf64" | "cosf64" | "sqrtf64" | "expf64" | "exp2f64" | "logf64" |
            "log10f64" | "log2f64" | "floorf64" | "ceilf64" | "truncf64" => {
                let f = self.read_scalar(args[0])?.to_f64()?;
                let f = match intrinsic_name {
                    "sinf64" => f.sin(),
                    "fabsf64" => f.abs(),
                    "cosf64" => f.cos(),
                    "sqrtf64" => f.sqrt(),
                    "expf64" => f.exp(),
                    "exp2f64" => f.exp2(),
                    "logf64" => f.ln(),
                    "log10f64" => f.log10(),
                    "log2f64" => f.log2(),
                    "floorf64" => f.floor(),
                    "ceilf64" => f.ceil(),
                    "truncf64" => f.trunc(),
                    _ => bug!(),
                };
                self.write_scalar(Scalar::from_f64(f), dest)?;
            }

            "fadd_fast" | "fsub_fast" | "fmul_fast" | "fdiv_fast" | "frem_fast" => {
                let a = self.read_value(args[0])?;
                let b = self.read_value(args[1])?;
                let op = match intrinsic_name {
                    "fadd_fast" => mir::BinOp::Add,
                    "fsub_fast" => mir::BinOp::Sub,
                    "fmul_fast" => mir::BinOp::Mul,
                    "fdiv_fast" => mir::BinOp::Div,
                    "frem_fast" => mir::BinOp::Rem,
                    _ => bug!(),
                };
                self.binop_ignore_overflow(op, a, b, dest)?;
            }

            "exact_div" => {
                // Performs an exact division, resulting in undefined behavior where
                // `x % y != 0` or `y == 0` or `x == T::min_value() && y == -1`
                let a = self.read_value(args[0])?;
                let b = self.read_value(args[1])?;
                // check x % y != 0
                if !self.binary_op_val(mir::BinOp::Rem, a, b)?.0.is_null() {
                    return err!(ValidationFailure(format!("exact_div: {:?} cannot be divided by {:?}", a, b)));
                }
                self.binop_ignore_overflow(mir::BinOp::Div, a, b, dest)?;
            },

            "likely" | "unlikely" | "forget" => {}

            "init" => {
                // Check fast path: we don't want to force an allocation in case the destination is a simple value,
                // but we also do not want to create a new allocation with 0s and then copy that over.
                // FIXME: We do not properly validate in case of ZSTs and when doing it in memory!
                // However, this only affects direct calls of the intrinsic; calls to the stable
                // functions wrapping them do get their validation.
                if !dest.layout.is_zst() { // nothing to do for ZST
                    match dest.layout.abi {
                        layout::Abi::Scalar(ref s) => {
                            let x = Scalar::from_int(0, s.value.size(&self));
                            self.write_value(Value::Scalar(x.into()), dest)?;
                        }
                        layout::Abi::ScalarPair(ref s1, ref s2) => {
                            let x = Scalar::from_int(0, s1.value.size(&self));
                            let y = Scalar::from_int(0, s2.value.size(&self));
                            self.write_value(Value::ScalarPair(x.into(), y.into()), dest)?;
                        }
                        _ => {
                            // Do it in memory
                            let mplace = self.force_allocation(dest)?;
                            assert!(mplace.meta.is_none());
                            self.memory.write_repeat(mplace.ptr, 0, dest.layout.size)?;
                        }
                    }
                }
            }

            "pref_align_of" => {
                let ty = substs.type_at(0);
                let layout = self.layout_of(ty)?;
                let align = layout.align.pref();
                let ptr_size = self.pointer_size();
                let align_val = Scalar::from_uint(align as u128, ptr_size);
                self.write_scalar(align_val, dest)?;
            }

            "move_val_init" => {
                let ptr = self.ref_to_mplace(self.read_value(args[0])?)?;
                self.copy_op(args[1], ptr.into())?;
            }

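            // Unlike the wrapping `arith_offset` above, `offset` requires the result to stay within the bounds of the allocation.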
            "offset" => {
                let offset = self.read_scalar(args[1])?.to_isize(&self)?;
                let ptr = self.read_scalar(args[0])?.not_undef()?;
                let result_ptr = self.pointer_offset_inbounds(ptr, substs.type_at(0), offset)?;
                self.write_scalar(result_ptr, dest)?;
            }

            "powf32" => {
                let f = self.read_scalar(args[0])?.to_f32()?;
                let f2 = self.read_scalar(args[1])?.to_f32()?;
                self.write_scalar(
                    Scalar::from_f32(f.powf(f2)),
                    dest,
                )?;
            }

            "powf64" => {
                let f = self.read_scalar(args[0])?.to_f64()?;
                let f2 = self.read_scalar(args[1])?.to_f64()?;
                self.write_scalar(
                    Scalar::from_f64(f.powf(f2)),
                    dest,
                )?;
            }

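            // Note: computed as a separate multiply and add, so the result can differ from a true fused multiply-add in the last bit.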
            "fmaf32" => {
                let a = self.read_scalar(args[0])?.to_f32()?;
                let b = self.read_scalar(args[1])?.to_f32()?;
                let c = self.read_scalar(args[2])?.to_f32()?;
                self.write_scalar(
                    Scalar::from_f32(a * b + c),
                    dest,
                )?;
            }

            "fmaf64" => {
                let a = self.read_scalar(args[0])?.to_f64()?;
                let b = self.read_scalar(args[1])?.to_f64()?;
                let c = self.read_scalar(args[2])?.to_f64()?;
                self.write_scalar(
                    Scalar::from_f64(a * b + c),
                    dest,
                )?;
            }

            "powif32" => {
                let f = self.read_scalar(args[0])?.to_f32()?;
                let i = self.read_scalar(args[1])?.to_i32()?;
                self.write_scalar(
                    Scalar::from_f32(f.powi(i)),
                    dest,
                )?;
            }

            "powif64" => {
                let f = self.read_scalar(args[0])?.to_f64()?;
                let i = self.read_scalar(args[1])?.to_i32()?;
                self.write_scalar(
                    Scalar::from_f64(f.powi(i)),
                    dest,
                )?;
            }

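            // `size_of_val`: compute the (possibly dynamic) size of the value behind the reference.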
            "size_of_val" => {
                let mplace = self.ref_to_mplace(self.read_value(args[0])?)?;
                let (size, _) = self.size_and_align_of_mplace(mplace)?
                    .expect("size_of_val called on extern type");
                let ptr_size = self.pointer_size();
                self.write_scalar(
                    Scalar::from_uint(size.bytes() as u128, ptr_size),
                    dest,
                )?;
            }

            "min_align_of_val" |
            "align_of_val" => {
                let mplace = self.ref_to_mplace(self.read_value(args[0])?)?;
                let (_, align) = self.size_and_align_of_mplace(mplace)?
                    .expect("align_of_val called on extern type");
                let ptr_size = self.pointer_size();
                self.write_scalar(
                    Scalar::from_uint(align.abi(), ptr_size),
                    dest,
                )?;
            }

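            // `type_name`: return the textual name of the type as a string value.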
            "type_name" => {
                let ty = substs.type_at(0);
                let ty_name = ty.to_string();
                let value = self.str_to_value(&ty_name)?;
                self.write_value(value, dest)?;
            }

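            // `unchecked_div`/`unchecked_rem`: division by zero is still reported as an error; overflow (MIN / -1) is not checked here.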
            "unchecked_div" => {
                let l = self.read_value(args[0])?;
                let r = self.read_value(args[1])?;
                let rval = r.to_scalar()?.to_bytes()?;
                if rval == 0 {
                    return err!(Intrinsic(format!("Division by 0 in unchecked_div")));
                }
                self.binop_ignore_overflow(
                    mir::BinOp::Div,
                    l,
                    r,
                    dest,
                )?;
            }

            "unchecked_rem" => {
                let l = self.read_value(args[0])?;
                let r = self.read_value(args[1])?;
                let rval = r.to_scalar()?.to_bytes()?;
                if rval == 0 {
                    return err!(Intrinsic(format!("Division by 0 in unchecked_rem")));
                }
                self.binop_ignore_overflow(
                    mir::BinOp::Rem,
                    l,
                    r,
                    dest,
                )?;
            }

            "uninit" => {
                // Check fast path: we don't want to force an allocation in case the destination is a simple value,
                // but we also do not want to create a new allocation with 0s and then copy that over.
                // FIXME: We do not properly validate in case of ZSTs and when doing it in memory!
                // However, this only affects direct calls of the intrinsic; calls to the stable
                // functions wrapping them do get their validation.
                if !dest.layout.is_zst() { // nothing to do for ZST
                    match dest.layout.abi {
                        layout::Abi::Scalar(..) => {
                            let x = ScalarMaybeUndef::Undef;
                            self.write_value(Value::Scalar(x), dest)?;
                        }
                        layout::Abi::ScalarPair(..) => {
                            let x = ScalarMaybeUndef::Undef;
                            self.write_value(Value::ScalarPair(x, x), dest)?;
                        }
                        _ => {
                            // Do it in memory
                            let mplace = self.force_allocation(dest)?;
                            assert!(mplace.meta.is_none());
                            self.memory.mark_definedness(mplace.ptr.to_ptr()?, dest.layout.size, false)?;
                        }
                    }
                }
            }

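            // `write_bytes`: fill `count` values of the given type with the given byte, after checking the pointer's alignment.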
            "write_bytes" => {
                let ty = substs.type_at(0);
                let ty_layout = self.layout_of(ty)?;
                let val_byte = self.read_scalar(args[1])?.to_u8()?;
                let ptr = self.read_scalar(args[0])?.not_undef()?;
                let count = self.read_scalar(args[2])?.to_usize(&self)?;
                self.memory.check_align(ptr, ty_layout.align)?;
                self.memory.write_repeat(ptr, val_byte, ty_layout.size * count)?;
            }

            name => return err!(Unimplemented(format!("unimplemented intrinsic: {}", name))),
        }

        Ok(())
    }
}