// src/intrinsic.rs (rust.git, commit cd953ba7c569351e5ba2ea39f975118a622895a5)
use rustc::mir;
use rustc::ty::layout::{TyLayout, LayoutOf, Size, Primitive, Integer::*};
use rustc::ty;

use rustc::mir::interpret::{EvalResult, Scalar, Value, ScalarMaybeUndef};
use rustc_mir::interpret::{Place, PlaceExtra, HasMemory, EvalContext, ValTy};

use helpers::EvalContextExt as HelperEvalContextExt;

use super::ScalarExt;

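/// Emulates a Rust compiler intrinsic: computes the result directly, writes it
/// to `dest`, and jumps to `target`. No stack frame is pushed, so the main
/// interpreter loop behaves as if the call had completed.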
pub trait EvalContextExt<'tcx> {
    fn call_intrinsic(
        &mut self,
        instance: ty::Instance<'tcx>,
        args: &[ValTy<'tcx>],
        dest: Place,
        dest_layout: TyLayout<'tcx>,
        target: mir::BasicBlock,
    ) -> EvalResult<'tcx>;
}

impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for EvalContext<'a, 'mir, 'tcx, super::Evaluator<'tcx>> {
    fn call_intrinsic(
        &mut self,
        instance: ty::Instance<'tcx>,
        args: &[ValTy<'tcx>],
        dest: Place,
        dest_layout: TyLayout<'tcx>,
        target: mir::BasicBlock,
    ) -> EvalResult<'tcx> {
        let substs = instance.substs;

        let intrinsic_name = &self.tcx.item_name(instance.def_id()).as_str()[..];
        match intrinsic_name {
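            // Checked arithmetic: each writes a (result, overflowed: bool) pair
            // into `dest`, e.g. `add_with_overflow(i32::MAX, 1)` yields (i32::MIN, true).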
            "add_with_overflow" => {
                self.intrinsic_with_overflow(
                    mir::BinOp::Add,
                    args[0],
                    args[1],
                    dest,
                    dest_layout.ty,
                )?
            }

            "sub_with_overflow" => {
                self.intrinsic_with_overflow(
                    mir::BinOp::Sub,
                    args[0],
                    args[1],
                    dest,
                    dest_layout.ty,
                )?
            }

            "mul_with_overflow" => {
                self.intrinsic_with_overflow(
                    mir::BinOp::Mul,
                    args[0],
                    args[1],
                    dest,
                    dest_layout.ty,
                )?
            }

            "arith_offset" => {
                let offset = self.value_to_isize(args[1])?;
                let ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?;
                let result_ptr = self.wrapping_pointer_offset(ptr, substs.type_at(0), offset)?;
                self.write_ptr(dest, result_ptr, dest_layout.ty)?;
            }

            "assume" => {
                let cond = self.value_to_scalar(args[0])?.to_bool()?;
                if !cond {
                    return err!(AssumptionNotHeld);
                }
            }

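            // Miri is single-threaded, so the ordering suffixes are irrelevant:
            // atomic loads and stores reduce to plain memory accesses.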
            "atomic_load" |
            "atomic_load_relaxed" |
            "atomic_load_acq" |
            "volatile_load" => {
                let ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?;
                let align = self.layout_of(args[0].ty)?.align;

                let valty = ValTy {
                    value: Value::ByRef(ptr, align),
                    ty: substs.type_at(0),
                };
                self.write_value(valty, dest)?;
            }

            "atomic_store" |
            "atomic_store_relaxed" |
            "atomic_store_rel" |
            "volatile_store" => {
                let ty = substs.type_at(0);
                let align = self.layout_of(ty)?.align;
                let dest = self.into_ptr(args[0].value)?.unwrap_or_err()?;
                self.write_value_to_ptr(args[1].value, dest, align, ty)?;
            }

            "atomic_fence_acq" => {
                // We are inherently single-threaded and single-cored; this is a no-op.
            }

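            // atomic_xchg*: unconditionally swap in the new value and return the old one.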
            _ if intrinsic_name.starts_with("atomic_xchg") => {
                let ty = substs.type_at(0);
                let align = self.layout_of(ty)?.align;
                let ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?;
                let change = self.value_to_scalar(args[1])?;
                let old = self.read_value(ptr, align, ty)?;
                let old = match old {
                    Value::Scalar(val) => val,
                    Value::ByRef { .. } => bug!("just read the value, can't be byref"),
                    Value::ScalarPair(..) => bug!("atomic_xchg doesn't work with nonprimitives"),
                };
                self.write_scalar(dest, old, ty)?;
                self.write_scalar(
                    Place::from_scalar_ptr(ptr.into(), align),
                    change,
                    ty,
                )?;
            }

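            // atomic_cxchg*: compare-and-exchange, returning (old_value, old_value == expected).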
            _ if intrinsic_name.starts_with("atomic_cxchg") => {
                let ty = substs.type_at(0);
                let align = self.layout_of(ty)?.align;
                let ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?;
                let expect_old = self.value_to_scalar(args[1])?;
                let change = self.value_to_scalar(args[2])?;
                let old = self.read_value(ptr, align, ty)?;
                let old = match old {
                    Value::Scalar(val) => val.unwrap_or_err()?,
                    Value::ByRef { .. } => bug!("just read the value, can't be byref"),
                    Value::ScalarPair(..) => bug!("atomic_cxchg doesn't work with nonprimitives"),
                };
                let (val, _) = self.binary_op(mir::BinOp::Eq, old, ty, expect_old, ty)?;
                let valty = ValTy {
                    value: Value::ScalarPair(old.into(), val.into()),
                    ty: dest_layout.ty,
                };
                self.write_value(valty, dest)?;
                // Store the new value only if the comparison succeeded, using the
                // alignment of `ty` (not of the returned pair).
                if val.to_bool()? {
                    self.write_scalar(
                        Place::from_scalar_ptr(ptr.into(), align),
                        change,
                        ty,
                    )?;
                }
            }

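            // Atomic read-modify-write: return the old value, then store
            // `old <op> change`, with the op decoded from the intrinsic name.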
            "atomic_or" |
            "atomic_or_acq" |
            "atomic_or_rel" |
            "atomic_or_acqrel" |
            "atomic_or_relaxed" |
            "atomic_xor" |
            "atomic_xor_acq" |
            "atomic_xor_rel" |
            "atomic_xor_acqrel" |
            "atomic_xor_relaxed" |
            "atomic_and" |
            "atomic_and_acq" |
            "atomic_and_rel" |
            "atomic_and_acqrel" |
            "atomic_and_relaxed" |
            "atomic_xadd" |
            "atomic_xadd_acq" |
            "atomic_xadd_rel" |
            "atomic_xadd_acqrel" |
            "atomic_xadd_relaxed" |
            "atomic_xsub" |
            "atomic_xsub_acq" |
            "atomic_xsub_rel" |
            "atomic_xsub_acqrel" |
            "atomic_xsub_relaxed" => {
                let ty = substs.type_at(0);
                let align = self.layout_of(ty)?.align;
                let ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?;
                let change = self.value_to_scalar(args[1])?;
                let old = self.read_value(ptr, align, ty)?;
                let old = match old {
                    Value::Scalar(val) => val,
                    Value::ByRef { .. } => bug!("just read the value, can't be byref"),
                    Value::ScalarPair(..) => {
                        bug!("{} doesn't work with nonprimitives", intrinsic_name)
                    }
                };
                self.write_scalar(dest, old, ty)?;
                let op = match intrinsic_name.split('_').nth(1).unwrap() {
                    "or" => mir::BinOp::BitOr,
                    "xor" => mir::BinOp::BitXor,
                    "and" => mir::BinOp::BitAnd,
                    "xadd" => mir::BinOp::Add,
                    "xsub" => mir::BinOp::Sub,
                    _ => bug!(),
                };
                // FIXME: what do atomics do on overflow?
                let (val, _) = self.binary_op(op, old.unwrap_or_err()?, ty, change, ty)?;
                self.write_scalar(Place::from_scalar_ptr(ptr.into(), align), val, ty)?;
            }

            "breakpoint" => unimplemented!(), // halt miri

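            // `count` is in elements, not bytes; the byte length is
            // `count * size_of::<T>()`, and a zero-length copy skips all checks.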
            "copy" |
            "copy_nonoverlapping" => {
                let elem_ty = substs.type_at(0);
                let elem_layout = self.layout_of(elem_ty)?;
                let elem_size = elem_layout.size.bytes();
                let count = self.value_to_usize(args[2])?;
                if count * elem_size != 0 {
                    // TODO: We do not even validate alignment for the 0-bytes case.  libstd relies on this in vec::IntoIter::next.
                    // Also see the write_bytes intrinsic.
                    let elem_align = elem_layout.align;
                    let src = self.into_ptr(args[0].value)?.unwrap_or_err()?;
                    let dest = self.into_ptr(args[1].value)?.unwrap_or_err()?;
                    self.memory.copy(
                        src,
                        elem_align,
                        dest,
                        elem_align,
                        Size::from_bytes(count * elem_size),
                        intrinsic_name.ends_with("_nonoverlapping"),
                    )?;
                }
            }

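            // Integer bit intrinsics: the `_nonzero` variants reject 0 here, then
            // everything defers to `numeric_intrinsic` at the bottom of this file.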
            "ctpop" | "cttz" | "cttz_nonzero" | "ctlz" | "ctlz_nonzero" | "bswap" => {
                let ty = substs.type_at(0);
                let num = self.value_to_scalar(args[0])?.to_bytes()?;
                let kind = match self.layout_of(ty)?.abi {
                    ty::layout::Abi::Scalar(ref scalar) => scalar.value,
                    _ => Err(::rustc::mir::interpret::EvalErrorKind::TypeNotPrimitive(ty))?,
                };
                let num = if intrinsic_name.ends_with("_nonzero") {
                    if num == 0 {
                        return err!(Intrinsic(format!("{} called on 0", intrinsic_name)));
                    }
                    numeric_intrinsic(intrinsic_name.trim_right_matches("_nonzero"), num, kind)?
                } else {
                    numeric_intrinsic(intrinsic_name, num, kind)?
                };
                self.write_scalar(dest, num, ty)?;
            }

            "discriminant_value" => {
                let ty = substs.type_at(0);
                let layout = self.layout_of(ty)?;
                let adt_ptr = self.into_ptr(args[0].value)?;
                let adt_align = self.layout_of(args[0].ty)?.align;
                let place = Place::from_scalar_ptr(adt_ptr, adt_align);
                let discr_val = self.read_discriminant_value(place, layout)?;
                self.write_scalar(dest, Scalar::from_uint(discr_val, dest_layout.size), dest_layout.ty)?;
            }

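            // Unary float intrinsics: reinterpret the raw bits as a host float,
            // apply Rust's own f32/f64 method, and write the result back.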
            "sinf32" | "fabsf32" | "cosf32" | "sqrtf32" | "expf32" | "exp2f32" | "logf32" |
            "log10f32" | "log2f32" | "floorf32" | "ceilf32" | "truncf32" => {
                let f = self.value_to_scalar(args[0])?.to_bytes()?;
                let f = f32::from_bits(f as u32);
                let f = match intrinsic_name {
                    "sinf32" => f.sin(),
                    "fabsf32" => f.abs(),
                    "cosf32" => f.cos(),
                    "sqrtf32" => f.sqrt(),
                    "expf32" => f.exp(),
                    "exp2f32" => f.exp2(),
                    "logf32" => f.ln(),
                    "log10f32" => f.log10(),
                    "log2f32" => f.log2(),
                    "floorf32" => f.floor(),
                    "ceilf32" => f.ceil(),
                    "truncf32" => f.trunc(),
                    _ => bug!(),
                };
                self.write_scalar(dest, Scalar::from_f32(f), dest_layout.ty)?;
            }

            "sinf64" | "fabsf64" | "cosf64" | "sqrtf64" | "expf64" | "exp2f64" | "logf64" |
            "log10f64" | "log2f64" | "floorf64" | "ceilf64" | "truncf64" => {
                let f = self.value_to_scalar(args[0])?.to_bytes()?;
                let f = f64::from_bits(f as u64);
                let f = match intrinsic_name {
                    "sinf64" => f.sin(),
                    "fabsf64" => f.abs(),
                    "cosf64" => f.cos(),
                    "sqrtf64" => f.sqrt(),
                    "expf64" => f.exp(),
                    "exp2f64" => f.exp2(),
                    "logf64" => f.ln(),
                    "log10f64" => f.log10(),
                    "log2f64" => f.log2(),
                    "floorf64" => f.floor(),
                    "ceilf64" => f.ceil(),
                    "truncf64" => f.trunc(),
                    _ => bug!(),
                };
                self.write_scalar(dest, Scalar::from_f64(f), dest_layout.ty)?;
            }

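            // The *_fast intrinsics let the compiler assume well-behaved (finite)
            // floats; Miri simply evaluates them as the ordinary operation.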
            "fadd_fast" | "fsub_fast" | "fmul_fast" | "fdiv_fast" | "frem_fast" => {
                let ty = substs.type_at(0);
                let a = self.value_to_scalar(args[0])?;
                let b = self.value_to_scalar(args[1])?;
                let op = match intrinsic_name {
                    "fadd_fast" => mir::BinOp::Add,
                    "fsub_fast" => mir::BinOp::Sub,
                    "fmul_fast" => mir::BinOp::Mul,
                    "fdiv_fast" => mir::BinOp::Div,
                    "frem_fast" => mir::BinOp::Rem,
                    _ => bug!(),
                };
                let result = self.binary_op(op, a, ty, b, ty)?;
                self.write_scalar(dest, result.0, dest_layout.ty)?;
            }

            "exact_div" => {
                // Performs an exact division, resulting in undefined behavior when
                // `x % y != 0` or `y == 0` or `x == T::min_value() && y == -1`.
                let ty = substs.type_at(0);
                let a = self.value_to_scalar(args[0])?;
                let b = self.value_to_scalar(args[1])?;
                // check x % y != 0
                if !self.binary_op(mir::BinOp::Rem, a, ty, b, ty)?.0.is_null() {
                    return err!(ValidationFailure(format!("exact_div: {:?} cannot be divided by {:?}", a, b)));
                }
                let result = self.binary_op(mir::BinOp::Div, a, ty, b, ty)?;
                self.write_scalar(dest, result.0, dest_layout.ty)?;
            },

            "likely" | "unlikely" | "forget" => {}

            "init" => {
                // we don't want to force an allocation in case the destination is a simple value
                match dest {
                    Place::Local { frame, local } => {
                        match self.stack()[frame].locals[local].access()? {
                            Value::ByRef(ptr, _) => {
                                // These writes have no alignment restriction anyway.
                                self.memory.write_repeat(ptr, 0, dest_layout.size)?;
                            }
                            Value::Scalar(_) => self.write_value(ValTy {
                                value: Value::Scalar(Scalar::null(dest_layout.size).into()),
                                ty: dest_layout.ty,
                            }, dest)?,
                            Value::ScalarPair(..) => {
                                self.write_value(ValTy {
                                    value: Value::ScalarPair(
                                        Scalar::null(dest_layout.size).into(),
                                        Scalar::null(dest_layout.size).into(),
                                    ),
                                    ty: dest_layout.ty,
                                }, dest)?;
                            }
                        }
                    },
                    Place::Ptr {
                        ptr,
                        align: _align,
                        extra: PlaceExtra::None,
                    } => self.memory.write_repeat(ptr.unwrap_or_err()?, 0, dest_layout.size)?,
                    Place::Ptr { .. } => {
                        bug!("init intrinsic tried to write to fat or unaligned ptr target")
                    }
                }
            }

            "min_align_of" => {
                let elem_ty = substs.type_at(0);
                let elem_align = self.layout_of(elem_ty)?.align.abi();
                let ptr_size = self.memory.pointer_size();
                let align_val = Scalar::from_uint(elem_align as u128, ptr_size);
                self.write_scalar(dest, align_val, dest_layout.ty)?;
            }

            "pref_align_of" => {
                let ty = substs.type_at(0);
                let layout = self.layout_of(ty)?;
                let align = layout.align.pref();
                let ptr_size = self.memory.pointer_size();
                let align_val = Scalar::from_uint(align as u128, ptr_size);
                self.write_scalar(dest, align_val, dest_layout.ty)?;
            }

            "move_val_init" => {
                let ty = substs.type_at(0);
                let ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?;
                let align = self.layout_of(args[0].ty)?.align;
                self.write_value_to_ptr(args[1].value, ptr, align, ty)?;
            }

            "needs_drop" => {
                let ty = substs.type_at(0);
                let env = ty::ParamEnv::reveal_all();
                let needs_drop = ty.needs_drop(self.tcx.tcx, env);
                self.write_scalar(
                    dest,
                    Scalar::from_bool(needs_drop),
                    dest_layout.ty,
                )?;
            }

            "offset" => {
                let offset = self.value_to_isize(args[1])?;
                let ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?;
                let result_ptr = self.pointer_offset(ptr, substs.type_at(0), offset)?;
                self.write_ptr(dest, result_ptr, dest_layout.ty)?;
            }

            "overflowing_sub" => {
                self.intrinsic_overflowing(
                    mir::BinOp::Sub,
                    args[0],
                    args[1],
                    dest,
                    dest_layout.ty,
                )?;
            }

            "overflowing_mul" => {
                self.intrinsic_overflowing(
                    mir::BinOp::Mul,
                    args[0],
                    args[1],
                    dest,
                    dest_layout.ty,
                )?;
            }

            "overflowing_add" => {
                self.intrinsic_overflowing(
                    mir::BinOp::Add,
                    args[0],
                    args[1],
                    dest,
                    dest_layout.ty,
                )?;
            }

            "powf32" => {
                let f = self.value_to_scalar(args[0])?.to_bits(Size::from_bits(32))?;
                let f = f32::from_bits(f as u32);
                let f2 = self.value_to_scalar(args[1])?.to_bits(Size::from_bits(32))?;
                let f2 = f32::from_bits(f2 as u32);
                self.write_scalar(
                    dest,
                    Scalar::from_f32(f.powf(f2)),
                    dest_layout.ty,
                )?;
            }

            "powf64" => {
                let f = self.value_to_scalar(args[0])?.to_bits(Size::from_bits(64))?;
                let f = f64::from_bits(f as u64);
                let f2 = self.value_to_scalar(args[1])?.to_bits(Size::from_bits(64))?;
                let f2 = f64::from_bits(f2 as u64);
                self.write_scalar(
                    dest,
                    Scalar::from_f64(f.powf(f2)),
                    dest_layout.ty,
                )?;
            }

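            // A fused multiply-add must round only once; `mul_add` below does
            // that, whereas `a * b + c` would round twice.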
            "fmaf32" => {
                let a = self.value_to_scalar(args[0])?.to_bits(Size::from_bits(32))?;
                let a = f32::from_bits(a as u32);
                let b = self.value_to_scalar(args[1])?.to_bits(Size::from_bits(32))?;
                let b = f32::from_bits(b as u32);
                let c = self.value_to_scalar(args[2])?.to_bits(Size::from_bits(32))?;
                let c = f32::from_bits(c as u32);
                self.write_scalar(
                    dest,
                    Scalar::from_f32(a.mul_add(b, c)),
                    dest_layout.ty,
                )?;
            }

            "fmaf64" => {
                let a = self.value_to_scalar(args[0])?.to_bits(Size::from_bits(64))?;
                let a = f64::from_bits(a as u64);
                let b = self.value_to_scalar(args[1])?.to_bits(Size::from_bits(64))?;
                let b = f64::from_bits(b as u64);
                let c = self.value_to_scalar(args[2])?.to_bits(Size::from_bits(64))?;
                let c = f64::from_bits(c as u64);
                self.write_scalar(
                    dest,
                    Scalar::from_f64(a.mul_add(b, c)),
                    dest_layout.ty,
                )?;
            }

            "powif32" => {
                let f = self.value_to_scalar(args[0])?.to_bits(Size::from_bits(32))?;
                let f = f32::from_bits(f as u32);
                let i = self.value_to_i32(args[1])?;
                self.write_scalar(
                    dest,
                    Scalar::from_f32(f.powi(i)),
                    dest_layout.ty,
                )?;
            }

            "powif64" => {
                let f = self.value_to_scalar(args[0])?.to_bits(Size::from_bits(64))?;
                let f = f64::from_bits(f as u64);
                let i = self.value_to_i32(args[1])?;
                self.write_scalar(
                    dest,
                    Scalar::from_f64(f.powi(i)),
                    dest_layout.ty,
                )?;
            }

            "size_of" => {
                let ty = substs.type_at(0);
                let size = self.layout_of(ty)?.size.bytes();
                let ptr_size = self.memory.pointer_size();
                self.write_scalar(dest, Scalar::from_uint(size, ptr_size), dest_layout.ty)?;
            }

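            // For possibly-unsized types, size and alignment depend on the value's
            // metadata (slice length, vtable), hence `size_and_align_of_dst`.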
            "size_of_val" => {
                let ty = substs.type_at(0);
                let (size, _) = self.size_and_align_of_dst(ty, args[0].value)?;
                let ptr_size = self.memory.pointer_size();
                self.write_scalar(
                    dest,
                    Scalar::from_uint(size.bytes() as u128, ptr_size),
                    dest_layout.ty,
                )?;
            }

            "min_align_of_val" |
            "align_of_val" => {
                let ty = substs.type_at(0);
                let (_, align) = self.size_and_align_of_dst(ty, args[0].value)?;
                let ptr_size = self.memory.pointer_size();
                self.write_scalar(
                    dest,
                    Scalar::from_uint(align.abi(), ptr_size),
                    dest_layout.ty,
                )?;
            }

            "type_name" => {
                let ty = substs.type_at(0);
                let ty_name = ty.to_string();
                let value = self.str_to_value(&ty_name)?;
                self.write_value(ValTy { value, ty: dest_layout.ty }, dest)?;
            }

            "type_id" => {
                let ty = substs.type_at(0);
                let n = self.tcx.type_id_hash(ty);
                self.write_scalar(dest, Scalar::Bits { bits: n as u128, size: 8 }, dest_layout.ty)?;
            }

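            // transmute: write the source value into the (force-allocated)
            // destination; the compiler has already checked that the sizes match.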
            "transmute" => {
                let src_ty = substs.type_at(0);
                let _src_align = self.layout_of(src_ty)?.align;
                let ptr = self.force_allocation(dest)?.to_ptr()?;
                let dest_align = self.layout_of(substs.type_at(1))?.align;
                self.write_value_to_ptr(args[0].value, ptr.into(), dest_align, src_ty)?;
            }

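            // The unchecked_* intrinsics are UB on out-of-range shifts and zero
            // divisors; Miri detects the violation and reports an error instead.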
            "unchecked_shl" => {
                let bits = dest_layout.size.bytes() as u128 * 8;
                let rhs = self.value_to_scalar(args[1])?.to_bytes()?;
                if rhs >= bits {
                    return err!(Intrinsic(
                        format!("Overflowing shift by {} in unchecked_shl", rhs),
                    ));
                }
                self.intrinsic_overflowing(
                    mir::BinOp::Shl,
                    args[0],
                    args[1],
                    dest,
                    dest_layout.ty,
                )?;
            }

            "unchecked_shr" => {
                let bits = dest_layout.size.bytes() as u128 * 8;
                let rhs = self.value_to_scalar(args[1])?.to_bytes()?;
                if rhs >= bits {
                    return err!(Intrinsic(
                        format!("Overflowing shift by {} in unchecked_shr", rhs),
                    ));
                }
                self.intrinsic_overflowing(
                    mir::BinOp::Shr,
                    args[0],
                    args[1],
                    dest,
                    dest_layout.ty,
                )?;
            }

            "unchecked_div" => {
                let rhs = self.value_to_scalar(args[1])?.to_bytes()?;
                if rhs == 0 {
                    return err!(Intrinsic(format!("Division by 0 in unchecked_div")));
                }
                self.intrinsic_overflowing(
                    mir::BinOp::Div,
                    args[0],
                    args[1],
                    dest,
                    dest_layout.ty,
                )?;
            }

            "unchecked_rem" => {
                let rhs = self.value_to_scalar(args[1])?.to_bytes()?;
                if rhs == 0 {
                    return err!(Intrinsic(format!("Division by 0 in unchecked_rem")));
                }
                self.intrinsic_overflowing(
                    mir::BinOp::Rem,
                    args[0],
                    args[1],
                    dest,
                    dest_layout.ty,
                )?;
            }

            "uninit" => {
                // we don't want to force an allocation in case the destination is a simple value
                match dest {
                    Place::Local { frame, local } => {
                        match self.stack()[frame].locals[local].access()? {
                            Value::ByRef(ptr, _) => {
                                // These writes have no alignment restriction anyway.
                                self.memory.mark_definedness(ptr, dest_layout.size, false)?;
                            }
                            Value::Scalar(_) => self.write_value(ValTy {
                                value: Value::Scalar(ScalarMaybeUndef::Undef),
                                ty: dest_layout.ty,
                            }, dest)?,
                            Value::ScalarPair(..) => {
                                self.write_value(ValTy {
                                    value: Value::ScalarPair(
                                        ScalarMaybeUndef::Undef,
                                        ScalarMaybeUndef::Undef,
                                    ),
                                    ty: dest_layout.ty,
                                }, dest)?;
                            }
                        }
                    },
                    Place::Ptr {
                        ptr,
                        align: _align,
                        extra: PlaceExtra::None,
                    } => self.memory.mark_definedness(ptr.unwrap_or_err()?, dest_layout.size, false)?,
                    Place::Ptr { .. } => {
                        bug!("uninit intrinsic tried to write to fat or unaligned ptr target")
                    }
                }
            }

            "write_bytes" => {
                let ty = substs.type_at(0);
                let ty_layout = self.layout_of(ty)?;
                let val_byte = self.value_to_u8(args[1])?;
                let ptr = self.into_ptr(args[0].value)?.unwrap_or_err()?;
                let count = self.value_to_usize(args[2])?;
                if count > 0 {
                    // HashMap relies on write_bytes on a NULL ptr with count == 0 to work
                    // TODO: Should we, at least, validate the alignment? (Also see the copy intrinsic)
                    self.memory.check_align(ptr, ty_layout.align)?;
                    self.memory.write_repeat(ptr, val_byte, ty_layout.size * count)?;
                }
            }

            name => return err!(Unimplemented(format!("unimplemented intrinsic: {}", name))),
        }

        self.goto_block(target);

        // Since we pushed no stack frame, the main loop will act
        // as if the call just completed and it's returning to the
        // current frame.
        Ok(())
    }
}

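/// Evaluates a bit-manipulation intrinsic (`bswap`, `ctlz`, `ctpop`, `cttz`) on
/// the raw bits of an integer, dispatching on the width and signedness in `kind`.
/// For example, `numeric_intrinsic("ctpop", 0b1011, Primitive::Int(I8, false))`
/// yields a one-byte `Scalar` holding 3.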
fn numeric_intrinsic<'tcx>(
    name: &str,
    bytes: u128,
    kind: Primitive,
) -> EvalResult<'tcx, Scalar> {
    macro_rules! integer_intrinsic {
        ($method:ident) => ({
            let (result_bytes, size) = match kind {
                Primitive::Int(I8, true) => ((bytes as i8).$method() as u128, 1),
                Primitive::Int(I8, false) => ((bytes as u8).$method() as u128, 1),
                Primitive::Int(I16, true) => ((bytes as i16).$method() as u128, 2),
                Primitive::Int(I16, false) => ((bytes as u16).$method() as u128, 2),
                Primitive::Int(I32, true) => ((bytes as i32).$method() as u128, 4),
                Primitive::Int(I32, false) => ((bytes as u32).$method() as u128, 4),
                Primitive::Int(I64, true) => ((bytes as i64).$method() as u128, 8),
                Primitive::Int(I64, false) => ((bytes as u64).$method() as u128, 8),
                Primitive::Int(I128, true) => ((bytes as i128).$method() as u128, 16),
                Primitive::Int(I128, false) => (bytes.$method() as u128, 16),
                _ => bug!("invalid `{}` argument: {:?}", name, bytes),
            };

            Scalar::from_uint(result_bytes, Size::from_bytes(size))
        });
    }

    let result_val = match name {
        "bswap" => integer_intrinsic!(swap_bytes),
        "ctlz" => integer_intrinsic!(leading_zeros),
        "ctpop" => integer_intrinsic!(count_ones),
        "cttz" => integer_intrinsic!(trailing_zeros),
        _ => bug!("not a numeric intrinsic: {}", name),
    };

    Ok(result_val)
}