// src/intrinsic.rs
use rustc::mir;
use rustc::ty::layout::{self, LayoutOf, Size, Primitive, Integer::*};
use rustc::ty;

use rustc::mir::interpret::{EvalResult, Scalar, ScalarMaybeUndef};
use rustc_mir::interpret::{
    PlaceExtra, PlaceTy, EvalContext, OpTy, Value
};

use super::{ScalarExt, FalibleScalarExt, OperatorEvalContextExt};

pub trait EvalContextExt<'tcx> {
    fn call_intrinsic(
        &mut self,
        instance: ty::Instance<'tcx>,
        args: &[OpTy<'tcx>],
        dest: PlaceTy<'tcx>,
        target: mir::BasicBlock,
    ) -> EvalResult<'tcx>;
}

impl<'a, 'mir, 'tcx> EvalContextExt<'tcx> for EvalContext<'a, 'mir, 'tcx, super::Evaluator<'tcx>> {
    fn call_intrinsic(
        &mut self,
        instance: ty::Instance<'tcx>,
        args: &[OpTy<'tcx>],
        dest: PlaceTy<'tcx>,
        target: mir::BasicBlock,
    ) -> EvalResult<'tcx> {
        let substs = instance.substs;

        let intrinsic_name = &self.tcx.item_name(instance.def_id()).as_str()[..];
        match intrinsic_name {
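            // The `*_with_overflow` intrinsics return a `(result, overflowed)` pair,
            // which `binop_with_overflow` writes straight into `dest`.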
            "add_with_overflow" => {
                let l = self.read_value(args[0])?;
                let r = self.read_value(args[1])?;
                self.binop_with_overflow(
                    mir::BinOp::Add,
                    l,
                    r,
                    dest,
                )?
            }

            "sub_with_overflow" => {
                let l = self.read_value(args[0])?;
                let r = self.read_value(args[1])?;
                self.binop_with_overflow(
                    mir::BinOp::Sub,
                    l,
                    r,
                    dest,
                )?
            }

            "mul_with_overflow" => {
                let l = self.read_value(args[0])?;
                let r = self.read_value(args[1])?;
                self.binop_with_overflow(
                    mir::BinOp::Mul,
                    l,
                    r,
                    dest,
                )?
            }

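            // Wrapping pointer arithmetic: both the element-size multiplication and
            // the offset itself wrap around rather than being checked for overflow
            // or inbounds-ness (contrast with the "offset" intrinsic below).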
            "arith_offset" => {
                let offset = self.read_scalar(args[1])?.to_isize(&self)?;
                let ptr = self.read_scalar(args[0])?.not_undef()?;

                let pointee_ty = substs.type_at(0);
                let pointee_size = self.layout_of(pointee_ty)?.size.bytes() as i64;
                let offset = offset.overflowing_mul(pointee_size).0;
                let result_ptr = ptr.ptr_wrapping_signed_offset(offset, &self);
                self.write_scalar(result_ptr, dest)?;
            }

            "assume" => {
                let cond = self.read_scalar(args[0])?.to_bool()?;
                if !cond {
                    return err!(AssumptionNotHeld);
                }
            }

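            // Miri executes programs single-threaded, so the various atomic
            // orderings all degenerate to plain loads and stores.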
            "atomic_load" |
            "atomic_load_relaxed" |
            "atomic_load_acq" |
            "volatile_load" => {
                let ptr = self.ref_to_mplace(self.read_value(args[0])?)?;
                let val = self.read_scalar(ptr.into())?; // make sure it fits into a scalar; otherwise it cannot be atomic
                self.write_scalar(val, dest)?;
            }

            "atomic_store" |
            "atomic_store_relaxed" |
            "atomic_store_rel" |
            "volatile_store" => {
                let ptr = self.ref_to_mplace(self.read_value(args[0])?)?;
                let val = self.read_scalar(args[1])?; // make sure it fits into a scalar; otherwise it cannot be atomic
                self.write_scalar(val, ptr.into())?;
            }

            "atomic_fence_acq" => {
                // we are inherently single-threaded and single-core; this is a no-op
            }

            _ if intrinsic_name.starts_with("atomic_xchg") => {
                let ptr = self.ref_to_mplace(self.read_value(args[0])?)?;
                let new = self.read_scalar(args[1])?;
                let old = self.read_scalar(ptr.into())?;
                self.write_scalar(old, dest)?; // old value is returned
                self.write_scalar(new, ptr.into())?;
            }

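            // Compare-and-exchange: the destination receives a pair of the old
            // value and a bool indicating whether the exchange took place.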
            _ if intrinsic_name.starts_with("atomic_cxchg") => {
                let ptr = self.ref_to_mplace(self.read_value(args[0])?)?;
                let expect_old = self.read_value(args[1])?; // read as value for the sake of `binary_op()`
                let new = self.read_scalar(args[2])?;
                let old = self.read_value(ptr.into())?; // read as value for the sake of `binary_op()`
                // binary_op will bail if either of them is not a scalar
                let (eq, _) = self.binary_op(mir::BinOp::Eq, old, expect_old)?;
                let res = Value::ScalarPair(old.to_scalar_or_undef(), eq.into());
                self.write_value(res, dest)?; // old value is returned
                // update ptr depending on comparison
                if eq.to_bool()? {
                    self.write_scalar(new, ptr.into())?;
                }
            }

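            // Atomic read-modify-write operations. The operation is encoded as the
            // second `_`-separated segment of the intrinsic name, e.g.
            // "atomic_xadd_relaxed" -> "xadd" -> `BinOp::Add`.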
            "atomic_or" |
            "atomic_or_acq" |
            "atomic_or_rel" |
            "atomic_or_acqrel" |
            "atomic_or_relaxed" |
            "atomic_xor" |
            "atomic_xor_acq" |
            "atomic_xor_rel" |
            "atomic_xor_acqrel" |
            "atomic_xor_relaxed" |
            "atomic_and" |
            "atomic_and_acq" |
            "atomic_and_rel" |
            "atomic_and_acqrel" |
            "atomic_and_relaxed" |
            "atomic_xadd" |
            "atomic_xadd_acq" |
            "atomic_xadd_rel" |
            "atomic_xadd_acqrel" |
            "atomic_xadd_relaxed" |
            "atomic_xsub" |
            "atomic_xsub_acq" |
            "atomic_xsub_rel" |
            "atomic_xsub_acqrel" |
            "atomic_xsub_relaxed" => {
                let ptr = self.ref_to_mplace(self.read_value(args[0])?)?;
                let rhs = self.read_value(args[1])?;
                let old = self.read_value(ptr.into())?;
                self.write_value(*old, dest)?; // old value is returned
                let op = match intrinsic_name.split('_').nth(1).unwrap() {
                    "or" => mir::BinOp::BitOr,
                    "xor" => mir::BinOp::BitXor,
                    "and" => mir::BinOp::BitAnd,
                    "xadd" => mir::BinOp::Add,
                    "xsub" => mir::BinOp::Sub,
                    _ => bug!(),
                };
                // FIXME: what do atomics do on overflow?
                let (val, _) = self.binary_op(op, old, rhs)?;
                self.write_scalar(val, ptr.into())?;
            }

            "breakpoint" => unimplemented!(), // halt miri

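            // `copy` and `copy_nonoverlapping` differ only in whether the source and
            // destination ranges may overlap, passed as the final flag to `Memory::copy`.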
            "copy" |
            "copy_nonoverlapping" => {
                let elem_ty = substs.type_at(0);
                let elem_layout = self.layout_of(elem_ty)?;
                let elem_size = elem_layout.size.bytes();
                let count = self.read_scalar(args[2])?.to_usize(&self)?;
                if count * elem_size != 0 {
                    // TODO: We do not even validate alignment for the 0-bytes case.  libstd relies on this in vec::IntoIter::next.
                    // Also see the write_bytes intrinsic.
                    let elem_align = elem_layout.align;
                    let src = self.read_scalar(args[0])?.not_undef()?;
                    let dest = self.read_scalar(args[1])?.not_undef()?;
                    self.memory.copy(
                        src,
                        elem_align,
                        dest,
                        elem_align,
                        Size::from_bytes(count * elem_size),
                        intrinsic_name.ends_with("_nonoverlapping"),
                    )?;
                }
            }

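            // Bit-manipulation intrinsics; the actual computation lives in
            // `numeric_intrinsic` at the bottom of this file.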
            "ctpop" | "cttz" | "cttz_nonzero" | "ctlz" | "ctlz_nonzero" | "bswap" => {
                let ty = substs.type_at(0);
                let num = self.read_scalar(args[0])?.to_bytes()?;
                let kind = match self.layout_of(ty)?.abi {
                    ty::layout::Abi::Scalar(ref scalar) => scalar.value,
                    _ => Err(::rustc::mir::interpret::EvalErrorKind::TypeNotPrimitive(ty))?,
                };
                let num = if intrinsic_name.ends_with("_nonzero") {
                    if num == 0 {
                        return err!(Intrinsic(format!("{} called on 0", intrinsic_name)));
                    }
                    numeric_intrinsic(intrinsic_name.trim_right_matches("_nonzero"), num, kind)?
                } else {
                    numeric_intrinsic(intrinsic_name, num, kind)?
                };
                self.write_scalar(num, dest)?;
            }

            "discriminant_value" => {
                let place = self.ref_to_mplace(self.read_value(args[0])?)?;
                let discr_val = self.read_discriminant_value(place.into())?;
                self.write_scalar(Scalar::from_uint(discr_val, dest.layout.size), dest)?;
            }

            "sinf32" | "fabsf32" | "cosf32" | "sqrtf32" | "expf32" | "exp2f32" | "logf32" |
            "log10f32" | "log2f32" | "floorf32" | "ceilf32" | "truncf32" => {
                let f = self.read_scalar(args[0])?.to_bytes()?;
                let f = f32::from_bits(f as u32);
                let f = match intrinsic_name {
                    "sinf32" => f.sin(),
                    "fabsf32" => f.abs(),
                    "cosf32" => f.cos(),
                    "sqrtf32" => f.sqrt(),
                    "expf32" => f.exp(),
                    "exp2f32" => f.exp2(),
                    "logf32" => f.ln(),
                    "log10f32" => f.log10(),
                    "log2f32" => f.log2(),
                    "floorf32" => f.floor(),
                    "ceilf32" => f.ceil(),
                    "truncf32" => f.trunc(),
                    _ => bug!(),
                };
                self.write_scalar(Scalar::from_f32(f), dest)?;
            }

            "sinf64" | "fabsf64" | "cosf64" | "sqrtf64" | "expf64" | "exp2f64" | "logf64" |
            "log10f64" | "log2f64" | "floorf64" | "ceilf64" | "truncf64" => {
                let f = self.read_scalar(args[0])?.to_bytes()?;
                let f = f64::from_bits(f as u64);
                let f = match intrinsic_name {
                    "sinf64" => f.sin(),
                    "fabsf64" => f.abs(),
                    "cosf64" => f.cos(),
                    "sqrtf64" => f.sqrt(),
                    "expf64" => f.exp(),
                    "exp2f64" => f.exp2(),
                    "logf64" => f.ln(),
                    "log10f64" => f.log10(),
                    "log2f64" => f.log2(),
                    "floorf64" => f.floor(),
                    "ceilf64" => f.ceil(),
                    "truncf64" => f.trunc(),
                    _ => bug!(),
                };
                self.write_scalar(Scalar::from_f64(f), dest)?;
            }

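            // The `*_fast` float intrinsics let codegen assume finite inputs and
            // outputs; under the interpreter they are simply evaluated as the
            // plain (non-fast) binary operations.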
            "fadd_fast" | "fsub_fast" | "fmul_fast" | "fdiv_fast" | "frem_fast" => {
                let a = self.read_value(args[0])?;
                let b = self.read_value(args[1])?;
                let op = match intrinsic_name {
                    "fadd_fast" => mir::BinOp::Add,
                    "fsub_fast" => mir::BinOp::Sub,
                    "fmul_fast" => mir::BinOp::Mul,
                    "fdiv_fast" => mir::BinOp::Div,
                    "frem_fast" => mir::BinOp::Rem,
                    _ => bug!(),
                };
                let result = self.binary_op(op, a, b)?;
                self.write_scalar(result.0, dest)?;
            }

            "exact_div" => {
                // Performs an exact division, resulting in undefined behavior where
                // `x % y != 0` or `y == 0` or `x == T::min_value() && y == -1`
                let a = self.read_value(args[0])?;
                let b = self.read_value(args[1])?;
                // check x % y != 0
                if !self.binary_op(mir::BinOp::Rem, a, b)?.0.is_null() {
                    return err!(ValidationFailure(format!("exact_div: {:?} cannot be divided by {:?}", a, b)));
                }
                let result = self.binary_op(mir::BinOp::Div, a, b)?;
                self.write_scalar(result.0, dest)?;
            },

            "likely" | "unlikely" | "forget" => {}

            "init" => {
                // Check fast path: we don't want to force an allocation in case the destination is a simple value,
                // but we also do not want to create a new allocation with 0s and then copy that over.
                match dest.layout.abi {
                    layout::Abi::Scalar(ref s) => {
                        let x = Scalar::null(s.value.size(&self));
                        self.write_value(Value::Scalar(x.into()), dest)?;
                    }
                    layout::Abi::ScalarPair(ref s1, ref s2) => {
                        let x = Scalar::null(s1.value.size(&self));
                        let y = Scalar::null(s2.value.size(&self));
                        self.write_value(Value::ScalarPair(x.into(), y.into()), dest)?;
                    }
                    _ => {
                        // Do it in memory
                        let mplace = self.force_allocation(dest)?;
                        assert_eq!(mplace.extra, PlaceExtra::None);
                        self.memory.write_repeat(mplace.ptr, 0, dest.layout.size)?;
                    }
                }
            }

            "min_align_of" => {
                let elem_ty = substs.type_at(0);
                let elem_align = self.layout_of(elem_ty)?.align.abi();
                let ptr_size = self.memory.pointer_size();
                let align_val = Scalar::from_uint(elem_align as u128, ptr_size);
                self.write_scalar(align_val, dest)?;
            }

            "pref_align_of" => {
                let ty = substs.type_at(0);
                let layout = self.layout_of(ty)?;
                let align = layout.align.pref();
                let ptr_size = self.memory.pointer_size();
                let align_val = Scalar::from_uint(align as u128, ptr_size);
                self.write_scalar(align_val, dest)?;
            }

            "move_val_init" => {
                let ptr = self.ref_to_mplace(self.read_value(args[0])?)?;
                self.copy_op(args[1], ptr.into())?;
            }

            "needs_drop" => {
                let ty = substs.type_at(0);
                let env = ty::ParamEnv::reveal_all();
                let needs_drop = ty.needs_drop(self.tcx.tcx, env);
                self.write_scalar(
                    Scalar::from_bool(needs_drop),
                    dest,
                )?;
            }

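            // Unlike "arith_offset" above, "offset" must stay inbounds of one
            // allocation; `pointer_offset_inbounds` is responsible for checking that.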
            "offset" => {
                let offset = self.read_scalar(args[1])?.to_isize(&self)?;
                let ptr = self.read_scalar(args[0])?.not_undef()?;
                let result_ptr = self.pointer_offset_inbounds(ptr, substs.type_at(0), offset)?;
                self.write_scalar(result_ptr, dest)?;
            }

            "overflowing_sub" => {
                let l = self.read_value(args[0])?;
                let r = self.read_value(args[1])?;
                self.binop_ignore_overflow(
                    mir::BinOp::Sub,
                    l,
                    r,
                    dest,
                )?;
            }

            "overflowing_mul" => {
                let l = self.read_value(args[0])?;
                let r = self.read_value(args[1])?;
                self.binop_ignore_overflow(
                    mir::BinOp::Mul,
                    l,
                    r,
                    dest,
                )?;
            }

            "overflowing_add" => {
                let l = self.read_value(args[0])?;
                let r = self.read_value(args[1])?;
                self.binop_ignore_overflow(
                    mir::BinOp::Add,
                    l,
                    r,
                    dest,
                )?;
            }

            "powf32" => {
                let f = self.read_scalar(args[0])?.to_bits(Size::from_bits(32))?;
                let f = f32::from_bits(f as u32);
                let f2 = self.read_scalar(args[1])?.to_bits(Size::from_bits(32))?;
                let f2 = f32::from_bits(f2 as u32);
                self.write_scalar(
                    Scalar::from_f32(f.powf(f2)),
                    dest,
                )?;
            }

            "powf64" => {
                let f = self.read_scalar(args[0])?.to_bits(Size::from_bits(64))?;
                let f = f64::from_bits(f as u64);
                let f2 = self.read_scalar(args[1])?.to_bits(Size::from_bits(64))?;
                let f2 = f64::from_bits(f2 as u64);
                self.write_scalar(
                    Scalar::from_f64(f.powf(f2)),
                    dest,
                )?;
            }

            "fmaf32" => {
                let a = self.read_scalar(args[0])?.to_bits(Size::from_bits(32))?;
                let a = f32::from_bits(a as u32);
                let b = self.read_scalar(args[1])?.to_bits(Size::from_bits(32))?;
                let b = f32::from_bits(b as u32);
                let c = self.read_scalar(args[2])?.to_bits(Size::from_bits(32))?;
                let c = f32::from_bits(c as u32);
                self.write_scalar(
                    // `mul_add` is a fused multiply-add with a single rounding step,
                    // as the `fma*` intrinsics require; `a * b + c` would round twice
                    Scalar::from_f32(a.mul_add(b, c)),
                    dest,
                )?;
            }

            "fmaf64" => {
                let a = self.read_scalar(args[0])?.to_bits(Size::from_bits(64))?;
                let a = f64::from_bits(a as u64);
                let b = self.read_scalar(args[1])?.to_bits(Size::from_bits(64))?;
                let b = f64::from_bits(b as u64);
                let c = self.read_scalar(args[2])?.to_bits(Size::from_bits(64))?;
                let c = f64::from_bits(c as u64);
                self.write_scalar(
                    Scalar::from_f64(a.mul_add(b, c)),
                    dest,
                )?;
            }

            "powif32" => {
                let f = self.read_scalar(args[0])?.to_bits(Size::from_bits(32))?;
                let f = f32::from_bits(f as u32);
                let i = self.read_scalar(args[1])?.to_i32()?;
                self.write_scalar(
                    Scalar::from_f32(f.powi(i)),
                    dest,
                )?;
            }

            "powif64" => {
                let f = self.read_scalar(args[0])?.to_bits(Size::from_bits(64))?;
                let f = f64::from_bits(f as u64);
                let i = self.read_scalar(args[1])?.to_i32()?;
                self.write_scalar(
                    Scalar::from_f64(f.powi(i)),
                    dest,
                )?;
            }

            "size_of" => {
                let ty = substs.type_at(0);
                let size = self.layout_of(ty)?.size.bytes();
                let ptr_size = self.memory.pointer_size();
                self.write_scalar(Scalar::from_uint(size, ptr_size), dest)?;
            }

            "size_of_val" => {
                let mplace = self.ref_to_mplace(self.read_value(args[0])?)?;
                let (size, _) = self.size_and_align_of_mplace(mplace)?;
                let ptr_size = self.memory.pointer_size();
                self.write_scalar(
                    Scalar::from_uint(size.bytes() as u128, ptr_size),
                    dest,
                )?;
            }

            "min_align_of_val" |
            "align_of_val" => {
                let mplace = self.ref_to_mplace(self.read_value(args[0])?)?;
                let (_, align) = self.size_and_align_of_mplace(mplace)?;
                let ptr_size = self.memory.pointer_size();
                self.write_scalar(
                    Scalar::from_uint(align.abi(), ptr_size),
                    dest,
                )?;
            }

            "type_name" => {
                let ty = substs.type_at(0);
                let ty_name = ty.to_string();
                let value = self.str_to_value(&ty_name)?;
                self.write_value(value, dest)?;
            }

            "type_id" => {
                let ty = substs.type_at(0);
                let n = self.tcx.type_id_hash(ty);
                self.write_scalar(Scalar::Bits { bits: n as u128, size: 8 }, dest)?;
            }

            "transmute" => {
                // Go through an allocation, to make sure the completely different layouts
                // do not pose a problem.  (When the user transmutes through a union,
                // there will not be a layout mismatch.)
                let dest = self.force_allocation(dest)?;
                self.copy_op(args[0], dest.into())?;
            }

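            // The `unchecked_*` intrinsics are undefined behavior on their edge cases
            // (shift amount not below the bit width, division or remainder by zero),
            // so those cases are reported as errors instead of being evaluated.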
            "unchecked_shl" => {
                let bits = dest.layout.size.bytes() as u128 * 8;
                let l = self.read_value(args[0])?;
                let r = self.read_value(args[1])?;
                let rval = r.to_scalar()?.to_bytes()?;
                if rval >= bits {
                    return err!(Intrinsic(
                        format!("Overflowing shift by {} in unchecked_shl", rval),
                    ));
                }
                self.binop_ignore_overflow(
                    mir::BinOp::Shl,
                    l,
                    r,
                    dest,
                )?;
            }

            "unchecked_shr" => {
                let bits = dest.layout.size.bytes() as u128 * 8;
                let l = self.read_value(args[0])?;
                let r = self.read_value(args[1])?;
                let rval = r.to_scalar()?.to_bytes()?;
                if rval >= bits {
                    return err!(Intrinsic(
                        format!("Overflowing shift by {} in unchecked_shr", rval),
                    ));
                }
                self.binop_ignore_overflow(
                    mir::BinOp::Shr,
                    l,
                    r,
                    dest,
                )?;
            }

            "unchecked_div" => {
                let l = self.read_value(args[0])?;
                let r = self.read_value(args[1])?;
                let rval = r.to_scalar()?.to_bytes()?;
                if rval == 0 {
                    return err!(Intrinsic("Division by 0 in unchecked_div".to_string()));
                }
                self.binop_ignore_overflow(
                    mir::BinOp::Div,
                    l,
                    r,
                    dest,
                )?;
            }

            "unchecked_rem" => {
                let l = self.read_value(args[0])?;
                let r = self.read_value(args[1])?;
                let rval = r.to_scalar()?.to_bytes()?;
                if rval == 0 {
                    return err!(Intrinsic("Division by 0 in unchecked_rem".to_string()));
                }
                self.binop_ignore_overflow(
                    mir::BinOp::Rem,
                    l,
                    r,
                    dest,
                )?;
            }

            "uninit" => {
                // Check fast path: we don't want to force an allocation in case the destination is a simple value,
                // but we also do not want to create a new allocation just to mark the bytes as undefined.
                match dest.layout.abi {
                    layout::Abi::Scalar(..) => {
                        let x = ScalarMaybeUndef::Undef;
                        self.write_value(Value::Scalar(x), dest)?;
                    }
                    layout::Abi::ScalarPair(..) => {
                        let x = ScalarMaybeUndef::Undef;
                        self.write_value(Value::ScalarPair(x, x), dest)?;
                    }
                    _ => {
                        // Do it in memory
                        let mplace = self.force_allocation(dest)?;
                        assert_eq!(mplace.extra, PlaceExtra::None);
                        self.memory.mark_definedness(mplace.ptr, dest.layout.size, false)?;
                    }
                }
            }

            "write_bytes" => {
                let ty = substs.type_at(0);
                let ty_layout = self.layout_of(ty)?;
                let val_byte = self.read_scalar(args[1])?.to_u8()?;
                let ptr = self.read_scalar(args[0])?.not_undef()?;
                let count = self.read_scalar(args[2])?.to_usize(&self)?;
                if count > 0 {
                    // HashMap relies on write_bytes on a NULL ptr with count == 0 to work
                    // TODO: Should we, at least, validate the alignment? (Also see the copy intrinsic)
                    self.memory.check_align(ptr, ty_layout.align)?;
                    self.memory.write_repeat(ptr, val_byte, ty_layout.size * count)?;
                }
            }

            name => return err!(Unimplemented(format!("unimplemented intrinsic: {}", name))),
        }

        self.goto_block(target);

        // Since we pushed no stack frame, the main loop will act
        // as if the call just completed and it's returning to the
        // current frame.
        Ok(())
    }
}

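// Implements the bit-level integer intrinsics (`bswap`, `ctlz`, `ctpop`, `cttz`)
// on a raw `u128`, reinterpreted at the primitive integer type given by `kind`.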
fn numeric_intrinsic<'tcx>(
    name: &str,
    bytes: u128,
    kind: Primitive,
) -> EvalResult<'tcx, Scalar> {
    macro_rules! integer_intrinsic {
        ($method:ident) => ({
            let (result_bytes, size) = match kind {
                Primitive::Int(I8, true) => ((bytes as i8).$method() as u128, 1),
                Primitive::Int(I8, false) => ((bytes as u8).$method() as u128, 1),
                Primitive::Int(I16, true) => ((bytes as i16).$method() as u128, 2),
                Primitive::Int(I16, false) => ((bytes as u16).$method() as u128, 2),
                Primitive::Int(I32, true) => ((bytes as i32).$method() as u128, 4),
                Primitive::Int(I32, false) => ((bytes as u32).$method() as u128, 4),
                Primitive::Int(I64, true) => ((bytes as i64).$method() as u128, 8),
                Primitive::Int(I64, false) => ((bytes as u64).$method() as u128, 8),
                Primitive::Int(I128, true) => ((bytes as i128).$method() as u128, 16),
                Primitive::Int(I128, false) => (bytes.$method() as u128, 16),
                _ => bug!("invalid `{}` argument: {:?}", name, bytes),
            };

            Scalar::from_uint(result_bytes, Size::from_bytes(size))
        });
    }

    let result_val = match name {
        "bswap" => integer_intrinsic!(swap_bytes),
        "ctlz" => integer_intrinsic!(leading_zeros),
        "ctpop" => integer_intrinsic!(count_ones),
        "cttz" => integer_intrinsic!(trailing_zeros),
        _ => bug!("not a numeric intrinsic: {}", name),
    };

    Ok(result_val)
}