//! Codegen of intrinsics. This includes `extern "rust-intrinsic"`, `extern "platform-intrinsic"`
//! and LLVM intrinsics that have symbol names starting with `llvm.`.

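// Expands the pattern shorthand used by `intrinsic_match!` into a real match
// pattern: `_` stays a wildcard, a bare identifier becomes `sym::$name`, a
// `kw.`-prefixed identifier becomes `kw::$name` and a literal is used as-is.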
macro_rules! intrinsic_pat {
    (_) => {
        _
    };
    ($name:ident) => {
        sym::$name
    };
    (kw.$name:ident) => {
        kw::$name
    };
    ($name:literal) => {
        $name
    };
}

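// Prepares one intrinsic argument according to its mode letter: `o` leaves the
// operand untouched, `c` codegens it to a `CValue` and `v` additionally loads
// it as a scalar `Value`.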
macro_rules! intrinsic_arg {
    (o $fx:expr, $arg:ident) => {};
    (c $fx:expr, $arg:ident) => {
        let $arg = codegen_operand($fx, $arg);
    };
    (v $fx:expr, $arg:ident) => {
        let $arg = codegen_operand($fx, $arg).load_scalar($fx);
    };
}

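// A small DSL for the big intrinsic table in `codegen_regular_intrinsic_call`.
// Each arm names one or more intrinsics, an optional guard, and the arguments
// with their mode letters; the macro checks the argument count and binds the
// arguments before running the arm body. For example, the `ctpop` arm further
// down reads as: codegen the single argument to a `CValue` named `arg`, then
// run the block:
//
//     ctpop, (c arg) {
//         ...
//     };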
macro_rules! intrinsic_match {
    ($fx:expr, $intrinsic:expr, $args:expr,
    _ => $unknown:block;
    $(
        $($($name:tt).*)|+ $(if $cond:expr)?, ($($a:ident $arg:ident),*) $content:block;
    )*) => {
        match $intrinsic {
            $(
                $(intrinsic_pat!($($name).*))|* $(if $cond)? => {
                    if let [$($arg),*] = $args {
                        $(intrinsic_arg!($a $fx, $arg);)*
                        $content
                    } else {
                        bug!("wrong number of args for intrinsic {:?}", $intrinsic);
                    }
                }
            )*
            _ => $unknown,
        }
    }
}

mod cpuid;
mod llvm;
mod simd;

pub(crate) use cpuid::codegen_cpuid_call;
pub(crate) use llvm::codegen_llvm_intrinsic_call;

use rustc_middle::ty::print::with_no_trimmed_paths;
use rustc_middle::ty::subst::SubstsRef;
use rustc_span::symbol::{kw, sym, Symbol};
use rustc_target::abi::InitKind;

use crate::prelude::*;
use cranelift_codegen::ir::AtomicRmwOp;

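/// Reports a type error for an atomic intrinsic instantiated with a type that
/// is not a basic integer or raw pointer, then emits a trap so the Cranelift
/// verifier does not complain about a missing terminator.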
fn report_atomic_type_validation_error<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    intrinsic: Symbol,
    span: Span,
    ty: Ty<'tcx>,
) {
    fx.tcx.sess.span_err(
        span,
        &format!(
            "`{}` intrinsic: expected basic integer or raw pointer type, found `{:?}`",
            intrinsic, ty
        ),
    );
    // Prevent verifier error
    crate::trap::trap_unreachable(fx, "compilation should not have succeeded");
}

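/// Returns the Cranelift vector type to use for a SIMD layout, or `None` if
/// the vector cannot be represented as a single Cranelift value and has to be
/// handled lane by lane.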
pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> Option<Type> {
    let (element, count) = match layout.abi {
        Abi::Vector { element, count } => (element, count),
        _ => unreachable!(),
    };

    match scalar_to_clif_type(tcx, element).by(u16::try_from(count).unwrap()) {
        // Cranelift currently only implements icmp for 128bit vectors.
        Some(vector_ty) if vector_ty.bits() == 128 => Some(vector_ty),
        _ => None,
    }
}

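/// Applies `f` to every lane of `val` and writes the results to the
/// corresponding lanes of `ret`. Input and output vectors must have the same
/// lane count, though their lane types may differ.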
fn simd_for_each_lane<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    val: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Ty<'tcx>, Value) -> Value,
) {
    let layout = val.layout();

    let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
    let lane_layout = fx.layout_of(lane_ty);
    let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
    let ret_lane_layout = fx.layout_of(ret_lane_ty);
    assert_eq!(lane_count, ret_lane_count);

    for lane_idx in 0..lane_count {
        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);

        let res_lane = f(fx, lane_layout.ty, ret_lane_layout.ty, lane);
        let res_lane = CValue::by_val(res_lane, ret_lane_layout);

        ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
    }
}

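/// Like `simd_for_each_lane`, but applies `f` pairwise to the lanes of the two
/// input vectors `x` and `y`, which must have identical layouts.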
fn simd_pair_for_each_lane<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    x: CValue<'tcx>,
    y: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Ty<'tcx>, Value, Value) -> Value,
) {
    assert_eq!(x.layout(), y.layout());
    let layout = x.layout();

    let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
    let lane_layout = fx.layout_of(lane_ty);
    let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
    let ret_lane_layout = fx.layout_of(ret_lane_ty);
    assert_eq!(lane_count, ret_lane_count);

    for lane_idx in 0..lane_count {
        let x_lane = x.value_lane(fx, lane_idx).load_scalar(fx);
        let y_lane = y.value_lane(fx, lane_idx).load_scalar(fx);

        let res_lane = f(fx, lane_layout.ty, ret_lane_layout.ty, x_lane, y_lane);
        let res_lane = CValue::by_val(res_lane, ret_lane_layout);

        ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
    }
}

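/// Performs a horizontal reduction over all lanes of `val` using `f`. If an
/// accumulator `acc` is given the fold starts from it; otherwise lane 0 is
/// used as the initial value.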
fn simd_reduce<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    val: CValue<'tcx>,
    acc: Option<Value>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Value, Value) -> Value,
) {
    let (lane_count, lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
    let lane_layout = fx.layout_of(lane_ty);
    assert_eq!(lane_layout, ret.layout());

    let (mut res_val, start_lane) =
        if let Some(acc) = acc { (acc, 0) } else { (val.value_lane(fx, 0).load_scalar(fx), 1) };
    for lane_idx in start_lane..lane_count {
        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
        res_val = f(fx, lane_layout.ty, res_val, lane);
    }
    let res = CValue::by_val(res_val, lane_layout);
    ret.write_cvalue(fx, res);
}

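/// Boolean horizontal reduction. Each lane is first masked to its lowest bit,
/// then folded with `f`; the result is narrowed to `i8` to match the `bool`
/// return type.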
// FIXME move all uses to `simd_reduce`
fn simd_reduce_bool<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    val: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Value, Value) -> Value,
) {
    let (lane_count, _lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
    assert!(ret.layout().ty.is_bool());

    let res_val = val.value_lane(fx, 0).load_scalar(fx);
    let mut res_val = fx.bcx.ins().band_imm(res_val, 1); // mask to boolean
    for lane_idx in 1..lane_count {
        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
        let lane = fx.bcx.ins().band_imm(lane, 1); // mask to boolean
        res_val = f(fx, res_val, lane);
    }
    let res_val = if fx.bcx.func.dfg.value_type(res_val) != types::I8 {
        fx.bcx.ins().ireduce(types::I8, res_val)
    } else {
        res_val
    };
    let res = CValue::by_val(res_val, ret.layout());
    ret.write_cvalue(fx, res);
}

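/// Maps a boolean to all-zeros or all-ones of the given type by negating the
/// zero-extended bool (`true` for `i32` becomes `-1i32`). For float types the
/// mask is built in the integer domain and bitcast back.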
fn bool_to_zero_or_max_uint<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    ty: Ty<'tcx>,
    val: Value,
) -> Value {
    let ty = fx.clif_type(ty).unwrap();

    let int_ty = match ty {
        types::F32 => types::I32,
        types::F64 => types::I64,
        ty => ty,
    };

    let val = fx.bcx.ins().bint(int_ty, val);
    let mut res = fx.bcx.ins().ineg(val);

    if ty.is_float() {
        res = fx.bcx.ins().bitcast(ty, res);
    }

    res
}

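/// Main entry point for intrinsic codegen. SIMD intrinsics are forwarded to
/// `simd::codegen_simd_intrinsic_call`, float intrinsics to
/// `codegen_float_intrinsic_call`, and everything else to
/// `codegen_regular_intrinsic_call`. Intrinsics without a return target
/// (`abort` and `transmute` to an uninhabited type) are handled inline and
/// never jump to a return block.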
pub(crate) fn codegen_intrinsic_call<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    instance: Instance<'tcx>,
    args: &[mir::Operand<'tcx>],
    destination: CPlace<'tcx>,
    target: Option<BasicBlock>,
    source_info: mir::SourceInfo,
) {
    let intrinsic = fx.tcx.item_name(instance.def_id());
    let substs = instance.substs;

    let target = if let Some(target) = target {
        target
    } else {
        // Insert non-returning intrinsics here
        match intrinsic {
            sym::abort => {
                fx.bcx.ins().trap(TrapCode::User(0));
            }
            sym::transmute => {
                crate::base::codegen_panic(fx, "Transmuting to uninhabited type.", source_info);
            }
            _ => unimplemented!("unsupported intrinsic {}", intrinsic),
        }
        return;
    };

    if intrinsic.as_str().starts_with("simd_") {
        self::simd::codegen_simd_intrinsic_call(
            fx,
            intrinsic,
            substs,
            args,
            destination,
            source_info.span,
        );
        let ret_block = fx.get_block(target);
        fx.bcx.ins().jump(ret_block, &[]);
    } else if codegen_float_intrinsic_call(fx, intrinsic, args, destination) {
        let ret_block = fx.get_block(target);
        fx.bcx.ins().jump(ret_block, &[]);
    } else {
        codegen_regular_intrinsic_call(
            fx,
            instance,
            intrinsic,
            substs,
            args,
            destination,
            Some(target),
            source_info,
        );
    }
}

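/// Lowers float intrinsics to calls to the corresponding libm functions
/// (`__powisf2`/`__powidf2` come from compiler-builtins instead). Returns
/// `false` if `intrinsic` is not in the table, so the caller can fall back to
/// `codegen_regular_intrinsic_call`.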
fn codegen_float_intrinsic_call<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    intrinsic: Symbol,
    args: &[mir::Operand<'tcx>],
    ret: CPlace<'tcx>,
) -> bool {
    let (name, arg_count, ty) = match intrinsic {
        sym::expf32 => ("expf", 1, fx.tcx.types.f32),
        sym::expf64 => ("exp", 1, fx.tcx.types.f64),
        sym::exp2f32 => ("exp2f", 1, fx.tcx.types.f32),
        sym::exp2f64 => ("exp2", 1, fx.tcx.types.f64),
        sym::sqrtf32 => ("sqrtf", 1, fx.tcx.types.f32),
        sym::sqrtf64 => ("sqrt", 1, fx.tcx.types.f64),
        sym::powif32 => ("__powisf2", 2, fx.tcx.types.f32), // compiler-builtins
        sym::powif64 => ("__powidf2", 2, fx.tcx.types.f64), // compiler-builtins
        sym::powf32 => ("powf", 2, fx.tcx.types.f32),
        sym::powf64 => ("pow", 2, fx.tcx.types.f64),
        sym::logf32 => ("logf", 1, fx.tcx.types.f32),
        sym::logf64 => ("log", 1, fx.tcx.types.f64),
        sym::log2f32 => ("log2f", 1, fx.tcx.types.f32),
        sym::log2f64 => ("log2", 1, fx.tcx.types.f64),
        sym::log10f32 => ("log10f", 1, fx.tcx.types.f32),
        sym::log10f64 => ("log10", 1, fx.tcx.types.f64),
        sym::fabsf32 => ("fabsf", 1, fx.tcx.types.f32),
        sym::fabsf64 => ("fabs", 1, fx.tcx.types.f64),
        sym::fmaf32 => ("fmaf", 3, fx.tcx.types.f32),
        sym::fmaf64 => ("fma", 3, fx.tcx.types.f64),
        sym::copysignf32 => ("copysignf", 2, fx.tcx.types.f32),
        sym::copysignf64 => ("copysign", 2, fx.tcx.types.f64),
        sym::floorf32 => ("floorf", 1, fx.tcx.types.f32),
        sym::floorf64 => ("floor", 1, fx.tcx.types.f64),
        sym::ceilf32 => ("ceilf", 1, fx.tcx.types.f32),
        sym::ceilf64 => ("ceil", 1, fx.tcx.types.f64),
        sym::truncf32 => ("truncf", 1, fx.tcx.types.f32),
        sym::truncf64 => ("trunc", 1, fx.tcx.types.f64),
        sym::roundf32 => ("roundf", 1, fx.tcx.types.f32),
        sym::roundf64 => ("round", 1, fx.tcx.types.f64),
        sym::sinf32 => ("sinf", 1, fx.tcx.types.f32),
        sym::sinf64 => ("sin", 1, fx.tcx.types.f64),
        sym::cosf32 => ("cosf", 1, fx.tcx.types.f32),
        sym::cosf64 => ("cos", 1, fx.tcx.types.f64),
        _ => return false,
    };

    if args.len() != arg_count {
        bug!("wrong number of args for intrinsic {:?}", intrinsic);
    }

    let (a, b, c);
    let args = match args {
        [x] => {
            a = [codegen_operand(fx, x)];
            &a as &[_]
        }
        [x, y] => {
            b = [codegen_operand(fx, x), codegen_operand(fx, y)];
            &b
        }
        [x, y, z] => {
            c = [codegen_operand(fx, x), codegen_operand(fx, y), codegen_operand(fx, z)];
            &c
        }
        _ => unreachable!(),
    };

    let res = fx.easy_call(name, &args, ty);
    ret.write_cvalue(fx, res);

    true
}

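/// Codegens all intrinsics not handled by the SIMD or float fast paths via
/// the `intrinsic_match!` table. On normal completion it jumps to
/// `destination`; arms that diverge (panics and traps) `return` early instead.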
fn codegen_regular_intrinsic_call<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    instance: Instance<'tcx>,
    intrinsic: Symbol,
    substs: SubstsRef<'tcx>,
    args: &[mir::Operand<'tcx>],
    ret: CPlace<'tcx>,
    destination: Option<BasicBlock>,
    source_info: mir::SourceInfo,
) {
    let usize_layout = fx.layout_of(fx.tcx.types.usize);

    intrinsic_match! {
        fx, intrinsic, args,
        _ => {
            fx.tcx.sess.span_fatal(source_info.span, &format!("unsupported intrinsic {}", intrinsic));
        };

        assume, (c _a) {};
        likely | unlikely, (c a) {
            ret.write_cvalue(fx, a);
        };
        breakpoint, () {
            fx.bcx.ins().debugtrap();
        };
        copy | copy_nonoverlapping, (v src, v dst, v count) {
            let elem_ty = substs.type_at(0);
            let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
            assert_eq!(args.len(), 3);
            let byte_amount = if elem_size != 1 {
                fx.bcx.ins().imul_imm(count, elem_size as i64)
            } else {
                count
            };

            if intrinsic == sym::copy_nonoverlapping {
                // FIXME emit_small_memcpy
                fx.bcx.call_memcpy(fx.target_config, dst, src, byte_amount);
            } else {
                // FIXME emit_small_memmove
                fx.bcx.call_memmove(fx.target_config, dst, src, byte_amount);
            }
        };
        // NOTE: the volatile variants have src and dst swapped
        volatile_copy_memory | volatile_copy_nonoverlapping_memory, (v dst, v src, v count) {
            let elem_ty = substs.type_at(0);
            let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
            assert_eq!(args.len(), 3);
            let byte_amount = if elem_size != 1 {
                fx.bcx.ins().imul_imm(count, elem_size as i64)
            } else {
                count
            };

            // FIXME make the copy actually volatile when using emit_small_mem{cpy,move}
            if intrinsic == sym::volatile_copy_nonoverlapping_memory {
                // FIXME emit_small_memcpy
                fx.bcx.call_memcpy(fx.target_config, dst, src, byte_amount);
            } else {
                // FIXME emit_small_memmove
                fx.bcx.call_memmove(fx.target_config, dst, src, byte_amount);
            }
        };
        size_of_val, (c ptr) {
            let layout = fx.layout_of(substs.type_at(0));
            let size = if layout.is_unsized() {
                let (_ptr, info) = ptr.load_scalar_pair(fx);
                let (size, _align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
                size
            } else {
                fx
                    .bcx
                    .ins()
                    .iconst(fx.pointer_type, layout.size.bytes() as i64)
            };
            ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
        };
        min_align_of_val, (c ptr) {
            let layout = fx.layout_of(substs.type_at(0));
            let align = if layout.is_unsized() {
                let (_ptr, info) = ptr.load_scalar_pair(fx);
                let (_size, align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
                align
            } else {
                fx
                    .bcx
                    .ins()
                    .iconst(fx.pointer_type, layout.align.abi.bytes() as i64)
            };
            ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
        };

        unchecked_add | unchecked_sub | unchecked_mul | unchecked_div | exact_div | unchecked_rem
        | unchecked_shl | unchecked_shr, (c x, c y) {
            // FIXME trap on overflow
            let bin_op = match intrinsic {
                sym::unchecked_add => BinOp::Add,
                sym::unchecked_sub => BinOp::Sub,
                sym::unchecked_mul => BinOp::Mul,
                sym::unchecked_div | sym::exact_div => BinOp::Div,
                sym::unchecked_rem => BinOp::Rem,
                sym::unchecked_shl => BinOp::Shl,
                sym::unchecked_shr => BinOp::Shr,
                _ => unreachable!(),
            };
            let res = crate::num::codegen_int_binop(fx, bin_op, x, y);
            ret.write_cvalue(fx, res);
        };
        add_with_overflow | sub_with_overflow | mul_with_overflow, (c x, c y) {
            assert_eq!(x.layout().ty, y.layout().ty);
            let bin_op = match intrinsic {
                sym::add_with_overflow => BinOp::Add,
                sym::sub_with_overflow => BinOp::Sub,
                sym::mul_with_overflow => BinOp::Mul,
                _ => unreachable!(),
            };

            let res = crate::num::codegen_checked_int_binop(
                fx,
                bin_op,
                x,
                y,
            );
            ret.write_cvalue(fx, res);
        };
        saturating_add | saturating_sub, (c lhs, c rhs) {
            assert_eq!(lhs.layout().ty, rhs.layout().ty);
            let bin_op = match intrinsic {
                sym::saturating_add => BinOp::Add,
                sym::saturating_sub => BinOp::Sub,
                _ => unreachable!(),
            };

            let signed = type_sign(lhs.layout().ty);

            let checked_res = crate::num::codegen_checked_int_binop(
                fx,
                bin_op,
                lhs,
                rhs,
            );

            let (val, has_overflow) = checked_res.load_scalar_pair(fx);
            let clif_ty = fx.clif_type(lhs.layout().ty).unwrap();

            let (min, max) = type_min_max_value(&mut fx.bcx, clif_ty, signed);

            let val = match (intrinsic, signed) {
                (sym::saturating_add, false) => fx.bcx.ins().select(has_overflow, max, val),
                (sym::saturating_sub, false) => fx.bcx.ins().select(has_overflow, min, val),
                (sym::saturating_add, true) => {
                    let rhs = rhs.load_scalar(fx);
                    let rhs_ge_zero = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
                    let sat_val = fx.bcx.ins().select(rhs_ge_zero, max, min);
                    fx.bcx.ins().select(has_overflow, sat_val, val)
                }
                (sym::saturating_sub, true) => {
                    let rhs = rhs.load_scalar(fx);
                    let rhs_ge_zero = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
                    let sat_val = fx.bcx.ins().select(rhs_ge_zero, min, max);
                    fx.bcx.ins().select(has_overflow, sat_val, val)
                }
                _ => unreachable!(),
            };

            let res = CValue::by_val(val, lhs.layout());

            ret.write_cvalue(fx, res);
        };
        rotate_left, (c x, v y) {
            let layout = x.layout();
            let x = x.load_scalar(fx);
            let res = fx.bcx.ins().rotl(x, y);
            ret.write_cvalue(fx, CValue::by_val(res, layout));
        };
        rotate_right, (c x, v y) {
            let layout = x.layout();
            let x = x.load_scalar(fx);
            let res = fx.bcx.ins().rotr(x, y);
            ret.write_cvalue(fx, CValue::by_val(res, layout));
        };

        // The only difference between offset and arith_offset is their UB rules.
        // Because Cranelift has no concept of UB, both are codegen'ed the same way.
        offset | arith_offset, (c base, v offset) {
            let pointee_ty = base.layout().ty.builtin_deref(true).unwrap().ty;
            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
            let ptr_diff = if pointee_size != 1 {
                fx.bcx.ins().imul_imm(offset, pointee_size as i64)
            } else {
                offset
            };
            let base_val = base.load_scalar(fx);
            let res = fx.bcx.ins().iadd(base_val, ptr_diff);
            ret.write_cvalue(fx, CValue::by_val(res, base.layout()));
        };

        transmute, (c from) {
            ret.write_cvalue_transmute(fx, from);
        };
        write_bytes | volatile_set_memory, (c dst, v val, v count) {
            let pointee_ty = dst.layout().ty.builtin_deref(true).unwrap().ty;
            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
            let count = if pointee_size != 1 {
                fx.bcx.ins().imul_imm(count, pointee_size as i64)
            } else {
                count
            };
            let dst_ptr = dst.load_scalar(fx);
            // FIXME make the memset actually volatile when switching to emit_small_memset
            // FIXME use emit_small_memset
            fx.bcx.call_memset(fx.target_config, dst_ptr, val, count);
        };
        ctlz | ctlz_nonzero, (c arg) {
            let val = arg.load_scalar(fx);
            // FIXME trap on `ctlz_nonzero` with zero arg.
            let res = fx.bcx.ins().clz(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        };
        cttz | cttz_nonzero, (c arg) {
            let val = arg.load_scalar(fx);
            // FIXME trap on `cttz_nonzero` with zero arg.
            let res = fx.bcx.ins().ctz(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        };
        ctpop, (c arg) {
            let val = arg.load_scalar(fx);
            let res = fx.bcx.ins().popcnt(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        };
        bitreverse, (c arg) {
            let val = arg.load_scalar(fx);
            let res = fx.bcx.ins().bitrev(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        };
        bswap, (c arg) {
            // FIXME(CraneStation/cranelift#794) add bswap instruction to cranelift
            fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
                match bcx.func.dfg.value_type(v) {
                    types::I8 => v,

                    // https://code.woboq.org/gcc/include/bits/byteswap.h.html
                    types::I16 => {
                        let tmp1 = bcx.ins().ishl_imm(v, 8);
                        let n1 = bcx.ins().band_imm(tmp1, 0xFF00);

                        let tmp2 = bcx.ins().ushr_imm(v, 8);
                        let n2 = bcx.ins().band_imm(tmp2, 0x00FF);

                        bcx.ins().bor(n1, n2)
                    }
                    types::I32 => {
                        let tmp1 = bcx.ins().ishl_imm(v, 24);
                        let n1 = bcx.ins().band_imm(tmp1, 0xFF00_0000);

                        let tmp2 = bcx.ins().ishl_imm(v, 8);
                        let n2 = bcx.ins().band_imm(tmp2, 0x00FF_0000);

                        let tmp3 = bcx.ins().ushr_imm(v, 8);
                        let n3 = bcx.ins().band_imm(tmp3, 0x0000_FF00);

                        let tmp4 = bcx.ins().ushr_imm(v, 24);
                        let n4 = bcx.ins().band_imm(tmp4, 0x0000_00FF);

                        let or_tmp1 = bcx.ins().bor(n1, n2);
                        let or_tmp2 = bcx.ins().bor(n3, n4);
                        bcx.ins().bor(or_tmp1, or_tmp2)
                    }
                    types::I64 => {
                        let tmp1 = bcx.ins().ishl_imm(v, 56);
                        let n1 = bcx.ins().band_imm(tmp1, 0xFF00_0000_0000_0000u64 as i64);

                        let tmp2 = bcx.ins().ishl_imm(v, 40);
                        let n2 = bcx.ins().band_imm(tmp2, 0x00FF_0000_0000_0000u64 as i64);

                        let tmp3 = bcx.ins().ishl_imm(v, 24);
                        let n3 = bcx.ins().band_imm(tmp3, 0x0000_FF00_0000_0000u64 as i64);

                        let tmp4 = bcx.ins().ishl_imm(v, 8);
                        let n4 = bcx.ins().band_imm(tmp4, 0x0000_00FF_0000_0000u64 as i64);

                        let tmp5 = bcx.ins().ushr_imm(v, 8);
                        let n5 = bcx.ins().band_imm(tmp5, 0x0000_0000_FF00_0000u64 as i64);

                        let tmp6 = bcx.ins().ushr_imm(v, 24);
                        let n6 = bcx.ins().band_imm(tmp6, 0x0000_0000_00FF_0000u64 as i64);

                        let tmp7 = bcx.ins().ushr_imm(v, 40);
                        let n7 = bcx.ins().band_imm(tmp7, 0x0000_0000_0000_FF00u64 as i64);

                        let tmp8 = bcx.ins().ushr_imm(v, 56);
                        let n8 = bcx.ins().band_imm(tmp8, 0x0000_0000_0000_00FFu64 as i64);

                        let or_tmp1 = bcx.ins().bor(n1, n2);
                        let or_tmp2 = bcx.ins().bor(n3, n4);
                        let or_tmp3 = bcx.ins().bor(n5, n6);
                        let or_tmp4 = bcx.ins().bor(n7, n8);

                        let or_tmp5 = bcx.ins().bor(or_tmp1, or_tmp2);
                        let or_tmp6 = bcx.ins().bor(or_tmp3, or_tmp4);
                        bcx.ins().bor(or_tmp5, or_tmp6)
                    }
                    types::I128 => {
                        let (lo, hi) = bcx.ins().isplit(v);
                        let lo = swap(bcx, lo);
                        let hi = swap(bcx, hi);
                        bcx.ins().iconcat(hi, lo)
                    }
                    ty => unreachable!("bswap {}", ty),
                }
            }
            let val = arg.load_scalar(fx);
            let res = CValue::by_val(swap(&mut fx.bcx, val), arg.layout());
            ret.write_cvalue(fx, res);
        };
        assert_inhabited | assert_zero_valid | assert_uninit_valid, () {
            let layout = fx.layout_of(substs.type_at(0));
            if layout.abi.is_uninhabited() {
                with_no_trimmed_paths!({
                    crate::base::codegen_panic(
                        fx,
                        &format!("attempted to instantiate uninhabited type `{}`", layout.ty),
                        source_info,
                    )
                });
                return;
            }

            if intrinsic == sym::assert_zero_valid
                && !layout.might_permit_raw_init(
                    fx,
                    InitKind::Zero,
                    fx.tcx.sess.opts.unstable_opts.strict_init_checks) {

                with_no_trimmed_paths!({
                    crate::base::codegen_panic(
                        fx,
                        &format!("attempted to zero-initialize type `{}`, which is invalid", layout.ty),
                        source_info,
                    );
                });
                return;
            }

            if intrinsic == sym::assert_uninit_valid
                && !layout.might_permit_raw_init(
                    fx,
                    InitKind::Uninit,
                    fx.tcx.sess.opts.unstable_opts.strict_init_checks) {

                with_no_trimmed_paths!({
                    crate::base::codegen_panic(
                        fx,
                        &format!("attempted to leave type `{}` uninitialized, which is invalid", layout.ty),
                        source_info,
                    )
                });
                return;
            }
        };

        volatile_load | unaligned_volatile_load, (c ptr) {
            // Cranelift treats loads as volatile by default
            // FIXME correctly handle unaligned_volatile_load
            let inner_layout =
                fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
            let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
            ret.write_cvalue(fx, val);
        };
        volatile_store | unaligned_volatile_store, (v ptr, c val) {
            // Cranelift treats stores as volatile by default
            // FIXME correctly handle unaligned_volatile_store
            let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
            dest.write_cvalue(fx, val);
        };

        pref_align_of | needs_drop | type_id | type_name | variant_count, () {
            let const_val =
                fx.tcx.const_eval_instance(ParamEnv::reveal_all(), instance, None).unwrap();
            let val = crate::constant::codegen_const_value(
                fx,
                const_val,
                ret.layout().ty,
            );
            ret.write_cvalue(fx, val);
        };

        ptr_offset_from | ptr_offset_from_unsigned, (v ptr, v base) {
            let ty = substs.type_at(0);

            let pointee_size: u64 = fx.layout_of(ty).size.bytes();
            let diff_bytes = fx.bcx.ins().isub(ptr, base);
            // FIXME this can be an exact division.
            let val = if intrinsic == sym::ptr_offset_from_unsigned {
                let usize_layout = fx.layout_of(fx.tcx.types.usize);
                // Because diff_bytes ULE isize::MAX, this would be fine as signed,
                // but unsigned is slightly easier to codegen, so might as well.
                CValue::by_val(fx.bcx.ins().udiv_imm(diff_bytes, pointee_size as i64), usize_layout)
            } else {
                let isize_layout = fx.layout_of(fx.tcx.types.isize);
                CValue::by_val(fx.bcx.ins().sdiv_imm(diff_bytes, pointee_size as i64), isize_layout)
            };
            ret.write_cvalue(fx, val);
        };

        ptr_guaranteed_eq, (c a, c b) {
            let val = crate::num::codegen_ptr_binop(fx, BinOp::Eq, a, b);
            ret.write_cvalue(fx, val);
        };

        ptr_guaranteed_ne, (c a, c b) {
            let val = crate::num::codegen_ptr_binop(fx, BinOp::Ne, a, b);
            ret.write_cvalue(fx, val);
        };

        caller_location, () {
            let caller_location = fx.get_caller_location(source_info);
            ret.write_cvalue(fx, caller_location);
        };

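        // The atomic intrinsics below are matched by name prefix, so the memory
        // ordering suffix (e.g. `atomic_load_relaxed`) is not inspected; every
        // ordering is lowered to the same Cranelift instruction, which is
        // believed to be at least as strong as any ordering the intrinsic asks for.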
        _ if intrinsic.as_str().starts_with("atomic_fence"), () {
            fx.bcx.ins().fence();
        };
        _ if intrinsic.as_str().starts_with("atomic_singlethreadfence"), () {
            // FIXME use a compiler fence once Cranelift supports it
            fx.bcx.ins().fence();
        };
        _ if intrinsic.as_str().starts_with("atomic_load"), (v ptr) {
            let ty = substs.type_at(0);
            match ty.kind() {
                ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
                    // FIXME implement 128bit atomics
                    if fx.tcx.is_compiler_builtins(LOCAL_CRATE) {
                        // special case for compiler-builtins to avoid having to patch it
                        crate::trap::trap_unimplemented(fx, "128bit atomics not yet supported");
                        let ret_block = fx.get_block(destination.unwrap());
                        fx.bcx.ins().jump(ret_block, &[]);
                        return;
                    } else {
                        fx.tcx.sess.span_fatal(source_info.span, "128bit atomics not yet supported");
                    }
                }
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, ty);
                    return;
                }
            }
            let clif_ty = fx.clif_type(ty).unwrap();

            let val = fx.bcx.ins().atomic_load(clif_ty, MemFlags::trusted(), ptr);

            let val = CValue::by_val(val, fx.layout_of(ty));
            ret.write_cvalue(fx, val);
        };
        _ if intrinsic.as_str().starts_with("atomic_store"), (v ptr, c val) {
            let ty = substs.type_at(0);
            match ty.kind() {
                ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
                    // FIXME implement 128bit atomics
                    if fx.tcx.is_compiler_builtins(LOCAL_CRATE) {
                        // special case for compiler-builtins to avoid having to patch it
                        crate::trap::trap_unimplemented(fx, "128bit atomics not yet supported");
                        let ret_block = fx.get_block(destination.unwrap());
                        fx.bcx.ins().jump(ret_block, &[]);
                        return;
                    } else {
                        fx.tcx.sess.span_fatal(source_info.span, "128bit atomics not yet supported");
                    }
                }
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, ty);
                    return;
                }
            }

            let val = val.load_scalar(fx);

            fx.bcx.ins().atomic_store(MemFlags::trusted(), val, ptr);
        };
        _ if intrinsic.as_str().starts_with("atomic_xchg"), (v ptr, c new) {
            let layout = new.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let new = new.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xchg, ptr, new);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        };
        _ if intrinsic.as_str().starts_with("atomic_cxchg"), (v ptr, c test_old, c new) { // both atomic_cxchg_* and atomic_cxchgweak_*
            let layout = new.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }

            let test_old = test_old.load_scalar(fx);
            let new = new.load_scalar(fx);

            let old = fx.bcx.ins().atomic_cas(MemFlags::trusted(), ptr, test_old, new);
            let is_eq = fx.bcx.ins().icmp(IntCC::Equal, old, test_old);

            let ret_val = CValue::by_val_pair(old, fx.bcx.ins().bint(types::I8, is_eq), ret.layout());
            ret.write_cvalue(fx, ret_val)
        };

        _ if intrinsic.as_str().starts_with("atomic_xadd"), (v ptr, c amount) {
            let layout = amount.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let amount = amount.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Add, ptr, amount);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        };
        _ if intrinsic.as_str().starts_with("atomic_xsub"), (v ptr, c amount) {
            let layout = amount.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let amount = amount.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Sub, ptr, amount);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        };
        _ if intrinsic.as_str().starts_with("atomic_and"), (v ptr, c src) {
            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::And, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        };
        _ if intrinsic.as_str().starts_with("atomic_or"), (v ptr, c src) {
            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Or, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        };
        _ if intrinsic.as_str().starts_with("atomic_xor"), (v ptr, c src) {
            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xor, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        };
        _ if intrinsic.as_str().starts_with("atomic_nand"), (v ptr, c src) {
            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Nand, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        };
        _ if intrinsic.as_str().starts_with("atomic_max"), (v ptr, c src) {
            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smax, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        };
        _ if intrinsic.as_str().starts_with("atomic_umax"), (v ptr, c src) {
            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umax, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        };
        _ if intrinsic.as_str().starts_with("atomic_min"), (v ptr, c src) {
            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smin, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        };
        _ if intrinsic.as_str().starts_with("atomic_umin"), (v ptr, c src) {
            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umin, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        };

        minnumf32, (v a, v b) {
            let val = crate::num::codegen_float_min(fx, a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
            ret.write_cvalue(fx, val);
        };
        minnumf64, (v a, v b) {
            let val = crate::num::codegen_float_min(fx, a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
            ret.write_cvalue(fx, val);
        };
        maxnumf32, (v a, v b) {
            let val = crate::num::codegen_float_max(fx, a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
            ret.write_cvalue(fx, val);
        };
        maxnumf64, (v a, v b) {
            let val = crate::num::codegen_float_max(fx, a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
            ret.write_cvalue(fx, val);
        };

        kw.Try, (v f, v data, v _catch_fn) {
            // FIXME once unwinding is supported, change this to actually catch panics
            let f_sig = fx.bcx.func.import_signature(Signature {
                call_conv: fx.target_config.default_call_conv,
                params: vec![AbiParam::new(fx.bcx.func.dfg.value_type(data))],
                returns: vec![],
            });

            fx.bcx.ins().call_indirect(f_sig, f, &[data]);

            let layout = ret.layout();
            let ret_val = CValue::const_val(fx, layout, ty::ScalarInt::null(layout.size));
            ret.write_cvalue(fx, ret_val);
        };

        fadd_fast | fsub_fast | fmul_fast | fdiv_fast | frem_fast, (c x, c y) {
            let res = crate::num::codegen_float_binop(fx, match intrinsic {
                sym::fadd_fast => BinOp::Add,
                sym::fsub_fast => BinOp::Sub,
                sym::fmul_fast => BinOp::Mul,
                sym::fdiv_fast => BinOp::Div,
                sym::frem_fast => BinOp::Rem,
                _ => unreachable!(),
            }, x, y);
            ret.write_cvalue(fx, res);
        };
        float_to_int_unchecked, (v f) {
            let res = crate::cast::clif_int_or_float_cast(
                fx,
                f,
                false,
                fx.clif_type(ret.layout().ty).unwrap(),
                type_sign(ret.layout().ty),
            );
            ret.write_cvalue(fx, CValue::by_val(res, ret.layout()));
        };

        raw_eq, (v lhs_ref, v rhs_ref) {
            let size = fx.layout_of(substs.type_at(0)).layout.size();
            // FIXME add and use emit_small_memcmp
            let is_eq_value =
                if size == Size::ZERO {
                    // No bytes means they're trivially equal
                    fx.bcx.ins().iconst(types::I8, 1)
                } else if let Some(clty) = size.bits().try_into().ok().and_then(Type::int) {
                    // Can't use `trusted` for these loads; they could be unaligned.
                    let mut flags = MemFlags::new();
                    flags.set_notrap();
                    let lhs_val = fx.bcx.ins().load(clty, flags, lhs_ref, 0);
                    let rhs_val = fx.bcx.ins().load(clty, flags, rhs_ref, 0);
                    let eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_val, rhs_val);
                    fx.bcx.ins().bint(types::I8, eq)
                } else {
                    // Just call `memcmp` (like slices do in core) when the
                    // size is too large or it's not a power-of-two.
                    let signed_bytes = i64::try_from(size.bytes()).unwrap();
                    let bytes_val = fx.bcx.ins().iconst(fx.pointer_type, signed_bytes);
                    let params = vec![AbiParam::new(fx.pointer_type); 3];
                    let returns = vec![AbiParam::new(types::I32)];
                    let args = &[lhs_ref, rhs_ref, bytes_val];
                    let cmp = fx.lib_call("memcmp", params, returns, args)[0];
                    let eq = fx.bcx.ins().icmp_imm(IntCC::Equal, cmp, 0);
                    fx.bcx.ins().bint(types::I8, eq)
                };
            ret.write_cvalue(fx, CValue::by_val(is_eq_value, ret.layout()));
        };

        const_allocate, (c _size, c _align) {
            // returns a null pointer at runtime.
            let null = fx.bcx.ins().iconst(fx.pointer_type, 0);
            ret.write_cvalue(fx, CValue::by_val(null, ret.layout()));
        };

        const_deallocate, (c _ptr, c _size, c _align) {
            // nop at runtime.
        };

        black_box, (c a) {
            // FIXME implement black_box semantics
            ret.write_cvalue(fx, a);
        };
    }

    let ret_block = fx.get_block(destination.unwrap());
    fx.bcx.ins().jump(ret_block, &[]);
}