//! Codegen of intrinsics. This includes `extern "rust-intrinsic"`, `extern "platform-intrinsic"`
//! and LLVM intrinsics that have symbol names starting with `llvm.`.

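// Destructures the operand slice of an intrinsic call into the named arguments and codegens
// each of them, bugging out via `bug_on_incorrect_arg_count` if the slice has a different
// length, e.g. `intrinsic_args!(fx, args => (x, y); intrinsic);`.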
macro_rules! intrinsic_args {
    ($fx:expr, $args:expr => ($($arg:tt),*); $intrinsic:expr) => {
        #[allow(unused_parens)]
        let ($($arg),*) = if let [$($arg),*] = $args {
            ($(codegen_operand($fx, $arg)),*)
        } else {
            $crate::intrinsics::bug_on_incorrect_arg_count($intrinsic);
        };
    }
}

mod cpuid;
mod llvm;
mod llvm_aarch64;
mod llvm_x86;
mod simd;

pub(crate) use cpuid::codegen_cpuid_call;
pub(crate) use llvm::codegen_llvm_intrinsic_call;

use rustc_middle::ty::layout::HasParamEnv;
use rustc_middle::ty::print::with_no_trimmed_paths;
use rustc_middle::ty::subst::SubstsRef;
use rustc_span::symbol::{kw, sym, Symbol};

use crate::prelude::*;
use cranelift_codegen::ir::AtomicRmwOp;

fn bug_on_incorrect_arg_count(intrinsic: impl std::fmt::Display) -> ! {
    bug!("wrong number of args for intrinsic {}", intrinsic);
}

fn report_atomic_type_validation_error<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    intrinsic: Symbol,
    span: Span,
    ty: Ty<'tcx>,
) {
    fx.tcx.sess.span_err(
        span,
        &format!(
            "`{}` intrinsic: expected basic integer or raw pointer type, found `{:?}`",
            intrinsic, ty
        ),
    );
    // Prevent verifier error
    fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
}

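// Returns the Cranelift vector type for a SIMD layout, or `None` if the vector has to be
// handled lane by lane (only 128 bit vectors are returned here).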
pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> Option<Type> {
    let (element, count) = match layout.abi {
        Abi::Vector { element, count } => (element, count),
        _ => unreachable!(),
    };

    match scalar_to_clif_type(tcx, element).by(u32::try_from(count).unwrap()) {
        // Cranelift currently only implements icmp for 128bit vectors.
        Some(vector_ty) if vector_ty.bits() == 128 => Some(vector_ty),
        _ => None,
    }
}

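// Applies `f` to every lane of `val` and writes each result to the corresponding lane of `ret`.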
fn simd_for_each_lane<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    val: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Ty<'tcx>, Value) -> Value,
) {
    let layout = val.layout();

    let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
    let lane_layout = fx.layout_of(lane_ty);
    let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
    let ret_lane_layout = fx.layout_of(ret_lane_ty);
    assert_eq!(lane_count, ret_lane_count);

    for lane_idx in 0..lane_count {
        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);

        let res_lane = f(fx, lane_layout.ty, ret_lane_layout.ty, lane);
        let res_lane = CValue::by_val(res_lane, ret_lane_layout);

        ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
    }
}

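// Like `simd_pair_for_each_lane`, but passes the lanes to `f` as `CValue`s instead of loaded
// scalar `Value`s.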
fn simd_pair_for_each_lane_typed<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    x: CValue<'tcx>,
    y: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, CValue<'tcx>, CValue<'tcx>) -> CValue<'tcx>,
) {
    assert_eq!(x.layout(), y.layout());
    let layout = x.layout();

    let (lane_count, _lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
    let (ret_lane_count, _ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
    assert_eq!(lane_count, ret_lane_count);

    for lane_idx in 0..lane_count {
        let x_lane = x.value_lane(fx, lane_idx);
        let y_lane = y.value_lane(fx, lane_idx);

        let res_lane = f(fx, x_lane, y_lane);

        ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
    }
}

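// Applies `f` to each pair of corresponding lanes of `x` and `y` and writes the result to the
// matching lane of `ret`.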
fn simd_pair_for_each_lane<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    x: CValue<'tcx>,
    y: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Ty<'tcx>, Value, Value) -> Value,
) {
    assert_eq!(x.layout(), y.layout());
    let layout = x.layout();

    let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
    let lane_layout = fx.layout_of(lane_ty);
    let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
    let ret_lane_layout = fx.layout_of(ret_lane_ty);
    assert_eq!(lane_count, ret_lane_count);

    for lane_idx in 0..lane_count {
        let x_lane = x.value_lane(fx, lane_idx).load_scalar(fx);
        let y_lane = y.value_lane(fx, lane_idx).load_scalar(fx);

        let res_lane = f(fx, lane_layout.ty, ret_lane_layout.ty, x_lane, y_lane);
        let res_lane = CValue::by_val(res_lane, ret_lane_layout);

        ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
    }
}

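// Folds all lanes of `val` into a single scalar with `f`, starting from `acc` if given and
// from the first lane otherwise.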
fn simd_reduce<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    val: CValue<'tcx>,
    acc: Option<Value>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Value, Value) -> Value,
) {
    let (lane_count, lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
    let lane_layout = fx.layout_of(lane_ty);
    assert_eq!(lane_layout, ret.layout());

    let (mut res_val, start_lane) =
        if let Some(acc) = acc { (acc, 0) } else { (val.value_lane(fx, 0).load_scalar(fx), 1) };
    for lane_idx in start_lane..lane_count {
        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
        res_val = f(fx, lane_layout.ty, res_val, lane);
    }
    let res = CValue::by_val(res_val, lane_layout);
    ret.write_cvalue(fx, res);
}

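// Reduces a vector of booleans with `f`, masking each lane to its lowest bit first and
// narrowing the result to `i8` if necessary.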
// FIXME move all uses to `simd_reduce`
fn simd_reduce_bool<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    val: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Value, Value) -> Value,
) {
    let (lane_count, _lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
    assert!(ret.layout().ty.is_bool());

    let res_val = val.value_lane(fx, 0).load_scalar(fx);
    let mut res_val = fx.bcx.ins().band_imm(res_val, 1); // mask to boolean
    for lane_idx in 1..lane_count {
        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
        let lane = fx.bcx.ins().band_imm(lane, 1); // mask to boolean
        res_val = f(fx, res_val, lane);
    }
    let res_val = if fx.bcx.func.dfg.value_type(res_val) != types::I8 {
        fx.bcx.ins().ireduce(types::I8, res_val)
    } else {
        res_val
    };
    let res = CValue::by_val(res_val, ret.layout());
    ret.write_cvalue(fx, res);
}

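// Expands a boolean into all zeros or all ones of the Cranelift type for `ty`, computing the
// mask in the equally sized integer type and bitcasting back when `ty` is a float.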
fn bool_to_zero_or_max_uint<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    ty: Ty<'tcx>,
    val: Value,
) -> Value {
    let ty = fx.clif_type(ty).unwrap();

    let int_ty = match ty {
        types::F32 => types::I32,
        types::F64 => types::I64,
        ty => ty,
    };

    let mut res = fx.bcx.ins().bmask(int_ty, val);

    if ty.is_float() {
        res = codegen_bitcast(fx, ty, res);
    }

    res
}

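// Main entry point for intrinsic calls: diverging intrinsics are handled inline, `simd_*`
// intrinsics are forwarded to the `simd` module, float intrinsics get a dedicated lowering
// and everything else goes through `codegen_regular_intrinsic_call`.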
pub(crate) fn codegen_intrinsic_call<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    instance: Instance<'tcx>,
    args: &[mir::Operand<'tcx>],
    destination: CPlace<'tcx>,
    target: Option<BasicBlock>,
    source_info: mir::SourceInfo,
) {
    let intrinsic = fx.tcx.item_name(instance.def_id());
    let substs = instance.substs;

    let target = if let Some(target) = target {
        target
    } else {
        // Insert non-returning intrinsics here
        match intrinsic {
            sym::abort => {
                fx.bcx.ins().trap(TrapCode::User(0));
            }
            sym::transmute => {
                crate::base::codegen_panic(fx, "Transmuting to uninhabited type.", source_info);
            }
            _ => unimplemented!("unsupported intrinsic {}", intrinsic),
        }
        return;
    };

    if intrinsic.as_str().starts_with("simd_") {
        self::simd::codegen_simd_intrinsic_call(
            fx,
            intrinsic,
            substs,
            args,
            destination,
            target,
            source_info.span,
        );
    } else if codegen_float_intrinsic_call(fx, intrinsic, args, destination) {
        let ret_block = fx.get_block(target);
        fx.bcx.ins().jump(ret_block, &[]);
    } else {
        codegen_regular_intrinsic_call(
            fx,
            instance,
            intrinsic,
            substs,
            args,
            destination,
            Some(target),
            source_info,
        );
    }
}

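// Lowers float intrinsics either to a native Cranelift instruction (fma, fcopysign, fabs,
// floor, ceil, trunc) or to the matching libm/compiler-builtins libcall. Returns `false` if
// the intrinsic is not a float intrinsic.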
fn codegen_float_intrinsic_call<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    intrinsic: Symbol,
    args: &[mir::Operand<'tcx>],
    ret: CPlace<'tcx>,
) -> bool {
    let (name, arg_count, ty) = match intrinsic {
        sym::expf32 => ("expf", 1, fx.tcx.types.f32),
        sym::expf64 => ("exp", 1, fx.tcx.types.f64),
        sym::exp2f32 => ("exp2f", 1, fx.tcx.types.f32),
        sym::exp2f64 => ("exp2", 1, fx.tcx.types.f64),
        sym::sqrtf32 => ("sqrtf", 1, fx.tcx.types.f32),
        sym::sqrtf64 => ("sqrt", 1, fx.tcx.types.f64),
        sym::powif32 => ("__powisf2", 2, fx.tcx.types.f32), // compiler-builtins
        sym::powif64 => ("__powidf2", 2, fx.tcx.types.f64), // compiler-builtins
        sym::powf32 => ("powf", 2, fx.tcx.types.f32),
        sym::powf64 => ("pow", 2, fx.tcx.types.f64),
        sym::logf32 => ("logf", 1, fx.tcx.types.f32),
        sym::logf64 => ("log", 1, fx.tcx.types.f64),
        sym::log2f32 => ("log2f", 1, fx.tcx.types.f32),
        sym::log2f64 => ("log2", 1, fx.tcx.types.f64),
        sym::log10f32 => ("log10f", 1, fx.tcx.types.f32),
        sym::log10f64 => ("log10", 1, fx.tcx.types.f64),
        sym::fabsf32 => ("fabsf", 1, fx.tcx.types.f32),
        sym::fabsf64 => ("fabs", 1, fx.tcx.types.f64),
        sym::fmaf32 => ("fmaf", 3, fx.tcx.types.f32),
        sym::fmaf64 => ("fma", 3, fx.tcx.types.f64),
        sym::copysignf32 => ("copysignf", 2, fx.tcx.types.f32),
        sym::copysignf64 => ("copysign", 2, fx.tcx.types.f64),
        sym::floorf32 => ("floorf", 1, fx.tcx.types.f32),
        sym::floorf64 => ("floor", 1, fx.tcx.types.f64),
        sym::ceilf32 => ("ceilf", 1, fx.tcx.types.f32),
        sym::ceilf64 => ("ceil", 1, fx.tcx.types.f64),
        sym::truncf32 => ("truncf", 1, fx.tcx.types.f32),
        sym::truncf64 => ("trunc", 1, fx.tcx.types.f64),
        sym::roundf32 => ("roundf", 1, fx.tcx.types.f32),
        sym::roundf64 => ("round", 1, fx.tcx.types.f64),
        sym::sinf32 => ("sinf", 1, fx.tcx.types.f32),
        sym::sinf64 => ("sin", 1, fx.tcx.types.f64),
        sym::cosf32 => ("cosf", 1, fx.tcx.types.f32),
        sym::cosf64 => ("cos", 1, fx.tcx.types.f64),
        _ => return false,
    };

    if args.len() != arg_count {
        bug!("wrong number of args for intrinsic {:?}", intrinsic);
    }

    let (a, b, c);
    let args = match args {
        [x] => {
            a = [codegen_operand(fx, x)];
            &a as &[_]
        }
        [x, y] => {
            b = [codegen_operand(fx, x), codegen_operand(fx, y)];
            &b
        }
        [x, y, z] => {
            c = [codegen_operand(fx, x), codegen_operand(fx, y), codegen_operand(fx, z)];
            &c
        }
        _ => unreachable!(),
    };

    let layout = fx.layout_of(ty);
    let res = match intrinsic {
        sym::fmaf32 | sym::fmaf64 => {
            let a = args[0].load_scalar(fx);
            let b = args[1].load_scalar(fx);
            let c = args[2].load_scalar(fx);
            CValue::by_val(fx.bcx.ins().fma(a, b, c), layout)
        }
        sym::copysignf32 | sym::copysignf64 => {
            let a = args[0].load_scalar(fx);
            let b = args[1].load_scalar(fx);
            CValue::by_val(fx.bcx.ins().fcopysign(a, b), layout)
        }
        sym::fabsf32
        | sym::fabsf64
        | sym::floorf32
        | sym::floorf64
        | sym::ceilf32
        | sym::ceilf64
        | sym::truncf32
        | sym::truncf64 => {
            let a = args[0].load_scalar(fx);

            let val = match intrinsic {
                sym::fabsf32 | sym::fabsf64 => fx.bcx.ins().fabs(a),
                sym::floorf32 | sym::floorf64 => fx.bcx.ins().floor(a),
                sym::ceilf32 | sym::ceilf64 => fx.bcx.ins().ceil(a),
                sym::truncf32 | sym::truncf64 => fx.bcx.ins().trunc(a),
                _ => unreachable!(),
            };

            CValue::by_val(val, layout)
        }
        // These intrinsics aren't supported natively by Cranelift.
        // Lower them to a libcall.
        _ => fx.easy_call(name, &args, ty),
    };

    ret.write_cvalue(fx, res);

    true
}

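// Handles all remaining intrinsics: memory copies, checked and unchecked arithmetic, size and
// alignment queries, atomics, pointer arithmetic and the intrinsics that are evaluated at
// compile time via `const_eval_instance`.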
fn codegen_regular_intrinsic_call<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    instance: Instance<'tcx>,
    intrinsic: Symbol,
    substs: SubstsRef<'tcx>,
    args: &[mir::Operand<'tcx>],
    ret: CPlace<'tcx>,
    destination: Option<BasicBlock>,
    source_info: mir::SourceInfo,
) {
    let usize_layout = fx.layout_of(fx.tcx.types.usize);

    match intrinsic {
        sym::likely | sym::unlikely => {
            intrinsic_args!(fx, args => (a); intrinsic);

            ret.write_cvalue(fx, a);
        }
        sym::breakpoint => {
            intrinsic_args!(fx, args => (); intrinsic);

            fx.bcx.ins().debugtrap();
        }
        sym::copy | sym::copy_nonoverlapping => {
            intrinsic_args!(fx, args => (src, dst, count); intrinsic);
            let src = src.load_scalar(fx);
            let dst = dst.load_scalar(fx);
            let count = count.load_scalar(fx);

            let elem_ty = substs.type_at(0);
            let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
            assert_eq!(args.len(), 3);
            let byte_amount =
                if elem_size != 1 { fx.bcx.ins().imul_imm(count, elem_size as i64) } else { count };

            if intrinsic == sym::copy_nonoverlapping {
                // FIXME emit_small_memcpy
                fx.bcx.call_memcpy(fx.target_config, dst, src, byte_amount);
            } else {
                // FIXME emit_small_memmove
                fx.bcx.call_memmove(fx.target_config, dst, src, byte_amount);
            }
        }
        sym::volatile_copy_memory | sym::volatile_copy_nonoverlapping_memory => {
            // NOTE: the volatile variants have src and dst swapped
            intrinsic_args!(fx, args => (dst, src, count); intrinsic);
            let dst = dst.load_scalar(fx);
            let src = src.load_scalar(fx);
            let count = count.load_scalar(fx);

            let elem_ty = substs.type_at(0);
            let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
            assert_eq!(args.len(), 3);
            let byte_amount =
                if elem_size != 1 { fx.bcx.ins().imul_imm(count, elem_size as i64) } else { count };

            // FIXME make the copy actually volatile when using emit_small_mem{cpy,move}
            if intrinsic == sym::volatile_copy_nonoverlapping_memory {
                // FIXME emit_small_memcpy
                fx.bcx.call_memcpy(fx.target_config, dst, src, byte_amount);
            } else {
                // FIXME emit_small_memmove
                fx.bcx.call_memmove(fx.target_config, dst, src, byte_amount);
            }
        }
        sym::size_of_val => {
            intrinsic_args!(fx, args => (ptr); intrinsic);

            let layout = fx.layout_of(substs.type_at(0));
            // Note: Can't use is_unsized here as truly unsized types need to take the fixed size
            // branch
            let size = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
                let (_ptr, info) = ptr.load_scalar_pair(fx);
                let (size, _align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
                size
            } else {
                fx.bcx.ins().iconst(fx.pointer_type, layout.size.bytes() as i64)
            };
            ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
        }
        sym::min_align_of_val => {
            intrinsic_args!(fx, args => (ptr); intrinsic);

            let layout = fx.layout_of(substs.type_at(0));
            // Note: Can't use is_unsized here as truly unsized types need to take the fixed size
            // branch
            let align = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
                let (_ptr, info) = ptr.load_scalar_pair(fx);
                let (_size, align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
                align
            } else {
                fx.bcx.ins().iconst(fx.pointer_type, layout.align.abi.bytes() as i64)
            };
            ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
        }

        sym::vtable_size => {
            intrinsic_args!(fx, args => (vtable); intrinsic);
            let vtable = vtable.load_scalar(fx);

            let size = crate::vtable::size_of_obj(fx, vtable);
            ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
        }

        sym::vtable_align => {
            intrinsic_args!(fx, args => (vtable); intrinsic);
            let vtable = vtable.load_scalar(fx);

            let align = crate::vtable::min_align_of_obj(fx, vtable);
            ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
        }

        sym::unchecked_add
        | sym::unchecked_sub
        | sym::unchecked_mul
        | sym::unchecked_div
        | sym::exact_div
        | sym::unchecked_rem
        | sym::unchecked_shl
        | sym::unchecked_shr => {
            intrinsic_args!(fx, args => (x, y); intrinsic);

            // FIXME trap on overflow
            let bin_op = match intrinsic {
                sym::unchecked_add => BinOp::Add,
                sym::unchecked_sub => BinOp::Sub,
                sym::unchecked_mul => BinOp::Mul,
                sym::unchecked_div | sym::exact_div => BinOp::Div,
                sym::unchecked_rem => BinOp::Rem,
                sym::unchecked_shl => BinOp::Shl,
                sym::unchecked_shr => BinOp::Shr,
                _ => unreachable!(),
            };
            let res = crate::num::codegen_int_binop(fx, bin_op, x, y);
            ret.write_cvalue(fx, res);
        }
        sym::add_with_overflow | sym::sub_with_overflow | sym::mul_with_overflow => {
            intrinsic_args!(fx, args => (x, y); intrinsic);

            assert_eq!(x.layout().ty, y.layout().ty);
            let bin_op = match intrinsic {
                sym::add_with_overflow => BinOp::Add,
                sym::sub_with_overflow => BinOp::Sub,
                sym::mul_with_overflow => BinOp::Mul,
                _ => unreachable!(),
            };

            let res = crate::num::codegen_checked_int_binop(fx, bin_op, x, y);
            ret.write_cvalue(fx, res);
        }
        sym::saturating_add | sym::saturating_sub => {
            intrinsic_args!(fx, args => (lhs, rhs); intrinsic);

            assert_eq!(lhs.layout().ty, rhs.layout().ty);
            let bin_op = match intrinsic {
                sym::saturating_add => BinOp::Add,
                sym::saturating_sub => BinOp::Sub,
                _ => unreachable!(),
            };

            let res = crate::num::codegen_saturating_int_binop(fx, bin_op, lhs, rhs);
            ret.write_cvalue(fx, res);
        }
        sym::rotate_left => {
            intrinsic_args!(fx, args => (x, y); intrinsic);
            let y = y.load_scalar(fx);

            let layout = x.layout();
            let x = x.load_scalar(fx);
            let res = fx.bcx.ins().rotl(x, y);
            ret.write_cvalue(fx, CValue::by_val(res, layout));
        }
        sym::rotate_right => {
            intrinsic_args!(fx, args => (x, y); intrinsic);
            let y = y.load_scalar(fx);

            let layout = x.layout();
            let x = x.load_scalar(fx);
            let res = fx.bcx.ins().rotr(x, y);
            ret.write_cvalue(fx, CValue::by_val(res, layout));
        }

        // The only difference between offset and arith_offset is regarding UB. Because Cranelift
        // doesn't have UB, both are codegen'ed the same way.
        sym::offset | sym::arith_offset => {
            intrinsic_args!(fx, args => (base, offset); intrinsic);
            let offset = offset.load_scalar(fx);

            let pointee_ty = base.layout().ty.builtin_deref(true).unwrap().ty;
            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
            let ptr_diff = if pointee_size != 1 {
                fx.bcx.ins().imul_imm(offset, pointee_size as i64)
            } else {
                offset
            };
            let base_val = base.load_scalar(fx);
            let res = fx.bcx.ins().iadd(base_val, ptr_diff);
            ret.write_cvalue(fx, CValue::by_val(res, base.layout()));
        }

        sym::ptr_mask => {
            intrinsic_args!(fx, args => (ptr, mask); intrinsic);
            let ptr = ptr.load_scalar(fx);
            let mask = mask.load_scalar(fx);
            fx.bcx.ins().band(ptr, mask);
        }

        sym::transmute => {
            intrinsic_args!(fx, args => (from); intrinsic);

            ret.write_cvalue_transmute(fx, from);
        }
        sym::write_bytes | sym::volatile_set_memory => {
            intrinsic_args!(fx, args => (dst, val, count); intrinsic);
            let val = val.load_scalar(fx);
            let count = count.load_scalar(fx);

            let pointee_ty = dst.layout().ty.builtin_deref(true).unwrap().ty;
            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
            let count = if pointee_size != 1 {
                fx.bcx.ins().imul_imm(count, pointee_size as i64)
            } else {
                count
            };
            let dst_ptr = dst.load_scalar(fx);
            // FIXME make the memset actually volatile when switching to emit_small_memset
            // FIXME use emit_small_memset
            fx.bcx.call_memset(fx.target_config, dst_ptr, val, count);
        }
        sym::ctlz | sym::ctlz_nonzero => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            // FIXME trap on `ctlz_nonzero` with zero arg.
            let res = fx.bcx.ins().clz(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::cttz | sym::cttz_nonzero => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            // FIXME trap on `cttz_nonzero` with zero arg.
            let res = fx.bcx.ins().ctz(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::ctpop => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            let res = fx.bcx.ins().popcnt(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::bitreverse => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            let res = fx.bcx.ins().bitrev(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::bswap => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            let res = if fx.bcx.func.dfg.value_type(val) == types::I8 {
                val
            } else {
                fx.bcx.ins().bswap(val)
            };
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::assert_inhabited | sym::assert_zero_valid | sym::assert_mem_uninitialized_valid => {
            intrinsic_args!(fx, args => (); intrinsic);

            let layout = fx.layout_of(substs.type_at(0));
            if layout.abi.is_uninhabited() {
                with_no_trimmed_paths!({
                    crate::base::codegen_panic_nounwind(
                        fx,
                        &format!("attempted to instantiate uninhabited type `{}`", layout.ty),
                        source_info,
                    )
                });
                return;
            }

            if intrinsic == sym::assert_zero_valid
                && !fx.tcx.permits_zero_init(fx.param_env().and(layout))
            {
                with_no_trimmed_paths!({
                    crate::base::codegen_panic_nounwind(
                        fx,
                        &format!(
                            "attempted to zero-initialize type `{}`, which is invalid",
                            layout.ty
                        ),
                        source_info,
                    );
                });
                return;
            }

            if intrinsic == sym::assert_mem_uninitialized_valid
                && !fx.tcx.permits_uninit_init(fx.param_env().and(layout))
            {
                with_no_trimmed_paths!({
                    crate::base::codegen_panic_nounwind(
                        fx,
                        &format!(
                            "attempted to leave type `{}` uninitialized, which is invalid",
                            layout.ty
                        ),
                        source_info,
                    )
                });
                return;
            }
        }

        sym::volatile_load | sym::unaligned_volatile_load => {
            intrinsic_args!(fx, args => (ptr); intrinsic);

            // Cranelift treats loads as volatile by default
            // FIXME correctly handle unaligned_volatile_load
            let inner_layout = fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
            let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
            ret.write_cvalue(fx, val);
        }
        sym::volatile_store | sym::unaligned_volatile_store => {
            intrinsic_args!(fx, args => (ptr, val); intrinsic);
            let ptr = ptr.load_scalar(fx);

            // Cranelift treats stores as volatile by default
            // FIXME correctly handle unaligned_volatile_store
            let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
            dest.write_cvalue(fx, val);
        }

        sym::pref_align_of
        | sym::needs_drop
        | sym::type_id
        | sym::type_name
        | sym::variant_count => {
            intrinsic_args!(fx, args => (); intrinsic);

            let const_val =
                fx.tcx.const_eval_instance(ParamEnv::reveal_all(), instance, None).unwrap();
            let val = crate::constant::codegen_const_value(fx, const_val, ret.layout().ty);
            ret.write_cvalue(fx, val);
        }

        sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
            intrinsic_args!(fx, args => (ptr, base); intrinsic);
            let ptr = ptr.load_scalar(fx);
            let base = base.load_scalar(fx);
            let ty = substs.type_at(0);

            let pointee_size: u64 = fx.layout_of(ty).size.bytes();
            let diff_bytes = fx.bcx.ins().isub(ptr, base);
            // FIXME this can be an exact division.
            let val = if intrinsic == sym::ptr_offset_from_unsigned {
                let usize_layout = fx.layout_of(fx.tcx.types.usize);
                // Because diff_bytes ULE isize::MAX, this would be fine as signed,
                // but unsigned is slightly easier to codegen, so might as well.
                CValue::by_val(fx.bcx.ins().udiv_imm(diff_bytes, pointee_size as i64), usize_layout)
            } else {
                let isize_layout = fx.layout_of(fx.tcx.types.isize);
                CValue::by_val(fx.bcx.ins().sdiv_imm(diff_bytes, pointee_size as i64), isize_layout)
            };
            ret.write_cvalue(fx, val);
        }

        sym::ptr_guaranteed_cmp => {
            intrinsic_args!(fx, args => (a, b); intrinsic);

            let val = crate::num::codegen_ptr_binop(fx, BinOp::Eq, a, b).load_scalar(fx);
            ret.write_cvalue(fx, CValue::by_val(val, fx.layout_of(fx.tcx.types.u8)));
        }

        sym::caller_location => {
            intrinsic_args!(fx, args => (); intrinsic);

            let caller_location = fx.get_caller_location(source_info);
            ret.write_cvalue(fx, caller_location);
        }

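        // The `atomic_*` intrinsics encode their memory ordering in the symbol name, so they
        // are matched by prefix here; the ordering suffix is not inspected and all orderings
        // are lowered to the same Cranelift instructions.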
        _ if intrinsic.as_str().starts_with("atomic_fence") => {
            intrinsic_args!(fx, args => (); intrinsic);

            fx.bcx.ins().fence();
        }
        _ if intrinsic.as_str().starts_with("atomic_singlethreadfence") => {
            intrinsic_args!(fx, args => (); intrinsic);

            // FIXME use a compiler fence once Cranelift supports it
            fx.bcx.ins().fence();
        }
        _ if intrinsic.as_str().starts_with("atomic_load") => {
            intrinsic_args!(fx, args => (ptr); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let ty = substs.type_at(0);
            match ty.kind() {
                ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
                    // FIXME implement 128bit atomics
                    if fx.tcx.is_compiler_builtins(LOCAL_CRATE) {
                        // special case for compiler-builtins to avoid having to patch it
                        crate::trap::trap_unimplemented(fx, "128bit atomics not yet supported");
                        return;
                    } else {
                        fx.tcx
                            .sess
                            .span_fatal(source_info.span, "128bit atomics not yet supported");
                    }
                }
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, ty);
                    return;
                }
            }
            let clif_ty = fx.clif_type(ty).unwrap();

            let val = fx.bcx.ins().atomic_load(clif_ty, MemFlags::trusted(), ptr);

            let val = CValue::by_val(val, fx.layout_of(ty));
            ret.write_cvalue(fx, val);
        }
        _ if intrinsic.as_str().starts_with("atomic_store") => {
            intrinsic_args!(fx, args => (ptr, val); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let ty = substs.type_at(0);
            match ty.kind() {
                ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
                    // FIXME implement 128bit atomics
                    if fx.tcx.is_compiler_builtins(LOCAL_CRATE) {
                        // special case for compiler-builtins to avoid having to patch it
                        crate::trap::trap_unimplemented(fx, "128bit atomics not yet supported");
                        return;
                    } else {
                        fx.tcx
                            .sess
                            .span_fatal(source_info.span, "128bit atomics not yet supported");
                    }
                }
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, ty);
                    return;
                }
            }

            let val = val.load_scalar(fx);

            fx.bcx.ins().atomic_store(MemFlags::trusted(), val, ptr);
        }
        _ if intrinsic.as_str().starts_with("atomic_xchg") => {
            intrinsic_args!(fx, args => (ptr, new); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = new.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let new = new.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xchg, ptr, new);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_cxchg") => {
            // both atomic_cxchg_* and atomic_cxchgweak_*
            intrinsic_args!(fx, args => (ptr, test_old, new); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = new.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }

            let test_old = test_old.load_scalar(fx);
            let new = new.load_scalar(fx);

            let old = fx.bcx.ins().atomic_cas(MemFlags::trusted(), ptr, test_old, new);
            let is_eq = fx.bcx.ins().icmp(IntCC::Equal, old, test_old);

            let ret_val = CValue::by_val_pair(old, is_eq, ret.layout());
            ret.write_cvalue(fx, ret_val)
        }

        _ if intrinsic.as_str().starts_with("atomic_xadd") => {
            intrinsic_args!(fx, args => (ptr, amount); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = amount.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let amount = amount.load_scalar(fx);

            let old =
                fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Add, ptr, amount);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_xsub") => {
            intrinsic_args!(fx, args => (ptr, amount); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = amount.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let amount = amount.load_scalar(fx);

            let old =
                fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Sub, ptr, amount);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_and") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::And, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_or") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Or, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_xor") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xor, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_nand") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Nand, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_max") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smax, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_umax") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umax, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_min") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smin, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_umin") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umin, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }

        sym::minnumf32 => {
            intrinsic_args!(fx, args => (a, b); intrinsic);
            let a = a.load_scalar(fx);
            let b = b.load_scalar(fx);

            let val = crate::num::codegen_float_min(fx, a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
            ret.write_cvalue(fx, val);
        }
        sym::minnumf64 => {
            intrinsic_args!(fx, args => (a, b); intrinsic);
            let a = a.load_scalar(fx);
            let b = b.load_scalar(fx);

            let val = crate::num::codegen_float_min(fx, a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
            ret.write_cvalue(fx, val);
        }
        sym::maxnumf32 => {
            intrinsic_args!(fx, args => (a, b); intrinsic);
            let a = a.load_scalar(fx);
            let b = b.load_scalar(fx);

            let val = crate::num::codegen_float_max(fx, a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
            ret.write_cvalue(fx, val);
        }
        sym::maxnumf64 => {
            intrinsic_args!(fx, args => (a, b); intrinsic);
            let a = a.load_scalar(fx);
            let b = b.load_scalar(fx);

            let val = crate::num::codegen_float_max(fx, a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
            ret.write_cvalue(fx, val);
        }

        kw::Try => {
            intrinsic_args!(fx, args => (f, data, catch_fn); intrinsic);
            let f = f.load_scalar(fx);
            let data = data.load_scalar(fx);
            let _catch_fn = catch_fn.load_scalar(fx);

            // FIXME once unwinding is supported, change this to actually catch panics
            let f_sig = fx.bcx.func.import_signature(Signature {
                call_conv: fx.target_config.default_call_conv,
                params: vec![AbiParam::new(pointer_ty(fx.tcx))],
                returns: vec![],
            });

            fx.bcx.ins().call_indirect(f_sig, f, &[data]);

            let layout = ret.layout();
            let ret_val = CValue::const_val(fx, layout, ty::ScalarInt::null(layout.size));
            ret.write_cvalue(fx, ret_val);
        }

        sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
            intrinsic_args!(fx, args => (x, y); intrinsic);

            let res = crate::num::codegen_float_binop(
                fx,
                match intrinsic {
                    sym::fadd_fast => BinOp::Add,
                    sym::fsub_fast => BinOp::Sub,
                    sym::fmul_fast => BinOp::Mul,
                    sym::fdiv_fast => BinOp::Div,
                    sym::frem_fast => BinOp::Rem,
                    _ => unreachable!(),
                },
                x,
                y,
            );
            ret.write_cvalue(fx, res);
        }
        sym::float_to_int_unchecked => {
            intrinsic_args!(fx, args => (f); intrinsic);
            let f = f.load_scalar(fx);

            let res = crate::cast::clif_int_or_float_cast(
                fx,
                f,
                false,
                fx.clif_type(ret.layout().ty).unwrap(),
                type_sign(ret.layout().ty),
            );
            ret.write_cvalue(fx, CValue::by_val(res, ret.layout()));
        }

        sym::raw_eq => {
            intrinsic_args!(fx, args => (lhs_ref, rhs_ref); intrinsic);
            let lhs_ref = lhs_ref.load_scalar(fx);
            let rhs_ref = rhs_ref.load_scalar(fx);

            let size = fx.layout_of(substs.type_at(0)).layout.size();
            // FIXME add and use emit_small_memcmp
            let is_eq_value = if size == Size::ZERO {
                // No bytes means they're trivially equal
                fx.bcx.ins().iconst(types::I8, 1)
            } else if let Some(clty) = size.bits().try_into().ok().and_then(Type::int) {
                // Can't use `trusted` for these loads; they could be unaligned.
                let mut flags = MemFlags::new();
                flags.set_notrap();
                let lhs_val = fx.bcx.ins().load(clty, flags, lhs_ref, 0);
                let rhs_val = fx.bcx.ins().load(clty, flags, rhs_ref, 0);
                fx.bcx.ins().icmp(IntCC::Equal, lhs_val, rhs_val)
            } else {
                // Just call `memcmp` (like slices do in core) when the
                // size is too large or it's not a power-of-two.
                let signed_bytes = i64::try_from(size.bytes()).unwrap();
                let bytes_val = fx.bcx.ins().iconst(fx.pointer_type, signed_bytes);
                let params = vec![AbiParam::new(fx.pointer_type); 3];
                let returns = vec![AbiParam::new(types::I32)];
                let args = &[lhs_ref, rhs_ref, bytes_val];
                let cmp = fx.lib_call("memcmp", params, returns, args)[0];
                fx.bcx.ins().icmp_imm(IntCC::Equal, cmp, 0)
            };
            ret.write_cvalue(fx, CValue::by_val(is_eq_value, ret.layout()));
        }

        sym::const_allocate => {
            intrinsic_args!(fx, args => (_size, _align); intrinsic);

            // returns a null pointer at runtime.
            let null = fx.bcx.ins().iconst(fx.pointer_type, 0);
            ret.write_cvalue(fx, CValue::by_val(null, ret.layout()));
        }

        sym::const_deallocate => {
            intrinsic_args!(fx, args => (_ptr, _size, _align); intrinsic);
            // nop at runtime.
        }

        sym::black_box => {
            intrinsic_args!(fx, args => (a); intrinsic);

            // FIXME implement black_box semantics
            ret.write_cvalue(fx, a);
        }

        // FIXME implement variadics in cranelift
        sym::va_copy | sym::va_arg | sym::va_end => {
            fx.tcx.sess.span_fatal(
                source_info.span,
                "Defining variadic functions is not yet supported by Cranelift",
            );
        }

        _ => {
            fx.tcx
                .sess
                .span_fatal(source_info.span, &format!("unsupported intrinsic {}", intrinsic));
        }
    }

    let ret_block = fx.get_block(destination.unwrap());
    fx.bcx.ins().jump(ret_block, &[]);
}