//! Codegen of intrinsics. This includes `extern "rust-intrinsic"`, `extern "platform-intrinsic"`
//! and LLVM intrinsics that have symbol names starting with `llvm.`.

macro_rules! intrinsic_args {
    ($fx:expr, $args:expr => ($($arg:tt),*); $intrinsic:expr) => {
        #[allow(unused_parens)]
        let ($($arg),*) = if let [$($arg),*] = $args {
            ($(codegen_operand($fx, $arg)),*)
        } else {
            $crate::intrinsics::bug_on_incorrect_arg_count($intrinsic);
        };
    }
}
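// Example use, as seen throughout the lowering code below:
//
//     intrinsic_args!(fx, args => (ptr, mask); intrinsic);
//
// This binds `ptr` and `mask` to the codegen'ed operands, or bugs out via
// `bug_on_incorrect_arg_count` if `args` has a different length.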

mod cpuid;
mod llvm;
mod llvm_aarch64;
mod llvm_x86;
mod simd;

pub(crate) use cpuid::codegen_cpuid_call;
pub(crate) use llvm::codegen_llvm_intrinsic_call;

use rustc_middle::ty::print::with_no_trimmed_paths;
use rustc_middle::ty::subst::SubstsRef;
use rustc_span::symbol::{kw, sym, Symbol};

use crate::prelude::*;
use cranelift_codegen::ir::AtomicRmwOp;

fn bug_on_incorrect_arg_count(intrinsic: impl std::fmt::Display) -> ! {
    bug!("wrong number of args for intrinsic {}", intrinsic);
}

fn report_atomic_type_validation_error<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    intrinsic: Symbol,
    span: Span,
    ty: Ty<'tcx>,
) {
    fx.tcx.sess.span_err(
        span,
        &format!(
            "`{}` intrinsic: expected basic integer or raw pointer type, found `{:?}`",
            intrinsic, ty
        ),
    );
    // Prevent verifier error
    fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
}

pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> Option<Type> {
    let (element, count) = match layout.abi {
        Abi::Vector { element, count } => (element, count),
        _ => unreachable!(),
    };

    match scalar_to_clif_type(tcx, element).by(u32::try_from(count).unwrap()) {
        // Cranelift currently only implements icmp for 128bit vectors.
        Some(vector_ty) if vector_ty.bits() == 128 => Some(vector_ty),
        _ => None,
    }
}
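// E.g. a 4 x u32 vector maps to Cranelift's `I32X4` (128 bits) and is returned
// as `Some(I32X4)`, while a 2 x u32 vector (the 64-bit `I32X2`) falls through
// to `None`.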

fn simd_for_each_lane<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    val: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Ty<'tcx>, Value) -> Value,
) {
    let layout = val.layout();

    let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
    let lane_layout = fx.layout_of(lane_ty);
    let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
    let ret_lane_layout = fx.layout_of(ret_lane_ty);
    assert_eq!(lane_count, ret_lane_count);

    for lane_idx in 0..lane_count {
        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);

        let res_lane = f(fx, lane_layout.ty, ret_lane_layout.ty, lane);
        let res_lane = CValue::by_val(res_lane, ret_lane_layout);

        ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
    }
}

fn simd_pair_for_each_lane_typed<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    x: CValue<'tcx>,
    y: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, CValue<'tcx>, CValue<'tcx>) -> CValue<'tcx>,
) {
    assert_eq!(x.layout(), y.layout());
    let layout = x.layout();

    let (lane_count, _lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
    let (ret_lane_count, _ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
    assert_eq!(lane_count, ret_lane_count);

    for lane_idx in 0..lane_count {
        let x_lane = x.value_lane(fx, lane_idx);
        let y_lane = y.value_lane(fx, lane_idx);

        let res_lane = f(fx, x_lane, y_lane);

        ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
    }
}

fn simd_pair_for_each_lane<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    x: CValue<'tcx>,
    y: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Ty<'tcx>, Value, Value) -> Value,
) {
    assert_eq!(x.layout(), y.layout());
    let layout = x.layout();

    let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
    let lane_layout = fx.layout_of(lane_ty);
    let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
    let ret_lane_layout = fx.layout_of(ret_lane_ty);
    assert_eq!(lane_count, ret_lane_count);

    for lane_idx in 0..lane_count {
        let x_lane = x.value_lane(fx, lane_idx).load_scalar(fx);
        let y_lane = y.value_lane(fx, lane_idx).load_scalar(fx);

        let res_lane = f(fx, lane_layout.ty, ret_lane_layout.ty, x_lane, y_lane);
        let res_lane = CValue::by_val(res_lane, ret_lane_layout);

        ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
    }
}
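// For example (a sketch; the real callers live in the `simd` module), a
// lane-wise integer `simd_add` can be lowered as
//
//     simd_pair_for_each_lane(fx, x, y, ret, &|fx, _lane_ty, _ret_lane_ty, x, y| {
//         fx.bcx.ins().iadd(x, y)
//     });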

fn simd_reduce<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    val: CValue<'tcx>,
    acc: Option<Value>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Value, Value) -> Value,
) {
    let (lane_count, lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
    let lane_layout = fx.layout_of(lane_ty);
    assert_eq!(lane_layout, ret.layout());

    let (mut res_val, start_lane) =
        if let Some(acc) = acc { (acc, 0) } else { (val.value_lane(fx, 0).load_scalar(fx), 1) };
    for lane_idx in start_lane..lane_count {
        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
        res_val = f(fx, lane_layout.ty, res_val, lane);
    }
    let res = CValue::by_val(res_val, lane_layout);
    ret.write_cvalue(fx, res);
}
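// E.g. an integer `simd_reduce_add` could fold all lanes with (again a sketch):
//
//     simd_reduce(fx, val, None, ret, &|fx, _ty, a, b| fx.bcx.ins().iadd(a, b));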

// FIXME move all uses to `simd_reduce`
fn simd_reduce_bool<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    val: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Value, Value) -> Value,
) {
    let (lane_count, _lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
    assert!(ret.layout().ty.is_bool());

    let res_val = val.value_lane(fx, 0).load_scalar(fx);
    let mut res_val = fx.bcx.ins().band_imm(res_val, 1); // mask to boolean
    for lane_idx in 1..lane_count {
        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
        let lane = fx.bcx.ins().band_imm(lane, 1); // mask to boolean
        res_val = f(fx, res_val, lane);
    }
    let res_val = if fx.bcx.func.dfg.value_type(res_val) != types::I8 {
        fx.bcx.ins().ireduce(types::I8, res_val)
    } else {
        res_val
    };
    let res = CValue::by_val(res_val, ret.layout());
    ret.write_cvalue(fx, res);
}

fn bool_to_zero_or_max_uint<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    ty: Ty<'tcx>,
    val: Value,
) -> Value {
    let ty = fx.clif_type(ty).unwrap();

    let int_ty = match ty {
        types::F32 => types::I32,
        types::F64 => types::I64,
        ty => ty,
    };

    let mut res = fx.bcx.ins().bmask(int_ty, val);

    if ty.is_float() {
        res = fx.bcx.ins().bitcast(ty, res);
    }

    res
}
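// `bmask` produces all ones for a non-zero input and all zeros otherwise, so a
// "true" input at type `I32` becomes `0xFFFF_FFFF`; for float types the mask is
// built in the matching integer type and then bitcast to the float type.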

pub(crate) fn codegen_intrinsic_call<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    instance: Instance<'tcx>,
    args: &[mir::Operand<'tcx>],
    destination: CPlace<'tcx>,
    target: Option<BasicBlock>,
    source_info: mir::SourceInfo,
) {
    let intrinsic = fx.tcx.item_name(instance.def_id());
    let substs = instance.substs;

    let target = if let Some(target) = target {
        target
    } else {
        // Insert non-returning intrinsics here
        match intrinsic {
            sym::abort => {
                fx.bcx.ins().trap(TrapCode::User(0));
            }
            sym::transmute => {
                crate::base::codegen_panic(fx, "Transmuting to uninhabited type.", source_info);
            }
            _ => unimplemented!("unsupported intrinsic {}", intrinsic),
        }
        return;
    };

    if intrinsic.as_str().starts_with("simd_") {
        self::simd::codegen_simd_intrinsic_call(
            fx,
            intrinsic,
            substs,
            args,
            destination,
            source_info.span,
        );
        let ret_block = fx.get_block(target);
        fx.bcx.ins().jump(ret_block, &[]);
    } else if codegen_float_intrinsic_call(fx, intrinsic, args, destination) {
        let ret_block = fx.get_block(target);
        fx.bcx.ins().jump(ret_block, &[]);
    } else {
        codegen_regular_intrinsic_call(
            fx,
            instance,
            intrinsic,
            substs,
            args,
            destination,
            Some(target),
            source_info,
        );
    }
}

fn codegen_float_intrinsic_call<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    intrinsic: Symbol,
    args: &[mir::Operand<'tcx>],
    ret: CPlace<'tcx>,
) -> bool {
    let (name, arg_count, ty) = match intrinsic {
        sym::expf32 => ("expf", 1, fx.tcx.types.f32),
        sym::expf64 => ("exp", 1, fx.tcx.types.f64),
        sym::exp2f32 => ("exp2f", 1, fx.tcx.types.f32),
        sym::exp2f64 => ("exp2", 1, fx.tcx.types.f64),
        sym::sqrtf32 => ("sqrtf", 1, fx.tcx.types.f32),
        sym::sqrtf64 => ("sqrt", 1, fx.tcx.types.f64),
        sym::powif32 => ("__powisf2", 2, fx.tcx.types.f32), // compiler-builtins
        sym::powif64 => ("__powidf2", 2, fx.tcx.types.f64), // compiler-builtins
        sym::powf32 => ("powf", 2, fx.tcx.types.f32),
        sym::powf64 => ("pow", 2, fx.tcx.types.f64),
        sym::logf32 => ("logf", 1, fx.tcx.types.f32),
        sym::logf64 => ("log", 1, fx.tcx.types.f64),
        sym::log2f32 => ("log2f", 1, fx.tcx.types.f32),
        sym::log2f64 => ("log2", 1, fx.tcx.types.f64),
        sym::log10f32 => ("log10f", 1, fx.tcx.types.f32),
        sym::log10f64 => ("log10", 1, fx.tcx.types.f64),
        sym::fabsf32 => ("fabsf", 1, fx.tcx.types.f32),
        sym::fabsf64 => ("fabs", 1, fx.tcx.types.f64),
        sym::fmaf32 => ("fmaf", 3, fx.tcx.types.f32),
        sym::fmaf64 => ("fma", 3, fx.tcx.types.f64),
        sym::copysignf32 => ("copysignf", 2, fx.tcx.types.f32),
        sym::copysignf64 => ("copysign", 2, fx.tcx.types.f64),
        sym::floorf32 => ("floorf", 1, fx.tcx.types.f32),
        sym::floorf64 => ("floor", 1, fx.tcx.types.f64),
        sym::ceilf32 => ("ceilf", 1, fx.tcx.types.f32),
        sym::ceilf64 => ("ceil", 1, fx.tcx.types.f64),
        sym::truncf32 => ("truncf", 1, fx.tcx.types.f32),
        sym::truncf64 => ("trunc", 1, fx.tcx.types.f64),
        sym::roundf32 => ("roundf", 1, fx.tcx.types.f32),
        sym::roundf64 => ("round", 1, fx.tcx.types.f64),
        sym::sinf32 => ("sinf", 1, fx.tcx.types.f32),
        sym::sinf64 => ("sin", 1, fx.tcx.types.f64),
        sym::cosf32 => ("cosf", 1, fx.tcx.types.f32),
        sym::cosf64 => ("cos", 1, fx.tcx.types.f64),
        _ => return false,
    };

    if args.len() != arg_count {
        bug!("wrong number of args for intrinsic {:?}", intrinsic);
    }

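    // `a`, `b` and `c` are declared ahead of the match so that whichever array
    // is initialized lives long enough to be borrowed as a `&[CValue]` slice.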
    let (a, b, c);
    let args = match args {
        [x] => {
            a = [codegen_operand(fx, x)];
            &a as &[_]
        }
        [x, y] => {
            b = [codegen_operand(fx, x), codegen_operand(fx, y)];
            &b
        }
        [x, y, z] => {
            c = [codegen_operand(fx, x), codegen_operand(fx, y), codegen_operand(fx, z)];
            &c
        }
        _ => unreachable!(),
    };

    let layout = fx.layout_of(ty);
    let res = match intrinsic {
        sym::fmaf32 | sym::fmaf64 => {
            let a = args[0].load_scalar(fx);
            let b = args[1].load_scalar(fx);
            let c = args[2].load_scalar(fx);
            CValue::by_val(fx.bcx.ins().fma(a, b, c), layout)
        }
        sym::copysignf32 | sym::copysignf64 => {
            let a = args[0].load_scalar(fx);
            let b = args[1].load_scalar(fx);
            CValue::by_val(fx.bcx.ins().fcopysign(a, b), layout)
        }
        sym::fabsf32
        | sym::fabsf64
        | sym::floorf32
        | sym::floorf64
        | sym::ceilf32
        | sym::ceilf64
        | sym::truncf32
        | sym::truncf64 => {
            let a = args[0].load_scalar(fx);

            let val = match intrinsic {
                sym::fabsf32 | sym::fabsf64 => fx.bcx.ins().fabs(a),
                sym::floorf32 | sym::floorf64 => fx.bcx.ins().floor(a),
                sym::ceilf32 | sym::ceilf64 => fx.bcx.ins().ceil(a),
                sym::truncf32 | sym::truncf64 => fx.bcx.ins().trunc(a),
                _ => unreachable!(),
            };

            CValue::by_val(val, layout)
        }
        // These intrinsics aren't supported natively by Cranelift.
        // Lower them to a libcall.
        _ => fx.easy_call(name, &args, ty),
    };

    ret.write_cvalue(fx, res);

    true
}
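// E.g. `sinf64` has no Cranelift instruction, so it is lowered to a call to the
// C library's `sin` via `easy_call`, while `fma`, `copysign`, `fabs`, `floor`,
// `ceil` and `trunc` map directly onto native Cranelift instructions.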

fn codegen_regular_intrinsic_call<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    instance: Instance<'tcx>,
    intrinsic: Symbol,
    substs: SubstsRef<'tcx>,
    args: &[mir::Operand<'tcx>],
    ret: CPlace<'tcx>,
    destination: Option<BasicBlock>,
    source_info: mir::SourceInfo,
) {
    let usize_layout = fx.layout_of(fx.tcx.types.usize);

    match intrinsic {
        sym::likely | sym::unlikely => {
            intrinsic_args!(fx, args => (a); intrinsic);

            ret.write_cvalue(fx, a);
        }
        sym::breakpoint => {
            intrinsic_args!(fx, args => (); intrinsic);

            fx.bcx.ins().debugtrap();
        }
        sym::copy | sym::copy_nonoverlapping => {
            intrinsic_args!(fx, args => (src, dst, count); intrinsic);
            let src = src.load_scalar(fx);
            let dst = dst.load_scalar(fx);
            let count = count.load_scalar(fx);

            let elem_ty = substs.type_at(0);
            let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
            assert_eq!(args.len(), 3);
            let byte_amount =
                if elem_size != 1 { fx.bcx.ins().imul_imm(count, elem_size as i64) } else { count };

            if intrinsic == sym::copy_nonoverlapping {
                // FIXME emit_small_memcpy
                fx.bcx.call_memcpy(fx.target_config, dst, src, byte_amount);
            } else {
                // FIXME emit_small_memmove
                fx.bcx.call_memmove(fx.target_config, dst, src, byte_amount);
            }
        }
        sym::volatile_copy_memory | sym::volatile_copy_nonoverlapping_memory => {
            // NOTE: the volatile variants have src and dst swapped
            intrinsic_args!(fx, args => (dst, src, count); intrinsic);
            let dst = dst.load_scalar(fx);
            let src = src.load_scalar(fx);
            let count = count.load_scalar(fx);

            let elem_ty = substs.type_at(0);
            let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
            assert_eq!(args.len(), 3);
            let byte_amount =
                if elem_size != 1 { fx.bcx.ins().imul_imm(count, elem_size as i64) } else { count };

            // FIXME make the copy actually volatile when using emit_small_mem{cpy,move}
            if intrinsic == sym::volatile_copy_nonoverlapping_memory {
                // FIXME emit_small_memcpy
                fx.bcx.call_memcpy(fx.target_config, dst, src, byte_amount);
            } else {
                // FIXME emit_small_memmove
                fx.bcx.call_memmove(fx.target_config, dst, src, byte_amount);
            }
        }
        sym::size_of_val => {
            intrinsic_args!(fx, args => (ptr); intrinsic);

            let layout = fx.layout_of(substs.type_at(0));
            // Note: Can't use is_unsized here as truly unsized types need to take the fixed size
            // branch
            let size = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
                let (_ptr, info) = ptr.load_scalar_pair(fx);
                let (size, _align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
                size
            } else {
                fx.bcx.ins().iconst(fx.pointer_type, layout.size.bytes() as i64)
            };
            ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
        }
        sym::min_align_of_val => {
            intrinsic_args!(fx, args => (ptr); intrinsic);

            let layout = fx.layout_of(substs.type_at(0));
            // Note: Can't use is_unsized here as truly unsized types need to take the fixed size
            // branch
            let align = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
                let (_ptr, info) = ptr.load_scalar_pair(fx);
                let (_size, align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
                align
            } else {
                fx.bcx.ins().iconst(fx.pointer_type, layout.align.abi.bytes() as i64)
            };
            ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
        }

        sym::vtable_size => {
            intrinsic_args!(fx, args => (vtable); intrinsic);
            let vtable = vtable.load_scalar(fx);

            let size = crate::vtable::size_of_obj(fx, vtable);
            ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
        }

        sym::vtable_align => {
            intrinsic_args!(fx, args => (vtable); intrinsic);
            let vtable = vtable.load_scalar(fx);

            let align = crate::vtable::min_align_of_obj(fx, vtable);
            ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
        }

        sym::unchecked_add
        | sym::unchecked_sub
        | sym::unchecked_mul
        | sym::unchecked_div
        | sym::exact_div
        | sym::unchecked_rem
        | sym::unchecked_shl
        | sym::unchecked_shr => {
            intrinsic_args!(fx, args => (x, y); intrinsic);

            // FIXME trap on overflow
            let bin_op = match intrinsic {
                sym::unchecked_add => BinOp::Add,
                sym::unchecked_sub => BinOp::Sub,
                sym::unchecked_mul => BinOp::Mul,
                sym::unchecked_div | sym::exact_div => BinOp::Div,
                sym::unchecked_rem => BinOp::Rem,
                sym::unchecked_shl => BinOp::Shl,
                sym::unchecked_shr => BinOp::Shr,
                _ => unreachable!(),
            };
            let res = crate::num::codegen_int_binop(fx, bin_op, x, y);
            ret.write_cvalue(fx, res);
        }
        sym::add_with_overflow | sym::sub_with_overflow | sym::mul_with_overflow => {
            intrinsic_args!(fx, args => (x, y); intrinsic);

            assert_eq!(x.layout().ty, y.layout().ty);
            let bin_op = match intrinsic {
                sym::add_with_overflow => BinOp::Add,
                sym::sub_with_overflow => BinOp::Sub,
                sym::mul_with_overflow => BinOp::Mul,
                _ => unreachable!(),
            };

            let res = crate::num::codegen_checked_int_binop(fx, bin_op, x, y);
            ret.write_cvalue(fx, res);
        }
        sym::saturating_add | sym::saturating_sub => {
            intrinsic_args!(fx, args => (lhs, rhs); intrinsic);

            assert_eq!(lhs.layout().ty, rhs.layout().ty);
            let bin_op = match intrinsic {
                sym::saturating_add => BinOp::Add,
                sym::saturating_sub => BinOp::Sub,
                _ => unreachable!(),
            };

            let res = crate::num::codegen_saturating_int_binop(fx, bin_op, lhs, rhs);
            ret.write_cvalue(fx, res);
        }
        sym::rotate_left => {
            intrinsic_args!(fx, args => (x, y); intrinsic);
            let y = y.load_scalar(fx);

            let layout = x.layout();
            let x = x.load_scalar(fx);
            let res = fx.bcx.ins().rotl(x, y);
            ret.write_cvalue(fx, CValue::by_val(res, layout));
        }
        sym::rotate_right => {
            intrinsic_args!(fx, args => (x, y); intrinsic);
            let y = y.load_scalar(fx);

            let layout = x.layout();
            let x = x.load_scalar(fx);
            let res = fx.bcx.ins().rotr(x, y);
            ret.write_cvalue(fx, CValue::by_val(res, layout));
        }

        // The only difference between `offset` and `arith_offset` is their UB rules.
        // Because Cranelift has no notion of UB, both are codegen'ed the same way.
        sym::offset | sym::arith_offset => {
            intrinsic_args!(fx, args => (base, offset); intrinsic);
            let offset = offset.load_scalar(fx);

            let pointee_ty = base.layout().ty.builtin_deref(true).unwrap().ty;
            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
            let ptr_diff = if pointee_size != 1 {
                fx.bcx.ins().imul_imm(offset, pointee_size as i64)
            } else {
                offset
            };
            let base_val = base.load_scalar(fx);
            let res = fx.bcx.ins().iadd(base_val, ptr_diff);
            ret.write_cvalue(fx, CValue::by_val(res, base.layout()));
        }

        sym::ptr_mask => {
            intrinsic_args!(fx, args => (ptr, mask); intrinsic);
            let ptr = ptr.load_scalar(fx);
            let mask = mask.load_scalar(fx);
            // The masked pointer must actually be written back; the original
            // code computed the `band` and discarded the result.
            let res = fx.bcx.ins().band(ptr, mask);
            ret.write_cvalue(fx, CValue::by_val(res, ret.layout()));
        }

        sym::transmute => {
            intrinsic_args!(fx, args => (from); intrinsic);

            ret.write_cvalue_transmute(fx, from);
        }
        sym::write_bytes | sym::volatile_set_memory => {
            intrinsic_args!(fx, args => (dst, val, count); intrinsic);
            let val = val.load_scalar(fx);
            let count = count.load_scalar(fx);

            let pointee_ty = dst.layout().ty.builtin_deref(true).unwrap().ty;
            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
            let count = if pointee_size != 1 {
                fx.bcx.ins().imul_imm(count, pointee_size as i64)
            } else {
                count
            };
            let dst_ptr = dst.load_scalar(fx);
            // FIXME make the memset actually volatile when switching to emit_small_memset
            // FIXME use emit_small_memset
            fx.bcx.call_memset(fx.target_config, dst_ptr, val, count);
        }
        sym::ctlz | sym::ctlz_nonzero => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            // FIXME trap on `ctlz_nonzero` with zero arg.
            let res = fx.bcx.ins().clz(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::cttz | sym::cttz_nonzero => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            // FIXME trap on `cttz_nonzero` with zero arg.
            let res = fx.bcx.ins().ctz(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::ctpop => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            let res = fx.bcx.ins().popcnt(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::bitreverse => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            let res = fx.bcx.ins().bitrev(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::bswap => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            let res = if fx.bcx.func.dfg.value_type(val) == types::I8 {
                val
            } else {
                fx.bcx.ins().bswap(val)
            };
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::assert_inhabited | sym::assert_zero_valid | sym::assert_mem_uninitialized_valid => {
            intrinsic_args!(fx, args => (); intrinsic);

            let layout = fx.layout_of(substs.type_at(0));
            if layout.abi.is_uninhabited() {
                with_no_trimmed_paths!({
                    crate::base::codegen_panic(
                        fx,
                        &format!("attempted to instantiate uninhabited type `{}`", layout.ty),
                        source_info,
                    )
                });
                return;
            }

            if intrinsic == sym::assert_zero_valid && !fx.tcx.permits_zero_init(layout) {
                with_no_trimmed_paths!({
                    crate::base::codegen_panic(
                        fx,
                        &format!(
                            "attempted to zero-initialize type `{}`, which is invalid",
                            layout.ty
                        ),
                        source_info,
                    );
                });
                return;
            }

            if intrinsic == sym::assert_mem_uninitialized_valid
                && !fx.tcx.permits_uninit_init(layout)
            {
                with_no_trimmed_paths!({
                    crate::base::codegen_panic(
                        fx,
                        &format!(
                            "attempted to leave type `{}` uninitialized, which is invalid",
                            layout.ty
                        ),
                        source_info,
                    )
                });
                return;
            }
        }

        sym::volatile_load | sym::unaligned_volatile_load => {
            intrinsic_args!(fx, args => (ptr); intrinsic);

            // Cranelift treats loads as volatile by default
            // FIXME correctly handle unaligned_volatile_load
            let inner_layout = fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
            let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
            ret.write_cvalue(fx, val);
        }
        sym::volatile_store | sym::unaligned_volatile_store => {
            intrinsic_args!(fx, args => (ptr, val); intrinsic);
            let ptr = ptr.load_scalar(fx);

            // Cranelift treats stores as volatile by default
            // FIXME correctly handle unaligned_volatile_store
            let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
            dest.write_cvalue(fx, val);
        }

        sym::pref_align_of
        | sym::needs_drop
        | sym::type_id
        | sym::type_name
        | sym::variant_count => {
            intrinsic_args!(fx, args => (); intrinsic);

            let const_val =
                fx.tcx.const_eval_instance(ParamEnv::reveal_all(), instance, None).unwrap();
            let val = crate::constant::codegen_const_value(fx, const_val, ret.layout().ty);
            ret.write_cvalue(fx, val);
        }

        sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
            intrinsic_args!(fx, args => (ptr, base); intrinsic);
            let ptr = ptr.load_scalar(fx);
            let base = base.load_scalar(fx);
            let ty = substs.type_at(0);

            let pointee_size: u64 = fx.layout_of(ty).size.bytes();
            let diff_bytes = fx.bcx.ins().isub(ptr, base);
            // FIXME this can be an exact division.
            let val = if intrinsic == sym::ptr_offset_from_unsigned {
                let usize_layout = fx.layout_of(fx.tcx.types.usize);
                // Because diff_bytes ULE isize::MAX, this would be fine as signed,
                // but unsigned is slightly easier to codegen, so might as well.
                CValue::by_val(fx.bcx.ins().udiv_imm(diff_bytes, pointee_size as i64), usize_layout)
            } else {
                let isize_layout = fx.layout_of(fx.tcx.types.isize);
                CValue::by_val(fx.bcx.ins().sdiv_imm(diff_bytes, pointee_size as i64), isize_layout)
            };
            ret.write_cvalue(fx, val);
        }

        sym::ptr_guaranteed_cmp => {
            intrinsic_args!(fx, args => (a, b); intrinsic);

            let val = crate::num::codegen_ptr_binop(fx, BinOp::Eq, a, b).load_scalar(fx);
            ret.write_cvalue(fx, CValue::by_val(val, fx.layout_of(fx.tcx.types.u8)));
        }

        sym::caller_location => {
            intrinsic_args!(fx, args => (); intrinsic);

            let caller_location = fx.get_caller_location(source_info);
            ret.write_cvalue(fx, caller_location);
        }

        _ if intrinsic.as_str().starts_with("atomic_fence") => {
            intrinsic_args!(fx, args => (); intrinsic);

            fx.bcx.ins().fence();
        }
        _ if intrinsic.as_str().starts_with("atomic_singlethreadfence") => {
            intrinsic_args!(fx, args => (); intrinsic);

            // FIXME use a compiler fence once Cranelift supports it
            fx.bcx.ins().fence();
        }
        _ if intrinsic.as_str().starts_with("atomic_load") => {
            intrinsic_args!(fx, args => (ptr); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let ty = substs.type_at(0);
            match ty.kind() {
                ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
                    // FIXME implement 128bit atomics
                    if fx.tcx.is_compiler_builtins(LOCAL_CRATE) {
                        // special case for compiler-builtins to avoid having to patch it
                        crate::trap::trap_unimplemented(fx, "128bit atomics not yet supported");
                        return;
                    } else {
                        fx.tcx
                            .sess
                            .span_fatal(source_info.span, "128bit atomics not yet supported");
                    }
                }
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, ty);
                    return;
                }
            }
            let clif_ty = fx.clif_type(ty).unwrap();

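            // `MemFlags::trusted()` marks the access as aligned and non-trapping,
            // matching what the Rust atomic intrinsics guarantee about `ptr`.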
            let val = fx.bcx.ins().atomic_load(clif_ty, MemFlags::trusted(), ptr);

            let val = CValue::by_val(val, fx.layout_of(ty));
            ret.write_cvalue(fx, val);
        }
        _ if intrinsic.as_str().starts_with("atomic_store") => {
            intrinsic_args!(fx, args => (ptr, val); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let ty = substs.type_at(0);
            match ty.kind() {
                ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
                    // FIXME implement 128bit atomics
                    if fx.tcx.is_compiler_builtins(LOCAL_CRATE) {
                        // special case for compiler-builtins to avoid having to patch it
                        crate::trap::trap_unimplemented(fx, "128bit atomics not yet supported");
                        return;
                    } else {
                        fx.tcx
                            .sess
                            .span_fatal(source_info.span, "128bit atomics not yet supported");
                    }
                }
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, ty);
                    return;
                }
            }

            let val = val.load_scalar(fx);

            fx.bcx.ins().atomic_store(MemFlags::trusted(), val, ptr);
        }
        _ if intrinsic.as_str().starts_with("atomic_xchg") => {
            intrinsic_args!(fx, args => (ptr, new); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = new.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let new = new.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xchg, ptr, new);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_cxchg") => {
            // both atomic_cxchg_* and atomic_cxchgweak_*
            intrinsic_args!(fx, args => (ptr, test_old, new); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = new.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }

            let test_old = test_old.load_scalar(fx);
            let new = new.load_scalar(fx);

            let old = fx.bcx.ins().atomic_cas(MemFlags::trusted(), ptr, test_old, new);
            let is_eq = fx.bcx.ins().icmp(IntCC::Equal, old, test_old);

            let ret_val = CValue::by_val_pair(old, is_eq, ret.layout());
            ret.write_cvalue(fx, ret_val)
        }

        _ if intrinsic.as_str().starts_with("atomic_xadd") => {
            intrinsic_args!(fx, args => (ptr, amount); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = amount.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let amount = amount.load_scalar(fx);

            let old =
                fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Add, ptr, amount);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_xsub") => {
            intrinsic_args!(fx, args => (ptr, amount); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = amount.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let amount = amount.load_scalar(fx);

            let old =
                fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Sub, ptr, amount);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_and") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::And, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_or") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Or, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_xor") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xor, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_nand") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Nand, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_max") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smax, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_umax") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umax, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_min") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smin, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_umin") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umin, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }

        sym::minnumf32 => {
            intrinsic_args!(fx, args => (a, b); intrinsic);
            let a = a.load_scalar(fx);
            let b = b.load_scalar(fx);

            let val = crate::num::codegen_float_min(fx, a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
            ret.write_cvalue(fx, val);
        }
        sym::minnumf64 => {
            intrinsic_args!(fx, args => (a, b); intrinsic);
            let a = a.load_scalar(fx);
            let b = b.load_scalar(fx);

            let val = crate::num::codegen_float_min(fx, a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
            ret.write_cvalue(fx, val);
        }
        sym::maxnumf32 => {
            intrinsic_args!(fx, args => (a, b); intrinsic);
            let a = a.load_scalar(fx);
            let b = b.load_scalar(fx);

            let val = crate::num::codegen_float_max(fx, a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
            ret.write_cvalue(fx, val);
        }
        sym::maxnumf64 => {
            intrinsic_args!(fx, args => (a, b); intrinsic);
            let a = a.load_scalar(fx);
            let b = b.load_scalar(fx);

            let val = crate::num::codegen_float_max(fx, a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
            ret.write_cvalue(fx, val);
        }

        kw::Try => {
            intrinsic_args!(fx, args => (f, data, catch_fn); intrinsic);
            let f = f.load_scalar(fx);
            let data = data.load_scalar(fx);
            let _catch_fn = catch_fn.load_scalar(fx);

            // FIXME once unwinding is supported, change this to actually catch panics
            let f_sig = fx.bcx.func.import_signature(Signature {
                call_conv: fx.target_config.default_call_conv,
                params: vec![AbiParam::new(pointer_ty(fx.tcx))],
                returns: vec![],
            });

            fx.bcx.ins().call_indirect(f_sig, f, &[data]);

            let layout = ret.layout();
            let ret_val = CValue::const_val(fx, layout, ty::ScalarInt::null(layout.size));
            ret.write_cvalue(fx, ret_val);
        }

        sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
            intrinsic_args!(fx, args => (x, y); intrinsic);

            let res = crate::num::codegen_float_binop(
                fx,
                match intrinsic {
                    sym::fadd_fast => BinOp::Add,
                    sym::fsub_fast => BinOp::Sub,
                    sym::fmul_fast => BinOp::Mul,
                    sym::fdiv_fast => BinOp::Div,
                    sym::frem_fast => BinOp::Rem,
                    _ => unreachable!(),
                },
                x,
                y,
            );
            ret.write_cvalue(fx, res);
        }
        sym::float_to_int_unchecked => {
            intrinsic_args!(fx, args => (f); intrinsic);
            let f = f.load_scalar(fx);

            let res = crate::cast::clif_int_or_float_cast(
                fx,
                f,
                false,
                fx.clif_type(ret.layout().ty).unwrap(),
                type_sign(ret.layout().ty),
            );
            ret.write_cvalue(fx, CValue::by_val(res, ret.layout()));
        }

        sym::raw_eq => {
            intrinsic_args!(fx, args => (lhs_ref, rhs_ref); intrinsic);
            let lhs_ref = lhs_ref.load_scalar(fx);
            let rhs_ref = rhs_ref.load_scalar(fx);

            let size = fx.layout_of(substs.type_at(0)).layout.size();
            // FIXME add and use emit_small_memcmp
            let is_eq_value = if size == Size::ZERO {
                // No bytes means they're trivially equal
                fx.bcx.ins().iconst(types::I8, 1)
            } else if let Some(clty) = size.bits().try_into().ok().and_then(Type::int) {
                // Can't use `trusted` for these loads; they could be unaligned.
                let mut flags = MemFlags::new();
                flags.set_notrap();
                let lhs_val = fx.bcx.ins().load(clty, flags, lhs_ref, 0);
                let rhs_val = fx.bcx.ins().load(clty, flags, rhs_ref, 0);
                fx.bcx.ins().icmp(IntCC::Equal, lhs_val, rhs_val)
            } else {
                // Just call `memcmp` (like slices do in core) when the
                // size is too large or it's not a power-of-two.
                let signed_bytes = i64::try_from(size.bytes()).unwrap();
                let bytes_val = fx.bcx.ins().iconst(fx.pointer_type, signed_bytes);
                let params = vec![AbiParam::new(fx.pointer_type); 3];
                let returns = vec![AbiParam::new(types::I32)];
                let args = &[lhs_ref, rhs_ref, bytes_val];
                let cmp = fx.lib_call("memcmp", params, returns, args)[0];
                fx.bcx.ins().icmp_imm(IntCC::Equal, cmp, 0)
            };
            ret.write_cvalue(fx, CValue::by_val(is_eq_value, ret.layout()));
        }

        sym::const_allocate => {
            intrinsic_args!(fx, args => (_size, _align); intrinsic);

            // returns a null pointer at runtime.
            let null = fx.bcx.ins().iconst(fx.pointer_type, 0);
            ret.write_cvalue(fx, CValue::by_val(null, ret.layout()));
        }

        sym::const_deallocate => {
            intrinsic_args!(fx, args => (_ptr, _size, _align); intrinsic);
            // nop at runtime.
        }

        sym::black_box => {
            intrinsic_args!(fx, args => (a); intrinsic);

            // FIXME implement black_box semantics
            ret.write_cvalue(fx, a);
        }

        // FIXME implement variadics in cranelift
        sym::va_copy | sym::va_arg | sym::va_end => {
            fx.tcx.sess.span_fatal(
                source_info.span,
                "Defining variadic functions is not yet supported by Cranelift",
            );
        }

        _ => {
            fx.tcx
                .sess
                .span_fatal(source_info.span, &format!("unsupported intrinsic {}", intrinsic));
        }
    }

    let ret_block = fx.get_block(destination.unwrap());
    fx.bcx.ins().jump(ret_block, &[]);
}