// compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
//! Codegen of intrinsics. This includes `extern "rust-intrinsic"`, `extern "platform-intrinsic"`
//! and LLVM intrinsics that have symbol names starting with `llvm.`.

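/// Binds each expected argument of an intrinsic to a named `CValue`, bugging
/// out via `bug_on_incorrect_arg_count` when the argument count doesn't match.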
macro_rules! intrinsic_args {
    ($fx:expr, $args:expr => ($($arg:tt),*); $intrinsic:expr) => {
        #[allow(unused_parens)]
        let ($($arg),*) = if let [$($arg),*] = $args {
            ($(codegen_operand($fx, $arg)),*)
        } else {
            $crate::intrinsics::bug_on_incorrect_arg_count($intrinsic);
        };
    }
}

mod cpuid;
mod llvm;
mod llvm_aarch64;
mod llvm_x86;
mod simd;

pub(crate) use cpuid::codegen_cpuid_call;
pub(crate) use llvm::codegen_llvm_intrinsic_call;

use rustc_middle::ty::print::with_no_trimmed_paths;
use rustc_middle::ty::subst::SubstsRef;
use rustc_span::symbol::{kw, sym, Symbol};

use crate::prelude::*;
use cranelift_codegen::ir::AtomicRmwOp;

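/// Reports an internal compiler error for an intrinsic invoked with the wrong
/// number of arguments.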
fn bug_on_incorrect_arg_count(intrinsic: impl std::fmt::Display) -> ! {
    bug!("wrong number of args for intrinsic {}", intrinsic);
}

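/// Emits an error for an atomic intrinsic applied to an unsupported type, then
/// traps so the Cranelift verifier doesn't reject the unfinished block.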
fn report_atomic_type_validation_error<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    intrinsic: Symbol,
    span: Span,
    ty: Ty<'tcx>,
) {
    fx.tcx.sess.span_err(
        span,
        &format!(
            "`{}` intrinsic: expected basic integer or raw pointer type, found `{:?}`",
            intrinsic, ty
        ),
    );
    // Prevent verifier error
    fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
}

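/// Returns the native Cranelift vector type for a SIMD layout, or `None` when
/// the vector has to be lowered one lane at a time instead.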
pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> Option<Type> {
    let (element, count) = match layout.abi {
        Abi::Vector { element, count } => (element, count),
        _ => unreachable!(),
    };

    match scalar_to_clif_type(tcx, element).by(u32::try_from(count).unwrap()) {
        // Cranelift currently only implements icmp for 128bit vectors.
        Some(vector_ty) if vector_ty.bits() == 128 => Some(vector_ty),
        _ => None,
    }
}

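/// Applies `f` to each lane of the SIMD value `val` and writes the results to
/// the corresponding lanes of `ret`.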
fn simd_for_each_lane<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    val: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Ty<'tcx>, Value) -> Value,
) {
    let layout = val.layout();

    let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
    let lane_layout = fx.layout_of(lane_ty);
    let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
    let ret_lane_layout = fx.layout_of(ret_lane_ty);
    assert_eq!(lane_count, ret_lane_count);

    for lane_idx in 0..lane_count {
        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);

        let res_lane = f(fx, lane_layout.ty, ret_lane_layout.ty, lane);
        let res_lane = CValue::by_val(res_lane, ret_lane_layout);

        ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
    }
}

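/// Applies `f` lane-wise to the SIMD pair `x`/`y`, passing each lane as a
/// typed `CValue` rather than a raw Cranelift value.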
fn simd_pair_for_each_lane_typed<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    x: CValue<'tcx>,
    y: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, CValue<'tcx>, CValue<'tcx>) -> CValue<'tcx>,
) {
    assert_eq!(x.layout(), y.layout());
    let layout = x.layout();

    let (lane_count, _lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
    let (ret_lane_count, _ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
    assert_eq!(lane_count, ret_lane_count);

    for lane_idx in 0..lane_count {
        let x_lane = x.value_lane(fx, lane_idx);
        let y_lane = y.value_lane(fx, lane_idx);

        let res_lane = f(fx, x_lane, y_lane);

        ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
    }
}

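/// Applies `f` lane-wise to the SIMD pair `x`/`y`, passing the loaded scalar
/// of each lane together with the input and output lane types.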
fn simd_pair_for_each_lane<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    x: CValue<'tcx>,
    y: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Ty<'tcx>, Value, Value) -> Value,
) {
    assert_eq!(x.layout(), y.layout());
    let layout = x.layout();

    let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
    let lane_layout = fx.layout_of(lane_ty);
    let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
    let ret_lane_layout = fx.layout_of(ret_lane_ty);
    assert_eq!(lane_count, ret_lane_count);

    for lane_idx in 0..lane_count {
        let x_lane = x.value_lane(fx, lane_idx).load_scalar(fx);
        let y_lane = y.value_lane(fx, lane_idx).load_scalar(fx);

        let res_lane = f(fx, lane_layout.ty, ret_lane_layout.ty, x_lane, y_lane);
        let res_lane = CValue::by_val(res_lane, ret_lane_layout);

        ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
    }
}

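/// Folds all lanes of `val` into a single scalar using `f`, starting from
/// `acc` if given and from the first lane otherwise, and writes the result to
/// `ret`.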
fn simd_reduce<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    val: CValue<'tcx>,
    acc: Option<Value>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Value, Value) -> Value,
) {
    let (lane_count, lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
    let lane_layout = fx.layout_of(lane_ty);
    assert_eq!(lane_layout, ret.layout());

    let (mut res_val, start_lane) =
        if let Some(acc) = acc { (acc, 0) } else { (val.value_lane(fx, 0).load_scalar(fx), 1) };
    for lane_idx in start_lane..lane_count {
        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
        res_val = f(fx, lane_layout.ty, res_val, lane);
    }
    let res = CValue::by_val(res_val, lane_layout);
    ret.write_cvalue(fx, res);
}

// FIXME move all uses to `simd_reduce`
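/// Boolean reduction: masks each lane of `val` to its low bit, folds the lanes
/// with `f`, and writes the combined result to `ret` as a `bool`.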
fn simd_reduce_bool<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    val: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Value, Value) -> Value,
) {
    let (lane_count, _lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
    assert!(ret.layout().ty.is_bool());

    let res_val = val.value_lane(fx, 0).load_scalar(fx);
    let mut res_val = fx.bcx.ins().band_imm(res_val, 1); // mask to boolean
    for lane_idx in 1..lane_count {
        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
        let lane = fx.bcx.ins().band_imm(lane, 1); // mask to boolean
        res_val = f(fx, res_val, lane);
    }
    let res_val = if fx.bcx.func.dfg.value_type(res_val) != types::I8 {
        fx.bcx.ins().ireduce(types::I8, res_val)
    } else {
        res_val
    };
    let res = CValue::by_val(res_val, ret.layout());
    ret.write_cvalue(fx, res);
}

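/// Expands a boolean to all-zeros or all-ones of the Cranelift type matching
/// `ty`, bitcasting the result back for float types.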
fn bool_to_zero_or_max_uint<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    ty: Ty<'tcx>,
    val: Value,
) -> Value {
    let ty = fx.clif_type(ty).unwrap();

    let int_ty = match ty {
        types::F32 => types::I32,
        types::F64 => types::I64,
        ty => ty,
    };

    let mut res = fx.bcx.ins().bmask(int_ty, val);

    if ty.is_float() {
        res = fx.bcx.ins().bitcast(ty, res);
    }

    res
}

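/// Main entry point for intrinsic codegen: handles non-returning intrinsics
/// inline and dispatches everything else to the SIMD, float, or regular
/// intrinsic handlers.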
pub(crate) fn codegen_intrinsic_call<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    instance: Instance<'tcx>,
    args: &[mir::Operand<'tcx>],
    destination: CPlace<'tcx>,
    target: Option<BasicBlock>,
    source_info: mir::SourceInfo,
) {
    let intrinsic = fx.tcx.item_name(instance.def_id());
    let substs = instance.substs;

    let target = if let Some(target) = target {
        target
    } else {
        // Insert non-returning intrinsics here
        match intrinsic {
            sym::abort => {
                fx.bcx.ins().trap(TrapCode::User(0));
            }
            sym::transmute => {
                crate::base::codegen_panic(fx, "Transmuting to uninhabited type.", source_info);
            }
            _ => unimplemented!("unsupported intrinsic {}", intrinsic),
        }
        return;
    };

    if intrinsic.as_str().starts_with("simd_") {
        self::simd::codegen_simd_intrinsic_call(
            fx,
            intrinsic,
            substs,
            args,
            destination,
            source_info.span,
        );
        let ret_block = fx.get_block(target);
        fx.bcx.ins().jump(ret_block, &[]);
    } else if codegen_float_intrinsic_call(fx, intrinsic, args, destination) {
        let ret_block = fx.get_block(target);
        fx.bcx.ins().jump(ret_block, &[]);
    } else {
        codegen_regular_intrinsic_call(
            fx,
            instance,
            intrinsic,
            substs,
            args,
            destination,
            Some(target),
            source_info,
        );
    }
}

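/// Codegens float intrinsics that map to a single Cranelift instruction or a
/// libm/compiler-builtins libcall. Returns `false` if `intrinsic` is not one
/// of them, so the caller can try the remaining handlers.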
fn codegen_float_intrinsic_call<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    intrinsic: Symbol,
    args: &[mir::Operand<'tcx>],
    ret: CPlace<'tcx>,
) -> bool {
    let (name, arg_count, ty) = match intrinsic {
        sym::expf32 => ("expf", 1, fx.tcx.types.f32),
        sym::expf64 => ("exp", 1, fx.tcx.types.f64),
        sym::exp2f32 => ("exp2f", 1, fx.tcx.types.f32),
        sym::exp2f64 => ("exp2", 1, fx.tcx.types.f64),
        sym::sqrtf32 => ("sqrtf", 1, fx.tcx.types.f32),
        sym::sqrtf64 => ("sqrt", 1, fx.tcx.types.f64),
        sym::powif32 => ("__powisf2", 2, fx.tcx.types.f32), // compiler-builtins
        sym::powif64 => ("__powidf2", 2, fx.tcx.types.f64), // compiler-builtins
        sym::powf32 => ("powf", 2, fx.tcx.types.f32),
        sym::powf64 => ("pow", 2, fx.tcx.types.f64),
        sym::logf32 => ("logf", 1, fx.tcx.types.f32),
        sym::logf64 => ("log", 1, fx.tcx.types.f64),
        sym::log2f32 => ("log2f", 1, fx.tcx.types.f32),
        sym::log2f64 => ("log2", 1, fx.tcx.types.f64),
        sym::log10f32 => ("log10f", 1, fx.tcx.types.f32),
        sym::log10f64 => ("log10", 1, fx.tcx.types.f64),
        sym::fabsf32 => ("fabsf", 1, fx.tcx.types.f32),
        sym::fabsf64 => ("fabs", 1, fx.tcx.types.f64),
        sym::fmaf32 => ("fmaf", 3, fx.tcx.types.f32),
        sym::fmaf64 => ("fma", 3, fx.tcx.types.f64),
        sym::copysignf32 => ("copysignf", 2, fx.tcx.types.f32),
        sym::copysignf64 => ("copysign", 2, fx.tcx.types.f64),
        sym::floorf32 => ("floorf", 1, fx.tcx.types.f32),
        sym::floorf64 => ("floor", 1, fx.tcx.types.f64),
        sym::ceilf32 => ("ceilf", 1, fx.tcx.types.f32),
        sym::ceilf64 => ("ceil", 1, fx.tcx.types.f64),
        sym::truncf32 => ("truncf", 1, fx.tcx.types.f32),
        sym::truncf64 => ("trunc", 1, fx.tcx.types.f64),
        sym::roundf32 => ("roundf", 1, fx.tcx.types.f32),
        sym::roundf64 => ("round", 1, fx.tcx.types.f64),
        sym::sinf32 => ("sinf", 1, fx.tcx.types.f32),
        sym::sinf64 => ("sin", 1, fx.tcx.types.f64),
        sym::cosf32 => ("cosf", 1, fx.tcx.types.f32),
        sym::cosf64 => ("cos", 1, fx.tcx.types.f64),
        _ => return false,
    };

    if args.len() != arg_count {
        bug!("wrong number of args for intrinsic {:?}", intrinsic);
    }

    let (a, b, c);
    let args = match args {
        [x] => {
            a = [codegen_operand(fx, x)];
            &a as &[_]
        }
        [x, y] => {
            b = [codegen_operand(fx, x), codegen_operand(fx, y)];
            &b
        }
        [x, y, z] => {
            c = [codegen_operand(fx, x), codegen_operand(fx, y), codegen_operand(fx, z)];
            &c
        }
        _ => unreachable!(),
    };

    let layout = fx.layout_of(ty);
    let res = match intrinsic {
        sym::fmaf32 | sym::fmaf64 => {
            let a = args[0].load_scalar(fx);
            let b = args[1].load_scalar(fx);
            let c = args[2].load_scalar(fx);
            CValue::by_val(fx.bcx.ins().fma(a, b, c), layout)
        }
        sym::copysignf32 | sym::copysignf64 => {
            let a = args[0].load_scalar(fx);
            let b = args[1].load_scalar(fx);
            CValue::by_val(fx.bcx.ins().fcopysign(a, b), layout)
        }
        sym::fabsf32
        | sym::fabsf64
        | sym::floorf32
        | sym::floorf64
        | sym::ceilf32
        | sym::ceilf64
        | sym::truncf32
        | sym::truncf64 => {
            let a = args[0].load_scalar(fx);

            let val = match intrinsic {
                sym::fabsf32 | sym::fabsf64 => fx.bcx.ins().fabs(a),
                sym::floorf32 | sym::floorf64 => fx.bcx.ins().floor(a),
                sym::ceilf32 | sym::ceilf64 => fx.bcx.ins().ceil(a),
                sym::truncf32 | sym::truncf64 => fx.bcx.ins().trunc(a),
                _ => unreachable!(),
            };

            CValue::by_val(val, layout)
        }
        // These intrinsics aren't supported natively by Cranelift.
        // Lower them to a libcall.
        _ => fx.easy_call(name, &args, ty),
    };

    ret.write_cvalue(fx, res);

    true
}

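/// Codegens every remaining intrinsic: memory operations, checked and
/// unchecked arithmetic, atomics, and more. Unknown intrinsics abort
/// compilation with a fatal error.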
fn codegen_regular_intrinsic_call<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    instance: Instance<'tcx>,
    intrinsic: Symbol,
    substs: SubstsRef<'tcx>,
    args: &[mir::Operand<'tcx>],
    ret: CPlace<'tcx>,
    destination: Option<BasicBlock>,
    source_info: mir::SourceInfo,
) {
    let usize_layout = fx.layout_of(fx.tcx.types.usize);

    match intrinsic {
        sym::likely | sym::unlikely => {
            intrinsic_args!(fx, args => (a); intrinsic);

            ret.write_cvalue(fx, a);
        }
        sym::breakpoint => {
            intrinsic_args!(fx, args => (); intrinsic);

            fx.bcx.ins().debugtrap();
        }
        sym::copy | sym::copy_nonoverlapping => {
            intrinsic_args!(fx, args => (src, dst, count); intrinsic);
            let src = src.load_scalar(fx);
            let dst = dst.load_scalar(fx);
            let count = count.load_scalar(fx);

            let elem_ty = substs.type_at(0);
            let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
            assert_eq!(args.len(), 3);
            let byte_amount =
                if elem_size != 1 { fx.bcx.ins().imul_imm(count, elem_size as i64) } else { count };

            if intrinsic == sym::copy_nonoverlapping {
                // FIXME emit_small_memcpy
                fx.bcx.call_memcpy(fx.target_config, dst, src, byte_amount);
            } else {
                // FIXME emit_small_memmove
                fx.bcx.call_memmove(fx.target_config, dst, src, byte_amount);
            }
        }
        sym::volatile_copy_memory | sym::volatile_copy_nonoverlapping_memory => {
            // NOTE: the volatile variants have src and dst swapped
            intrinsic_args!(fx, args => (dst, src, count); intrinsic);
            let dst = dst.load_scalar(fx);
            let src = src.load_scalar(fx);
            let count = count.load_scalar(fx);

            let elem_ty = substs.type_at(0);
            let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
            assert_eq!(args.len(), 3);
            let byte_amount =
                if elem_size != 1 { fx.bcx.ins().imul_imm(count, elem_size as i64) } else { count };

            // FIXME make the copy actually volatile when using emit_small_mem{cpy,move}
            if intrinsic == sym::volatile_copy_nonoverlapping_memory {
                // FIXME emit_small_memcpy
                fx.bcx.call_memcpy(fx.target_config, dst, src, byte_amount);
            } else {
                // FIXME emit_small_memmove
                fx.bcx.call_memmove(fx.target_config, dst, src, byte_amount);
            }
        }
        sym::size_of_val => {
            intrinsic_args!(fx, args => (ptr); intrinsic);

            let layout = fx.layout_of(substs.type_at(0));
            // Note: Can't use is_unsized here as truly unsized types need to take the fixed size
            // branch
            let size = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
                let (_ptr, info) = ptr.load_scalar_pair(fx);
                let (size, _align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
                size
            } else {
                fx.bcx.ins().iconst(fx.pointer_type, layout.size.bytes() as i64)
            };
            ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
        }
        sym::min_align_of_val => {
            intrinsic_args!(fx, args => (ptr); intrinsic);

            let layout = fx.layout_of(substs.type_at(0));
            // Note: Can't use is_unsized here as truly unsized types need to take the fixed size
            // branch
            let align = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
                let (_ptr, info) = ptr.load_scalar_pair(fx);
                let (_size, align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
                align
            } else {
                fx.bcx.ins().iconst(fx.pointer_type, layout.align.abi.bytes() as i64)
            };
            ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
        }

        sym::vtable_size => {
            intrinsic_args!(fx, args => (vtable); intrinsic);
            let vtable = vtable.load_scalar(fx);

            let size = crate::vtable::size_of_obj(fx, vtable);
            ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
        }

        sym::vtable_align => {
            intrinsic_args!(fx, args => (vtable); intrinsic);
            let vtable = vtable.load_scalar(fx);

            let align = crate::vtable::min_align_of_obj(fx, vtable);
            ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
        }

        sym::unchecked_add
        | sym::unchecked_sub
        | sym::unchecked_mul
        | sym::unchecked_div
        | sym::exact_div
        | sym::unchecked_rem
        | sym::unchecked_shl
        | sym::unchecked_shr => {
            intrinsic_args!(fx, args => (x, y); intrinsic);

            // FIXME trap on overflow
            let bin_op = match intrinsic {
                sym::unchecked_add => BinOp::Add,
                sym::unchecked_sub => BinOp::Sub,
                sym::unchecked_mul => BinOp::Mul,
                sym::unchecked_div | sym::exact_div => BinOp::Div,
                sym::unchecked_rem => BinOp::Rem,
                sym::unchecked_shl => BinOp::Shl,
                sym::unchecked_shr => BinOp::Shr,
                _ => unreachable!(),
            };
            let res = crate::num::codegen_int_binop(fx, bin_op, x, y);
            ret.write_cvalue(fx, res);
        }
        sym::add_with_overflow | sym::sub_with_overflow | sym::mul_with_overflow => {
            intrinsic_args!(fx, args => (x, y); intrinsic);

            assert_eq!(x.layout().ty, y.layout().ty);
            let bin_op = match intrinsic {
                sym::add_with_overflow => BinOp::Add,
                sym::sub_with_overflow => BinOp::Sub,
                sym::mul_with_overflow => BinOp::Mul,
                _ => unreachable!(),
            };

            let res = crate::num::codegen_checked_int_binop(fx, bin_op, x, y);
            ret.write_cvalue(fx, res);
        }
        sym::saturating_add | sym::saturating_sub => {
            intrinsic_args!(fx, args => (lhs, rhs); intrinsic);

            assert_eq!(lhs.layout().ty, rhs.layout().ty);
            let bin_op = match intrinsic {
                sym::saturating_add => BinOp::Add,
                sym::saturating_sub => BinOp::Sub,
                _ => unreachable!(),
            };

            let res = crate::num::codegen_saturating_int_binop(fx, bin_op, lhs, rhs);
            ret.write_cvalue(fx, res);
        }
        sym::rotate_left => {
            intrinsic_args!(fx, args => (x, y); intrinsic);
            let y = y.load_scalar(fx);

            let layout = x.layout();
            let x = x.load_scalar(fx);
            let res = fx.bcx.ins().rotl(x, y);
            ret.write_cvalue(fx, CValue::by_val(res, layout));
        }
        sym::rotate_right => {
            intrinsic_args!(fx, args => (x, y); intrinsic);
            let y = y.load_scalar(fx);

            let layout = x.layout();
            let x = x.load_scalar(fx);
            let res = fx.bcx.ins().rotr(x, y);
            ret.write_cvalue(fx, CValue::by_val(res, layout));
        }

        // The only difference between offset and arith_offset is in their UB requirements.
        // Because Cranelift has no concept of UB, both are codegen'ed the same way.
        sym::offset | sym::arith_offset => {
            intrinsic_args!(fx, args => (base, offset); intrinsic);
            let offset = offset.load_scalar(fx);

            let pointee_ty = base.layout().ty.builtin_deref(true).unwrap().ty;
            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
            let ptr_diff = if pointee_size != 1 {
                fx.bcx.ins().imul_imm(offset, pointee_size as i64)
            } else {
                offset
            };
            let base_val = base.load_scalar(fx);
            let res = fx.bcx.ins().iadd(base_val, ptr_diff);
            ret.write_cvalue(fx, CValue::by_val(res, base.layout()));
        }

        sym::ptr_mask => {
            intrinsic_args!(fx, args => (ptr, mask); intrinsic);
            let ptr = ptr.load_scalar(fx);
            let mask = mask.load_scalar(fx);
            // Write the masked pointer back; discarding the `band` result would
            // leave the return place uninitialized.
            let res = fx.bcx.ins().band(ptr, mask);
            ret.write_cvalue(fx, CValue::by_val(res, ret.layout()));
        }

        sym::transmute => {
            intrinsic_args!(fx, args => (from); intrinsic);

            ret.write_cvalue_transmute(fx, from);
        }
        sym::write_bytes | sym::volatile_set_memory => {
            intrinsic_args!(fx, args => (dst, val, count); intrinsic);
            let val = val.load_scalar(fx);
            let count = count.load_scalar(fx);

            let pointee_ty = dst.layout().ty.builtin_deref(true).unwrap().ty;
            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
            let count = if pointee_size != 1 {
                fx.bcx.ins().imul_imm(count, pointee_size as i64)
            } else {
                count
            };
            let dst_ptr = dst.load_scalar(fx);
            // FIXME make the memset actually volatile when switching to emit_small_memset
            // FIXME use emit_small_memset
            fx.bcx.call_memset(fx.target_config, dst_ptr, val, count);
        }
        sym::ctlz | sym::ctlz_nonzero => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            // FIXME trap on `ctlz_nonzero` with zero arg.
            let res = fx.bcx.ins().clz(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::cttz | sym::cttz_nonzero => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            // FIXME trap on `cttz_nonzero` with zero arg.
            let res = fx.bcx.ins().ctz(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::ctpop => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            let res = fx.bcx.ins().popcnt(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::bitreverse => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            let res = fx.bcx.ins().bitrev(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::bswap => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            let res = if fx.bcx.func.dfg.value_type(val) == types::I8 {
                val
            } else {
                fx.bcx.ins().bswap(val)
            };
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::assert_inhabited | sym::assert_zero_valid | sym::assert_uninit_valid => {
            intrinsic_args!(fx, args => (); intrinsic);

            let layout = fx.layout_of(substs.type_at(0));
            if layout.abi.is_uninhabited() {
                with_no_trimmed_paths!({
                    crate::base::codegen_panic(
                        fx,
                        &format!("attempted to instantiate uninhabited type `{}`", layout.ty),
                        source_info,
                    )
                });
                return;
            }

            if intrinsic == sym::assert_zero_valid && !fx.tcx.permits_zero_init(layout) {
                with_no_trimmed_paths!({
                    crate::base::codegen_panic(
                        fx,
                        &format!(
                            "attempted to zero-initialize type `{}`, which is invalid",
                            layout.ty
                        ),
                        source_info,
                    );
                });
                return;
            }

            if intrinsic == sym::assert_uninit_valid && !fx.tcx.permits_uninit_init(layout) {
                with_no_trimmed_paths!({
                    crate::base::codegen_panic(
                        fx,
                        &format!(
                            "attempted to leave type `{}` uninitialized, which is invalid",
                            layout.ty
                        ),
                        source_info,
                    )
                });
                return;
            }
        }

        sym::volatile_load | sym::unaligned_volatile_load => {
            intrinsic_args!(fx, args => (ptr); intrinsic);

            // Cranelift treats loads as volatile by default
            // FIXME correctly handle unaligned_volatile_load
            let inner_layout = fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
            let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
            ret.write_cvalue(fx, val);
        }
        sym::volatile_store | sym::unaligned_volatile_store => {
            intrinsic_args!(fx, args => (ptr, val); intrinsic);
            let ptr = ptr.load_scalar(fx);

            // Cranelift treats stores as volatile by default
            // FIXME correctly handle unaligned_volatile_store
            let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
            dest.write_cvalue(fx, val);
        }

        sym::pref_align_of
        | sym::needs_drop
        | sym::type_id
        | sym::type_name
        | sym::variant_count => {
            intrinsic_args!(fx, args => (); intrinsic);

            let const_val =
                fx.tcx.const_eval_instance(ParamEnv::reveal_all(), instance, None).unwrap();
            let val = crate::constant::codegen_const_value(fx, const_val, ret.layout().ty);
            ret.write_cvalue(fx, val);
        }

        sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
            intrinsic_args!(fx, args => (ptr, base); intrinsic);
            let ptr = ptr.load_scalar(fx);
            let base = base.load_scalar(fx);
            let ty = substs.type_at(0);

            let pointee_size: u64 = fx.layout_of(ty).size.bytes();
            let diff_bytes = fx.bcx.ins().isub(ptr, base);
            // FIXME this can be an exact division.
            let val = if intrinsic == sym::ptr_offset_from_unsigned {
                let usize_layout = fx.layout_of(fx.tcx.types.usize);
                // Because diff_bytes ULE isize::MAX, this would be fine as signed,
                // but unsigned is slightly easier to codegen, so might as well.
                CValue::by_val(fx.bcx.ins().udiv_imm(diff_bytes, pointee_size as i64), usize_layout)
            } else {
                let isize_layout = fx.layout_of(fx.tcx.types.isize);
                CValue::by_val(fx.bcx.ins().sdiv_imm(diff_bytes, pointee_size as i64), isize_layout)
            };
            ret.write_cvalue(fx, val);
        }

        sym::ptr_guaranteed_cmp => {
            intrinsic_args!(fx, args => (a, b); intrinsic);

            let val = crate::num::codegen_ptr_binop(fx, BinOp::Eq, a, b).load_scalar(fx);
            ret.write_cvalue(fx, CValue::by_val(val, fx.layout_of(fx.tcx.types.u8)));
        }

        sym::caller_location => {
            intrinsic_args!(fx, args => (); intrinsic);

            let caller_location = fx.get_caller_location(source_info);
            ret.write_cvalue(fx, caller_location);
        }

        _ if intrinsic.as_str().starts_with("atomic_fence") => {
            intrinsic_args!(fx, args => (); intrinsic);

            fx.bcx.ins().fence();
        }
        _ if intrinsic.as_str().starts_with("atomic_singlethreadfence") => {
            intrinsic_args!(fx, args => (); intrinsic);

            // FIXME use a compiler fence once Cranelift supports it
            fx.bcx.ins().fence();
        }
        _ if intrinsic.as_str().starts_with("atomic_load") => {
            intrinsic_args!(fx, args => (ptr); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let ty = substs.type_at(0);
            match ty.kind() {
                ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
                    // FIXME implement 128bit atomics
                    if fx.tcx.is_compiler_builtins(LOCAL_CRATE) {
                        // special case for compiler-builtins to avoid having to patch it
                        crate::trap::trap_unimplemented(fx, "128bit atomics not yet supported");
                        return;
                    } else {
                        fx.tcx
                            .sess
                            .span_fatal(source_info.span, "128bit atomics not yet supported");
                    }
                }
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, ty);
                    return;
                }
            }
            let clif_ty = fx.clif_type(ty).unwrap();

            let val = fx.bcx.ins().atomic_load(clif_ty, MemFlags::trusted(), ptr);

            let val = CValue::by_val(val, fx.layout_of(ty));
            ret.write_cvalue(fx, val);
        }
        _ if intrinsic.as_str().starts_with("atomic_store") => {
            intrinsic_args!(fx, args => (ptr, val); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let ty = substs.type_at(0);
            match ty.kind() {
                ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
                    // FIXME implement 128bit atomics
                    if fx.tcx.is_compiler_builtins(LOCAL_CRATE) {
                        // special case for compiler-builtins to avoid having to patch it
                        crate::trap::trap_unimplemented(fx, "128bit atomics not yet supported");
                        return;
                    } else {
                        fx.tcx
                            .sess
                            .span_fatal(source_info.span, "128bit atomics not yet supported");
                    }
                }
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, ty);
                    return;
                }
            }

            let val = val.load_scalar(fx);

            fx.bcx.ins().atomic_store(MemFlags::trusted(), val, ptr);
        }
        _ if intrinsic.as_str().starts_with("atomic_xchg") => {
            intrinsic_args!(fx, args => (ptr, new); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = new.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let new = new.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xchg, ptr, new);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_cxchg") => {
            // both atomic_cxchg_* and atomic_cxchgweak_*
            intrinsic_args!(fx, args => (ptr, test_old, new); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = new.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }

            let test_old = test_old.load_scalar(fx);
            let new = new.load_scalar(fx);

            let old = fx.bcx.ins().atomic_cas(MemFlags::trusted(), ptr, test_old, new);
            let is_eq = fx.bcx.ins().icmp(IntCC::Equal, old, test_old);

            let ret_val = CValue::by_val_pair(old, is_eq, ret.layout());
            ret.write_cvalue(fx, ret_val)
        }

        _ if intrinsic.as_str().starts_with("atomic_xadd") => {
            intrinsic_args!(fx, args => (ptr, amount); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = amount.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let amount = amount.load_scalar(fx);

            let old =
                fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Add, ptr, amount);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_xsub") => {
            intrinsic_args!(fx, args => (ptr, amount); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = amount.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let amount = amount.load_scalar(fx);

            let old =
                fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Sub, ptr, amount);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_and") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::And, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_or") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Or, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_xor") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xor, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_nand") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Nand, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_max") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smax, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_umax") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umax, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_min") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smin, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_umin") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umin, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }

        sym::minnumf32 => {
            intrinsic_args!(fx, args => (a, b); intrinsic);
            let a = a.load_scalar(fx);
            let b = b.load_scalar(fx);

            let val = crate::num::codegen_float_min(fx, a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
            ret.write_cvalue(fx, val);
        }
        sym::minnumf64 => {
            intrinsic_args!(fx, args => (a, b); intrinsic);
            let a = a.load_scalar(fx);
            let b = b.load_scalar(fx);

            let val = crate::num::codegen_float_min(fx, a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
            ret.write_cvalue(fx, val);
        }
        sym::maxnumf32 => {
            intrinsic_args!(fx, args => (a, b); intrinsic);
            let a = a.load_scalar(fx);
            let b = b.load_scalar(fx);

            let val = crate::num::codegen_float_max(fx, a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
            ret.write_cvalue(fx, val);
        }
        sym::maxnumf64 => {
            intrinsic_args!(fx, args => (a, b); intrinsic);
            let a = a.load_scalar(fx);
            let b = b.load_scalar(fx);

            let val = crate::num::codegen_float_max(fx, a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
            ret.write_cvalue(fx, val);
        }

        kw::Try => {
            intrinsic_args!(fx, args => (f, data, catch_fn); intrinsic);
            let f = f.load_scalar(fx);
            let data = data.load_scalar(fx);
            let _catch_fn = catch_fn.load_scalar(fx);

            // FIXME once unwinding is supported, change this to actually catch panics
            let f_sig = fx.bcx.func.import_signature(Signature {
                call_conv: fx.target_config.default_call_conv,
                params: vec![AbiParam::new(pointer_ty(fx.tcx))],
                returns: vec![],
            });

            fx.bcx.ins().call_indirect(f_sig, f, &[data]);

            let layout = ret.layout();
            let ret_val = CValue::const_val(fx, layout, ty::ScalarInt::null(layout.size));
            ret.write_cvalue(fx, ret_val);
        }

        sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
            intrinsic_args!(fx, args => (x, y); intrinsic);

            let res = crate::num::codegen_float_binop(
                fx,
                match intrinsic {
                    sym::fadd_fast => BinOp::Add,
                    sym::fsub_fast => BinOp::Sub,
                    sym::fmul_fast => BinOp::Mul,
                    sym::fdiv_fast => BinOp::Div,
                    sym::frem_fast => BinOp::Rem,
                    _ => unreachable!(),
                },
                x,
                y,
            );
            ret.write_cvalue(fx, res);
        }
        sym::float_to_int_unchecked => {
            intrinsic_args!(fx, args => (f); intrinsic);
            let f = f.load_scalar(fx);

            let res = crate::cast::clif_int_or_float_cast(
                fx,
                f,
                false,
                fx.clif_type(ret.layout().ty).unwrap(),
                type_sign(ret.layout().ty),
            );
            ret.write_cvalue(fx, CValue::by_val(res, ret.layout()));
        }

        sym::raw_eq => {
            intrinsic_args!(fx, args => (lhs_ref, rhs_ref); intrinsic);
            let lhs_ref = lhs_ref.load_scalar(fx);
            let rhs_ref = rhs_ref.load_scalar(fx);

            let size = fx.layout_of(substs.type_at(0)).layout.size();
            // FIXME add and use emit_small_memcmp
            let is_eq_value = if size == Size::ZERO {
                // No bytes means they're trivially equal
                fx.bcx.ins().iconst(types::I8, 1)
            } else if let Some(clty) = size.bits().try_into().ok().and_then(Type::int) {
                // Can't use `trusted` for these loads; they could be unaligned.
                let mut flags = MemFlags::new();
                flags.set_notrap();
                let lhs_val = fx.bcx.ins().load(clty, flags, lhs_ref, 0);
                let rhs_val = fx.bcx.ins().load(clty, flags, rhs_ref, 0);
                fx.bcx.ins().icmp(IntCC::Equal, lhs_val, rhs_val)
            } else {
                // Just call `memcmp` (like slices do in core) when the
                // size is too large or it's not a power-of-two.
                let signed_bytes = i64::try_from(size.bytes()).unwrap();
                let bytes_val = fx.bcx.ins().iconst(fx.pointer_type, signed_bytes);
                let params = vec![AbiParam::new(fx.pointer_type); 3];
                let returns = vec![AbiParam::new(types::I32)];
                let args = &[lhs_ref, rhs_ref, bytes_val];
                let cmp = fx.lib_call("memcmp", params, returns, args)[0];
                fx.bcx.ins().icmp_imm(IntCC::Equal, cmp, 0)
            };
            ret.write_cvalue(fx, CValue::by_val(is_eq_value, ret.layout()));
        }

        sym::const_allocate => {
            intrinsic_args!(fx, args => (_size, _align); intrinsic);

            // returns a null pointer at runtime.
            let null = fx.bcx.ins().iconst(fx.pointer_type, 0);
            ret.write_cvalue(fx, CValue::by_val(null, ret.layout()));
        }

        sym::const_deallocate => {
            intrinsic_args!(fx, args => (_ptr, _size, _align); intrinsic);
            // nop at runtime.
        }

        sym::black_box => {
            intrinsic_args!(fx, args => (a); intrinsic);

            // FIXME implement black_box semantics
            ret.write_cvalue(fx, a);
        }

        // FIXME implement variadics in cranelift
        sym::va_copy | sym::va_arg | sym::va_end => {
            fx.tcx.sess.span_fatal(
                source_info.span,
                "Defining variadic functions is not yet supported by Cranelift",
            );
        }

        _ => {
            fx.tcx
                .sess
                .span_fatal(source_info.span, &format!("unsupported intrinsic {}", intrinsic));
        }
    }

    let ret_block = fx.get_block(destination.unwrap());
    fx.bcx.ins().jump(ret_block, &[]);
}