//! Codegen of intrinsics. This includes `extern "rust-intrinsic"`, `extern "platform-intrinsic"`
//! and LLVM intrinsics that have symbol names starting with `llvm.`.

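/// Destructures an intrinsic's `&[mir::Operand<'tcx>]` slice into a fixed set of bindings,
/// lowering each operand to a `CValue` with `codegen_operand`. A mismatched argument count
/// aborts via `bug_on_incorrect_arg_count`. For example,
/// `intrinsic_args!(fx, args => (x, y); intrinsic);` binds exactly two operands as `x` and `y`.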
macro_rules! intrinsic_args {
    ($fx:expr, $args:expr => ($($arg:tt),*); $intrinsic:expr) => {
        #[allow(unused_parens)]
        let ($($arg),*) = if let [$($arg),*] = $args {
            ($(codegen_operand($fx, $arg)),*)
        } else {
            $crate::intrinsics::bug_on_incorrect_arg_count($intrinsic);
        };
    }
}

mod cpuid;
mod llvm;
mod llvm_aarch64;
mod llvm_x86;
mod simd;

pub(crate) use cpuid::codegen_cpuid_call;
pub(crate) use llvm::codegen_llvm_intrinsic_call;

use rustc_middle::ty::layout::HasParamEnv;
use rustc_middle::ty::print::with_no_trimmed_paths;
use rustc_middle::ty::subst::SubstsRef;
use rustc_span::symbol::{kw, sym, Symbol};

use crate::prelude::*;
use cranelift_codegen::ir::AtomicRmwOp;

fn bug_on_incorrect_arg_count(intrinsic: impl std::fmt::Display) -> ! {
    bug!("wrong number of args for intrinsic {}", intrinsic);
}

fn report_atomic_type_validation_error<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    intrinsic: Symbol,
    span: Span,
    ty: Ty<'tcx>,
) {
    fx.tcx.sess.span_err(
        span,
        &format!(
            "`{}` intrinsic: expected basic integer or raw pointer type, found `{:?}`",
            intrinsic, ty
        ),
    );
    // Prevent verifier error
    fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
}

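/// Maps a SIMD type layout to the corresponding Cranelift vector type, returning `None` unless
/// the lanes fit exactly into a single 128-bit vector.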
pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> Option<Type> {
    let (element, count) = match layout.abi {
        Abi::Vector { element, count } => (element, count),
        _ => unreachable!(),
    };

    match scalar_to_clif_type(tcx, element).by(u32::try_from(count).unwrap()) {
        // Cranelift currently only implements icmp for 128bit vectors.
        Some(vector_ty) if vector_ty.bits() == 128 => Some(vector_ty),
        _ => None,
    }
}

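/// Applies `f` to each lane of `val` and writes the per-lane results to `ret`. The input and
/// output vectors must have the same lane count.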
fn simd_for_each_lane<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    val: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Ty<'tcx>, Value) -> Value,
) {
    let layout = val.layout();

    let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
    let lane_layout = fx.layout_of(lane_ty);
    let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
    let ret_lane_layout = fx.layout_of(ret_lane_ty);
    assert_eq!(lane_count, ret_lane_count);

    for lane_idx in 0..lane_count {
        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);

        let res_lane = f(fx, lane_layout.ty, ret_lane_layout.ty, lane);
        let res_lane = CValue::by_val(res_lane, ret_lane_layout);

        ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
    }
}

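/// Lane-wise version of the pairwise helpers that passes each lane to `f` as a `CValue` rather
/// than a loaded scalar, so `f` decides how to load the operands and build the result lane.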
fn simd_pair_for_each_lane_typed<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    x: CValue<'tcx>,
    y: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, CValue<'tcx>, CValue<'tcx>) -> CValue<'tcx>,
) {
    assert_eq!(x.layout(), y.layout());
    let layout = x.layout();

    let (lane_count, _lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
    let (ret_lane_count, _ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
    assert_eq!(lane_count, ret_lane_count);

    for lane_idx in 0..lane_count {
        let x_lane = x.value_lane(fx, lane_idx);
        let y_lane = y.value_lane(fx, lane_idx);

        let res_lane = f(fx, x_lane, y_lane);

        ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
    }
}

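/// Applies `f` lane-wise to the loaded scalar lanes of `x` and `y` and writes the results to
/// `ret`. All three vectors must have the same lane count.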
fn simd_pair_for_each_lane<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    x: CValue<'tcx>,
    y: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Ty<'tcx>, Value, Value) -> Value,
) {
    assert_eq!(x.layout(), y.layout());
    let layout = x.layout();

    let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
    let lane_layout = fx.layout_of(lane_ty);
    let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
    let ret_lane_layout = fx.layout_of(ret_lane_ty);
    assert_eq!(lane_count, ret_lane_count);

    for lane_idx in 0..lane_count {
        let x_lane = x.value_lane(fx, lane_idx).load_scalar(fx);
        let y_lane = y.value_lane(fx, lane_idx).load_scalar(fx);

        let res_lane = f(fx, lane_layout.ty, ret_lane_layout.ty, x_lane, y_lane);
        let res_lane = CValue::by_val(res_lane, ret_lane_layout);

        ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
    }
}

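/// Folds the lanes of `val` into a single scalar using `f`, seeding the accumulator with `acc`
/// when given and with the first lane otherwise, then writes the result to `ret`.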
fn simd_reduce<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    val: CValue<'tcx>,
    acc: Option<Value>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Value, Value) -> Value,
) {
    let (lane_count, lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
    let lane_layout = fx.layout_of(lane_ty);
    assert_eq!(lane_layout, ret.layout());

    let (mut res_val, start_lane) =
        if let Some(acc) = acc { (acc, 0) } else { (val.value_lane(fx, 0).load_scalar(fx), 1) };
    for lane_idx in start_lane..lane_count {
        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
        res_val = f(fx, lane_layout.ty, res_val, lane);
    }
    let res = CValue::by_val(res_val, lane_layout);
    ret.write_cvalue(fx, res);
}

// FIXME move all uses to `simd_reduce`
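/// Boolean reduction over the lanes of `val`: each lane is masked to its low bit before being
/// combined with `f`, and the final value is narrowed to an `I8` boolean written to `ret`.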
fn simd_reduce_bool<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    val: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Value, Value) -> Value,
) {
    let (lane_count, _lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
    assert!(ret.layout().ty.is_bool());

    let res_val = val.value_lane(fx, 0).load_scalar(fx);
    let mut res_val = fx.bcx.ins().band_imm(res_val, 1); // mask to boolean
    for lane_idx in 1..lane_count {
        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
        let lane = fx.bcx.ins().band_imm(lane, 1); // mask to boolean
        res_val = f(fx, res_val, lane);
    }
    let res_val = if fx.bcx.func.dfg.value_type(res_val) != types::I8 {
        fx.bcx.ins().ireduce(types::I8, res_val)
    } else {
        res_val
    };
    let res = CValue::by_val(res_val, ret.layout());
    ret.write_cvalue(fx, res);
}

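/// Expands a boolean `Value` into an all-zeros or all-ones value of the Cranelift type for `ty`,
/// bitcasting the result back to a float type when `ty` is `f32` or `f64`.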
fn bool_to_zero_or_max_uint<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    ty: Ty<'tcx>,
    val: Value,
) -> Value {
    let ty = fx.clif_type(ty).unwrap();

    let int_ty = match ty {
        types::F32 => types::I32,
        types::F64 => types::I64,
        ty => ty,
    };

    let mut res = fx.bcx.ins().bmask(int_ty, val);

    if ty.is_float() {
        res = codegen_bitcast(fx, ty, res);
    }

    res
}

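/// Entry point for lowering an intrinsic call: `simd_*` intrinsics are dispatched to the `simd`
/// module, float intrinsics to `codegen_float_intrinsic_call`, and everything else to
/// `codegen_regular_intrinsic_call`.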
pub(crate) fn codegen_intrinsic_call<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    instance: Instance<'tcx>,
    args: &[mir::Operand<'tcx>],
    destination: CPlace<'tcx>,
    target: Option<BasicBlock>,
    source_info: mir::SourceInfo,
) {
    let intrinsic = fx.tcx.item_name(instance.def_id());
    let substs = instance.substs;

    if intrinsic.as_str().starts_with("simd_") {
        self::simd::codegen_simd_intrinsic_call(
            fx,
            intrinsic,
            substs,
            args,
            destination,
            target.expect("target for simd intrinsic"),
            source_info.span,
        );
    } else if codegen_float_intrinsic_call(fx, intrinsic, args, destination) {
        let ret_block = fx.get_block(target.expect("target for float intrinsic"));
        fx.bcx.ins().jump(ret_block, &[]);
    } else {
        codegen_regular_intrinsic_call(
            fx,
            instance,
            intrinsic,
            substs,
            args,
            destination,
            target,
            source_info,
        );
    }
}

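/// Lowers float intrinsics either to a native Cranelift instruction (fma, copysign, fabs, floor,
/// ceil, trunc) or to a libcall. Returns `false` if `intrinsic` is not in the float intrinsic
/// table, leaving it to the caller.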
fn codegen_float_intrinsic_call<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    intrinsic: Symbol,
    args: &[mir::Operand<'tcx>],
    ret: CPlace<'tcx>,
) -> bool {
    let (name, arg_count, ty) = match intrinsic {
        sym::expf32 => ("expf", 1, fx.tcx.types.f32),
        sym::expf64 => ("exp", 1, fx.tcx.types.f64),
        sym::exp2f32 => ("exp2f", 1, fx.tcx.types.f32),
        sym::exp2f64 => ("exp2", 1, fx.tcx.types.f64),
        sym::sqrtf32 => ("sqrtf", 1, fx.tcx.types.f32),
        sym::sqrtf64 => ("sqrt", 1, fx.tcx.types.f64),
        sym::powif32 => ("__powisf2", 2, fx.tcx.types.f32), // compiler-builtins
        sym::powif64 => ("__powidf2", 2, fx.tcx.types.f64), // compiler-builtins
        sym::powf32 => ("powf", 2, fx.tcx.types.f32),
        sym::powf64 => ("pow", 2, fx.tcx.types.f64),
        sym::logf32 => ("logf", 1, fx.tcx.types.f32),
        sym::logf64 => ("log", 1, fx.tcx.types.f64),
        sym::log2f32 => ("log2f", 1, fx.tcx.types.f32),
        sym::log2f64 => ("log2", 1, fx.tcx.types.f64),
        sym::log10f32 => ("log10f", 1, fx.tcx.types.f32),
        sym::log10f64 => ("log10", 1, fx.tcx.types.f64),
        sym::fabsf32 => ("fabsf", 1, fx.tcx.types.f32),
        sym::fabsf64 => ("fabs", 1, fx.tcx.types.f64),
        sym::fmaf32 => ("fmaf", 3, fx.tcx.types.f32),
        sym::fmaf64 => ("fma", 3, fx.tcx.types.f64),
        sym::copysignf32 => ("copysignf", 2, fx.tcx.types.f32),
        sym::copysignf64 => ("copysign", 2, fx.tcx.types.f64),
        sym::floorf32 => ("floorf", 1, fx.tcx.types.f32),
        sym::floorf64 => ("floor", 1, fx.tcx.types.f64),
        sym::ceilf32 => ("ceilf", 1, fx.tcx.types.f32),
        sym::ceilf64 => ("ceil", 1, fx.tcx.types.f64),
        sym::truncf32 => ("truncf", 1, fx.tcx.types.f32),
        sym::truncf64 => ("trunc", 1, fx.tcx.types.f64),
        sym::roundf32 => ("roundf", 1, fx.tcx.types.f32),
        sym::roundf64 => ("round", 1, fx.tcx.types.f64),
        sym::sinf32 => ("sinf", 1, fx.tcx.types.f32),
        sym::sinf64 => ("sin", 1, fx.tcx.types.f64),
        sym::cosf32 => ("cosf", 1, fx.tcx.types.f32),
        sym::cosf64 => ("cos", 1, fx.tcx.types.f64),
        _ => return false,
    };

    if args.len() != arg_count {
        bug!("wrong number of args for intrinsic {:?}", intrinsic);
    }

    let (a, b, c);
    let args = match args {
        [x] => {
            a = [codegen_operand(fx, x)];
            &a as &[_]
        }
        [x, y] => {
            b = [codegen_operand(fx, x), codegen_operand(fx, y)];
            &b
        }
        [x, y, z] => {
            c = [codegen_operand(fx, x), codegen_operand(fx, y), codegen_operand(fx, z)];
            &c
        }
        _ => unreachable!(),
    };

    let layout = fx.layout_of(ty);
    let res = match intrinsic {
        sym::fmaf32 | sym::fmaf64 => {
            let a = args[0].load_scalar(fx);
            let b = args[1].load_scalar(fx);
            let c = args[2].load_scalar(fx);
            CValue::by_val(fx.bcx.ins().fma(a, b, c), layout)
        }
        sym::copysignf32 | sym::copysignf64 => {
            let a = args[0].load_scalar(fx);
            let b = args[1].load_scalar(fx);
            CValue::by_val(fx.bcx.ins().fcopysign(a, b), layout)
        }
        sym::fabsf32
        | sym::fabsf64
        | sym::floorf32
        | sym::floorf64
        | sym::ceilf32
        | sym::ceilf64
        | sym::truncf32
        | sym::truncf64 => {
            let a = args[0].load_scalar(fx);

            let val = match intrinsic {
                sym::fabsf32 | sym::fabsf64 => fx.bcx.ins().fabs(a),
                sym::floorf32 | sym::floorf64 => fx.bcx.ins().floor(a),
                sym::ceilf32 | sym::ceilf64 => fx.bcx.ins().ceil(a),
                sym::truncf32 | sym::truncf64 => fx.bcx.ins().trunc(a),
                _ => unreachable!(),
            };

            CValue::by_val(val, layout)
        }
        // These intrinsics aren't supported natively by Cranelift.
        // Lower them to a libcall.
        _ => fx.easy_call(name, &args, ty),
    };

    ret.write_cvalue(fx, res);

    true
}

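/// Lowers all intrinsics that are neither SIMD nor float intrinsics. `destination` is the block
/// to branch to afterwards; arms that diverge (e.g. `abort` or unsupported cases) `return` early
/// and never emit that branch.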
fn codegen_regular_intrinsic_call<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    instance: Instance<'tcx>,
    intrinsic: Symbol,
    substs: SubstsRef<'tcx>,
    args: &[mir::Operand<'tcx>],
    ret: CPlace<'tcx>,
    destination: Option<BasicBlock>,
    source_info: mir::SourceInfo,
) {
    let usize_layout = fx.layout_of(fx.tcx.types.usize);

    match intrinsic {
        sym::abort => {
            fx.bcx.ins().trap(TrapCode::User(0));
            return;
        }
        sym::likely | sym::unlikely => {
            intrinsic_args!(fx, args => (a); intrinsic);

            ret.write_cvalue(fx, a);
        }
        sym::breakpoint => {
            intrinsic_args!(fx, args => (); intrinsic);

            fx.bcx.ins().debugtrap();
        }
        sym::copy | sym::copy_nonoverlapping => {
            intrinsic_args!(fx, args => (src, dst, count); intrinsic);
            let src = src.load_scalar(fx);
            let dst = dst.load_scalar(fx);
            let count = count.load_scalar(fx);

            let elem_ty = substs.type_at(0);
            let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
            assert_eq!(args.len(), 3);
            let byte_amount =
                if elem_size != 1 { fx.bcx.ins().imul_imm(count, elem_size as i64) } else { count };

            if intrinsic == sym::copy_nonoverlapping {
                // FIXME emit_small_memcpy
                fx.bcx.call_memcpy(fx.target_config, dst, src, byte_amount);
            } else {
                // FIXME emit_small_memmove
                fx.bcx.call_memmove(fx.target_config, dst, src, byte_amount);
            }
        }
        sym::volatile_copy_memory | sym::volatile_copy_nonoverlapping_memory => {
            // NOTE: the volatile variants have src and dst swapped
            intrinsic_args!(fx, args => (dst, src, count); intrinsic);
            let dst = dst.load_scalar(fx);
            let src = src.load_scalar(fx);
            let count = count.load_scalar(fx);

            let elem_ty = substs.type_at(0);
            let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
            assert_eq!(args.len(), 3);
            let byte_amount =
                if elem_size != 1 { fx.bcx.ins().imul_imm(count, elem_size as i64) } else { count };

            // FIXME make the copy actually volatile when using emit_small_mem{cpy,move}
            if intrinsic == sym::volatile_copy_nonoverlapping_memory {
                // FIXME emit_small_memcpy
                fx.bcx.call_memcpy(fx.target_config, dst, src, byte_amount);
            } else {
                // FIXME emit_small_memmove
                fx.bcx.call_memmove(fx.target_config, dst, src, byte_amount);
            }
        }
        sym::size_of_val => {
            intrinsic_args!(fx, args => (ptr); intrinsic);

            let layout = fx.layout_of(substs.type_at(0));
            // Note: Can't use is_unsized here as truly unsized types need to take the fixed size
            // branch
            let size = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
                let (_ptr, info) = ptr.load_scalar_pair(fx);
                let (size, _align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
                size
            } else {
                fx.bcx.ins().iconst(fx.pointer_type, layout.size.bytes() as i64)
            };
            ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
        }
        sym::min_align_of_val => {
            intrinsic_args!(fx, args => (ptr); intrinsic);

            let layout = fx.layout_of(substs.type_at(0));
            // Note: Can't use is_unsized here as truly unsized types need to take the fixed size
            // branch
            let align = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
                let (_ptr, info) = ptr.load_scalar_pair(fx);
                let (_size, align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
                align
            } else {
                fx.bcx.ins().iconst(fx.pointer_type, layout.align.abi.bytes() as i64)
            };
            ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
        }

        sym::vtable_size => {
            intrinsic_args!(fx, args => (vtable); intrinsic);
            let vtable = vtable.load_scalar(fx);

            let size = crate::vtable::size_of_obj(fx, vtable);
            ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
        }

        sym::vtable_align => {
            intrinsic_args!(fx, args => (vtable); intrinsic);
            let vtable = vtable.load_scalar(fx);

            let align = crate::vtable::min_align_of_obj(fx, vtable);
            ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
        }

        sym::unchecked_add
        | sym::unchecked_sub
        | sym::unchecked_mul
        | sym::unchecked_div
        | sym::exact_div
        | sym::unchecked_rem
        | sym::unchecked_shl
        | sym::unchecked_shr => {
            intrinsic_args!(fx, args => (x, y); intrinsic);

            // FIXME trap on overflow
            let bin_op = match intrinsic {
                sym::unchecked_add => BinOp::Add,
                sym::unchecked_sub => BinOp::Sub,
                sym::unchecked_mul => BinOp::Mul,
                sym::unchecked_div | sym::exact_div => BinOp::Div,
                sym::unchecked_rem => BinOp::Rem,
                sym::unchecked_shl => BinOp::Shl,
                sym::unchecked_shr => BinOp::Shr,
                _ => unreachable!(),
            };
            let res = crate::num::codegen_int_binop(fx, bin_op, x, y);
            ret.write_cvalue(fx, res);
        }
        sym::add_with_overflow | sym::sub_with_overflow | sym::mul_with_overflow => {
            intrinsic_args!(fx, args => (x, y); intrinsic);

            assert_eq!(x.layout().ty, y.layout().ty);
            let bin_op = match intrinsic {
                sym::add_with_overflow => BinOp::Add,
                sym::sub_with_overflow => BinOp::Sub,
                sym::mul_with_overflow => BinOp::Mul,
                _ => unreachable!(),
            };

            let res = crate::num::codegen_checked_int_binop(fx, bin_op, x, y);
            ret.write_cvalue(fx, res);
        }
        sym::saturating_add | sym::saturating_sub => {
            intrinsic_args!(fx, args => (lhs, rhs); intrinsic);

            assert_eq!(lhs.layout().ty, rhs.layout().ty);
            let bin_op = match intrinsic {
                sym::saturating_add => BinOp::Add,
                sym::saturating_sub => BinOp::Sub,
                _ => unreachable!(),
            };

            let res = crate::num::codegen_saturating_int_binop(fx, bin_op, lhs, rhs);
            ret.write_cvalue(fx, res);
        }
        sym::rotate_left => {
            intrinsic_args!(fx, args => (x, y); intrinsic);
            let y = y.load_scalar(fx);

            let layout = x.layout();
            let x = x.load_scalar(fx);
            let res = fx.bcx.ins().rotl(x, y);
            ret.write_cvalue(fx, CValue::by_val(res, layout));
        }
        sym::rotate_right => {
            intrinsic_args!(fx, args => (x, y); intrinsic);
            let y = y.load_scalar(fx);

            let layout = x.layout();
            let x = x.load_scalar(fx);
            let res = fx.bcx.ins().rotr(x, y);
            ret.write_cvalue(fx, CValue::by_val(res, layout));
        }

        // The only difference between offset and arith_offset is regarding UB. Because Cranelift
        // doesn't have UB both are codegen'ed the same way
        sym::offset | sym::arith_offset => {
            intrinsic_args!(fx, args => (base, offset); intrinsic);
            let offset = offset.load_scalar(fx);

            let pointee_ty = base.layout().ty.builtin_deref(true).unwrap().ty;
            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
            let ptr_diff = if pointee_size != 1 {
                fx.bcx.ins().imul_imm(offset, pointee_size as i64)
            } else {
                offset
            };
            let base_val = base.load_scalar(fx);
            let res = fx.bcx.ins().iadd(base_val, ptr_diff);
            ret.write_cvalue(fx, CValue::by_val(res, base.layout()));
        }

        sym::ptr_mask => {
            intrinsic_args!(fx, args => (ptr, mask); intrinsic);
            let ptr_layout = ptr.layout();
            let ptr = ptr.load_scalar(fx);
            let mask = mask.load_scalar(fx);
            // The masked pointer is the return value of the intrinsic, so write it back.
            let res = fx.bcx.ins().band(ptr, mask);
            ret.write_cvalue(fx, CValue::by_val(res, ptr_layout));
        }

        sym::transmute => {
            intrinsic_args!(fx, args => (from); intrinsic);

            if ret.layout().abi.is_uninhabited() {
                crate::base::codegen_panic(fx, "Transmuting to uninhabited type.", source_info);
                return;
            }

            ret.write_cvalue_transmute(fx, from);
        }
        sym::write_bytes | sym::volatile_set_memory => {
            intrinsic_args!(fx, args => (dst, val, count); intrinsic);
            let val = val.load_scalar(fx);
            let count = count.load_scalar(fx);

            let pointee_ty = dst.layout().ty.builtin_deref(true).unwrap().ty;
            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
            let count = if pointee_size != 1 {
                fx.bcx.ins().imul_imm(count, pointee_size as i64)
            } else {
                count
            };
            let dst_ptr = dst.load_scalar(fx);
            // FIXME make the memset actually volatile when switching to emit_small_memset
            // FIXME use emit_small_memset
            fx.bcx.call_memset(fx.target_config, dst_ptr, val, count);
        }
        sym::ctlz | sym::ctlz_nonzero => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            // FIXME trap on `ctlz_nonzero` with zero arg.
            let res = fx.bcx.ins().clz(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::cttz | sym::cttz_nonzero => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            // FIXME trap on `cttz_nonzero` with zero arg.
            let res = fx.bcx.ins().ctz(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::ctpop => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            let res = fx.bcx.ins().popcnt(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::bitreverse => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            let res = fx.bcx.ins().bitrev(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::bswap => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            let res = if fx.bcx.func.dfg.value_type(val) == types::I8 {
                val
            } else {
                fx.bcx.ins().bswap(val)
            };
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::assert_inhabited | sym::assert_zero_valid | sym::assert_mem_uninitialized_valid => {
            intrinsic_args!(fx, args => (); intrinsic);

            let layout = fx.layout_of(substs.type_at(0));
            if layout.abi.is_uninhabited() {
                with_no_trimmed_paths!({
                    crate::base::codegen_panic_nounwind(
                        fx,
                        &format!("attempted to instantiate uninhabited type `{}`", layout.ty),
                        source_info,
                    )
                });
                return;
            }

            if intrinsic == sym::assert_zero_valid
                && !fx.tcx.permits_zero_init(fx.param_env().and(layout))
            {
                with_no_trimmed_paths!({
                    crate::base::codegen_panic_nounwind(
                        fx,
                        &format!(
                            "attempted to zero-initialize type `{}`, which is invalid",
                            layout.ty
                        ),
                        source_info,
                    );
                });
                return;
            }

            if intrinsic == sym::assert_mem_uninitialized_valid
                && !fx.tcx.permits_uninit_init(fx.param_env().and(layout))
            {
                with_no_trimmed_paths!({
                    crate::base::codegen_panic_nounwind(
                        fx,
                        &format!(
                            "attempted to leave type `{}` uninitialized, which is invalid",
                            layout.ty
                        ),
                        source_info,
                    )
                });
                return;
            }
        }

        sym::volatile_load | sym::unaligned_volatile_load => {
            intrinsic_args!(fx, args => (ptr); intrinsic);

            // Cranelift treats loads as volatile by default
            // FIXME correctly handle unaligned_volatile_load
            let inner_layout = fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
            let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
            ret.write_cvalue(fx, val);
        }
        sym::volatile_store | sym::unaligned_volatile_store => {
            intrinsic_args!(fx, args => (ptr, val); intrinsic);
            let ptr = ptr.load_scalar(fx);

            // Cranelift treats stores as volatile by default
            // FIXME correctly handle unaligned_volatile_store
            let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
            dest.write_cvalue(fx, val);
        }

        sym::pref_align_of
        | sym::needs_drop
        | sym::type_id
        | sym::type_name
        | sym::variant_count => {
            intrinsic_args!(fx, args => (); intrinsic);

            let const_val =
                fx.tcx.const_eval_instance(ParamEnv::reveal_all(), instance, None).unwrap();
            let val = crate::constant::codegen_const_value(fx, const_val, ret.layout().ty);
            ret.write_cvalue(fx, val);
        }

        sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
            intrinsic_args!(fx, args => (ptr, base); intrinsic);
            let ptr = ptr.load_scalar(fx);
            let base = base.load_scalar(fx);
            let ty = substs.type_at(0);

            let pointee_size: u64 = fx.layout_of(ty).size.bytes();
            let diff_bytes = fx.bcx.ins().isub(ptr, base);
            // FIXME this can be an exact division.
            let val = if intrinsic == sym::ptr_offset_from_unsigned {
                let usize_layout = fx.layout_of(fx.tcx.types.usize);
                // Because diff_bytes ULE isize::MAX, this would be fine as signed,
                // but unsigned is slightly easier to codegen, so might as well.
                CValue::by_val(fx.bcx.ins().udiv_imm(diff_bytes, pointee_size as i64), usize_layout)
            } else {
                let isize_layout = fx.layout_of(fx.tcx.types.isize);
                CValue::by_val(fx.bcx.ins().sdiv_imm(diff_bytes, pointee_size as i64), isize_layout)
            };
            ret.write_cvalue(fx, val);
        }

        sym::ptr_guaranteed_cmp => {
            intrinsic_args!(fx, args => (a, b); intrinsic);

            let val = crate::num::codegen_ptr_binop(fx, BinOp::Eq, a, b).load_scalar(fx);
            ret.write_cvalue(fx, CValue::by_val(val, fx.layout_of(fx.tcx.types.u8)));
        }

        sym::caller_location => {
            intrinsic_args!(fx, args => (); intrinsic);

            let caller_location = fx.get_caller_location(source_info);
            ret.write_cvalue(fx, caller_location);
        }

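        // Atomic intrinsics: fences lower to `fence`, loads/stores to `atomic_load`/`atomic_store`,
        // compare-exchange to `atomic_cas` and the read-modify-write family to `atomic_rmw` with
        // the matching `AtomicRmwOp`. Operand types are validated to be basic integers or raw
        // pointers first; 128-bit atomics are not yet supported.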
755         _ if intrinsic.as_str().starts_with("atomic_fence") => {
756             intrinsic_args!(fx, args => (); intrinsic);
757
758             fx.bcx.ins().fence();
759         }
760         _ if intrinsic.as_str().starts_with("atomic_singlethreadfence") => {
761             intrinsic_args!(fx, args => (); intrinsic);
762
763             // FIXME use a compiler fence once Cranelift supports it
764             fx.bcx.ins().fence();
765         }
766         _ if intrinsic.as_str().starts_with("atomic_load") => {
767             intrinsic_args!(fx, args => (ptr); intrinsic);
768             let ptr = ptr.load_scalar(fx);
769
770             let ty = substs.type_at(0);
771             match ty.kind() {
772                 ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
773                     // FIXME implement 128bit atomics
774                     if fx.tcx.is_compiler_builtins(LOCAL_CRATE) {
775                         // special case for compiler-builtins to avoid having to patch it
776                         crate::trap::trap_unimplemented(fx, "128bit atomics not yet supported");
777                         return;
778                     } else {
779                         fx.tcx
780                             .sess
781                             .span_fatal(source_info.span, "128bit atomics not yet supported");
782                     }
783                 }
784                 ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
785                 _ => {
786                     report_atomic_type_validation_error(fx, intrinsic, source_info.span, ty);
787                     return;
788                 }
789             }
790             let clif_ty = fx.clif_type(ty).unwrap();
791
792             let val = fx.bcx.ins().atomic_load(clif_ty, MemFlags::trusted(), ptr);
793
794             let val = CValue::by_val(val, fx.layout_of(ty));
795             ret.write_cvalue(fx, val);
796         }
797         _ if intrinsic.as_str().starts_with("atomic_store") => {
798             intrinsic_args!(fx, args => (ptr, val); intrinsic);
799             let ptr = ptr.load_scalar(fx);
800
801             let ty = substs.type_at(0);
802             match ty.kind() {
803                 ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
804                     // FIXME implement 128bit atomics
805                     if fx.tcx.is_compiler_builtins(LOCAL_CRATE) {
806                         // special case for compiler-builtins to avoid having to patch it
807                         crate::trap::trap_unimplemented(fx, "128bit atomics not yet supported");
808                         return;
809                     } else {
810                         fx.tcx
811                             .sess
812                             .span_fatal(source_info.span, "128bit atomics not yet supported");
813                     }
814                 }
815                 ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
816                 _ => {
817                     report_atomic_type_validation_error(fx, intrinsic, source_info.span, ty);
818                     return;
819                 }
820             }
821
822             let val = val.load_scalar(fx);
823
824             fx.bcx.ins().atomic_store(MemFlags::trusted(), val, ptr);
825         }
826         _ if intrinsic.as_str().starts_with("atomic_xchg") => {
827             intrinsic_args!(fx, args => (ptr, new); intrinsic);
828             let ptr = ptr.load_scalar(fx);
829
830             let layout = new.layout();
831             match layout.ty.kind() {
832                 ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
833                 _ => {
834                     report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
835                     return;
836                 }
837             }
838             let ty = fx.clif_type(layout.ty).unwrap();
839
840             let new = new.load_scalar(fx);
841
842             let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xchg, ptr, new);
843
844             let old = CValue::by_val(old, layout);
845             ret.write_cvalue(fx, old);
846         }
847         _ if intrinsic.as_str().starts_with("atomic_cxchg") => {
848             // both atomic_cxchg_* and atomic_cxchgweak_*
849             intrinsic_args!(fx, args => (ptr, test_old, new); intrinsic);
850             let ptr = ptr.load_scalar(fx);
851
852             let layout = new.layout();
853             match layout.ty.kind() {
854                 ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
855                 _ => {
856                     report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
857                     return;
858                 }
859             }
860
861             let test_old = test_old.load_scalar(fx);
862             let new = new.load_scalar(fx);
863
864             let old = fx.bcx.ins().atomic_cas(MemFlags::trusted(), ptr, test_old, new);
865             let is_eq = fx.bcx.ins().icmp(IntCC::Equal, old, test_old);
866
867             let ret_val = CValue::by_val_pair(old, is_eq, ret.layout());
868             ret.write_cvalue(fx, ret_val)
869         }
870
871         _ if intrinsic.as_str().starts_with("atomic_xadd") => {
872             intrinsic_args!(fx, args => (ptr, amount); intrinsic);
873             let ptr = ptr.load_scalar(fx);
874
875             let layout = amount.layout();
876             match layout.ty.kind() {
877                 ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
878                 _ => {
879                     report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
880                     return;
881                 }
882             }
883             let ty = fx.clif_type(layout.ty).unwrap();
884
885             let amount = amount.load_scalar(fx);
886
887             let old =
888                 fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Add, ptr, amount);
889
890             let old = CValue::by_val(old, layout);
891             ret.write_cvalue(fx, old);
892         }
893         _ if intrinsic.as_str().starts_with("atomic_xsub") => {
894             intrinsic_args!(fx, args => (ptr, amount); intrinsic);
895             let ptr = ptr.load_scalar(fx);
896
897             let layout = amount.layout();
898             match layout.ty.kind() {
899                 ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
900                 _ => {
901                     report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
902                     return;
903                 }
904             }
905             let ty = fx.clif_type(layout.ty).unwrap();
906
907             let amount = amount.load_scalar(fx);
908
909             let old =
910                 fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Sub, ptr, amount);
911
912             let old = CValue::by_val(old, layout);
913             ret.write_cvalue(fx, old);
914         }
915         _ if intrinsic.as_str().starts_with("atomic_and") => {
916             intrinsic_args!(fx, args => (ptr, src); intrinsic);
917             let ptr = ptr.load_scalar(fx);
918
919             let layout = src.layout();
920             match layout.ty.kind() {
921                 ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
922                 _ => {
923                     report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
924                     return;
925                 }
926             }
927             let ty = fx.clif_type(layout.ty).unwrap();
928
929             let src = src.load_scalar(fx);
930
931             let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::And, ptr, src);
932
933             let old = CValue::by_val(old, layout);
934             ret.write_cvalue(fx, old);
935         }
936         _ if intrinsic.as_str().starts_with("atomic_or") => {
937             intrinsic_args!(fx, args => (ptr, src); intrinsic);
938             let ptr = ptr.load_scalar(fx);
939
940             let layout = src.layout();
941             match layout.ty.kind() {
942                 ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
943                 _ => {
944                     report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
945                     return;
946                 }
947             }
948             let ty = fx.clif_type(layout.ty).unwrap();
949
950             let src = src.load_scalar(fx);
951
952             let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Or, ptr, src);
953
954             let old = CValue::by_val(old, layout);
955             ret.write_cvalue(fx, old);
956         }
957         _ if intrinsic.as_str().starts_with("atomic_xor") => {
958             intrinsic_args!(fx, args => (ptr, src); intrinsic);
959             let ptr = ptr.load_scalar(fx);
960
961             let layout = src.layout();
962             match layout.ty.kind() {
963                 ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
964                 _ => {
965                     report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
966                     return;
967                 }
968             }
969             let ty = fx.clif_type(layout.ty).unwrap();
970
971             let src = src.load_scalar(fx);
972
973             let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xor, ptr, src);
974
975             let old = CValue::by_val(old, layout);
976             ret.write_cvalue(fx, old);
977         }
978         _ if intrinsic.as_str().starts_with("atomic_nand") => {
979             intrinsic_args!(fx, args => (ptr, src); intrinsic);
980             let ptr = ptr.load_scalar(fx);
981
982             let layout = src.layout();
983             match layout.ty.kind() {
984                 ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
985                 _ => {
986                     report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
987                     return;
988                 }
989             }
990             let ty = fx.clif_type(layout.ty).unwrap();
991
992             let src = src.load_scalar(fx);
993
994             let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Nand, ptr, src);
995
996             let old = CValue::by_val(old, layout);
997             ret.write_cvalue(fx, old);
998         }
999         _ if intrinsic.as_str().starts_with("atomic_max") => {
1000             intrinsic_args!(fx, args => (ptr, src); intrinsic);
1001             let ptr = ptr.load_scalar(fx);
1002
1003             let layout = src.layout();
1004             match layout.ty.kind() {
1005                 ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
1006                 _ => {
1007                     report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
1008                     return;
1009                 }
1010             }
1011             let ty = fx.clif_type(layout.ty).unwrap();
1012
1013             let src = src.load_scalar(fx);
1014
1015             let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smax, ptr, src);
1016
1017             let old = CValue::by_val(old, layout);
1018             ret.write_cvalue(fx, old);
1019         }
1020         _ if intrinsic.as_str().starts_with("atomic_umax") => {
1021             intrinsic_args!(fx, args => (ptr, src); intrinsic);
1022             let ptr = ptr.load_scalar(fx);
1023
1024             let layout = src.layout();
1025             match layout.ty.kind() {
1026                 ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
1027                 _ => {
1028                     report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
1029                     return;
1030                 }
1031             }
1032             let ty = fx.clif_type(layout.ty).unwrap();
1033
1034             let src = src.load_scalar(fx);
1035
1036             let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umax, ptr, src);
1037
1038             let old = CValue::by_val(old, layout);
1039             ret.write_cvalue(fx, old);
1040         }
1041         _ if intrinsic.as_str().starts_with("atomic_min") => {
1042             intrinsic_args!(fx, args => (ptr, src); intrinsic);
1043             let ptr = ptr.load_scalar(fx);
1044
1045             let layout = src.layout();
1046             match layout.ty.kind() {
1047                 ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
1048                 _ => {
1049                     report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
1050                     return;
1051                 }
1052             }
1053             let ty = fx.clif_type(layout.ty).unwrap();
1054
1055             let src = src.load_scalar(fx);
1056
1057             let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smin, ptr, src);
1058
1059             let old = CValue::by_val(old, layout);
1060             ret.write_cvalue(fx, old);
1061         }
1062         _ if intrinsic.as_str().starts_with("atomic_umin") => {
1063             intrinsic_args!(fx, args => (ptr, src); intrinsic);
1064             let ptr = ptr.load_scalar(fx);
1065
1066             let layout = src.layout();
1067             match layout.ty.kind() {
1068                 ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
1069                 _ => {
1070                     report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
1071                     return;
1072                 }
1073             }
1074             let ty = fx.clif_type(layout.ty).unwrap();
1075
1076             let src = src.load_scalar(fx);
1077
1078             let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umin, ptr, src);
1079
1080             let old = CValue::by_val(old, layout);
1081             ret.write_cvalue(fx, old);
1082         }
1083
1084         sym::minnumf32 => {
1085             intrinsic_args!(fx, args => (a, b); intrinsic);
1086             let a = a.load_scalar(fx);
1087             let b = b.load_scalar(fx);
1088
1089             let val = crate::num::codegen_float_min(fx, a, b);
1090             let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
1091             ret.write_cvalue(fx, val);
1092         }
1093         sym::minnumf64 => {
1094             intrinsic_args!(fx, args => (a, b); intrinsic);
1095             let a = a.load_scalar(fx);
1096             let b = b.load_scalar(fx);
1097
1098             let val = crate::num::codegen_float_min(fx, a, b);
1099             let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
1100             ret.write_cvalue(fx, val);
1101         }
1102         sym::maxnumf32 => {
1103             intrinsic_args!(fx, args => (a, b); intrinsic);
1104             let a = a.load_scalar(fx);
1105             let b = b.load_scalar(fx);
1106
1107             let val = crate::num::codegen_float_max(fx, a, b);
1108             let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
1109             ret.write_cvalue(fx, val);
1110         }
1111         sym::maxnumf64 => {
1112             intrinsic_args!(fx, args => (a, b); intrinsic);
1113             let a = a.load_scalar(fx);
1114             let b = b.load_scalar(fx);
1115
1116             let val = crate::num::codegen_float_max(fx, a, b);
1117             let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
1118             ret.write_cvalue(fx, val);
1119         }
1120
1121         kw::Try => {
1122             intrinsic_args!(fx, args => (f, data, catch_fn); intrinsic);
1123             let f = f.load_scalar(fx);
1124             let data = data.load_scalar(fx);
1125             let _catch_fn = catch_fn.load_scalar(fx);
1126
1127             // FIXME once unwinding is supported, change this to actually catch panics
1128             let f_sig = fx.bcx.func.import_signature(Signature {
1129                 call_conv: fx.target_config.default_call_conv,
1130                 params: vec![AbiParam::new(pointer_ty(fx.tcx))],
1131                 returns: vec![],
1132             });
1133
1134             fx.bcx.ins().call_indirect(f_sig, f, &[data]);
1135
1136             let layout = ret.layout();
1137             let ret_val = CValue::const_val(fx, layout, ty::ScalarInt::null(layout.size));
1138             ret.write_cvalue(fx, ret_val);
1139         }
1140
1141         sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
1142             intrinsic_args!(fx, args => (x, y); intrinsic);
1143
1144             let res = crate::num::codegen_float_binop(
1145                 fx,
1146                 match intrinsic {
1147                     sym::fadd_fast => BinOp::Add,
1148                     sym::fsub_fast => BinOp::Sub,
1149                     sym::fmul_fast => BinOp::Mul,
1150                     sym::fdiv_fast => BinOp::Div,
1151                     sym::frem_fast => BinOp::Rem,
1152                     _ => unreachable!(),
1153                 },
1154                 x,
1155                 y,
1156             );
1157             ret.write_cvalue(fx, res);
1158         }
1159         sym::float_to_int_unchecked => {
1160             intrinsic_args!(fx, args => (f); intrinsic);
1161             let f = f.load_scalar(fx);
1162
1163             let res = crate::cast::clif_int_or_float_cast(
1164                 fx,
1165                 f,
1166                 false,
1167                 fx.clif_type(ret.layout().ty).unwrap(),
1168                 type_sign(ret.layout().ty),
1169             );
1170             ret.write_cvalue(fx, CValue::by_val(res, ret.layout()));
1171         }
1172
1173         sym::raw_eq => {
1174             intrinsic_args!(fx, args => (lhs_ref, rhs_ref); intrinsic);
1175             let lhs_ref = lhs_ref.load_scalar(fx);
1176             let rhs_ref = rhs_ref.load_scalar(fx);
1177
1178             let size = fx.layout_of(substs.type_at(0)).layout.size();
1179             // FIXME add and use emit_small_memcmp
1180             let is_eq_value = if size == Size::ZERO {
1181                 // No bytes means they're trivially equal
1182                 fx.bcx.ins().iconst(types::I8, 1)
1183             } else if let Some(clty) = size.bits().try_into().ok().and_then(Type::int) {
1184                 // Can't use `trusted` for these loads; they could be unaligned.
1185                 let mut flags = MemFlags::new();
1186                 flags.set_notrap();
1187                 let lhs_val = fx.bcx.ins().load(clty, flags, lhs_ref, 0);
1188                 let rhs_val = fx.bcx.ins().load(clty, flags, rhs_ref, 0);
1189                 fx.bcx.ins().icmp(IntCC::Equal, lhs_val, rhs_val)
1190             } else {
1191                 // Just call `memcmp` (like slices do in core) when the
1192                 // size is too large or it's not a power-of-two.
1193                 let signed_bytes = i64::try_from(size.bytes()).unwrap();
1194                 let bytes_val = fx.bcx.ins().iconst(fx.pointer_type, signed_bytes);
1195                 let params = vec![AbiParam::new(fx.pointer_type); 3];
1196                 let returns = vec![AbiParam::new(types::I32)];
1197                 let args = &[lhs_ref, rhs_ref, bytes_val];
1198                 let cmp = fx.lib_call("memcmp", params, returns, args)[0];
1199                 fx.bcx.ins().icmp_imm(IntCC::Equal, cmp, 0)
1200             };
1201             ret.write_cvalue(fx, CValue::by_val(is_eq_value, ret.layout()));
1202         }
1203
1204         sym::const_allocate => {
1205             intrinsic_args!(fx, args => (_size, _align); intrinsic);
1206
1207             // returns a null pointer at runtime.
1208             let null = fx.bcx.ins().iconst(fx.pointer_type, 0);
1209             ret.write_cvalue(fx, CValue::by_val(null, ret.layout()));
1210         }
1211
1212         sym::const_deallocate => {
1213             intrinsic_args!(fx, args => (_ptr, _size, _align); intrinsic);
1214             // nop at runtime.
1215         }
1216
1217         sym::black_box => {
1218             intrinsic_args!(fx, args => (a); intrinsic);
1219
1220             // FIXME implement black_box semantics
1221             ret.write_cvalue(fx, a);
1222         }
1223
1224         // FIXME implement variadics in cranelift
1225         sym::va_copy | sym::va_arg | sym::va_end => {
1226             fx.tcx.sess.span_fatal(
1227                 source_info.span,
1228                 "Defining variadic functions is not yet supported by Cranelift",
1229             );
1230         }
1231
1232         _ => {
1233             fx.tcx
1234                 .sess
1235                 .span_fatal(source_info.span, &format!("unsupported intrinsic {}", intrinsic));
1236         }
1237     }
1238
1239     let ret_block = fx.get_block(destination.unwrap());
1240     fx.bcx.ins().jump(ret_block, &[]);
1241 }