//! Codegen of intrinsics. This includes `extern "rust-intrinsic"`, `extern "platform-intrinsic"`
//! and LLVM intrinsics that have symbol names starting with `llvm.`.

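// Helper to destructure the `&[mir::Operand<'tcx>]` of an intrinsic call into individually named
// `CValue`s, calling `bug_on_incorrect_arg_count` when the number of operands does not match.
// For example `intrinsic_args!(fx, args => (x, y); intrinsic)` expands to roughly:
//
//     let (x, y) = if let [x, y] = args {
//         (codegen_operand(fx, x), codegen_operand(fx, y))
//     } else {
//         crate::intrinsics::bug_on_incorrect_arg_count(intrinsic)
//     };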
macro_rules! intrinsic_args {
    ($fx:expr, $args:expr => ($($arg:tt),*); $intrinsic:expr) => {
        #[allow(unused_parens)]
        let ($($arg),*) = if let [$($arg),*] = $args {
            ($(codegen_operand($fx, $arg)),*)
        } else {
            $crate::intrinsics::bug_on_incorrect_arg_count($intrinsic);
        };
    }
}

mod cpuid;
mod llvm;
mod llvm_aarch64;
mod llvm_x86;
mod simd;

pub(crate) use cpuid::codegen_cpuid_call;
pub(crate) use llvm::codegen_llvm_intrinsic_call;

use rustc_middle::ty::layout::HasParamEnv;
use rustc_middle::ty::print::with_no_trimmed_paths;
use rustc_middle::ty::subst::SubstsRef;
use rustc_span::symbol::{kw, sym, Symbol};

use crate::prelude::*;
use cranelift_codegen::ir::AtomicRmwOp;

fn bug_on_incorrect_arg_count(intrinsic: impl std::fmt::Display) -> ! {
    bug!("wrong number of args for intrinsic {}", intrinsic);
}

fn report_atomic_type_validation_error<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    intrinsic: Symbol,
    span: Span,
    ty: Ty<'tcx>,
) {
    fx.tcx.sess.span_err(
        span,
        &format!(
            "`{}` intrinsic: expected basic integer or raw pointer type, found `{:?}`",
            intrinsic, ty
        ),
    );
    // Prevent verifier error
    fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
}

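/// Returns the Cranelift vector type to use for a SIMD layout, or `None` if the vector is not
/// natively supported (currently only 128-bit vectors are), in which case callers fall back to
/// lane-by-lane codegen.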
pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> Option<Type> {
    let (element, count) = match layout.abi {
        Abi::Vector { element, count } => (element, count),
        _ => unreachable!(),
    };

    match scalar_to_clif_type(tcx, element).by(u32::try_from(count).unwrap()) {
        // Cranelift currently only implements icmp for 128bit vectors.
        Some(vector_ty) if vector_ty.bits() == 128 => Some(vector_ty),
        _ => None,
    }
}

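/// Applies `f` to each lane of the vector `val` and writes the results to the corresponding lanes
/// of `ret`. `f` receives the input lane type, the result lane type and the loaded lane value.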
fn simd_for_each_lane<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    val: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Ty<'tcx>, Value) -> Value,
) {
    let layout = val.layout();

    let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
    let lane_layout = fx.layout_of(lane_ty);
    let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
    let ret_lane_layout = fx.layout_of(ret_lane_ty);
    assert_eq!(lane_count, ret_lane_count);

    for lane_idx in 0..lane_count {
        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);

        let res_lane = f(fx, lane_layout.ty, ret_lane_layout.ty, lane);
        let res_lane = CValue::by_val(res_lane, ret_lane_layout);

        ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
    }
}

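/// Binary counterpart of `simd_for_each_lane`: applies `f` to matching lanes of `x` and `y`.
/// This variant passes the lanes as `CValue`s, so `f` can return lanes of a different layout.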
fn simd_pair_for_each_lane_typed<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    x: CValue<'tcx>,
    y: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, CValue<'tcx>, CValue<'tcx>) -> CValue<'tcx>,
) {
    assert_eq!(x.layout(), y.layout());
    let layout = x.layout();

    let (lane_count, _lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
    let (ret_lane_count, _ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
    assert_eq!(lane_count, ret_lane_count);

    for lane_idx in 0..lane_count {
        let x_lane = x.value_lane(fx, lane_idx);
        let y_lane = y.value_lane(fx, lane_idx);

        let res_lane = f(fx, x_lane, y_lane);

        ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
    }
}

fn simd_pair_for_each_lane<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    x: CValue<'tcx>,
    y: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Ty<'tcx>, Value, Value) -> Value,
) {
    assert_eq!(x.layout(), y.layout());
    let layout = x.layout();

    let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
    let lane_layout = fx.layout_of(lane_ty);
    let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
    let ret_lane_layout = fx.layout_of(ret_lane_ty);
    assert_eq!(lane_count, ret_lane_count);

    for lane_idx in 0..lane_count {
        let x_lane = x.value_lane(fx, lane_idx).load_scalar(fx);
        let y_lane = y.value_lane(fx, lane_idx).load_scalar(fx);

        let res_lane = f(fx, lane_layout.ty, ret_lane_layout.ty, x_lane, y_lane);
        let res_lane = CValue::by_val(res_lane, ret_lane_layout);

        ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
    }
}

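/// Folds all lanes of `val` into a single scalar with `f`, seeded with `acc` if provided;
/// otherwise the first lane is used as the initial value and folding starts at the second lane.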
fn simd_reduce<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    val: CValue<'tcx>,
    acc: Option<Value>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Value, Value) -> Value,
) {
    let (lane_count, lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
    let lane_layout = fx.layout_of(lane_ty);
    assert_eq!(lane_layout, ret.layout());

    let (mut res_val, start_lane) =
        if let Some(acc) = acc { (acc, 0) } else { (val.value_lane(fx, 0).load_scalar(fx), 1) };
    for lane_idx in start_lane..lane_count {
        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
        res_val = f(fx, lane_layout.ty, res_val, lane);
    }
    let res = CValue::by_val(res_val, lane_layout);
    ret.write_cvalue(fx, res);
}

// FIXME move all uses to `simd_reduce`
fn simd_reduce_bool<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    val: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Value, Value) -> Value,
) {
    let (lane_count, _lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
    assert!(ret.layout().ty.is_bool());

    let res_val = val.value_lane(fx, 0).load_scalar(fx);
    let mut res_val = fx.bcx.ins().band_imm(res_val, 1); // mask to boolean
    for lane_idx in 1..lane_count {
        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
        let lane = fx.bcx.ins().band_imm(lane, 1); // mask to boolean
        res_val = f(fx, res_val, lane);
    }
    let res_val = if fx.bcx.func.dfg.value_type(res_val) != types::I8 {
        fx.bcx.ins().ireduce(types::I8, res_val)
    } else {
        res_val
    };
    let res = CValue::by_val(res_val, ret.layout());
    ret.write_cvalue(fx, res);
}

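/// Expands a boolean into either all-zeros or all-ones of the requested type, bitcasting the
/// integer mask back to a float for `f32`/`f64` results.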
fn bool_to_zero_or_max_uint<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    ty: Ty<'tcx>,
    val: Value,
) -> Value {
    let ty = fx.clif_type(ty).unwrap();

    let int_ty = match ty {
        types::F32 => types::I32,
        types::F64 => types::I64,
        ty => ty,
    };

    let mut res = fx.bcx.ins().bmask(int_ty, val);

    if ty.is_float() {
        res = fx.bcx.ins().bitcast(ty, res);
    }

    res
}

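/// Entry point for intrinsic codegen. Non-returning intrinsics are handled first, then `simd_*`
/// intrinsics, then float intrinsics that lower to libcalls, and finally everything else via
/// `codegen_regular_intrinsic_call`.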
pub(crate) fn codegen_intrinsic_call<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    instance: Instance<'tcx>,
    args: &[mir::Operand<'tcx>],
    destination: CPlace<'tcx>,
    target: Option<BasicBlock>,
    source_info: mir::SourceInfo,
) {
    let intrinsic = fx.tcx.item_name(instance.def_id());
    let substs = instance.substs;

    let target = if let Some(target) = target {
        target
    } else {
        // Insert non-returning intrinsics here
        match intrinsic {
            sym::abort => {
                fx.bcx.ins().trap(TrapCode::User(0));
            }
            sym::transmute => {
                crate::base::codegen_panic(fx, "Transmuting to uninhabited type.", source_info);
            }
            _ => unimplemented!("unsupported intrinsic {}", intrinsic),
        }
        return;
    };

    if intrinsic.as_str().starts_with("simd_") {
        self::simd::codegen_simd_intrinsic_call(
            fx,
            intrinsic,
            substs,
            args,
            destination,
            source_info.span,
        );
        let ret_block = fx.get_block(target);
        fx.bcx.ins().jump(ret_block, &[]);
    } else if codegen_float_intrinsic_call(fx, intrinsic, args, destination) {
        let ret_block = fx.get_block(target);
        fx.bcx.ins().jump(ret_block, &[]);
    } else {
        codegen_regular_intrinsic_call(
            fx,
            instance,
            intrinsic,
            substs,
            args,
            destination,
            Some(target),
            source_info,
        );
    }
}

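/// Lowers float intrinsics either to a native Cranelift instruction (fma, copysign, fabs, floor,
/// ceil, trunc) or to a libcall using the matching libm/compiler-builtins symbol name. Returns
/// `false` if the intrinsic is not a float intrinsic handled here.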
fn codegen_float_intrinsic_call<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    intrinsic: Symbol,
    args: &[mir::Operand<'tcx>],
    ret: CPlace<'tcx>,
) -> bool {
    let (name, arg_count, ty) = match intrinsic {
        sym::expf32 => ("expf", 1, fx.tcx.types.f32),
        sym::expf64 => ("exp", 1, fx.tcx.types.f64),
        sym::exp2f32 => ("exp2f", 1, fx.tcx.types.f32),
        sym::exp2f64 => ("exp2", 1, fx.tcx.types.f64),
        sym::sqrtf32 => ("sqrtf", 1, fx.tcx.types.f32),
        sym::sqrtf64 => ("sqrt", 1, fx.tcx.types.f64),
        sym::powif32 => ("__powisf2", 2, fx.tcx.types.f32), // compiler-builtins
        sym::powif64 => ("__powidf2", 2, fx.tcx.types.f64), // compiler-builtins
        sym::powf32 => ("powf", 2, fx.tcx.types.f32),
        sym::powf64 => ("pow", 2, fx.tcx.types.f64),
        sym::logf32 => ("logf", 1, fx.tcx.types.f32),
        sym::logf64 => ("log", 1, fx.tcx.types.f64),
        sym::log2f32 => ("log2f", 1, fx.tcx.types.f32),
        sym::log2f64 => ("log2", 1, fx.tcx.types.f64),
        sym::log10f32 => ("log10f", 1, fx.tcx.types.f32),
        sym::log10f64 => ("log10", 1, fx.tcx.types.f64),
        sym::fabsf32 => ("fabsf", 1, fx.tcx.types.f32),
        sym::fabsf64 => ("fabs", 1, fx.tcx.types.f64),
        sym::fmaf32 => ("fmaf", 3, fx.tcx.types.f32),
        sym::fmaf64 => ("fma", 3, fx.tcx.types.f64),
        sym::copysignf32 => ("copysignf", 2, fx.tcx.types.f32),
        sym::copysignf64 => ("copysign", 2, fx.tcx.types.f64),
        sym::floorf32 => ("floorf", 1, fx.tcx.types.f32),
        sym::floorf64 => ("floor", 1, fx.tcx.types.f64),
        sym::ceilf32 => ("ceilf", 1, fx.tcx.types.f32),
        sym::ceilf64 => ("ceil", 1, fx.tcx.types.f64),
        sym::truncf32 => ("truncf", 1, fx.tcx.types.f32),
        sym::truncf64 => ("trunc", 1, fx.tcx.types.f64),
        sym::roundf32 => ("roundf", 1, fx.tcx.types.f32),
        sym::roundf64 => ("round", 1, fx.tcx.types.f64),
        sym::sinf32 => ("sinf", 1, fx.tcx.types.f32),
        sym::sinf64 => ("sin", 1, fx.tcx.types.f64),
        sym::cosf32 => ("cosf", 1, fx.tcx.types.f32),
        sym::cosf64 => ("cos", 1, fx.tcx.types.f64),
        _ => return false,
    };

    if args.len() != arg_count {
        bug!("wrong number of args for intrinsic {:?}", intrinsic);
    }

    let (a, b, c);
    let args = match args {
        [x] => {
            a = [codegen_operand(fx, x)];
            &a as &[_]
        }
        [x, y] => {
            b = [codegen_operand(fx, x), codegen_operand(fx, y)];
            &b
        }
        [x, y, z] => {
            c = [codegen_operand(fx, x), codegen_operand(fx, y), codegen_operand(fx, z)];
            &c
        }
        _ => unreachable!(),
    };

    let layout = fx.layout_of(ty);
    let res = match intrinsic {
        sym::fmaf32 | sym::fmaf64 => {
            let a = args[0].load_scalar(fx);
            let b = args[1].load_scalar(fx);
            let c = args[2].load_scalar(fx);
            CValue::by_val(fx.bcx.ins().fma(a, b, c), layout)
        }
        sym::copysignf32 | sym::copysignf64 => {
            let a = args[0].load_scalar(fx);
            let b = args[1].load_scalar(fx);
            CValue::by_val(fx.bcx.ins().fcopysign(a, b), layout)
        }
        sym::fabsf32
        | sym::fabsf64
        | sym::floorf32
        | sym::floorf64
        | sym::ceilf32
        | sym::ceilf64
        | sym::truncf32
        | sym::truncf64 => {
            let a = args[0].load_scalar(fx);

            let val = match intrinsic {
                sym::fabsf32 | sym::fabsf64 => fx.bcx.ins().fabs(a),
                sym::floorf32 | sym::floorf64 => fx.bcx.ins().floor(a),
                sym::ceilf32 | sym::ceilf64 => fx.bcx.ins().ceil(a),
                sym::truncf32 | sym::truncf64 => fx.bcx.ins().trunc(a),
                _ => unreachable!(),
            };

            CValue::by_val(val, layout)
        }
        // These intrinsics aren't supported natively by Cranelift.
        // Lower them to a libcall.
        _ => fx.easy_call(name, &args, ty),
    };

    ret.write_cvalue(fx, res);

    true
}

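/// Handles all remaining intrinsics by matching on their name. Arms that diverge (for example by
/// emitting a panic) return early; all other arms fall through to the jump to the return block at
/// the end of this function.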
fn codegen_regular_intrinsic_call<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    instance: Instance<'tcx>,
    intrinsic: Symbol,
    substs: SubstsRef<'tcx>,
    args: &[mir::Operand<'tcx>],
    ret: CPlace<'tcx>,
    destination: Option<BasicBlock>,
    source_info: mir::SourceInfo,
) {
    let usize_layout = fx.layout_of(fx.tcx.types.usize);

    match intrinsic {
        sym::likely | sym::unlikely => {
            intrinsic_args!(fx, args => (a); intrinsic);

            ret.write_cvalue(fx, a);
        }
        sym::breakpoint => {
            intrinsic_args!(fx, args => (); intrinsic);

            fx.bcx.ins().debugtrap();
        }
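        // `copy` may overlap, so it lowers to memmove, while `copy_nonoverlapping` lowers to
        // memcpy. The element count is scaled to a byte count using the element size from substs.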
        sym::copy | sym::copy_nonoverlapping => {
            intrinsic_args!(fx, args => (src, dst, count); intrinsic);
            let src = src.load_scalar(fx);
            let dst = dst.load_scalar(fx);
            let count = count.load_scalar(fx);

            let elem_ty = substs.type_at(0);
            let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
            assert_eq!(args.len(), 3);
            let byte_amount =
                if elem_size != 1 { fx.bcx.ins().imul_imm(count, elem_size as i64) } else { count };

            if intrinsic == sym::copy_nonoverlapping {
                // FIXME emit_small_memcpy
                fx.bcx.call_memcpy(fx.target_config, dst, src, byte_amount);
            } else {
                // FIXME emit_small_memmove
                fx.bcx.call_memmove(fx.target_config, dst, src, byte_amount);
            }
        }
        sym::volatile_copy_memory | sym::volatile_copy_nonoverlapping_memory => {
            // NOTE: the volatile variants have src and dst swapped
            intrinsic_args!(fx, args => (dst, src, count); intrinsic);
            let dst = dst.load_scalar(fx);
            let src = src.load_scalar(fx);
            let count = count.load_scalar(fx);

            let elem_ty = substs.type_at(0);
            let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
            assert_eq!(args.len(), 3);
            let byte_amount =
                if elem_size != 1 { fx.bcx.ins().imul_imm(count, elem_size as i64) } else { count };

            // FIXME make the copy actually volatile when using emit_small_mem{cpy,move}
            if intrinsic == sym::volatile_copy_nonoverlapping_memory {
                // FIXME emit_small_memcpy
                fx.bcx.call_memcpy(fx.target_config, dst, src, byte_amount);
            } else {
                // FIXME emit_small_memmove
                fx.bcx.call_memmove(fx.target_config, dst, src, byte_amount);
            }
        }
        sym::size_of_val => {
            intrinsic_args!(fx, args => (ptr); intrinsic);

            let layout = fx.layout_of(substs.type_at(0));
            // Note: Can't use is_unsized here as truly unsized types need to take the fixed size
            // branch
            let size = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
                let (_ptr, info) = ptr.load_scalar_pair(fx);
                let (size, _align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
                size
            } else {
                fx.bcx.ins().iconst(fx.pointer_type, layout.size.bytes() as i64)
            };
            ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
        }
        sym::min_align_of_val => {
            intrinsic_args!(fx, args => (ptr); intrinsic);

            let layout = fx.layout_of(substs.type_at(0));
            // Note: Can't use is_unsized here as truly unsized types need to take the fixed size
            // branch
            let align = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
                let (_ptr, info) = ptr.load_scalar_pair(fx);
                let (_size, align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
                align
            } else {
                fx.bcx.ins().iconst(fx.pointer_type, layout.align.abi.bytes() as i64)
            };
            ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
        }

        sym::vtable_size => {
            intrinsic_args!(fx, args => (vtable); intrinsic);
            let vtable = vtable.load_scalar(fx);

            let size = crate::vtable::size_of_obj(fx, vtable);
            ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
        }

        sym::vtable_align => {
            intrinsic_args!(fx, args => (vtable); intrinsic);
            let vtable = vtable.load_scalar(fx);

            let align = crate::vtable::min_align_of_obj(fx, vtable);
            ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
        }

        sym::unchecked_add
        | sym::unchecked_sub
        | sym::unchecked_mul
        | sym::unchecked_div
        | sym::exact_div
        | sym::unchecked_rem
        | sym::unchecked_shl
        | sym::unchecked_shr => {
            intrinsic_args!(fx, args => (x, y); intrinsic);

            // FIXME trap on overflow
            let bin_op = match intrinsic {
                sym::unchecked_add => BinOp::Add,
                sym::unchecked_sub => BinOp::Sub,
                sym::unchecked_mul => BinOp::Mul,
                sym::unchecked_div | sym::exact_div => BinOp::Div,
                sym::unchecked_rem => BinOp::Rem,
                sym::unchecked_shl => BinOp::Shl,
                sym::unchecked_shr => BinOp::Shr,
                _ => unreachable!(),
            };
            let res = crate::num::codegen_int_binop(fx, bin_op, x, y);
            ret.write_cvalue(fx, res);
        }
        sym::add_with_overflow | sym::sub_with_overflow | sym::mul_with_overflow => {
            intrinsic_args!(fx, args => (x, y); intrinsic);

            assert_eq!(x.layout().ty, y.layout().ty);
            let bin_op = match intrinsic {
                sym::add_with_overflow => BinOp::Add,
                sym::sub_with_overflow => BinOp::Sub,
                sym::mul_with_overflow => BinOp::Mul,
                _ => unreachable!(),
            };

            let res = crate::num::codegen_checked_int_binop(fx, bin_op, x, y);
            ret.write_cvalue(fx, res);
        }
        sym::saturating_add | sym::saturating_sub => {
            intrinsic_args!(fx, args => (lhs, rhs); intrinsic);

            assert_eq!(lhs.layout().ty, rhs.layout().ty);
            let bin_op = match intrinsic {
                sym::saturating_add => BinOp::Add,
                sym::saturating_sub => BinOp::Sub,
                _ => unreachable!(),
            };

            let res = crate::num::codegen_saturating_int_binop(fx, bin_op, lhs, rhs);
            ret.write_cvalue(fx, res);
        }
        sym::rotate_left => {
            intrinsic_args!(fx, args => (x, y); intrinsic);
            let y = y.load_scalar(fx);

            let layout = x.layout();
            let x = x.load_scalar(fx);
            let res = fx.bcx.ins().rotl(x, y);
            ret.write_cvalue(fx, CValue::by_val(res, layout));
        }
        sym::rotate_right => {
            intrinsic_args!(fx, args => (x, y); intrinsic);
            let y = y.load_scalar(fx);

            let layout = x.layout();
            let x = x.load_scalar(fx);
            let res = fx.bcx.ins().rotr(x, y);
            ret.write_cvalue(fx, CValue::by_val(res, layout));
        }

        // The only difference between offset and arith_offset is regarding UB. Because Cranelift
        // doesn't have UB, both are codegen'ed the same way.
        sym::offset | sym::arith_offset => {
            intrinsic_args!(fx, args => (base, offset); intrinsic);
            let offset = offset.load_scalar(fx);

            let pointee_ty = base.layout().ty.builtin_deref(true).unwrap().ty;
            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
            let ptr_diff = if pointee_size != 1 {
                fx.bcx.ins().imul_imm(offset, pointee_size as i64)
            } else {
                offset
            };
            let base_val = base.load_scalar(fx);
            let res = fx.bcx.ins().iadd(base_val, ptr_diff);
            ret.write_cvalue(fx, CValue::by_val(res, base.layout()));
        }

        sym::ptr_mask => {
            intrinsic_args!(fx, args => (ptr, mask); intrinsic);
            let ptr_layout = ptr.layout();
            let ptr = ptr.load_scalar(fx);
            let mask = mask.load_scalar(fx);
            let res = fx.bcx.ins().band(ptr, mask);
            ret.write_cvalue(fx, CValue::by_val(res, ptr_layout));
        }

        sym::transmute => {
            intrinsic_args!(fx, args => (from); intrinsic);

            ret.write_cvalue_transmute(fx, from);
        }
        sym::write_bytes | sym::volatile_set_memory => {
            intrinsic_args!(fx, args => (dst, val, count); intrinsic);
            let val = val.load_scalar(fx);
            let count = count.load_scalar(fx);

            let pointee_ty = dst.layout().ty.builtin_deref(true).unwrap().ty;
            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
            let count = if pointee_size != 1 {
                fx.bcx.ins().imul_imm(count, pointee_size as i64)
            } else {
                count
            };
            let dst_ptr = dst.load_scalar(fx);
            // FIXME make the memset actually volatile when switching to emit_small_memset
            // FIXME use emit_small_memset
            fx.bcx.call_memset(fx.target_config, dst_ptr, val, count);
        }
        sym::ctlz | sym::ctlz_nonzero => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            // FIXME trap on `ctlz_nonzero` with zero arg.
            let res = fx.bcx.ins().clz(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::cttz | sym::cttz_nonzero => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            // FIXME trap on `cttz_nonzero` with zero arg.
            let res = fx.bcx.ins().ctz(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::ctpop => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            let res = fx.bcx.ins().popcnt(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::bitreverse => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            let res = fx.bcx.ins().bitrev(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::bswap => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            let res = if fx.bcx.func.dfg.value_type(val) == types::I8 {
                val
            } else {
                fx.bcx.ins().bswap(val)
            };
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
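        // These asserts are checked against the monomorphized type: if the layout rules out the
        // requested initialization pattern, a panic is emitted at the call site. When the check
        // passes, no code is emitted at all.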
        sym::assert_inhabited | sym::assert_zero_valid | sym::assert_mem_uninitialized_valid => {
            intrinsic_args!(fx, args => (); intrinsic);

            let layout = fx.layout_of(substs.type_at(0));
            if layout.abi.is_uninhabited() {
                with_no_trimmed_paths!({
                    crate::base::codegen_panic(
                        fx,
                        &format!("attempted to instantiate uninhabited type `{}`", layout.ty),
                        source_info,
                    )
                });
                return;
            }

            if intrinsic == sym::assert_zero_valid
                && !fx.tcx.permits_zero_init(fx.param_env().and(layout))
            {
                with_no_trimmed_paths!({
                    crate::base::codegen_panic(
                        fx,
                        &format!(
                            "attempted to zero-initialize type `{}`, which is invalid",
                            layout.ty
                        ),
                        source_info,
                    );
                });
                return;
            }

            if intrinsic == sym::assert_mem_uninitialized_valid
                && !fx.tcx.permits_uninit_init(fx.param_env().and(layout))
            {
                with_no_trimmed_paths!({
                    crate::base::codegen_panic(
                        fx,
                        &format!(
                            "attempted to leave type `{}` uninitialized, which is invalid",
                            layout.ty
                        ),
                        source_info,
                    )
                });
                return;
            }
        }

        sym::volatile_load | sym::unaligned_volatile_load => {
            intrinsic_args!(fx, args => (ptr); intrinsic);

            // Cranelift treats loads as volatile by default
            // FIXME correctly handle unaligned_volatile_load
            let inner_layout = fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
            let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
            ret.write_cvalue(fx, val);
        }
        sym::volatile_store | sym::unaligned_volatile_store => {
            intrinsic_args!(fx, args => (ptr, val); intrinsic);
            let ptr = ptr.load_scalar(fx);

            // Cranelift treats stores as volatile by default
            // FIXME correctly handle unaligned_volatile_store
            let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
            dest.write_cvalue(fx, val);
        }

        sym::pref_align_of
        | sym::needs_drop
        | sym::type_id
        | sym::type_name
        | sym::variant_count => {
            intrinsic_args!(fx, args => (); intrinsic);

            let const_val =
                fx.tcx.const_eval_instance(ParamEnv::reveal_all(), instance, None).unwrap();
            let val = crate::constant::codegen_const_value(fx, const_val, ret.layout().ty);
            ret.write_cvalue(fx, val);
        }

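        // The pointer difference is computed in bytes and divided by the pointee size;
        // `ptr_offset_from_unsigned` uses an unsigned division and yields a `usize`, while the
        // signed variant yields an `isize`.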
        sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
            intrinsic_args!(fx, args => (ptr, base); intrinsic);
            let ptr = ptr.load_scalar(fx);
            let base = base.load_scalar(fx);
            let ty = substs.type_at(0);

            let pointee_size: u64 = fx.layout_of(ty).size.bytes();
            let diff_bytes = fx.bcx.ins().isub(ptr, base);
            // FIXME this can be an exact division.
            let val = if intrinsic == sym::ptr_offset_from_unsigned {
                let usize_layout = fx.layout_of(fx.tcx.types.usize);
                // Because diff_bytes ULE isize::MAX, this would be fine as signed,
                // but unsigned is slightly easier to codegen, so might as well.
                CValue::by_val(fx.bcx.ins().udiv_imm(diff_bytes, pointee_size as i64), usize_layout)
            } else {
                let isize_layout = fx.layout_of(fx.tcx.types.isize);
                CValue::by_val(fx.bcx.ins().sdiv_imm(diff_bytes, pointee_size as i64), isize_layout)
            };
            ret.write_cvalue(fx, val);
        }

        sym::ptr_guaranteed_cmp => {
            intrinsic_args!(fx, args => (a, b); intrinsic);

            let val = crate::num::codegen_ptr_binop(fx, BinOp::Eq, a, b).load_scalar(fx);
            ret.write_cvalue(fx, CValue::by_val(val, fx.layout_of(fx.tcx.types.u8)));
        }

        sym::caller_location => {
            intrinsic_args!(fx, args => (); intrinsic);

            let caller_location = fx.get_caller_location(source_info);
            ret.write_cvalue(fx, caller_location);
        }

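        // The atomic intrinsics encode their memory ordering as a suffix of the intrinsic name
        // (e.g. `atomic_load_seqcst`), which is why they are matched by prefix here; the suffix
        // does not change the code generated by this backend.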
        _ if intrinsic.as_str().starts_with("atomic_fence") => {
            intrinsic_args!(fx, args => (); intrinsic);

            fx.bcx.ins().fence();
        }
        _ if intrinsic.as_str().starts_with("atomic_singlethreadfence") => {
            intrinsic_args!(fx, args => (); intrinsic);

            // FIXME use a compiler fence once Cranelift supports it
            fx.bcx.ins().fence();
        }
        _ if intrinsic.as_str().starts_with("atomic_load") => {
            intrinsic_args!(fx, args => (ptr); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let ty = substs.type_at(0);
            match ty.kind() {
                ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
                    // FIXME implement 128bit atomics
                    if fx.tcx.is_compiler_builtins(LOCAL_CRATE) {
                        // special case for compiler-builtins to avoid having to patch it
                        crate::trap::trap_unimplemented(fx, "128bit atomics not yet supported");
                        return;
                    } else {
                        fx.tcx
                            .sess
                            .span_fatal(source_info.span, "128bit atomics not yet supported");
                    }
                }
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, ty);
                    return;
                }
            }
            let clif_ty = fx.clif_type(ty).unwrap();

            let val = fx.bcx.ins().atomic_load(clif_ty, MemFlags::trusted(), ptr);

            let val = CValue::by_val(val, fx.layout_of(ty));
            ret.write_cvalue(fx, val);
        }
        _ if intrinsic.as_str().starts_with("atomic_store") => {
            intrinsic_args!(fx, args => (ptr, val); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let ty = substs.type_at(0);
            match ty.kind() {
                ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
                    // FIXME implement 128bit atomics
                    if fx.tcx.is_compiler_builtins(LOCAL_CRATE) {
                        // special case for compiler-builtins to avoid having to patch it
                        crate::trap::trap_unimplemented(fx, "128bit atomics not yet supported");
                        return;
                    } else {
                        fx.tcx
                            .sess
                            .span_fatal(source_info.span, "128bit atomics not yet supported");
                    }
                }
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, ty);
                    return;
                }
            }

            let val = val.load_scalar(fx);

            fx.bcx.ins().atomic_store(MemFlags::trusted(), val, ptr);
        }
        _ if intrinsic.as_str().starts_with("atomic_xchg") => {
            intrinsic_args!(fx, args => (ptr, new); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = new.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let new = new.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xchg, ptr, new);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_cxchg") => {
            // both atomic_cxchg_* and atomic_cxchgweak_*
            intrinsic_args!(fx, args => (ptr, test_old, new); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = new.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }

            let test_old = test_old.load_scalar(fx);
            let new = new.load_scalar(fx);

            let old = fx.bcx.ins().atomic_cas(MemFlags::trusted(), ptr, test_old, new);
            let is_eq = fx.bcx.ins().icmp(IntCC::Equal, old, test_old);

            let ret_val = CValue::by_val_pair(old, is_eq, ret.layout());
            ret.write_cvalue(fx, ret_val)
        }

        _ if intrinsic.as_str().starts_with("atomic_xadd") => {
            intrinsic_args!(fx, args => (ptr, amount); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = amount.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let amount = amount.load_scalar(fx);

            let old =
                fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Add, ptr, amount);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_xsub") => {
            intrinsic_args!(fx, args => (ptr, amount); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = amount.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let amount = amount.load_scalar(fx);

            let old =
                fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Sub, ptr, amount);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_and") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::And, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_or") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Or, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_xor") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xor, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_nand") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Nand, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_max") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smax, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_umax") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umax, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_min") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smin, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_umin") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umin, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }

        sym::minnumf32 => {
            intrinsic_args!(fx, args => (a, b); intrinsic);
            let a = a.load_scalar(fx);
            let b = b.load_scalar(fx);

            let val = crate::num::codegen_float_min(fx, a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
            ret.write_cvalue(fx, val);
        }
        sym::minnumf64 => {
            intrinsic_args!(fx, args => (a, b); intrinsic);
            let a = a.load_scalar(fx);
            let b = b.load_scalar(fx);

            let val = crate::num::codegen_float_min(fx, a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
            ret.write_cvalue(fx, val);
        }
        sym::maxnumf32 => {
            intrinsic_args!(fx, args => (a, b); intrinsic);
            let a = a.load_scalar(fx);
            let b = b.load_scalar(fx);

            let val = crate::num::codegen_float_max(fx, a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
            ret.write_cvalue(fx, val);
        }
        sym::maxnumf64 => {
            intrinsic_args!(fx, args => (a, b); intrinsic);
            let a = a.load_scalar(fx);
            let b = b.load_scalar(fx);

            let val = crate::num::codegen_float_max(fx, a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
            ret.write_cvalue(fx, val);
        }

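        // `try` is the intrinsic backing `catch_unwind`. Unwinding is not supported yet, so the
        // closure is simply called and the "no panic" value (0) is returned unconditionally.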
        kw::Try => {
            intrinsic_args!(fx, args => (f, data, catch_fn); intrinsic);
            let f = f.load_scalar(fx);
            let data = data.load_scalar(fx);
            let _catch_fn = catch_fn.load_scalar(fx);

            // FIXME once unwinding is supported, change this to actually catch panics
            let f_sig = fx.bcx.func.import_signature(Signature {
                call_conv: fx.target_config.default_call_conv,
                params: vec![AbiParam::new(pointer_ty(fx.tcx))],
                returns: vec![],
            });

            fx.bcx.ins().call_indirect(f_sig, f, &[data]);

            let layout = ret.layout();
            let ret_val = CValue::const_val(fx, layout, ty::ScalarInt::null(layout.size));
            ret.write_cvalue(fx, ret_val);
        }

        sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
            intrinsic_args!(fx, args => (x, y); intrinsic);

            let res = crate::num::codegen_float_binop(
                fx,
                match intrinsic {
                    sym::fadd_fast => BinOp::Add,
                    sym::fsub_fast => BinOp::Sub,
                    sym::fmul_fast => BinOp::Mul,
                    sym::fdiv_fast => BinOp::Div,
                    sym::frem_fast => BinOp::Rem,
                    _ => unreachable!(),
                },
                x,
                y,
            );
            ret.write_cvalue(fx, res);
        }
        sym::float_to_int_unchecked => {
            intrinsic_args!(fx, args => (f); intrinsic);
            let f = f.load_scalar(fx);

            let res = crate::cast::clif_int_or_float_cast(
                fx,
                f,
                false,
                fx.clif_type(ret.layout().ty).unwrap(),
                type_sign(ret.layout().ty),
            );
            ret.write_cvalue(fx, CValue::by_val(res, ret.layout()));
        }

        sym::raw_eq => {
            intrinsic_args!(fx, args => (lhs_ref, rhs_ref); intrinsic);
            let lhs_ref = lhs_ref.load_scalar(fx);
            let rhs_ref = rhs_ref.load_scalar(fx);

            let size = fx.layout_of(substs.type_at(0)).layout.size();
            // FIXME add and use emit_small_memcmp
            let is_eq_value = if size == Size::ZERO {
                // No bytes means they're trivially equal
                fx.bcx.ins().iconst(types::I8, 1)
            } else if let Some(clty) = size.bits().try_into().ok().and_then(Type::int) {
                // Can't use `trusted` for these loads; they could be unaligned.
                let mut flags = MemFlags::new();
                flags.set_notrap();
                let lhs_val = fx.bcx.ins().load(clty, flags, lhs_ref, 0);
                let rhs_val = fx.bcx.ins().load(clty, flags, rhs_ref, 0);
                fx.bcx.ins().icmp(IntCC::Equal, lhs_val, rhs_val)
            } else {
                // Just call `memcmp` (like slices do in core) when the
                // size is too large or it's not a power-of-two.
                let signed_bytes = i64::try_from(size.bytes()).unwrap();
                let bytes_val = fx.bcx.ins().iconst(fx.pointer_type, signed_bytes);
                let params = vec![AbiParam::new(fx.pointer_type); 3];
                let returns = vec![AbiParam::new(types::I32)];
                let args = &[lhs_ref, rhs_ref, bytes_val];
                let cmp = fx.lib_call("memcmp", params, returns, args)[0];
                fx.bcx.ins().icmp_imm(IntCC::Equal, cmp, 0)
            };
            ret.write_cvalue(fx, CValue::by_val(is_eq_value, ret.layout()));
        }

        sym::const_allocate => {
            intrinsic_args!(fx, args => (_size, _align); intrinsic);

            // returns a null pointer at runtime.
            let null = fx.bcx.ins().iconst(fx.pointer_type, 0);
            ret.write_cvalue(fx, CValue::by_val(null, ret.layout()));
        }

        sym::const_deallocate => {
            intrinsic_args!(fx, args => (_ptr, _size, _align); intrinsic);
            // nop at runtime.
        }

        sym::black_box => {
            intrinsic_args!(fx, args => (a); intrinsic);

            // FIXME implement black_box semantics
            ret.write_cvalue(fx, a);
        }

        // FIXME implement variadics in cranelift
        sym::va_copy | sym::va_arg | sym::va_end => {
            fx.tcx.sess.span_fatal(
                source_info.span,
                "Defining variadic functions is not yet supported by Cranelift",
            );
        }

        _ => {
            fx.tcx
                .sess
                .span_fatal(source_info.span, &format!("unsupported intrinsic {}", intrinsic));
        }
    }

    let ret_block = fx.get_block(destination.unwrap());
    fx.bcx.ins().jump(ret_block, &[]);
}