//! Codegen of intrinsics. This includes `extern "rust-intrinsic"`, `extern "platform-intrinsic"`
//! and LLVM intrinsics that have symbol names starting with `llvm.`.

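/// Matches the intrinsic's argument operands against the given pattern and codegens each one,
/// raising an ICE via `bug_on_incorrect_arg_count` when the argument count doesn't match.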
macro_rules! intrinsic_args {
    ($fx:expr, $args:expr => ($($arg:tt),*); $intrinsic:expr) => {
        #[allow(unused_parens)]
        let ($($arg),*) = if let [$($arg),*] = $args {
            ($(codegen_operand($fx, $arg)),*)
        } else {
            $crate::intrinsics::bug_on_incorrect_arg_count($intrinsic);
        };
    }
}

mod cpuid;
mod llvm;
mod simd;

pub(crate) use cpuid::codegen_cpuid_call;
pub(crate) use llvm::codegen_llvm_intrinsic_call;

use rustc_middle::ty::print::with_no_trimmed_paths;
use rustc_middle::ty::subst::SubstsRef;
use rustc_span::symbol::{kw, sym, Symbol};

use crate::prelude::*;
use cranelift_codegen::ir::AtomicRmwOp;

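/// Raises an ICE for an intrinsic called with the wrong number of arguments.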
fn bug_on_incorrect_arg_count(intrinsic: impl std::fmt::Display) -> ! {
    bug!("wrong number of args for intrinsic {}", intrinsic);
}

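/// Reports an error for an atomic intrinsic used with a type other than a basic integer or raw
/// pointer, then emits a trap so the generated code still passes the Cranelift verifier.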
fn report_atomic_type_validation_error<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    intrinsic: Symbol,
    span: Span,
    ty: Ty<'tcx>,
) {
    fx.tcx.sess.span_err(
        span,
        &format!(
            "`{}` intrinsic: expected basic integer or raw pointer type, found `{:?}`",
            intrinsic, ty
        ),
    );
    // Prevent verifier error
    crate::trap::trap_unreachable(fx, "compilation should not have succeeded");
}

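/// Maps a SIMD vector layout to the corresponding Cranelift vector type, returning `None` for
/// widths other than 128 bits.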
pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> Option<Type> {
    let (element, count) = match layout.abi {
        Abi::Vector { element, count } => (element, count),
        _ => unreachable!(),
    };

    match scalar_to_clif_type(tcx, element).by(u16::try_from(count).unwrap()) {
        // Cranelift currently only implements icmp for 128bit vectors.
        Some(vector_ty) if vector_ty.bits() == 128 => Some(vector_ty),
        _ => None,
    }
}

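/// Applies `f` to every lane of `val` and writes each result to the corresponding lane of `ret`.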
fn simd_for_each_lane<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    val: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Ty<'tcx>, Value) -> Value,
) {
    let layout = val.layout();

    let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
    let lane_layout = fx.layout_of(lane_ty);
    let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
    let ret_lane_layout = fx.layout_of(ret_lane_ty);
    assert_eq!(lane_count, ret_lane_count);

    for lane_idx in 0..lane_count {
        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);

        let res_lane = f(fx, lane_layout.ty, ret_lane_layout.ty, lane);
        let res_lane = CValue::by_val(res_lane, ret_lane_layout);

        ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
    }
}

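/// Applies `f` to every pair of corresponding lanes of `x` and `y`, writing each result to the
/// matching lane of `ret`.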
fn simd_pair_for_each_lane<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    x: CValue<'tcx>,
    y: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Ty<'tcx>, Value, Value) -> Value,
) {
    assert_eq!(x.layout(), y.layout());
    let layout = x.layout();

    let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
    let lane_layout = fx.layout_of(lane_ty);
    let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
    let ret_lane_layout = fx.layout_of(ret_lane_ty);
    assert_eq!(lane_count, ret_lane_count);

    for lane_idx in 0..lane_count {
        let x_lane = x.value_lane(fx, lane_idx).load_scalar(fx);
        let y_lane = y.value_lane(fx, lane_idx).load_scalar(fx);

        let res_lane = f(fx, lane_layout.ty, ret_lane_layout.ty, x_lane, y_lane);
        let res_lane = CValue::by_val(res_lane, ret_lane_layout);

        ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
    }
}

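/// Folds the lanes of `val` into a single scalar using `f`, seeded with `acc` if given and with
/// the first lane otherwise, and writes the result to `ret`.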
fn simd_reduce<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    val: CValue<'tcx>,
    acc: Option<Value>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Value, Value) -> Value,
) {
    let (lane_count, lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
    let lane_layout = fx.layout_of(lane_ty);
    assert_eq!(lane_layout, ret.layout());

    let (mut res_val, start_lane) =
        if let Some(acc) = acc { (acc, 0) } else { (val.value_lane(fx, 0).load_scalar(fx), 1) };
    for lane_idx in start_lane..lane_count {
        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
        res_val = f(fx, lane_layout.ty, res_val, lane);
    }
    let res = CValue::by_val(res_val, lane_layout);
    ret.write_cvalue(fx, res);
}

// FIXME move all uses to `simd_reduce`
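/// Boolean reduction over all lanes of `val`: each lane is masked to its low bit before being
/// combined with `f`, and the result is written to `ret` as a `bool`.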
fn simd_reduce_bool<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    val: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Value, Value) -> Value,
) {
    let (lane_count, _lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
    assert!(ret.layout().ty.is_bool());

    let res_val = val.value_lane(fx, 0).load_scalar(fx);
    let mut res_val = fx.bcx.ins().band_imm(res_val, 1); // mask to boolean
    for lane_idx in 1..lane_count {
        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
        let lane = fx.bcx.ins().band_imm(lane, 1); // mask to boolean
        res_val = f(fx, res_val, lane);
    }
    let res_val = if fx.bcx.func.dfg.value_type(res_val) != types::I8 {
        fx.bcx.ins().ireduce(types::I8, res_val)
    } else {
        res_val
    };
    let res = CValue::by_val(res_val, ret.layout());
    ret.write_cvalue(fx, res);
}

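/// Turns a boolean into all-zeros or all-ones of the given type, bitcasting the result back for
/// float types.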
fn bool_to_zero_or_max_uint<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    ty: Ty<'tcx>,
    val: Value,
) -> Value {
    let ty = fx.clif_type(ty).unwrap();

    let int_ty = match ty {
        types::F32 => types::I32,
        types::F64 => types::I64,
        ty => ty,
    };

    let val = fx.bcx.ins().bint(int_ty, val);
    let mut res = fx.bcx.ins().ineg(val);

    if ty.is_float() {
        res = fx.bcx.ins().bitcast(ty, res);
    }

    res
}

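/// Main entry point for intrinsic codegen. Non-returning intrinsics are handled inline; all
/// others are dispatched to the SIMD, float or regular intrinsic handlers, which then jump to
/// `target` when done.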
pub(crate) fn codegen_intrinsic_call<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    instance: Instance<'tcx>,
    args: &[mir::Operand<'tcx>],
    destination: CPlace<'tcx>,
    target: Option<BasicBlock>,
    source_info: mir::SourceInfo,
) {
    let intrinsic = fx.tcx.item_name(instance.def_id());
    let substs = instance.substs;

    let target = if let Some(target) = target {
        target
    } else {
        // Insert non-returning intrinsics here
        match intrinsic {
            sym::abort => {
                fx.bcx.ins().trap(TrapCode::User(0));
            }
            sym::transmute => {
                crate::base::codegen_panic(fx, "Transmuting to uninhabited type.", source_info);
            }
            _ => unimplemented!("unsupported intrinsic {}", intrinsic),
        }
        return;
    };

    if intrinsic.as_str().starts_with("simd_") {
        self::simd::codegen_simd_intrinsic_call(
            fx,
            intrinsic,
            substs,
            args,
            destination,
            source_info.span,
        );
        let ret_block = fx.get_block(target);
        fx.bcx.ins().jump(ret_block, &[]);
    } else if codegen_float_intrinsic_call(fx, intrinsic, args, destination) {
        let ret_block = fx.get_block(target);
        fx.bcx.ins().jump(ret_block, &[]);
    } else {
        codegen_regular_intrinsic_call(
            fx,
            instance,
            intrinsic,
            substs,
            args,
            destination,
            Some(target),
            source_info,
        );
    }
}

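/// Lowers the float intrinsics to calls to the corresponding libm (or, for `powi`,
/// compiler-builtins) functions. Returns `false` if the intrinsic is not a float intrinsic
/// handled here.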
fn codegen_float_intrinsic_call<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    intrinsic: Symbol,
    args: &[mir::Operand<'tcx>],
    ret: CPlace<'tcx>,
) -> bool {
    let (name, arg_count, ty) = match intrinsic {
        sym::expf32 => ("expf", 1, fx.tcx.types.f32),
        sym::expf64 => ("exp", 1, fx.tcx.types.f64),
        sym::exp2f32 => ("exp2f", 1, fx.tcx.types.f32),
        sym::exp2f64 => ("exp2", 1, fx.tcx.types.f64),
        sym::sqrtf32 => ("sqrtf", 1, fx.tcx.types.f32),
        sym::sqrtf64 => ("sqrt", 1, fx.tcx.types.f64),
        sym::powif32 => ("__powisf2", 2, fx.tcx.types.f32), // compiler-builtins
        sym::powif64 => ("__powidf2", 2, fx.tcx.types.f64), // compiler-builtins
        sym::powf32 => ("powf", 2, fx.tcx.types.f32),
        sym::powf64 => ("pow", 2, fx.tcx.types.f64),
        sym::logf32 => ("logf", 1, fx.tcx.types.f32),
        sym::logf64 => ("log", 1, fx.tcx.types.f64),
        sym::log2f32 => ("log2f", 1, fx.tcx.types.f32),
        sym::log2f64 => ("log2", 1, fx.tcx.types.f64),
        sym::log10f32 => ("log10f", 1, fx.tcx.types.f32),
        sym::log10f64 => ("log10", 1, fx.tcx.types.f64),
        sym::fabsf32 => ("fabsf", 1, fx.tcx.types.f32),
        sym::fabsf64 => ("fabs", 1, fx.tcx.types.f64),
        sym::fmaf32 => ("fmaf", 3, fx.tcx.types.f32),
        sym::fmaf64 => ("fma", 3, fx.tcx.types.f64),
        sym::copysignf32 => ("copysignf", 2, fx.tcx.types.f32),
        sym::copysignf64 => ("copysign", 2, fx.tcx.types.f64),
        sym::floorf32 => ("floorf", 1, fx.tcx.types.f32),
        sym::floorf64 => ("floor", 1, fx.tcx.types.f64),
        sym::ceilf32 => ("ceilf", 1, fx.tcx.types.f32),
        sym::ceilf64 => ("ceil", 1, fx.tcx.types.f64),
        sym::truncf32 => ("truncf", 1, fx.tcx.types.f32),
        sym::truncf64 => ("trunc", 1, fx.tcx.types.f64),
        sym::roundf32 => ("roundf", 1, fx.tcx.types.f32),
        sym::roundf64 => ("round", 1, fx.tcx.types.f64),
        sym::sinf32 => ("sinf", 1, fx.tcx.types.f32),
        sym::sinf64 => ("sin", 1, fx.tcx.types.f64),
        sym::cosf32 => ("cosf", 1, fx.tcx.types.f32),
        sym::cosf64 => ("cos", 1, fx.tcx.types.f64),
        _ => return false,
    };

    if args.len() != arg_count {
        bug!("wrong number of args for intrinsic {:?}", intrinsic);
    }

    let (a, b, c);
    let args = match args {
        [x] => {
            a = [codegen_operand(fx, x)];
            &a as &[_]
        }
        [x, y] => {
            b = [codegen_operand(fx, x), codegen_operand(fx, y)];
            &b
        }
        [x, y, z] => {
            c = [codegen_operand(fx, x), codegen_operand(fx, y), codegen_operand(fx, z)];
            &c
        }
        _ => unreachable!(),
    };

    let res = fx.easy_call(name, &args, ty);
    ret.write_cvalue(fx, res);

    true
}

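/// Codegens all remaining intrinsics, jumping to `destination` at the end unless an arm returned
/// early (e.g. after emitting a panic or an unsupported-type error).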
fn codegen_regular_intrinsic_call<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    instance: Instance<'tcx>,
    intrinsic: Symbol,
    substs: SubstsRef<'tcx>,
    args: &[mir::Operand<'tcx>],
    ret: CPlace<'tcx>,
    destination: Option<BasicBlock>,
    source_info: mir::SourceInfo,
) {
    let usize_layout = fx.layout_of(fx.tcx.types.usize);

    match intrinsic {
        sym::assume => {
            intrinsic_args!(fx, args => (_a); intrinsic);
        }
        sym::likely | sym::unlikely => {
            intrinsic_args!(fx, args => (a); intrinsic);

            ret.write_cvalue(fx, a);
        }
        sym::breakpoint => {
            intrinsic_args!(fx, args => (); intrinsic);

            fx.bcx.ins().debugtrap();
        }
        sym::copy | sym::copy_nonoverlapping => {
            intrinsic_args!(fx, args => (src, dst, count); intrinsic);
            let src = src.load_scalar(fx);
            let dst = dst.load_scalar(fx);
            let count = count.load_scalar(fx);

            let elem_ty = substs.type_at(0);
            let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
            assert_eq!(args.len(), 3);
            let byte_amount =
                if elem_size != 1 { fx.bcx.ins().imul_imm(count, elem_size as i64) } else { count };

            if intrinsic == sym::copy_nonoverlapping {
                // FIXME emit_small_memcpy
                fx.bcx.call_memcpy(fx.target_config, dst, src, byte_amount);
            } else {
                // FIXME emit_small_memmove
                fx.bcx.call_memmove(fx.target_config, dst, src, byte_amount);
            }
        }
        sym::volatile_copy_memory | sym::volatile_copy_nonoverlapping_memory => {
            // NOTE: the volatile variants have src and dst swapped
            intrinsic_args!(fx, args => (dst, src, count); intrinsic);
            let dst = dst.load_scalar(fx);
            let src = src.load_scalar(fx);
            let count = count.load_scalar(fx);

            let elem_ty = substs.type_at(0);
            let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
            assert_eq!(args.len(), 3);
            let byte_amount =
                if elem_size != 1 { fx.bcx.ins().imul_imm(count, elem_size as i64) } else { count };

            // FIXME make the copy actually volatile when using emit_small_mem{cpy,move}
            if intrinsic == sym::volatile_copy_nonoverlapping_memory {
                // FIXME emit_small_memcpy
                fx.bcx.call_memcpy(fx.target_config, dst, src, byte_amount);
            } else {
                // FIXME emit_small_memmove
                fx.bcx.call_memmove(fx.target_config, dst, src, byte_amount);
            }
        }
        sym::size_of_val => {
            intrinsic_args!(fx, args => (ptr); intrinsic);

            let layout = fx.layout_of(substs.type_at(0));
            // Note: Can't use is_unsized here as truly unsized types need to take the fixed size
            // branch
            let size = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
                let (_ptr, info) = ptr.load_scalar_pair(fx);
                let (size, _align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
                size
            } else {
                fx.bcx.ins().iconst(fx.pointer_type, layout.size.bytes() as i64)
            };
            ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
        }
        sym::min_align_of_val => {
            intrinsic_args!(fx, args => (ptr); intrinsic);

            let layout = fx.layout_of(substs.type_at(0));
            // Note: Can't use is_unsized here as truly unsized types need to take the fixed size
            // branch
            let align = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
                let (_ptr, info) = ptr.load_scalar_pair(fx);
                let (_size, align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
                align
            } else {
                fx.bcx.ins().iconst(fx.pointer_type, layout.align.abi.bytes() as i64)
            };
            ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
        }

        sym::vtable_size => {
            intrinsic_args!(fx, args => (vtable); intrinsic);
            let vtable = vtable.load_scalar(fx);

            let size = crate::vtable::size_of_obj(fx, vtable);
            ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
        }

        sym::vtable_align => {
            intrinsic_args!(fx, args => (vtable); intrinsic);
            let vtable = vtable.load_scalar(fx);

            let align = crate::vtable::min_align_of_obj(fx, vtable);
            ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
        }

        sym::unchecked_add
        | sym::unchecked_sub
        | sym::unchecked_mul
        | sym::unchecked_div
        | sym::exact_div
        | sym::unchecked_rem
        | sym::unchecked_shl
        | sym::unchecked_shr => {
            intrinsic_args!(fx, args => (x, y); intrinsic);

            // FIXME trap on overflow
            let bin_op = match intrinsic {
                sym::unchecked_add => BinOp::Add,
                sym::unchecked_sub => BinOp::Sub,
                sym::unchecked_mul => BinOp::Mul,
                sym::unchecked_div | sym::exact_div => BinOp::Div,
                sym::unchecked_rem => BinOp::Rem,
                sym::unchecked_shl => BinOp::Shl,
                sym::unchecked_shr => BinOp::Shr,
                _ => unreachable!(),
            };
            let res = crate::num::codegen_int_binop(fx, bin_op, x, y);
            ret.write_cvalue(fx, res);
        }
        sym::add_with_overflow | sym::sub_with_overflow | sym::mul_with_overflow => {
            intrinsic_args!(fx, args => (x, y); intrinsic);

            assert_eq!(x.layout().ty, y.layout().ty);
            let bin_op = match intrinsic {
                sym::add_with_overflow => BinOp::Add,
                sym::sub_with_overflow => BinOp::Sub,
                sym::mul_with_overflow => BinOp::Mul,
                _ => unreachable!(),
            };

            let res = crate::num::codegen_checked_int_binop(fx, bin_op, x, y);
            ret.write_cvalue(fx, res);
        }
        sym::saturating_add | sym::saturating_sub => {
            intrinsic_args!(fx, args => (lhs, rhs); intrinsic);

            assert_eq!(lhs.layout().ty, rhs.layout().ty);
            let bin_op = match intrinsic {
                sym::saturating_add => BinOp::Add,
                sym::saturating_sub => BinOp::Sub,
                _ => unreachable!(),
            };

            let signed = type_sign(lhs.layout().ty);

            let checked_res = crate::num::codegen_checked_int_binop(fx, bin_op, lhs, rhs);

            let (val, has_overflow) = checked_res.load_scalar_pair(fx);
            let clif_ty = fx.clif_type(lhs.layout().ty).unwrap();

            let (min, max) = type_min_max_value(&mut fx.bcx, clif_ty, signed);

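            // On overflow, clamp the result to the saturation bound: for unsigned ops this is
            // simply `max` (add) or `min` (sub), while for signed ops the bound depends on the
            // sign of `rhs`.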
            let val = match (intrinsic, signed) {
                (sym::saturating_add, false) => fx.bcx.ins().select(has_overflow, max, val),
                (sym::saturating_sub, false) => fx.bcx.ins().select(has_overflow, min, val),
                (sym::saturating_add, true) => {
                    let rhs = rhs.load_scalar(fx);
                    let rhs_ge_zero =
                        fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
                    let sat_val = fx.bcx.ins().select(rhs_ge_zero, max, min);
                    fx.bcx.ins().select(has_overflow, sat_val, val)
                }
                (sym::saturating_sub, true) => {
                    let rhs = rhs.load_scalar(fx);
                    let rhs_ge_zero =
                        fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
                    let sat_val = fx.bcx.ins().select(rhs_ge_zero, min, max);
                    fx.bcx.ins().select(has_overflow, sat_val, val)
                }
                _ => unreachable!(),
            };

            let res = CValue::by_val(val, lhs.layout());

            ret.write_cvalue(fx, res);
        }
        sym::rotate_left => {
            intrinsic_args!(fx, args => (x, y); intrinsic);
            let y = y.load_scalar(fx);

            let layout = x.layout();
            let x = x.load_scalar(fx);
            let res = fx.bcx.ins().rotl(x, y);
            ret.write_cvalue(fx, CValue::by_val(res, layout));
        }
        sym::rotate_right => {
            intrinsic_args!(fx, args => (x, y); intrinsic);
            let y = y.load_scalar(fx);

            let layout = x.layout();
            let x = x.load_scalar(fx);
            let res = fx.bcx.ins().rotr(x, y);
            ret.write_cvalue(fx, CValue::by_val(res, layout));
        }

        // The only difference between offset and arith_offset is regarding UB. Because Cranelift
        // doesn't have UB, both are codegen'ed the same way.
        sym::offset | sym::arith_offset => {
            intrinsic_args!(fx, args => (base, offset); intrinsic);
            let offset = offset.load_scalar(fx);

            let pointee_ty = base.layout().ty.builtin_deref(true).unwrap().ty;
            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
            let ptr_diff = if pointee_size != 1 {
                fx.bcx.ins().imul_imm(offset, pointee_size as i64)
            } else {
                offset
            };
            let base_val = base.load_scalar(fx);
            let res = fx.bcx.ins().iadd(base_val, ptr_diff);
            ret.write_cvalue(fx, CValue::by_val(res, base.layout()));
        }

        sym::transmute => {
            intrinsic_args!(fx, args => (from); intrinsic);

            ret.write_cvalue_transmute(fx, from);
        }
        sym::write_bytes | sym::volatile_set_memory => {
            intrinsic_args!(fx, args => (dst, val, count); intrinsic);
            let val = val.load_scalar(fx);
            let count = count.load_scalar(fx);

            let pointee_ty = dst.layout().ty.builtin_deref(true).unwrap().ty;
            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
            let count = if pointee_size != 1 {
                fx.bcx.ins().imul_imm(count, pointee_size as i64)
            } else {
                count
            };
            let dst_ptr = dst.load_scalar(fx);
            // FIXME make the memset actually volatile when switching to emit_small_memset
            // FIXME use emit_small_memset
            fx.bcx.call_memset(fx.target_config, dst_ptr, val, count);
        }
        sym::ctlz | sym::ctlz_nonzero => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            // FIXME trap on `ctlz_nonzero` with zero arg.
            let res = fx.bcx.ins().clz(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::cttz | sym::cttz_nonzero => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            // FIXME trap on `cttz_nonzero` with zero arg.
            let res = fx.bcx.ins().ctz(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::ctpop => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            let res = fx.bcx.ins().popcnt(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::bitreverse => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            let res = fx.bcx.ins().bitrev(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::bswap => {
            // FIXME(CraneStation/cranelift#794) add bswap instruction to cranelift
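            // Swap bytes manually: shift each byte into its mirrored position, mask it out, then
            // OR the pieces back together. 128 bit values are split into two halves, swapped
            // recursively and concatenated in reverse order.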
            fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
                match bcx.func.dfg.value_type(v) {
                    types::I8 => v,

                    // https://code.woboq.org/gcc/include/bits/byteswap.h.html
                    types::I16 => {
                        let tmp1 = bcx.ins().ishl_imm(v, 8);
                        let n1 = bcx.ins().band_imm(tmp1, 0xFF00);

                        let tmp2 = bcx.ins().ushr_imm(v, 8);
                        let n2 = bcx.ins().band_imm(tmp2, 0x00FF);

                        bcx.ins().bor(n1, n2)
                    }
                    types::I32 => {
                        let tmp1 = bcx.ins().ishl_imm(v, 24);
                        let n1 = bcx.ins().band_imm(tmp1, 0xFF00_0000);

                        let tmp2 = bcx.ins().ishl_imm(v, 8);
                        let n2 = bcx.ins().band_imm(tmp2, 0x00FF_0000);

                        let tmp3 = bcx.ins().ushr_imm(v, 8);
                        let n3 = bcx.ins().band_imm(tmp3, 0x0000_FF00);

                        let tmp4 = bcx.ins().ushr_imm(v, 24);
                        let n4 = bcx.ins().band_imm(tmp4, 0x0000_00FF);

                        let or_tmp1 = bcx.ins().bor(n1, n2);
                        let or_tmp2 = bcx.ins().bor(n3, n4);
                        bcx.ins().bor(or_tmp1, or_tmp2)
                    }
                    types::I64 => {
                        let tmp1 = bcx.ins().ishl_imm(v, 56);
                        let n1 = bcx.ins().band_imm(tmp1, 0xFF00_0000_0000_0000u64 as i64);

                        let tmp2 = bcx.ins().ishl_imm(v, 40);
                        let n2 = bcx.ins().band_imm(tmp2, 0x00FF_0000_0000_0000u64 as i64);

                        let tmp3 = bcx.ins().ishl_imm(v, 24);
                        let n3 = bcx.ins().band_imm(tmp3, 0x0000_FF00_0000_0000u64 as i64);

                        let tmp4 = bcx.ins().ishl_imm(v, 8);
                        let n4 = bcx.ins().band_imm(tmp4, 0x0000_00FF_0000_0000u64 as i64);

                        let tmp5 = bcx.ins().ushr_imm(v, 8);
                        let n5 = bcx.ins().band_imm(tmp5, 0x0000_0000_FF00_0000u64 as i64);

                        let tmp6 = bcx.ins().ushr_imm(v, 24);
                        let n6 = bcx.ins().band_imm(tmp6, 0x0000_0000_00FF_0000u64 as i64);

                        let tmp7 = bcx.ins().ushr_imm(v, 40);
                        let n7 = bcx.ins().band_imm(tmp7, 0x0000_0000_0000_FF00u64 as i64);

                        let tmp8 = bcx.ins().ushr_imm(v, 56);
                        let n8 = bcx.ins().band_imm(tmp8, 0x0000_0000_0000_00FFu64 as i64);

                        let or_tmp1 = bcx.ins().bor(n1, n2);
                        let or_tmp2 = bcx.ins().bor(n3, n4);
                        let or_tmp3 = bcx.ins().bor(n5, n6);
                        let or_tmp4 = bcx.ins().bor(n7, n8);

                        let or_tmp5 = bcx.ins().bor(or_tmp1, or_tmp2);
                        let or_tmp6 = bcx.ins().bor(or_tmp3, or_tmp4);
                        bcx.ins().bor(or_tmp5, or_tmp6)
                    }
                    types::I128 => {
                        let (lo, hi) = bcx.ins().isplit(v);
                        let lo = swap(bcx, lo);
                        let hi = swap(bcx, hi);
                        bcx.ins().iconcat(hi, lo)
                    }
                    ty => unreachable!("bswap {}", ty),
                }
            }
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            let res = CValue::by_val(swap(&mut fx.bcx, val), arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::assert_inhabited | sym::assert_zero_valid | sym::assert_uninit_valid => {
            intrinsic_args!(fx, args => (); intrinsic);

            let layout = fx.layout_of(substs.type_at(0));
            if layout.abi.is_uninhabited() {
                with_no_trimmed_paths!({
                    crate::base::codegen_panic(
                        fx,
                        &format!("attempted to instantiate uninhabited type `{}`", layout.ty),
                        source_info,
                    )
                });
                return;
            }

            if intrinsic == sym::assert_zero_valid && !fx.tcx.permits_zero_init(layout) {
                with_no_trimmed_paths!({
                    crate::base::codegen_panic(
                        fx,
                        &format!(
                            "attempted to zero-initialize type `{}`, which is invalid",
                            layout.ty
                        ),
                        source_info,
                    );
                });
                return;
            }

            if intrinsic == sym::assert_uninit_valid && !fx.tcx.permits_uninit_init(layout) {
                with_no_trimmed_paths!({
                    crate::base::codegen_panic(
                        fx,
                        &format!(
                            "attempted to leave type `{}` uninitialized, which is invalid",
                            layout.ty
                        ),
                        source_info,
                    )
                });
                return;
            }
        }

        sym::volatile_load | sym::unaligned_volatile_load => {
            intrinsic_args!(fx, args => (ptr); intrinsic);

            // Cranelift treats loads as volatile by default
            // FIXME correctly handle unaligned_volatile_load
            let inner_layout = fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
            let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
            ret.write_cvalue(fx, val);
        }
        sym::volatile_store | sym::unaligned_volatile_store => {
            intrinsic_args!(fx, args => (ptr, val); intrinsic);
            let ptr = ptr.load_scalar(fx);

            // Cranelift treats stores as volatile by default
            // FIXME correctly handle unaligned_volatile_store
            let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
            dest.write_cvalue(fx, val);
        }

        sym::pref_align_of
        | sym::needs_drop
        | sym::type_id
        | sym::type_name
        | sym::variant_count => {
            intrinsic_args!(fx, args => (); intrinsic);

            let const_val =
                fx.tcx.const_eval_instance(ParamEnv::reveal_all(), instance, None).unwrap();
            let val = crate::constant::codegen_const_value(fx, const_val, ret.layout().ty);
            ret.write_cvalue(fx, val);
        }

        sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
            intrinsic_args!(fx, args => (ptr, base); intrinsic);
            let ptr = ptr.load_scalar(fx);
            let base = base.load_scalar(fx);
            let ty = substs.type_at(0);

            let pointee_size: u64 = fx.layout_of(ty).size.bytes();
            let diff_bytes = fx.bcx.ins().isub(ptr, base);
            // FIXME this can be an exact division.
            let val = if intrinsic == sym::ptr_offset_from_unsigned {
                let usize_layout = fx.layout_of(fx.tcx.types.usize);
                // Because diff_bytes ULE isize::MAX, this would be fine as signed,
                // but unsigned is slightly easier to codegen, so might as well.
                CValue::by_val(fx.bcx.ins().udiv_imm(diff_bytes, pointee_size as i64), usize_layout)
            } else {
                let isize_layout = fx.layout_of(fx.tcx.types.isize);
                CValue::by_val(fx.bcx.ins().sdiv_imm(diff_bytes, pointee_size as i64), isize_layout)
            };
            ret.write_cvalue(fx, val);
        }

        sym::ptr_guaranteed_eq => {
            intrinsic_args!(fx, args => (a, b); intrinsic);

            let val = crate::num::codegen_ptr_binop(fx, BinOp::Eq, a, b);
            ret.write_cvalue(fx, val);
        }

        sym::ptr_guaranteed_ne => {
            intrinsic_args!(fx, args => (a, b); intrinsic);

            let val = crate::num::codegen_ptr_binop(fx, BinOp::Ne, a, b);
            ret.write_cvalue(fx, val);
        }

        sym::caller_location => {
            intrinsic_args!(fx, args => (); intrinsic);

            let caller_location = fx.get_caller_location(source_info);
            ret.write_cvalue(fx, caller_location);
        }

        _ if intrinsic.as_str().starts_with("atomic_fence") => {
            intrinsic_args!(fx, args => (); intrinsic);

            fx.bcx.ins().fence();
        }
        _ if intrinsic.as_str().starts_with("atomic_singlethreadfence") => {
            intrinsic_args!(fx, args => (); intrinsic);

            // FIXME use a compiler fence once Cranelift supports it
            fx.bcx.ins().fence();
        }
        _ if intrinsic.as_str().starts_with("atomic_load") => {
            intrinsic_args!(fx, args => (ptr); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let ty = substs.type_at(0);
            match ty.kind() {
                ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
                    // FIXME implement 128bit atomics
                    if fx.tcx.is_compiler_builtins(LOCAL_CRATE) {
                        // special case for compiler-builtins to avoid having to patch it
                        crate::trap::trap_unimplemented(fx, "128bit atomics not yet supported");
                        let ret_block = fx.get_block(destination.unwrap());
                        fx.bcx.ins().jump(ret_block, &[]);
                        return;
                    } else {
                        fx.tcx
                            .sess
                            .span_fatal(source_info.span, "128bit atomics not yet supported");
                    }
                }
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, ty);
                    return;
                }
            }
            let clif_ty = fx.clif_type(ty).unwrap();

            let val = fx.bcx.ins().atomic_load(clif_ty, MemFlags::trusted(), ptr);

            let val = CValue::by_val(val, fx.layout_of(ty));
            ret.write_cvalue(fx, val);
        }
        _ if intrinsic.as_str().starts_with("atomic_store") => {
            intrinsic_args!(fx, args => (ptr, val); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let ty = substs.type_at(0);
            match ty.kind() {
                ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
                    // FIXME implement 128bit atomics
                    if fx.tcx.is_compiler_builtins(LOCAL_CRATE) {
                        // special case for compiler-builtins to avoid having to patch it
                        crate::trap::trap_unimplemented(fx, "128bit atomics not yet supported");
                        let ret_block = fx.get_block(destination.unwrap());
                        fx.bcx.ins().jump(ret_block, &[]);
                        return;
                    } else {
                        fx.tcx
                            .sess
                            .span_fatal(source_info.span, "128bit atomics not yet supported");
                    }
                }
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, ty);
                    return;
                }
            }

            let val = val.load_scalar(fx);

            fx.bcx.ins().atomic_store(MemFlags::trusted(), val, ptr);
        }
        _ if intrinsic.as_str().starts_with("atomic_xchg") => {
            intrinsic_args!(fx, args => (ptr, new); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = new.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let new = new.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xchg, ptr, new);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_cxchg") => {
            // both atomic_cxchg_* and atomic_cxchgweak_*
            intrinsic_args!(fx, args => (ptr, test_old, new); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = new.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }

            let test_old = test_old.load_scalar(fx);
            let new = new.load_scalar(fx);

            let old = fx.bcx.ins().atomic_cas(MemFlags::trusted(), ptr, test_old, new);
            let is_eq = fx.bcx.ins().icmp(IntCC::Equal, old, test_old);

            let ret_val =
                CValue::by_val_pair(old, fx.bcx.ins().bint(types::I8, is_eq), ret.layout());
            ret.write_cvalue(fx, ret_val)
        }

        _ if intrinsic.as_str().starts_with("atomic_xadd") => {
            intrinsic_args!(fx, args => (ptr, amount); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = amount.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let amount = amount.load_scalar(fx);

            let old =
                fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Add, ptr, amount);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_xsub") => {
            intrinsic_args!(fx, args => (ptr, amount); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = amount.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let amount = amount.load_scalar(fx);

            let old =
                fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Sub, ptr, amount);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_and") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::And, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_or") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Or, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_xor") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xor, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_nand") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Nand, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_max") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smax, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_umax") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umax, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_min") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smin, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_umin") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umin, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }

        sym::minnumf32 => {
            intrinsic_args!(fx, args => (a, b); intrinsic);
            let a = a.load_scalar(fx);
            let b = b.load_scalar(fx);

            let val = crate::num::codegen_float_min(fx, a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
            ret.write_cvalue(fx, val);
        }
        sym::minnumf64 => {
            intrinsic_args!(fx, args => (a, b); intrinsic);
            let a = a.load_scalar(fx);
            let b = b.load_scalar(fx);

            let val = crate::num::codegen_float_min(fx, a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
            ret.write_cvalue(fx, val);
        }
        sym::maxnumf32 => {
            intrinsic_args!(fx, args => (a, b); intrinsic);
            let a = a.load_scalar(fx);
            let b = b.load_scalar(fx);

            let val = crate::num::codegen_float_max(fx, a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
            ret.write_cvalue(fx, val);
        }
        sym::maxnumf64 => {
            intrinsic_args!(fx, args => (a, b); intrinsic);
            let a = a.load_scalar(fx);
            let b = b.load_scalar(fx);

            let val = crate::num::codegen_float_max(fx, a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
            ret.write_cvalue(fx, val);
        }

        kw::Try => {
            intrinsic_args!(fx, args => (f, data, catch_fn); intrinsic);
            let f = f.load_scalar(fx);
            let data = data.load_scalar(fx);
            let _catch_fn = catch_fn.load_scalar(fx);

            // FIXME once unwinding is supported, change this to actually catch panics
            let f_sig = fx.bcx.func.import_signature(Signature {
                call_conv: fx.target_config.default_call_conv,
                params: vec![AbiParam::new(fx.bcx.func.dfg.value_type(data))],
                returns: vec![],
            });

            fx.bcx.ins().call_indirect(f_sig, f, &[data]);

            let layout = ret.layout();
            let ret_val = CValue::const_val(fx, layout, ty::ScalarInt::null(layout.size));
            ret.write_cvalue(fx, ret_val);
        }

        sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
            intrinsic_args!(fx, args => (x, y); intrinsic);

            let res = crate::num::codegen_float_binop(
                fx,
                match intrinsic {
                    sym::fadd_fast => BinOp::Add,
                    sym::fsub_fast => BinOp::Sub,
                    sym::fmul_fast => BinOp::Mul,
                    sym::fdiv_fast => BinOp::Div,
                    sym::frem_fast => BinOp::Rem,
                    _ => unreachable!(),
                },
                x,
                y,
            );
            ret.write_cvalue(fx, res);
        }
        sym::float_to_int_unchecked => {
            intrinsic_args!(fx, args => (f); intrinsic);
            let f = f.load_scalar(fx);

            let res = crate::cast::clif_int_or_float_cast(
                fx,
                f,
                false,
                fx.clif_type(ret.layout().ty).unwrap(),
                type_sign(ret.layout().ty),
            );
            ret.write_cvalue(fx, CValue::by_val(res, ret.layout()));
        }

        sym::raw_eq => {
            intrinsic_args!(fx, args => (lhs_ref, rhs_ref); intrinsic);
            let lhs_ref = lhs_ref.load_scalar(fx);
            let rhs_ref = rhs_ref.load_scalar(fx);

            let size = fx.layout_of(substs.type_at(0)).layout.size();
            // FIXME add and use emit_small_memcmp
            let is_eq_value = if size == Size::ZERO {
                // No bytes means they're trivially equal
                fx.bcx.ins().iconst(types::I8, 1)
            } else if let Some(clty) = size.bits().try_into().ok().and_then(Type::int) {
                // Can't use `trusted` for these loads; they could be unaligned.
                let mut flags = MemFlags::new();
                flags.set_notrap();
                let lhs_val = fx.bcx.ins().load(clty, flags, lhs_ref, 0);
                let rhs_val = fx.bcx.ins().load(clty, flags, rhs_ref, 0);
                let eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_val, rhs_val);
                fx.bcx.ins().bint(types::I8, eq)
            } else {
                // Just call `memcmp` (like slices do in core) when the
                // size is too large or it's not a power-of-two.
                let signed_bytes = i64::try_from(size.bytes()).unwrap();
                let bytes_val = fx.bcx.ins().iconst(fx.pointer_type, signed_bytes);
                let params = vec![AbiParam::new(fx.pointer_type); 3];
                let returns = vec![AbiParam::new(types::I32)];
                let args = &[lhs_ref, rhs_ref, bytes_val];
                let cmp = fx.lib_call("memcmp", params, returns, args)[0];
                let eq = fx.bcx.ins().icmp_imm(IntCC::Equal, cmp, 0);
                fx.bcx.ins().bint(types::I8, eq)
            };
            ret.write_cvalue(fx, CValue::by_val(is_eq_value, ret.layout()));
        }

        sym::const_allocate => {
            intrinsic_args!(fx, args => (_size, _align); intrinsic);

            // returns a null pointer at runtime.
            let null = fx.bcx.ins().iconst(fx.pointer_type, 0);
            ret.write_cvalue(fx, CValue::by_val(null, ret.layout()));
        }

        sym::const_deallocate => {
            intrinsic_args!(fx, args => (_ptr, _size, _align); intrinsic);
            // nop at runtime.
        }

        sym::black_box => {
            intrinsic_args!(fx, args => (a); intrinsic);

            // FIXME implement black_box semantics
            ret.write_cvalue(fx, a);
        }

        // FIXME implement variadics in cranelift
        sym::va_copy | sym::va_arg | sym::va_end => {
            fx.tcx.sess.span_fatal(
                source_info.span,
                "Defining variadic functions is not yet supported by Cranelift",
            );
        }

        _ => {
            fx.tcx
                .sess
                .span_fatal(source_info.span, &format!("unsupported intrinsic {}", intrinsic));
        }
    }

    let ret_block = fx.get_block(destination.unwrap());
    fx.bcx.ins().jump(ret_block, &[]);
}