//! Codegen of intrinsics. This includes `extern "rust-intrinsic"`, `extern "platform-intrinsic"`
//! and LLVM intrinsics that have symbol names starting with `llvm.`.

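/// Binds the operands of an intrinsic call to the given names as `CValue`s,
/// bugging out via `bug_on_incorrect_arg_count` when the number of arguments
/// does not match the pattern.
///
/// A typical use, mirroring the call sites later in this file:
/// `intrinsic_args!(fx, args => (x, y); intrinsic);` binds `x` and `y` to the
/// codegenned first and second operands.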
macro_rules! intrinsic_args {
    ($fx:expr, $args:expr => ($($arg:tt),*); $intrinsic:expr) => {
        #[allow(unused_parens)]
        let ($($arg),*) = if let [$($arg),*] = $args {
            ($(codegen_operand($fx, $arg)),*)
        } else {
            $crate::intrinsics::bug_on_incorrect_arg_count($intrinsic);
        };
    }
}

mod cpuid;
mod llvm;
mod simd;

pub(crate) use cpuid::codegen_cpuid_call;
pub(crate) use llvm::codegen_llvm_intrinsic_call;

use rustc_middle::ty::print::with_no_trimmed_paths;
use rustc_middle::ty::subst::SubstsRef;
use rustc_span::symbol::{kw, sym, Symbol};

use crate::prelude::*;
use cranelift_codegen::ir::AtomicRmwOp;

fn bug_on_incorrect_arg_count(intrinsic: impl std::fmt::Display) -> ! {
    bug!("wrong number of args for intrinsic {}", intrinsic);
}

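/// Reports a `span_err` for an atomic intrinsic instantiated with a type other
/// than a basic integer or raw pointer, then emits a trap so the generated
/// function still passes the Cranelift verifier.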
fn report_atomic_type_validation_error<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    intrinsic: Symbol,
    span: Span,
    ty: Ty<'tcx>,
) {
    fx.tcx.sess.span_err(
        span,
        &format!(
            "`{}` intrinsic: expected basic integer or raw pointer type, found `{:?}`",
            intrinsic, ty
        ),
    );
    // Prevent verifier error
    fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
}

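/// Returns the Cranelift vector type for a SIMD layout, or `None` when the
/// layout does not map to a natively supported 128-bit Cranelift vector.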
pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> Option<Type> {
    let (element, count) = match layout.abi {
        Abi::Vector { element, count } => (element, count),
        _ => unreachable!(),
    };

    match scalar_to_clif_type(tcx, element).by(u32::try_from(count).unwrap()) {
        // Cranelift currently only implements icmp for 128bit vectors.
        Some(vector_ty) if vector_ty.bits() == 128 => Some(vector_ty),
        _ => None,
    }
}

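/// Applies `f` to every lane of the SIMD value `val` and writes the results
/// to the corresponding lanes of `ret`. Input and output must have the same
/// lane count.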
fn simd_for_each_lane<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    val: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Ty<'tcx>, Value) -> Value,
) {
    let layout = val.layout();

    let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
    let lane_layout = fx.layout_of(lane_ty);
    let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
    let ret_lane_layout = fx.layout_of(ret_lane_ty);
    assert_eq!(lane_count, ret_lane_count);

    for lane_idx in 0..lane_count {
        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);

        let res_lane = f(fx, lane_layout.ty, ret_lane_layout.ty, lane);
        let res_lane = CValue::by_val(res_lane, ret_lane_layout);

        ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
    }
}

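/// Applies `f` lane-wise to the SIMD pair `x` and `y`, which must have the
/// same layout, and writes the results to the lanes of `ret`.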
fn simd_pair_for_each_lane<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    x: CValue<'tcx>,
    y: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Ty<'tcx>, Value, Value) -> Value,
) {
    assert_eq!(x.layout(), y.layout());
    let layout = x.layout();

    let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
    let lane_layout = fx.layout_of(lane_ty);
    let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
    let ret_lane_layout = fx.layout_of(ret_lane_ty);
    assert_eq!(lane_count, ret_lane_count);

    for lane_idx in 0..lane_count {
        let x_lane = x.value_lane(fx, lane_idx).load_scalar(fx);
        let y_lane = y.value_lane(fx, lane_idx).load_scalar(fx);

        let res_lane = f(fx, lane_layout.ty, ret_lane_layout.ty, x_lane, y_lane);
        let res_lane = CValue::by_val(res_lane, ret_lane_layout);

        ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
    }
}

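/// Folds all lanes of `val` into a single scalar using `f`, starting from
/// `acc` when given and from the first lane otherwise, then writes the result
/// to `ret`.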
fn simd_reduce<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    val: CValue<'tcx>,
    acc: Option<Value>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Value, Value) -> Value,
) {
    let (lane_count, lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
    let lane_layout = fx.layout_of(lane_ty);
    assert_eq!(lane_layout, ret.layout());

    let (mut res_val, start_lane) =
        if let Some(acc) = acc { (acc, 0) } else { (val.value_lane(fx, 0).load_scalar(fx), 1) };
    for lane_idx in start_lane..lane_count {
        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
        res_val = f(fx, lane_layout.ty, res_val, lane);
    }
    let res = CValue::by_val(res_val, lane_layout);
    ret.write_cvalue(fx, res);
}

// FIXME move all uses to `simd_reduce`
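/// Like `simd_reduce`, but masks every lane to its lowest bit first and
/// writes a `bool` result to `ret`.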
fn simd_reduce_bool<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    val: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Value, Value) -> Value,
) {
    let (lane_count, _lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
    assert!(ret.layout().ty.is_bool());

    let res_val = val.value_lane(fx, 0).load_scalar(fx);
    let mut res_val = fx.bcx.ins().band_imm(res_val, 1); // mask to boolean
    for lane_idx in 1..lane_count {
        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
        let lane = fx.bcx.ins().band_imm(lane, 1); // mask to boolean
        res_val = f(fx, res_val, lane);
    }
    let res_val = if fx.bcx.func.dfg.value_type(res_val) != types::I8 {
        fx.bcx.ins().ireduce(types::I8, res_val)
    } else {
        res_val
    };
    let res = CValue::by_val(res_val, ret.layout());
    ret.write_cvalue(fx, res);
}

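/// Expands a boolean to all-zeros or all-ones in the Cranelift type
/// corresponding to `ty`, bitcasting the result back for float types.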
fn bool_to_zero_or_max_uint<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    ty: Ty<'tcx>,
    val: Value,
) -> Value {
    let ty = fx.clif_type(ty).unwrap();

    let int_ty = match ty {
        types::F32 => types::I32,
        types::F64 => types::I64,
        ty => ty,
    };

    let val = fx.bcx.ins().bint(int_ty, val);
    let mut res = fx.bcx.ins().ineg(val);

    if ty.is_float() {
        res = fx.bcx.ins().bitcast(ty, res);
    }

    res
}

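/// Main entry point for intrinsic codegen. Dispatches to the SIMD, float and
/// regular intrinsic handlers; intrinsics called without a return target are
/// codegenned as non-returning.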
pub(crate) fn codegen_intrinsic_call<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    instance: Instance<'tcx>,
    args: &[mir::Operand<'tcx>],
    destination: CPlace<'tcx>,
    target: Option<BasicBlock>,
    source_info: mir::SourceInfo,
) {
    let intrinsic = fx.tcx.item_name(instance.def_id());
    let substs = instance.substs;

    let target = if let Some(target) = target {
        target
    } else {
        // Insert non-returning intrinsics here
        match intrinsic {
            sym::abort => {
                fx.bcx.ins().trap(TrapCode::User(0));
            }
            sym::transmute => {
                crate::base::codegen_panic(fx, "Transmuting to uninhabited type.", source_info);
            }
            _ => unimplemented!("unsupported intrinsic {}", intrinsic),
        }
        return;
    };

    if intrinsic.as_str().starts_with("simd_") {
        self::simd::codegen_simd_intrinsic_call(
            fx,
            intrinsic,
            substs,
            args,
            destination,
            source_info.span,
        );
        let ret_block = fx.get_block(target);
        fx.bcx.ins().jump(ret_block, &[]);
    } else if codegen_float_intrinsic_call(fx, intrinsic, args, destination) {
        let ret_block = fx.get_block(target);
        fx.bcx.ins().jump(ret_block, &[]);
    } else {
        codegen_regular_intrinsic_call(
            fx,
            instance,
            intrinsic,
            substs,
            args,
            destination,
            Some(target),
            source_info,
        );
    }
}

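/// Codegens float intrinsics such as `sqrtf32` by lowering them to a native
/// Cranelift instruction or a libcall. Returns `false` when the intrinsic is
/// not a float intrinsic, so the caller can fall back to the regular
/// intrinsic handling.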
fn codegen_float_intrinsic_call<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    intrinsic: Symbol,
    args: &[mir::Operand<'tcx>],
    ret: CPlace<'tcx>,
) -> bool {
    let (name, arg_count, ty) = match intrinsic {
        sym::expf32 => ("expf", 1, fx.tcx.types.f32),
        sym::expf64 => ("exp", 1, fx.tcx.types.f64),
        sym::exp2f32 => ("exp2f", 1, fx.tcx.types.f32),
        sym::exp2f64 => ("exp2", 1, fx.tcx.types.f64),
        sym::sqrtf32 => ("sqrtf", 1, fx.tcx.types.f32),
        sym::sqrtf64 => ("sqrt", 1, fx.tcx.types.f64),
        sym::powif32 => ("__powisf2", 2, fx.tcx.types.f32), // compiler-builtins
        sym::powif64 => ("__powidf2", 2, fx.tcx.types.f64), // compiler-builtins
        sym::powf32 => ("powf", 2, fx.tcx.types.f32),
        sym::powf64 => ("pow", 2, fx.tcx.types.f64),
        sym::logf32 => ("logf", 1, fx.tcx.types.f32),
        sym::logf64 => ("log", 1, fx.tcx.types.f64),
        sym::log2f32 => ("log2f", 1, fx.tcx.types.f32),
        sym::log2f64 => ("log2", 1, fx.tcx.types.f64),
        sym::log10f32 => ("log10f", 1, fx.tcx.types.f32),
        sym::log10f64 => ("log10", 1, fx.tcx.types.f64),
        sym::fabsf32 => ("fabsf", 1, fx.tcx.types.f32),
        sym::fabsf64 => ("fabs", 1, fx.tcx.types.f64),
        sym::fmaf32 => ("fmaf", 3, fx.tcx.types.f32),
        sym::fmaf64 => ("fma", 3, fx.tcx.types.f64),
        sym::copysignf32 => ("copysignf", 2, fx.tcx.types.f32),
        sym::copysignf64 => ("copysign", 2, fx.tcx.types.f64),
        sym::floorf32 => ("floorf", 1, fx.tcx.types.f32),
        sym::floorf64 => ("floor", 1, fx.tcx.types.f64),
        sym::ceilf32 => ("ceilf", 1, fx.tcx.types.f32),
        sym::ceilf64 => ("ceil", 1, fx.tcx.types.f64),
        sym::truncf32 => ("truncf", 1, fx.tcx.types.f32),
        sym::truncf64 => ("trunc", 1, fx.tcx.types.f64),
        sym::roundf32 => ("roundf", 1, fx.tcx.types.f32),
        sym::roundf64 => ("round", 1, fx.tcx.types.f64),
        sym::sinf32 => ("sinf", 1, fx.tcx.types.f32),
        sym::sinf64 => ("sin", 1, fx.tcx.types.f64),
        sym::cosf32 => ("cosf", 1, fx.tcx.types.f32),
        sym::cosf64 => ("cos", 1, fx.tcx.types.f64),
        _ => return false,
    };

    if args.len() != arg_count {
        bug!("wrong number of args for intrinsic {:?}", intrinsic);
    }

    let (a, b, c);
    let args = match args {
        [x] => {
            a = [codegen_operand(fx, x)];
            &a as &[_]
        }
        [x, y] => {
            b = [codegen_operand(fx, x), codegen_operand(fx, y)];
            &b
        }
        [x, y, z] => {
            c = [codegen_operand(fx, x), codegen_operand(fx, y), codegen_operand(fx, z)];
            &c
        }
        _ => unreachable!(),
    };

    let layout = fx.layout_of(ty);
    let res = match intrinsic {
        sym::fmaf32 | sym::fmaf64 => {
            let a = args[0].load_scalar(fx);
            let b = args[1].load_scalar(fx);
            let c = args[2].load_scalar(fx);
            CValue::by_val(fx.bcx.ins().fma(a, b, c), layout)
        }
        sym::copysignf32 | sym::copysignf64 => {
            let a = args[0].load_scalar(fx);
            let b = args[1].load_scalar(fx);
            CValue::by_val(fx.bcx.ins().fcopysign(a, b), layout)
        }
        sym::fabsf32
        | sym::fabsf64
        | sym::floorf32
        | sym::floorf64
        | sym::ceilf32
        | sym::ceilf64
        | sym::truncf32
        | sym::truncf64 => {
            let a = args[0].load_scalar(fx);

            let val = match intrinsic {
                sym::fabsf32 | sym::fabsf64 => fx.bcx.ins().fabs(a),
                sym::floorf32 | sym::floorf64 => fx.bcx.ins().floor(a),
                sym::ceilf32 | sym::ceilf64 => fx.bcx.ins().ceil(a),
                sym::truncf32 | sym::truncf64 => fx.bcx.ins().trunc(a),
                _ => unreachable!(),
            };

            CValue::by_val(val, layout)
        }
        // These intrinsics aren't supported natively by Cranelift.
        // Lower them to a libcall.
        _ => fx.easy_call(name, &args, ty),
    };

    ret.write_cvalue(fx, res);

    true
}

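/// Codegens all intrinsics not handled by the SIMD or float paths. Unless the
/// intrinsic diverges (panics, traps or reports an error), a jump to
/// `destination` is emitted at the end.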
fn codegen_regular_intrinsic_call<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    instance: Instance<'tcx>,
    intrinsic: Symbol,
    substs: SubstsRef<'tcx>,
    args: &[mir::Operand<'tcx>],
    ret: CPlace<'tcx>,
    destination: Option<BasicBlock>,
    source_info: mir::SourceInfo,
) {
    let usize_layout = fx.layout_of(fx.tcx.types.usize);

    match intrinsic {
        sym::likely | sym::unlikely => {
            intrinsic_args!(fx, args => (a); intrinsic);

            ret.write_cvalue(fx, a);
        }
        sym::breakpoint => {
            intrinsic_args!(fx, args => (); intrinsic);

            fx.bcx.ins().debugtrap();
        }
        sym::copy | sym::copy_nonoverlapping => {
            intrinsic_args!(fx, args => (src, dst, count); intrinsic);
            let src = src.load_scalar(fx);
            let dst = dst.load_scalar(fx);
            let count = count.load_scalar(fx);

            let elem_ty = substs.type_at(0);
            let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
            assert_eq!(args.len(), 3);
            let byte_amount =
                if elem_size != 1 { fx.bcx.ins().imul_imm(count, elem_size as i64) } else { count };

            if intrinsic == sym::copy_nonoverlapping {
                // FIXME emit_small_memcpy
                fx.bcx.call_memcpy(fx.target_config, dst, src, byte_amount);
            } else {
                // FIXME emit_small_memmove
                fx.bcx.call_memmove(fx.target_config, dst, src, byte_amount);
            }
        }
        sym::volatile_copy_memory | sym::volatile_copy_nonoverlapping_memory => {
            // NOTE: the volatile variants have src and dst swapped
            intrinsic_args!(fx, args => (dst, src, count); intrinsic);
            let dst = dst.load_scalar(fx);
            let src = src.load_scalar(fx);
            let count = count.load_scalar(fx);

            let elem_ty = substs.type_at(0);
            let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
            assert_eq!(args.len(), 3);
            let byte_amount =
                if elem_size != 1 { fx.bcx.ins().imul_imm(count, elem_size as i64) } else { count };

            // FIXME make the copy actually volatile when using emit_small_mem{cpy,move}
            if intrinsic == sym::volatile_copy_nonoverlapping_memory {
                // FIXME emit_small_memcpy
                fx.bcx.call_memcpy(fx.target_config, dst, src, byte_amount);
            } else {
                // FIXME emit_small_memmove
                fx.bcx.call_memmove(fx.target_config, dst, src, byte_amount);
            }
        }
        sym::size_of_val => {
            intrinsic_args!(fx, args => (ptr); intrinsic);

            let layout = fx.layout_of(substs.type_at(0));
            // Note: Can't use is_unsized here as truly unsized types need to take the fixed size
            // branch
            let size = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
                let (_ptr, info) = ptr.load_scalar_pair(fx);
                let (size, _align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
                size
            } else {
                fx.bcx.ins().iconst(fx.pointer_type, layout.size.bytes() as i64)
            };
            ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
        }
        sym::min_align_of_val => {
            intrinsic_args!(fx, args => (ptr); intrinsic);

            let layout = fx.layout_of(substs.type_at(0));
            // Note: Can't use is_unsized here as truly unsized types need to take the fixed size
            // branch
            let align = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
                let (_ptr, info) = ptr.load_scalar_pair(fx);
                let (_size, align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
                align
            } else {
                fx.bcx.ins().iconst(fx.pointer_type, layout.align.abi.bytes() as i64)
            };
            ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
        }

        sym::vtable_size => {
            intrinsic_args!(fx, args => (vtable); intrinsic);
            let vtable = vtable.load_scalar(fx);

            let size = crate::vtable::size_of_obj(fx, vtable);
            ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
        }

        sym::vtable_align => {
            intrinsic_args!(fx, args => (vtable); intrinsic);
            let vtable = vtable.load_scalar(fx);

            let align = crate::vtable::min_align_of_obj(fx, vtable);
            ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
        }

        sym::unchecked_add
        | sym::unchecked_sub
        | sym::unchecked_mul
        | sym::unchecked_div
        | sym::exact_div
        | sym::unchecked_rem
        | sym::unchecked_shl
        | sym::unchecked_shr => {
            intrinsic_args!(fx, args => (x, y); intrinsic);

            // FIXME trap on overflow
            let bin_op = match intrinsic {
                sym::unchecked_add => BinOp::Add,
                sym::unchecked_sub => BinOp::Sub,
                sym::unchecked_mul => BinOp::Mul,
                sym::unchecked_div | sym::exact_div => BinOp::Div,
                sym::unchecked_rem => BinOp::Rem,
                sym::unchecked_shl => BinOp::Shl,
                sym::unchecked_shr => BinOp::Shr,
                _ => unreachable!(),
            };
            let res = crate::num::codegen_int_binop(fx, bin_op, x, y);
            ret.write_cvalue(fx, res);
        }
        sym::add_with_overflow | sym::sub_with_overflow | sym::mul_with_overflow => {
            intrinsic_args!(fx, args => (x, y); intrinsic);

            assert_eq!(x.layout().ty, y.layout().ty);
            let bin_op = match intrinsic {
                sym::add_with_overflow => BinOp::Add,
                sym::sub_with_overflow => BinOp::Sub,
                sym::mul_with_overflow => BinOp::Mul,
                _ => unreachable!(),
            };

            let res = crate::num::codegen_checked_int_binop(fx, bin_op, x, y);
            ret.write_cvalue(fx, res);
        }
        sym::saturating_add | sym::saturating_sub => {
            intrinsic_args!(fx, args => (lhs, rhs); intrinsic);

            assert_eq!(lhs.layout().ty, rhs.layout().ty);
            let bin_op = match intrinsic {
                sym::saturating_add => BinOp::Add,
                sym::saturating_sub => BinOp::Sub,
                _ => unreachable!(),
            };

            let signed = type_sign(lhs.layout().ty);

            let checked_res = crate::num::codegen_checked_int_binop(fx, bin_op, lhs, rhs);

            let (val, has_overflow) = checked_res.load_scalar_pair(fx);
            let clif_ty = fx.clif_type(lhs.layout().ty).unwrap();

            let (min, max) = type_min_max_value(&mut fx.bcx, clif_ty, signed);

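            // For unsigned operations overflow saturates to a fixed bound
            // (`max` for add, `min` for sub); for signed operations the bound
            // depends on the sign of `rhs`.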
            let val = match (intrinsic, signed) {
                (sym::saturating_add, false) => fx.bcx.ins().select(has_overflow, max, val),
                (sym::saturating_sub, false) => fx.bcx.ins().select(has_overflow, min, val),
                (sym::saturating_add, true) => {
                    let rhs = rhs.load_scalar(fx);
                    let rhs_ge_zero =
                        fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
                    let sat_val = fx.bcx.ins().select(rhs_ge_zero, max, min);
                    fx.bcx.ins().select(has_overflow, sat_val, val)
                }
                (sym::saturating_sub, true) => {
                    let rhs = rhs.load_scalar(fx);
                    let rhs_ge_zero =
                        fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
                    let sat_val = fx.bcx.ins().select(rhs_ge_zero, min, max);
                    fx.bcx.ins().select(has_overflow, sat_val, val)
                }
                _ => unreachable!(),
            };

            let res = CValue::by_val(val, lhs.layout());

            ret.write_cvalue(fx, res);
        }
        sym::rotate_left => {
            intrinsic_args!(fx, args => (x, y); intrinsic);
            let y = y.load_scalar(fx);

            let layout = x.layout();
            let x = x.load_scalar(fx);
            let res = fx.bcx.ins().rotl(x, y);
            ret.write_cvalue(fx, CValue::by_val(res, layout));
        }
        sym::rotate_right => {
            intrinsic_args!(fx, args => (x, y); intrinsic);
            let y = y.load_scalar(fx);

            let layout = x.layout();
            let x = x.load_scalar(fx);
            let res = fx.bcx.ins().rotr(x, y);
            ret.write_cvalue(fx, CValue::by_val(res, layout));
        }

        // The only difference between offset and arith_offset is regarding UB. Because Cranelift
        // doesn't have UB, both are codegen'ed the same way.
        sym::offset | sym::arith_offset => {
            intrinsic_args!(fx, args => (base, offset); intrinsic);
            let offset = offset.load_scalar(fx);

            let pointee_ty = base.layout().ty.builtin_deref(true).unwrap().ty;
            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
            let ptr_diff = if pointee_size != 1 {
                fx.bcx.ins().imul_imm(offset, pointee_size as i64)
            } else {
                offset
            };
            let base_val = base.load_scalar(fx);
            let res = fx.bcx.ins().iadd(base_val, ptr_diff);
            ret.write_cvalue(fx, CValue::by_val(res, base.layout()));
        }

        sym::ptr_mask => {
            intrinsic_args!(fx, args => (ptr, mask); intrinsic);
            let ptr = ptr.load_scalar(fx);
            let mask = mask.load_scalar(fx);

            let res = fx.bcx.ins().band(ptr, mask);
            ret.write_cvalue(fx, CValue::by_val(res, ret.layout()));
        }

        sym::transmute => {
            intrinsic_args!(fx, args => (from); intrinsic);

            ret.write_cvalue_transmute(fx, from);
        }
        sym::write_bytes | sym::volatile_set_memory => {
            intrinsic_args!(fx, args => (dst, val, count); intrinsic);
            let val = val.load_scalar(fx);
            let count = count.load_scalar(fx);

            let pointee_ty = dst.layout().ty.builtin_deref(true).unwrap().ty;
            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
            let count = if pointee_size != 1 {
                fx.bcx.ins().imul_imm(count, pointee_size as i64)
            } else {
                count
            };
            let dst_ptr = dst.load_scalar(fx);
            // FIXME make the memset actually volatile when switching to emit_small_memset
            // FIXME use emit_small_memset
            fx.bcx.call_memset(fx.target_config, dst_ptr, val, count);
        }
        sym::ctlz | sym::ctlz_nonzero => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            // FIXME trap on `ctlz_nonzero` with zero arg.
            let res = fx.bcx.ins().clz(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::cttz | sym::cttz_nonzero => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            // FIXME trap on `cttz_nonzero` with zero arg.
            let res = fx.bcx.ins().ctz(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::ctpop => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            let res = fx.bcx.ins().popcnt(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::bitreverse => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            let res = fx.bcx.ins().bitrev(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::bswap => {
            // FIXME(CraneStation/cranelift#794) add bswap instruction to cranelift
            fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
                match bcx.func.dfg.value_type(v) {
                    types::I8 => v,

                    // https://code.woboq.org/gcc/include/bits/byteswap.h.html
                    types::I16 => {
                        let tmp1 = bcx.ins().ishl_imm(v, 8);
                        let n1 = bcx.ins().band_imm(tmp1, 0xFF00);

                        let tmp2 = bcx.ins().ushr_imm(v, 8);
                        let n2 = bcx.ins().band_imm(tmp2, 0x00FF);

                        bcx.ins().bor(n1, n2)
                    }
                    types::I32 => {
                        let tmp1 = bcx.ins().ishl_imm(v, 24);
                        let n1 = bcx.ins().band_imm(tmp1, 0xFF00_0000);

                        let tmp2 = bcx.ins().ishl_imm(v, 8);
                        let n2 = bcx.ins().band_imm(tmp2, 0x00FF_0000);

                        let tmp3 = bcx.ins().ushr_imm(v, 8);
                        let n3 = bcx.ins().band_imm(tmp3, 0x0000_FF00);

                        let tmp4 = bcx.ins().ushr_imm(v, 24);
                        let n4 = bcx.ins().band_imm(tmp4, 0x0000_00FF);

                        let or_tmp1 = bcx.ins().bor(n1, n2);
                        let or_tmp2 = bcx.ins().bor(n3, n4);
                        bcx.ins().bor(or_tmp1, or_tmp2)
                    }
                    types::I64 => {
                        let tmp1 = bcx.ins().ishl_imm(v, 56);
                        let n1 = bcx.ins().band_imm(tmp1, 0xFF00_0000_0000_0000u64 as i64);

                        let tmp2 = bcx.ins().ishl_imm(v, 40);
                        let n2 = bcx.ins().band_imm(tmp2, 0x00FF_0000_0000_0000u64 as i64);

                        let tmp3 = bcx.ins().ishl_imm(v, 24);
                        let n3 = bcx.ins().band_imm(tmp3, 0x0000_FF00_0000_0000u64 as i64);

                        let tmp4 = bcx.ins().ishl_imm(v, 8);
                        let n4 = bcx.ins().band_imm(tmp4, 0x0000_00FF_0000_0000u64 as i64);

                        let tmp5 = bcx.ins().ushr_imm(v, 8);
                        let n5 = bcx.ins().band_imm(tmp5, 0x0000_0000_FF00_0000u64 as i64);

                        let tmp6 = bcx.ins().ushr_imm(v, 24);
                        let n6 = bcx.ins().band_imm(tmp6, 0x0000_0000_00FF_0000u64 as i64);

                        let tmp7 = bcx.ins().ushr_imm(v, 40);
                        let n7 = bcx.ins().band_imm(tmp7, 0x0000_0000_0000_FF00u64 as i64);

                        let tmp8 = bcx.ins().ushr_imm(v, 56);
                        let n8 = bcx.ins().band_imm(tmp8, 0x0000_0000_0000_00FFu64 as i64);

                        let or_tmp1 = bcx.ins().bor(n1, n2);
                        let or_tmp2 = bcx.ins().bor(n3, n4);
                        let or_tmp3 = bcx.ins().bor(n5, n6);
                        let or_tmp4 = bcx.ins().bor(n7, n8);

                        let or_tmp5 = bcx.ins().bor(or_tmp1, or_tmp2);
                        let or_tmp6 = bcx.ins().bor(or_tmp3, or_tmp4);
                        bcx.ins().bor(or_tmp5, or_tmp6)
                    }
                    types::I128 => {
                        let (lo, hi) = bcx.ins().isplit(v);
                        let lo = swap(bcx, lo);
                        let hi = swap(bcx, hi);
                        bcx.ins().iconcat(hi, lo)
                    }
                    ty => unreachable!("bswap {}", ty),
                }
            }
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            let res = CValue::by_val(swap(&mut fx.bcx, val), arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::assert_inhabited | sym::assert_zero_valid | sym::assert_uninit_valid => {
            intrinsic_args!(fx, args => (); intrinsic);

            let layout = fx.layout_of(substs.type_at(0));
            if layout.abi.is_uninhabited() {
                with_no_trimmed_paths!({
                    crate::base::codegen_panic(
                        fx,
                        &format!("attempted to instantiate uninhabited type `{}`", layout.ty),
                        source_info,
                    )
                });
                return;
            }

            if intrinsic == sym::assert_zero_valid && !fx.tcx.permits_zero_init(layout) {
                with_no_trimmed_paths!({
                    crate::base::codegen_panic(
                        fx,
                        &format!(
                            "attempted to zero-initialize type `{}`, which is invalid",
                            layout.ty
                        ),
                        source_info,
                    );
                });
                return;
            }

            if intrinsic == sym::assert_uninit_valid && !fx.tcx.permits_uninit_init(layout) {
                with_no_trimmed_paths!({
                    crate::base::codegen_panic(
                        fx,
                        &format!(
                            "attempted to leave type `{}` uninitialized, which is invalid",
                            layout.ty
                        ),
                        source_info,
                    )
                });
                return;
            }
        }

        sym::volatile_load | sym::unaligned_volatile_load => {
            intrinsic_args!(fx, args => (ptr); intrinsic);

            // Cranelift treats loads as volatile by default
            // FIXME correctly handle unaligned_volatile_load
            let inner_layout = fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
            let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
            ret.write_cvalue(fx, val);
        }
        sym::volatile_store | sym::unaligned_volatile_store => {
            intrinsic_args!(fx, args => (ptr, val); intrinsic);
            let ptr = ptr.load_scalar(fx);

            // Cranelift treats stores as volatile by default
            // FIXME correctly handle unaligned_volatile_store
            let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
            dest.write_cvalue(fx, val);
        }

        sym::pref_align_of
        | sym::needs_drop
        | sym::type_id
        | sym::type_name
        | sym::variant_count => {
            intrinsic_args!(fx, args => (); intrinsic);

            let const_val =
                fx.tcx.const_eval_instance(ParamEnv::reveal_all(), instance, None).unwrap();
            let val = crate::constant::codegen_const_value(fx, const_val, ret.layout().ty);
            ret.write_cvalue(fx, val);
        }

        sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
            intrinsic_args!(fx, args => (ptr, base); intrinsic);
            let ptr = ptr.load_scalar(fx);
            let base = base.load_scalar(fx);
            let ty = substs.type_at(0);

            let pointee_size: u64 = fx.layout_of(ty).size.bytes();
            let diff_bytes = fx.bcx.ins().isub(ptr, base);
            // FIXME this can be an exact division.
            let val = if intrinsic == sym::ptr_offset_from_unsigned {
                let usize_layout = fx.layout_of(fx.tcx.types.usize);
                // Because diff_bytes ULE isize::MAX, this would be fine as signed,
                // but unsigned is slightly easier to codegen, so might as well.
                CValue::by_val(fx.bcx.ins().udiv_imm(diff_bytes, pointee_size as i64), usize_layout)
            } else {
                let isize_layout = fx.layout_of(fx.tcx.types.isize);
                CValue::by_val(fx.bcx.ins().sdiv_imm(diff_bytes, pointee_size as i64), isize_layout)
            };
            ret.write_cvalue(fx, val);
        }

        sym::ptr_guaranteed_cmp => {
            intrinsic_args!(fx, args => (a, b); intrinsic);

            let val = crate::num::codegen_ptr_binop(fx, BinOp::Eq, a, b);
            ret.write_cvalue(fx, val);
        }

        sym::caller_location => {
            intrinsic_args!(fx, args => (); intrinsic);

            let caller_location = fx.get_caller_location(source_info);
            ret.write_cvalue(fx, caller_location);
        }

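        // All atomic intrinsics below are matched by name prefix, so e.g.
        // `atomic_load_seqcst` and `atomic_load_relaxed` take the same path;
        // the memory ordering suffix is not inspected here.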
        _ if intrinsic.as_str().starts_with("atomic_fence") => {
            intrinsic_args!(fx, args => (); intrinsic);

            fx.bcx.ins().fence();
        }
        _ if intrinsic.as_str().starts_with("atomic_singlethreadfence") => {
            intrinsic_args!(fx, args => (); intrinsic);

            // FIXME use a compiler fence once Cranelift supports it
            fx.bcx.ins().fence();
        }
        _ if intrinsic.as_str().starts_with("atomic_load") => {
            intrinsic_args!(fx, args => (ptr); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let ty = substs.type_at(0);
            match ty.kind() {
                ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
                    // FIXME implement 128bit atomics
                    if fx.tcx.is_compiler_builtins(LOCAL_CRATE) {
                        // special case for compiler-builtins to avoid having to patch it
                        crate::trap::trap_unimplemented(fx, "128bit atomics not yet supported");
                        return;
                    } else {
                        fx.tcx
                            .sess
                            .span_fatal(source_info.span, "128bit atomics not yet supported");
                    }
                }
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, ty);
                    return;
                }
            }
            let clif_ty = fx.clif_type(ty).unwrap();

            let val = fx.bcx.ins().atomic_load(clif_ty, MemFlags::trusted(), ptr);

            let val = CValue::by_val(val, fx.layout_of(ty));
            ret.write_cvalue(fx, val);
        }
        _ if intrinsic.as_str().starts_with("atomic_store") => {
            intrinsic_args!(fx, args => (ptr, val); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let ty = substs.type_at(0);
            match ty.kind() {
                ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
                    // FIXME implement 128bit atomics
                    if fx.tcx.is_compiler_builtins(LOCAL_CRATE) {
                        // special case for compiler-builtins to avoid having to patch it
                        crate::trap::trap_unimplemented(fx, "128bit atomics not yet supported");
                        return;
                    } else {
                        fx.tcx
                            .sess
                            .span_fatal(source_info.span, "128bit atomics not yet supported");
                    }
                }
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, ty);
                    return;
                }
            }

            let val = val.load_scalar(fx);

            fx.bcx.ins().atomic_store(MemFlags::trusted(), val, ptr);
        }
        _ if intrinsic.as_str().starts_with("atomic_xchg") => {
            intrinsic_args!(fx, args => (ptr, new); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = new.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let new = new.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xchg, ptr, new);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_cxchg") => {
            // both atomic_cxchg_* and atomic_cxchgweak_*
            intrinsic_args!(fx, args => (ptr, test_old, new); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = new.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }

            let test_old = test_old.load_scalar(fx);
            let new = new.load_scalar(fx);

            let old = fx.bcx.ins().atomic_cas(MemFlags::trusted(), ptr, test_old, new);
            let is_eq = fx.bcx.ins().icmp(IntCC::Equal, old, test_old);

            let ret_val =
                CValue::by_val_pair(old, fx.bcx.ins().bint(types::I8, is_eq), ret.layout());
            ret.write_cvalue(fx, ret_val)
        }

        _ if intrinsic.as_str().starts_with("atomic_xadd") => {
            intrinsic_args!(fx, args => (ptr, amount); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = amount.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let amount = amount.load_scalar(fx);

            let old =
                fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Add, ptr, amount);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_xsub") => {
            intrinsic_args!(fx, args => (ptr, amount); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = amount.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let amount = amount.load_scalar(fx);

            let old =
                fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Sub, ptr, amount);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_and") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::And, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_or") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Or, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_xor") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xor, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_nand") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Nand, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_max") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smax, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_umax") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umax, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_min") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smin, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_umin") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umin, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }

        sym::minnumf32 => {
            intrinsic_args!(fx, args => (a, b); intrinsic);
            let a = a.load_scalar(fx);
            let b = b.load_scalar(fx);

            let val = crate::num::codegen_float_min(fx, a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
            ret.write_cvalue(fx, val);
        }
        sym::minnumf64 => {
            intrinsic_args!(fx, args => (a, b); intrinsic);
            let a = a.load_scalar(fx);
            let b = b.load_scalar(fx);

            let val = crate::num::codegen_float_min(fx, a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
            ret.write_cvalue(fx, val);
        }
        sym::maxnumf32 => {
            intrinsic_args!(fx, args => (a, b); intrinsic);
            let a = a.load_scalar(fx);
            let b = b.load_scalar(fx);

            let val = crate::num::codegen_float_max(fx, a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
            ret.write_cvalue(fx, val);
        }
        sym::maxnumf64 => {
            intrinsic_args!(fx, args => (a, b); intrinsic);
            let a = a.load_scalar(fx);
            let b = b.load_scalar(fx);

            let val = crate::num::codegen_float_max(fx, a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
            ret.write_cvalue(fx, val);
        }

        kw::Try => {
            intrinsic_args!(fx, args => (f, data, catch_fn); intrinsic);
            let f = f.load_scalar(fx);
            let data = data.load_scalar(fx);
            let _catch_fn = catch_fn.load_scalar(fx);

            // FIXME once unwinding is supported, change this to actually catch panics
            let f_sig = fx.bcx.func.import_signature(Signature {
                call_conv: fx.target_config.default_call_conv,
                params: vec![AbiParam::new(fx.bcx.func.dfg.value_type(data))],
                returns: vec![],
            });

            fx.bcx.ins().call_indirect(f_sig, f, &[data]);

            let layout = ret.layout();
            let ret_val = CValue::const_val(fx, layout, ty::ScalarInt::null(layout.size));
            ret.write_cvalue(fx, ret_val);
        }

        sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
            intrinsic_args!(fx, args => (x, y); intrinsic);

            let res = crate::num::codegen_float_binop(
                fx,
                match intrinsic {
                    sym::fadd_fast => BinOp::Add,
                    sym::fsub_fast => BinOp::Sub,
                    sym::fmul_fast => BinOp::Mul,
                    sym::fdiv_fast => BinOp::Div,
                    sym::frem_fast => BinOp::Rem,
                    _ => unreachable!(),
                },
                x,
                y,
            );
            ret.write_cvalue(fx, res);
        }
        sym::float_to_int_unchecked => {
            intrinsic_args!(fx, args => (f); intrinsic);
            let f = f.load_scalar(fx);

            let res = crate::cast::clif_int_or_float_cast(
                fx,
                f,
                false,
                fx.clif_type(ret.layout().ty).unwrap(),
                type_sign(ret.layout().ty),
            );
            ret.write_cvalue(fx, CValue::by_val(res, ret.layout()));
        }

        sym::raw_eq => {
            intrinsic_args!(fx, args => (lhs_ref, rhs_ref); intrinsic);
            let lhs_ref = lhs_ref.load_scalar(fx);
            let rhs_ref = rhs_ref.load_scalar(fx);

            let size = fx.layout_of(substs.type_at(0)).layout.size();
            // FIXME add and use emit_small_memcmp
            let is_eq_value = if size == Size::ZERO {
                // No bytes means they're trivially equal
                fx.bcx.ins().iconst(types::I8, 1)
            } else if let Some(clty) = size.bits().try_into().ok().and_then(Type::int) {
                // Can't use `trusted` for these loads; they could be unaligned.
                let mut flags = MemFlags::new();
                flags.set_notrap();
                let lhs_val = fx.bcx.ins().load(clty, flags, lhs_ref, 0);
                let rhs_val = fx.bcx.ins().load(clty, flags, rhs_ref, 0);
                let eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_val, rhs_val);
                fx.bcx.ins().bint(types::I8, eq)
            } else {
                // Just call `memcmp` (like slices do in core) when the
                // size is too large or it's not a power-of-two.
                let signed_bytes = i64::try_from(size.bytes()).unwrap();
                let bytes_val = fx.bcx.ins().iconst(fx.pointer_type, signed_bytes);
                let params = vec![AbiParam::new(fx.pointer_type); 3];
                let returns = vec![AbiParam::new(types::I32)];
                let args = &[lhs_ref, rhs_ref, bytes_val];
                let cmp = fx.lib_call("memcmp", params, returns, args)[0];
                let eq = fx.bcx.ins().icmp_imm(IntCC::Equal, cmp, 0);
                fx.bcx.ins().bint(types::I8, eq)
            };
            ret.write_cvalue(fx, CValue::by_val(is_eq_value, ret.layout()));
        }

        sym::const_allocate => {
            intrinsic_args!(fx, args => (_size, _align); intrinsic);

            // returns a null pointer at runtime.
            let null = fx.bcx.ins().iconst(fx.pointer_type, 0);
            ret.write_cvalue(fx, CValue::by_val(null, ret.layout()));
        }

        sym::const_deallocate => {
            intrinsic_args!(fx, args => (_ptr, _size, _align); intrinsic);
            // nop at runtime.
        }

        sym::black_box => {
            intrinsic_args!(fx, args => (a); intrinsic);

            // FIXME implement black_box semantics
            ret.write_cvalue(fx, a);
        }

        // FIXME implement variadics in cranelift
        sym::va_copy | sym::va_arg | sym::va_end => {
            fx.tcx.sess.span_fatal(
                source_info.span,
                "Defining variadic functions is not yet supported by Cranelift",
            );
        }

        _ => {
            fx.tcx
                .sess
                .span_fatal(source_info.span, &format!("unsupported intrinsic {}", intrinsic));
        }
    }

    let ret_block = fx.get_block(destination.unwrap());
    fx.bcx.ins().jump(ret_block, &[]);
}