//! Codegen of intrinsics. This includes `extern "rust-intrinsic"`, `extern "platform-intrinsic"`
//! and LLVM intrinsics that have symbol names starting with `llvm.`.

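// Binds each element of `$args` to a name after lowering it with `codegen_operand`,
// or bugs out when the argument count does not match. A typical invocation (here
// with a hypothetical two-argument intrinsic) looks like:
//
//     intrinsic_args!(fx, args => (x, y); intrinsic);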
macro_rules! intrinsic_args {
    ($fx:expr, $args:expr => ($($arg:tt),*); $intrinsic:expr) => {
        #[allow(unused_parens)]
        let ($($arg),*) = if let [$($arg),*] = $args {
            ($(codegen_operand($fx, $arg)),*)
        } else {
            $crate::intrinsics::bug_on_incorrect_arg_count($intrinsic);
        };
    }
}

mod cpuid;
mod llvm;
mod simd;

pub(crate) use cpuid::codegen_cpuid_call;
pub(crate) use llvm::codegen_llvm_intrinsic_call;

use rustc_middle::ty::print::with_no_trimmed_paths;
use rustc_middle::ty::subst::SubstsRef;
use rustc_span::symbol::{kw, sym, Symbol};

use crate::prelude::*;
use cranelift_codegen::ir::AtomicRmwOp;

fn bug_on_incorrect_arg_count(intrinsic: impl std::fmt::Display) -> ! {
    bug!("wrong number of args for intrinsic {}", intrinsic);
}

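/// Reports that an atomic intrinsic was instantiated with an unsupported type and
/// emits a trap in its place so the generated function still passes the Cranelift
/// verifier.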
fn report_atomic_type_validation_error<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    intrinsic: Symbol,
    span: Span,
    ty: Ty<'tcx>,
) {
    fx.tcx.sess.span_err(
        span,
        &format!(
            "`{}` intrinsic: expected basic integer or raw pointer type, found `{:?}`",
            intrinsic, ty
        ),
    );
    // Prevent verifier error
    fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
}

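/// Returns the Cranelift vector type corresponding to a SIMD layout, or `None` when
/// the vector can't be lowered natively (currently everything except 128 bit vectors).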
pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> Option<Type> {
    let (element, count) = match layout.abi {
        Abi::Vector { element, count } => (element, count),
        _ => unreachable!(),
    };

    match scalar_to_clif_type(tcx, element).by(u32::try_from(count).unwrap()) {
        // Cranelift currently only implements icmp for 128bit vectors.
        Some(vector_ty) if vector_ty.bits() == 128 => Some(vector_ty),
        _ => None,
    }
}

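/// Applies `f` to every lane of `val`, writing each result to the corresponding lane
/// of `ret`. The lane counts of `val` and `ret` must match; the lane types may differ.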
fn simd_for_each_lane<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    val: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Ty<'tcx>, Value) -> Value,
) {
    let layout = val.layout();

    let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
    let lane_layout = fx.layout_of(lane_ty);
    let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
    let ret_lane_layout = fx.layout_of(ret_lane_ty);
    assert_eq!(lane_count, ret_lane_count);

    for lane_idx in 0..lane_count {
        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);

        let res_lane = f(fx, lane_layout.ty, ret_lane_layout.ty, lane);
        let res_lane = CValue::by_val(res_lane, ret_lane_layout);

        ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
    }
}

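/// Like `simd_pair_for_each_lane`, but passes the lanes to `f` as `CValue`s instead
/// of raw `Value`s, so `f` can produce results with a layout of its own choosing.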
fn simd_pair_for_each_lane_typed<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    x: CValue<'tcx>,
    y: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, CValue<'tcx>, CValue<'tcx>) -> CValue<'tcx>,
) {
    assert_eq!(x.layout(), y.layout());
    let layout = x.layout();

    let (lane_count, _lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
    let (ret_lane_count, _ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
    assert_eq!(lane_count, ret_lane_count);

    for lane_idx in 0..lane_count {
        let x_lane = x.value_lane(fx, lane_idx);
        let y_lane = y.value_lane(fx, lane_idx);

        let res_lane = f(fx, x_lane, y_lane);

        ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
    }
}

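/// Applies `f` to each pair of lanes from `x` and `y`, writing the results to the
/// lanes of `ret`. `x` and `y` must have identical layouts.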
fn simd_pair_for_each_lane<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    x: CValue<'tcx>,
    y: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Ty<'tcx>, Value, Value) -> Value,
) {
    assert_eq!(x.layout(), y.layout());
    let layout = x.layout();

    let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
    let lane_layout = fx.layout_of(lane_ty);
    let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
    let ret_lane_layout = fx.layout_of(ret_lane_ty);
    assert_eq!(lane_count, ret_lane_count);

    for lane_idx in 0..lane_count {
        let x_lane = x.value_lane(fx, lane_idx).load_scalar(fx);
        let y_lane = y.value_lane(fx, lane_idx).load_scalar(fx);

        let res_lane = f(fx, lane_layout.ty, ret_lane_layout.ty, x_lane, y_lane);
        let res_lane = CValue::by_val(res_lane, ret_lane_layout);

        ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
    }
}

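/// Folds `f` across all lanes of `val`, starting from `acc` if provided and from the
/// first lane otherwise, and writes the scalar result to `ret`.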
fn simd_reduce<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    val: CValue<'tcx>,
    acc: Option<Value>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Value, Value) -> Value,
) {
    let (lane_count, lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
    let lane_layout = fx.layout_of(lane_ty);
    assert_eq!(lane_layout, ret.layout());

    let (mut res_val, start_lane) =
        if let Some(acc) = acc { (acc, 0) } else { (val.value_lane(fx, 0).load_scalar(fx), 1) };
    for lane_idx in start_lane..lane_count {
        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
        res_val = f(fx, lane_layout.ty, res_val, lane);
    }
    let res = CValue::by_val(res_val, lane_layout);
    ret.write_cvalue(fx, res);
}

// FIXME move all uses to `simd_reduce`
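/// Reduces a vector of booleans to a single `bool` by masking each lane to its low
/// bit and folding `f` across the lanes.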
fn simd_reduce_bool<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    val: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Value, Value) -> Value,
) {
    let (lane_count, _lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
    assert!(ret.layout().ty.is_bool());

    let res_val = val.value_lane(fx, 0).load_scalar(fx);
    let mut res_val = fx.bcx.ins().band_imm(res_val, 1); // mask to boolean
    for lane_idx in 1..lane_count {
        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
        let lane = fx.bcx.ins().band_imm(lane, 1); // mask to boolean
        res_val = f(fx, res_val, lane);
    }
    let res_val = if fx.bcx.func.dfg.value_type(res_val) != types::I8 {
        fx.bcx.ins().ireduce(types::I8, res_val)
    } else {
        res_val
    };
    let res = CValue::by_val(res_val, ret.layout());
    ret.write_cvalue(fx, res);
}

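/// Turns a boolean into either all-zeros or all-ones of the target type (negating a
/// 0/1 integer), bitcasting the result back when a float type is requested.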
fn bool_to_zero_or_max_uint<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    ty: Ty<'tcx>,
    val: Value,
) -> Value {
    let ty = fx.clif_type(ty).unwrap();

    let int_ty = match ty {
        types::F32 => types::I32,
        types::F64 => types::I64,
        ty => ty,
    };

    let val = fx.bcx.ins().bint(int_ty, val);
    let mut res = fx.bcx.ins().ineg(val);

    if ty.is_float() {
        res = fx.bcx.ins().bitcast(ty, res);
    }

    res
}

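/// Main entry point for intrinsic codegen: handles the non-returning intrinsics,
/// dispatches to the SIMD and float helpers, and falls back to
/// `codegen_regular_intrinsic_call` for everything else.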
pub(crate) fn codegen_intrinsic_call<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    instance: Instance<'tcx>,
    args: &[mir::Operand<'tcx>],
    destination: CPlace<'tcx>,
    target: Option<BasicBlock>,
    source_info: mir::SourceInfo,
) {
    let intrinsic = fx.tcx.item_name(instance.def_id());
    let substs = instance.substs;

    let target = if let Some(target) = target {
        target
    } else {
        // Insert non-returning intrinsics here
        match intrinsic {
            sym::abort => {
                fx.bcx.ins().trap(TrapCode::User(0));
            }
            sym::transmute => {
                crate::base::codegen_panic(fx, "Transmuting to uninhabited type.", source_info);
            }
            _ => unimplemented!("unsupported intrinsic {}", intrinsic),
        }
        return;
    };

    if intrinsic.as_str().starts_with("simd_") {
        self::simd::codegen_simd_intrinsic_call(
            fx,
            intrinsic,
            substs,
            args,
            destination,
            source_info.span,
        );
        let ret_block = fx.get_block(target);
        fx.bcx.ins().jump(ret_block, &[]);
    } else if codegen_float_intrinsic_call(fx, intrinsic, args, destination) {
        let ret_block = fx.get_block(target);
        fx.bcx.ins().jump(ret_block, &[]);
    } else {
        codegen_regular_intrinsic_call(
            fx,
            instance,
            intrinsic,
            substs,
            args,
            destination,
            Some(target),
            source_info,
        );
    }
}

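/// Lowers a float intrinsic either to a native Cranelift instruction (fma, copysign,
/// fabs, floor, ceil, trunc) or to the matching libm/compiler-builtins libcall.
/// Returns `false` when the intrinsic is not a float intrinsic handled here.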
fn codegen_float_intrinsic_call<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    intrinsic: Symbol,
    args: &[mir::Operand<'tcx>],
    ret: CPlace<'tcx>,
) -> bool {
    let (name, arg_count, ty) = match intrinsic {
        sym::expf32 => ("expf", 1, fx.tcx.types.f32),
        sym::expf64 => ("exp", 1, fx.tcx.types.f64),
        sym::exp2f32 => ("exp2f", 1, fx.tcx.types.f32),
        sym::exp2f64 => ("exp2", 1, fx.tcx.types.f64),
        sym::sqrtf32 => ("sqrtf", 1, fx.tcx.types.f32),
        sym::sqrtf64 => ("sqrt", 1, fx.tcx.types.f64),
        sym::powif32 => ("__powisf2", 2, fx.tcx.types.f32), // compiler-builtins
        sym::powif64 => ("__powidf2", 2, fx.tcx.types.f64), // compiler-builtins
        sym::powf32 => ("powf", 2, fx.tcx.types.f32),
        sym::powf64 => ("pow", 2, fx.tcx.types.f64),
        sym::logf32 => ("logf", 1, fx.tcx.types.f32),
        sym::logf64 => ("log", 1, fx.tcx.types.f64),
        sym::log2f32 => ("log2f", 1, fx.tcx.types.f32),
        sym::log2f64 => ("log2", 1, fx.tcx.types.f64),
        sym::log10f32 => ("log10f", 1, fx.tcx.types.f32),
        sym::log10f64 => ("log10", 1, fx.tcx.types.f64),
        sym::fabsf32 => ("fabsf", 1, fx.tcx.types.f32),
        sym::fabsf64 => ("fabs", 1, fx.tcx.types.f64),
        sym::fmaf32 => ("fmaf", 3, fx.tcx.types.f32),
        sym::fmaf64 => ("fma", 3, fx.tcx.types.f64),
        sym::copysignf32 => ("copysignf", 2, fx.tcx.types.f32),
        sym::copysignf64 => ("copysign", 2, fx.tcx.types.f64),
        sym::floorf32 => ("floorf", 1, fx.tcx.types.f32),
        sym::floorf64 => ("floor", 1, fx.tcx.types.f64),
        sym::ceilf32 => ("ceilf", 1, fx.tcx.types.f32),
        sym::ceilf64 => ("ceil", 1, fx.tcx.types.f64),
        sym::truncf32 => ("truncf", 1, fx.tcx.types.f32),
        sym::truncf64 => ("trunc", 1, fx.tcx.types.f64),
        sym::roundf32 => ("roundf", 1, fx.tcx.types.f32),
        sym::roundf64 => ("round", 1, fx.tcx.types.f64),
        sym::sinf32 => ("sinf", 1, fx.tcx.types.f32),
        sym::sinf64 => ("sin", 1, fx.tcx.types.f64),
        sym::cosf32 => ("cosf", 1, fx.tcx.types.f32),
        sym::cosf64 => ("cos", 1, fx.tcx.types.f64),
        _ => return false,
    };

    if args.len() != arg_count {
        bug!("wrong number of args for intrinsic {:?}", intrinsic);
    }

    let (a, b, c);
    let args = match args {
        [x] => {
            a = [codegen_operand(fx, x)];
            &a as &[_]
        }
        [x, y] => {
            b = [codegen_operand(fx, x), codegen_operand(fx, y)];
            &b
        }
        [x, y, z] => {
            c = [codegen_operand(fx, x), codegen_operand(fx, y), codegen_operand(fx, z)];
            &c
        }
        _ => unreachable!(),
    };

    let layout = fx.layout_of(ty);
    let res = match intrinsic {
        sym::fmaf32 | sym::fmaf64 => {
            let a = args[0].load_scalar(fx);
            let b = args[1].load_scalar(fx);
            let c = args[2].load_scalar(fx);
            CValue::by_val(fx.bcx.ins().fma(a, b, c), layout)
        }
        sym::copysignf32 | sym::copysignf64 => {
            let a = args[0].load_scalar(fx);
            let b = args[1].load_scalar(fx);
            CValue::by_val(fx.bcx.ins().fcopysign(a, b), layout)
        }
        sym::fabsf32
        | sym::fabsf64
        | sym::floorf32
        | sym::floorf64
        | sym::ceilf32
        | sym::ceilf64
        | sym::truncf32
        | sym::truncf64 => {
            let a = args[0].load_scalar(fx);

            let val = match intrinsic {
                sym::fabsf32 | sym::fabsf64 => fx.bcx.ins().fabs(a),
                sym::floorf32 | sym::floorf64 => fx.bcx.ins().floor(a),
                sym::ceilf32 | sym::ceilf64 => fx.bcx.ins().ceil(a),
                sym::truncf32 | sym::truncf64 => fx.bcx.ins().trunc(a),
                _ => unreachable!(),
            };

            CValue::by_val(val, layout)
        }
        // These intrinsics aren't supported natively by Cranelift.
        // Lower them to a libcall.
        _ => fx.easy_call(name, &args, ty),
    };

    ret.write_cvalue(fx, res);

    true
}

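/// Codegens all remaining intrinsics: memory operations, checked and unchecked
/// arithmetic, bit manipulation, atomics and various compile-time queries.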
fn codegen_regular_intrinsic_call<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    instance: Instance<'tcx>,
    intrinsic: Symbol,
    substs: SubstsRef<'tcx>,
    args: &[mir::Operand<'tcx>],
    ret: CPlace<'tcx>,
    destination: Option<BasicBlock>,
    source_info: mir::SourceInfo,
) {
    let usize_layout = fx.layout_of(fx.tcx.types.usize);

    match intrinsic {
        sym::likely | sym::unlikely => {
            intrinsic_args!(fx, args => (a); intrinsic);

            ret.write_cvalue(fx, a);
        }
        sym::breakpoint => {
            intrinsic_args!(fx, args => (); intrinsic);

            fx.bcx.ins().debugtrap();
        }
        sym::copy | sym::copy_nonoverlapping => {
            intrinsic_args!(fx, args => (src, dst, count); intrinsic);
            let src = src.load_scalar(fx);
            let dst = dst.load_scalar(fx);
            let count = count.load_scalar(fx);

            let elem_ty = substs.type_at(0);
            let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
            assert_eq!(args.len(), 3);
            let byte_amount =
                if elem_size != 1 { fx.bcx.ins().imul_imm(count, elem_size as i64) } else { count };

            if intrinsic == sym::copy_nonoverlapping {
                // FIXME emit_small_memcpy
                fx.bcx.call_memcpy(fx.target_config, dst, src, byte_amount);
            } else {
                // FIXME emit_small_memmove
                fx.bcx.call_memmove(fx.target_config, dst, src, byte_amount);
            }
        }
        sym::volatile_copy_memory | sym::volatile_copy_nonoverlapping_memory => {
            // NOTE: the volatile variants have src and dst swapped
            intrinsic_args!(fx, args => (dst, src, count); intrinsic);
            let dst = dst.load_scalar(fx);
            let src = src.load_scalar(fx);
            let count = count.load_scalar(fx);

            let elem_ty = substs.type_at(0);
            let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
            assert_eq!(args.len(), 3);
            let byte_amount =
                if elem_size != 1 { fx.bcx.ins().imul_imm(count, elem_size as i64) } else { count };

            // FIXME make the copy actually volatile when using emit_small_mem{cpy,move}
            if intrinsic == sym::volatile_copy_nonoverlapping_memory {
                // FIXME emit_small_memcpy
                fx.bcx.call_memcpy(fx.target_config, dst, src, byte_amount);
            } else {
                // FIXME emit_small_memmove
                fx.bcx.call_memmove(fx.target_config, dst, src, byte_amount);
            }
        }
        sym::size_of_val => {
            intrinsic_args!(fx, args => (ptr); intrinsic);

            let layout = fx.layout_of(substs.type_at(0));
            // Note: Can't use is_unsized here as truly unsized types need to take the fixed size
            // branch
            let size = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
                let (_ptr, info) = ptr.load_scalar_pair(fx);
                let (size, _align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
                size
            } else {
                fx.bcx.ins().iconst(fx.pointer_type, layout.size.bytes() as i64)
            };
            ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
        }
        sym::min_align_of_val => {
            intrinsic_args!(fx, args => (ptr); intrinsic);

            let layout = fx.layout_of(substs.type_at(0));
            // Note: Can't use is_unsized here as truly unsized types need to take the fixed size
            // branch
            let align = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
                let (_ptr, info) = ptr.load_scalar_pair(fx);
                let (_size, align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
                align
            } else {
                fx.bcx.ins().iconst(fx.pointer_type, layout.align.abi.bytes() as i64)
            };
            ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
        }

        sym::vtable_size => {
            intrinsic_args!(fx, args => (vtable); intrinsic);
            let vtable = vtable.load_scalar(fx);

            let size = crate::vtable::size_of_obj(fx, vtable);
            ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
        }

        sym::vtable_align => {
            intrinsic_args!(fx, args => (vtable); intrinsic);
            let vtable = vtable.load_scalar(fx);

            let align = crate::vtable::min_align_of_obj(fx, vtable);
            ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
        }

        sym::unchecked_add
        | sym::unchecked_sub
        | sym::unchecked_mul
        | sym::unchecked_div
        | sym::exact_div
        | sym::unchecked_rem
        | sym::unchecked_shl
        | sym::unchecked_shr => {
            intrinsic_args!(fx, args => (x, y); intrinsic);

            // FIXME trap on overflow
            let bin_op = match intrinsic {
                sym::unchecked_add => BinOp::Add,
                sym::unchecked_sub => BinOp::Sub,
                sym::unchecked_mul => BinOp::Mul,
                sym::unchecked_div | sym::exact_div => BinOp::Div,
                sym::unchecked_rem => BinOp::Rem,
                sym::unchecked_shl => BinOp::Shl,
                sym::unchecked_shr => BinOp::Shr,
                _ => unreachable!(),
            };
            let res = crate::num::codegen_int_binop(fx, bin_op, x, y);
            ret.write_cvalue(fx, res);
        }
        sym::add_with_overflow | sym::sub_with_overflow | sym::mul_with_overflow => {
            intrinsic_args!(fx, args => (x, y); intrinsic);

            assert_eq!(x.layout().ty, y.layout().ty);
            let bin_op = match intrinsic {
                sym::add_with_overflow => BinOp::Add,
                sym::sub_with_overflow => BinOp::Sub,
                sym::mul_with_overflow => BinOp::Mul,
                _ => unreachable!(),
            };

            let res = crate::num::codegen_checked_int_binop(fx, bin_op, x, y);
            ret.write_cvalue(fx, res);
        }
        sym::saturating_add | sym::saturating_sub => {
            intrinsic_args!(fx, args => (lhs, rhs); intrinsic);

            assert_eq!(lhs.layout().ty, rhs.layout().ty);
            let bin_op = match intrinsic {
                sym::saturating_add => BinOp::Add,
                sym::saturating_sub => BinOp::Sub,
                _ => unreachable!(),
            };

            let res = crate::num::codegen_saturating_int_binop(fx, bin_op, lhs, rhs);
            ret.write_cvalue(fx, res);
        }
        sym::rotate_left => {
            intrinsic_args!(fx, args => (x, y); intrinsic);
            let y = y.load_scalar(fx);

            let layout = x.layout();
            let x = x.load_scalar(fx);
            let res = fx.bcx.ins().rotl(x, y);
            ret.write_cvalue(fx, CValue::by_val(res, layout));
        }
        sym::rotate_right => {
            intrinsic_args!(fx, args => (x, y); intrinsic);
            let y = y.load_scalar(fx);

            let layout = x.layout();
            let x = x.load_scalar(fx);
            let res = fx.bcx.ins().rotr(x, y);
            ret.write_cvalue(fx, CValue::by_val(res, layout));
        }

        // The only difference between `offset` and `arith_offset` is their UB rules.
        // Cranelift itself has no notion of UB, so both are codegen'ed the same way.
        sym::offset | sym::arith_offset => {
            intrinsic_args!(fx, args => (base, offset); intrinsic);
            let offset = offset.load_scalar(fx);

            let pointee_ty = base.layout().ty.builtin_deref(true).unwrap().ty;
            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
            let ptr_diff = if pointee_size != 1 {
                fx.bcx.ins().imul_imm(offset, pointee_size as i64)
            } else {
                offset
            };
            let base_val = base.load_scalar(fx);
            let res = fx.bcx.ins().iadd(base_val, ptr_diff);
            ret.write_cvalue(fx, CValue::by_val(res, base.layout()));
        }

        sym::ptr_mask => {
            intrinsic_args!(fx, args => (ptr, mask); intrinsic);
            let ptr_layout = ptr.layout();
            let ptr = ptr.load_scalar(fx);
            let mask = mask.load_scalar(fx);

            // The original code computed the mask but discarded the result; the masked
            // pointer has to be written to the return place.
            let res = fx.bcx.ins().band(ptr, mask);
            ret.write_cvalue(fx, CValue::by_val(res, ptr_layout));
        }

        sym::transmute => {
            intrinsic_args!(fx, args => (from); intrinsic);

            ret.write_cvalue_transmute(fx, from);
        }
        sym::write_bytes | sym::volatile_set_memory => {
            intrinsic_args!(fx, args => (dst, val, count); intrinsic);
            let val = val.load_scalar(fx);
            let count = count.load_scalar(fx);

            let pointee_ty = dst.layout().ty.builtin_deref(true).unwrap().ty;
            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
            let count = if pointee_size != 1 {
                fx.bcx.ins().imul_imm(count, pointee_size as i64)
            } else {
                count
            };
            let dst_ptr = dst.load_scalar(fx);
            // FIXME make the memset actually volatile when switching to emit_small_memset
            // FIXME use emit_small_memset
            fx.bcx.call_memset(fx.target_config, dst_ptr, val, count);
        }
        sym::ctlz | sym::ctlz_nonzero => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            // FIXME trap on `ctlz_nonzero` with zero arg.
            let res = fx.bcx.ins().clz(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::cttz | sym::cttz_nonzero => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            // FIXME trap on `cttz_nonzero` with zero arg.
            let res = fx.bcx.ins().ctz(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::ctpop => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            let res = fx.bcx.ins().popcnt(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::bitreverse => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            let res = fx.bcx.ins().bitrev(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::bswap => {
            // FIXME(CraneStation/cranelift#794) add bswap instruction to cranelift
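            // Swap the bytes by shifting each byte into its mirrored position,
            // masking it, and OR-ing the partial results back together.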
            fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
                match bcx.func.dfg.value_type(v) {
                    types::I8 => v,

                    // https://code.woboq.org/gcc/include/bits/byteswap.h.html
                    types::I16 => {
                        let tmp1 = bcx.ins().ishl_imm(v, 8);
                        let n1 = bcx.ins().band_imm(tmp1, 0xFF00);

                        let tmp2 = bcx.ins().ushr_imm(v, 8);
                        let n2 = bcx.ins().band_imm(tmp2, 0x00FF);

                        bcx.ins().bor(n1, n2)
                    }
                    types::I32 => {
                        let tmp1 = bcx.ins().ishl_imm(v, 24);
                        let n1 = bcx.ins().band_imm(tmp1, 0xFF00_0000);

                        let tmp2 = bcx.ins().ishl_imm(v, 8);
                        let n2 = bcx.ins().band_imm(tmp2, 0x00FF_0000);

                        let tmp3 = bcx.ins().ushr_imm(v, 8);
                        let n3 = bcx.ins().band_imm(tmp3, 0x0000_FF00);

                        let tmp4 = bcx.ins().ushr_imm(v, 24);
                        let n4 = bcx.ins().band_imm(tmp4, 0x0000_00FF);

                        let or_tmp1 = bcx.ins().bor(n1, n2);
                        let or_tmp2 = bcx.ins().bor(n3, n4);
                        bcx.ins().bor(or_tmp1, or_tmp2)
                    }
                    types::I64 => {
                        let tmp1 = bcx.ins().ishl_imm(v, 56);
                        let n1 = bcx.ins().band_imm(tmp1, 0xFF00_0000_0000_0000u64 as i64);

                        let tmp2 = bcx.ins().ishl_imm(v, 40);
                        let n2 = bcx.ins().band_imm(tmp2, 0x00FF_0000_0000_0000u64 as i64);

                        let tmp3 = bcx.ins().ishl_imm(v, 24);
                        let n3 = bcx.ins().band_imm(tmp3, 0x0000_FF00_0000_0000u64 as i64);

                        let tmp4 = bcx.ins().ishl_imm(v, 8);
                        let n4 = bcx.ins().band_imm(tmp4, 0x0000_00FF_0000_0000u64 as i64);

                        let tmp5 = bcx.ins().ushr_imm(v, 8);
                        let n5 = bcx.ins().band_imm(tmp5, 0x0000_0000_FF00_0000u64 as i64);

                        let tmp6 = bcx.ins().ushr_imm(v, 24);
                        let n6 = bcx.ins().band_imm(tmp6, 0x0000_0000_00FF_0000u64 as i64);

                        let tmp7 = bcx.ins().ushr_imm(v, 40);
                        let n7 = bcx.ins().band_imm(tmp7, 0x0000_0000_0000_FF00u64 as i64);

                        let tmp8 = bcx.ins().ushr_imm(v, 56);
                        let n8 = bcx.ins().band_imm(tmp8, 0x0000_0000_0000_00FFu64 as i64);

                        let or_tmp1 = bcx.ins().bor(n1, n2);
                        let or_tmp2 = bcx.ins().bor(n3, n4);
                        let or_tmp3 = bcx.ins().bor(n5, n6);
                        let or_tmp4 = bcx.ins().bor(n7, n8);

                        let or_tmp5 = bcx.ins().bor(or_tmp1, or_tmp2);
                        let or_tmp6 = bcx.ins().bor(or_tmp3, or_tmp4);
                        bcx.ins().bor(or_tmp5, or_tmp6)
                    }
                    types::I128 => {
                        let (lo, hi) = bcx.ins().isplit(v);
                        let lo = swap(bcx, lo);
                        let hi = swap(bcx, hi);
                        bcx.ins().iconcat(hi, lo)
                    }
                    ty => unreachable!("bswap {}", ty),
                }
            }
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            let res = CValue::by_val(swap(&mut fx.bcx, val), arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::assert_inhabited | sym::assert_zero_valid | sym::assert_uninit_valid => {
            intrinsic_args!(fx, args => (); intrinsic);

            let layout = fx.layout_of(substs.type_at(0));
            if layout.abi.is_uninhabited() {
                with_no_trimmed_paths!({
                    crate::base::codegen_panic(
                        fx,
                        &format!("attempted to instantiate uninhabited type `{}`", layout.ty),
                        source_info,
                    )
                });
                return;
            }

            if intrinsic == sym::assert_zero_valid && !fx.tcx.permits_zero_init(layout) {
                with_no_trimmed_paths!({
                    crate::base::codegen_panic(
                        fx,
                        &format!(
                            "attempted to zero-initialize type `{}`, which is invalid",
                            layout.ty
                        ),
                        source_info,
                    );
                });
                return;
            }

            if intrinsic == sym::assert_uninit_valid && !fx.tcx.permits_uninit_init(layout) {
                with_no_trimmed_paths!({
                    crate::base::codegen_panic(
                        fx,
                        &format!(
                            "attempted to leave type `{}` uninitialized, which is invalid",
                            layout.ty
                        ),
                        source_info,
                    )
                });
                return;
            }
        }

        sym::volatile_load | sym::unaligned_volatile_load => {
            intrinsic_args!(fx, args => (ptr); intrinsic);

            // Cranelift treats loads as volatile by default
            // FIXME correctly handle unaligned_volatile_load
            let inner_layout = fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
            let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
            ret.write_cvalue(fx, val);
        }
        sym::volatile_store | sym::unaligned_volatile_store => {
            intrinsic_args!(fx, args => (ptr, val); intrinsic);
            let ptr = ptr.load_scalar(fx);

            // Cranelift treats stores as volatile by default
            // FIXME correctly handle unaligned_volatile_store
            let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
            dest.write_cvalue(fx, val);
        }

        sym::pref_align_of
        | sym::needs_drop
        | sym::type_id
        | sym::type_name
        | sym::variant_count => {
            intrinsic_args!(fx, args => (); intrinsic);

            let const_val =
                fx.tcx.const_eval_instance(ParamEnv::reveal_all(), instance, None).unwrap();
            let val = crate::constant::codegen_const_value(fx, const_val, ret.layout().ty);
            ret.write_cvalue(fx, val);
        }

        sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
            intrinsic_args!(fx, args => (ptr, base); intrinsic);
            let ptr = ptr.load_scalar(fx);
            let base = base.load_scalar(fx);
            let ty = substs.type_at(0);

            let pointee_size: u64 = fx.layout_of(ty).size.bytes();
            let diff_bytes = fx.bcx.ins().isub(ptr, base);
            // FIXME this can be an exact division.
            let val = if intrinsic == sym::ptr_offset_from_unsigned {
                let usize_layout = fx.layout_of(fx.tcx.types.usize);
                // Because diff_bytes ULE isize::MAX, this would be fine as signed,
                // but unsigned is slightly easier to codegen, so might as well.
                CValue::by_val(fx.bcx.ins().udiv_imm(diff_bytes, pointee_size as i64), usize_layout)
            } else {
                let isize_layout = fx.layout_of(fx.tcx.types.isize);
                CValue::by_val(fx.bcx.ins().sdiv_imm(diff_bytes, pointee_size as i64), isize_layout)
            };
            ret.write_cvalue(fx, val);
        }

        sym::ptr_guaranteed_cmp => {
            intrinsic_args!(fx, args => (a, b); intrinsic);

            let val = crate::num::codegen_ptr_binop(fx, BinOp::Eq, a, b).load_scalar(fx);
            ret.write_cvalue(fx, CValue::by_val(val, fx.layout_of(fx.tcx.types.u8)));
        }

        sym::caller_location => {
            intrinsic_args!(fx, args => (); intrinsic);

            let caller_location = fx.get_caller_location(source_info);
            ret.write_cvalue(fx, caller_location);
        }

        _ if intrinsic.as_str().starts_with("atomic_fence") => {
            intrinsic_args!(fx, args => (); intrinsic);

            fx.bcx.ins().fence();
        }
        _ if intrinsic.as_str().starts_with("atomic_singlethreadfence") => {
            intrinsic_args!(fx, args => (); intrinsic);

            // FIXME use a compiler fence once Cranelift supports it
            fx.bcx.ins().fence();
        }
        _ if intrinsic.as_str().starts_with("atomic_load") => {
            intrinsic_args!(fx, args => (ptr); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let ty = substs.type_at(0);
            match ty.kind() {
                ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
                    // FIXME implement 128bit atomics
                    if fx.tcx.is_compiler_builtins(LOCAL_CRATE) {
                        // special case for compiler-builtins to avoid having to patch it
                        crate::trap::trap_unimplemented(fx, "128bit atomics not yet supported");
                        return;
                    } else {
                        fx.tcx
                            .sess
                            .span_fatal(source_info.span, "128bit atomics not yet supported");
                    }
                }
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, ty);
                    return;
                }
            }
            let clif_ty = fx.clif_type(ty).unwrap();

            let val = fx.bcx.ins().atomic_load(clif_ty, MemFlags::trusted(), ptr);

            let val = CValue::by_val(val, fx.layout_of(ty));
            ret.write_cvalue(fx, val);
        }
        _ if intrinsic.as_str().starts_with("atomic_store") => {
            intrinsic_args!(fx, args => (ptr, val); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let ty = substs.type_at(0);
            match ty.kind() {
                ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
                    // FIXME implement 128bit atomics
                    if fx.tcx.is_compiler_builtins(LOCAL_CRATE) {
                        // special case for compiler-builtins to avoid having to patch it
                        crate::trap::trap_unimplemented(fx, "128bit atomics not yet supported");
                        return;
                    } else {
                        fx.tcx
                            .sess
                            .span_fatal(source_info.span, "128bit atomics not yet supported");
                    }
                }
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, ty);
                    return;
                }
            }

            let val = val.load_scalar(fx);

            fx.bcx.ins().atomic_store(MemFlags::trusted(), val, ptr);
        }
        _ if intrinsic.as_str().starts_with("atomic_xchg") => {
            intrinsic_args!(fx, args => (ptr, new); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = new.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let new = new.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xchg, ptr, new);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_cxchg") => {
            // both atomic_cxchg_* and atomic_cxchgweak_*
            intrinsic_args!(fx, args => (ptr, test_old, new); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = new.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }

            let test_old = test_old.load_scalar(fx);
            let new = new.load_scalar(fx);

            let old = fx.bcx.ins().atomic_cas(MemFlags::trusted(), ptr, test_old, new);
            let is_eq = fx.bcx.ins().icmp(IntCC::Equal, old, test_old);

            let ret_val =
                CValue::by_val_pair(old, fx.bcx.ins().bint(types::I8, is_eq), ret.layout());
            ret.write_cvalue(fx, ret_val)
        }

        _ if intrinsic.as_str().starts_with("atomic_xadd") => {
            intrinsic_args!(fx, args => (ptr, amount); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = amount.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let amount = amount.load_scalar(fx);

            let old =
                fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Add, ptr, amount);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_xsub") => {
            intrinsic_args!(fx, args => (ptr, amount); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = amount.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let amount = amount.load_scalar(fx);

            let old =
                fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Sub, ptr, amount);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_and") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::And, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_or") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Or, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_xor") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xor, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_nand") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Nand, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_max") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smax, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_umax") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umax, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_min") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smin, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_umin") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umin, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }

        sym::minnumf32 => {
            intrinsic_args!(fx, args => (a, b); intrinsic);
            let a = a.load_scalar(fx);
            let b = b.load_scalar(fx);

            let val = crate::num::codegen_float_min(fx, a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
            ret.write_cvalue(fx, val);
        }
        sym::minnumf64 => {
            intrinsic_args!(fx, args => (a, b); intrinsic);
            let a = a.load_scalar(fx);
            let b = b.load_scalar(fx);

            let val = crate::num::codegen_float_min(fx, a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
            ret.write_cvalue(fx, val);
        }
        sym::maxnumf32 => {
            intrinsic_args!(fx, args => (a, b); intrinsic);
            let a = a.load_scalar(fx);
            let b = b.load_scalar(fx);

            let val = crate::num::codegen_float_max(fx, a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
            ret.write_cvalue(fx, val);
        }
        sym::maxnumf64 => {
            intrinsic_args!(fx, args => (a, b); intrinsic);
            let a = a.load_scalar(fx);
            let b = b.load_scalar(fx);

            let val = crate::num::codegen_float_max(fx, a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
            ret.write_cvalue(fx, val);
        }

        kw::Try => {
            intrinsic_args!(fx, args => (f, data, catch_fn); intrinsic);
            let f = f.load_scalar(fx);
            let data = data.load_scalar(fx);
            let _catch_fn = catch_fn.load_scalar(fx);

            // FIXME once unwinding is supported, change this to actually catch panics
            let f_sig = fx.bcx.func.import_signature(Signature {
                call_conv: fx.target_config.default_call_conv,
                params: vec![AbiParam::new(pointer_ty(fx.tcx))],
                returns: vec![],
            });

            fx.bcx.ins().call_indirect(f_sig, f, &[data]);

            let layout = ret.layout();
            let ret_val = CValue::const_val(fx, layout, ty::ScalarInt::null(layout.size));
            ret.write_cvalue(fx, ret_val);
        }

        sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
            intrinsic_args!(fx, args => (x, y); intrinsic);

            let res = crate::num::codegen_float_binop(
                fx,
                match intrinsic {
                    sym::fadd_fast => BinOp::Add,
                    sym::fsub_fast => BinOp::Sub,
                    sym::fmul_fast => BinOp::Mul,
                    sym::fdiv_fast => BinOp::Div,
                    sym::frem_fast => BinOp::Rem,
                    _ => unreachable!(),
                },
                x,
                y,
            );
            ret.write_cvalue(fx, res);
        }
        sym::float_to_int_unchecked => {
            intrinsic_args!(fx, args => (f); intrinsic);
            let f = f.load_scalar(fx);

            let res = crate::cast::clif_int_or_float_cast(
                fx,
                f,
                false,
                fx.clif_type(ret.layout().ty).unwrap(),
                type_sign(ret.layout().ty),
            );
            ret.write_cvalue(fx, CValue::by_val(res, ret.layout()));
        }

        sym::raw_eq => {
            intrinsic_args!(fx, args => (lhs_ref, rhs_ref); intrinsic);
            let lhs_ref = lhs_ref.load_scalar(fx);
            let rhs_ref = rhs_ref.load_scalar(fx);

            let size = fx.layout_of(substs.type_at(0)).layout.size();
            // FIXME add and use emit_small_memcmp
            let is_eq_value = if size == Size::ZERO {
                // No bytes means they're trivially equal
                fx.bcx.ins().iconst(types::I8, 1)
            } else if let Some(clty) = size.bits().try_into().ok().and_then(Type::int) {
                // Can't use `trusted` for these loads; they could be unaligned.
                let mut flags = MemFlags::new();
                flags.set_notrap();
                let lhs_val = fx.bcx.ins().load(clty, flags, lhs_ref, 0);
                let rhs_val = fx.bcx.ins().load(clty, flags, rhs_ref, 0);
                let eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_val, rhs_val);
                fx.bcx.ins().bint(types::I8, eq)
            } else {
                // Just call `memcmp` (like slices do in core) when the
                // size is too large or it's not a power-of-two.
                let signed_bytes = i64::try_from(size.bytes()).unwrap();
                let bytes_val = fx.bcx.ins().iconst(fx.pointer_type, signed_bytes);
                let params = vec![AbiParam::new(fx.pointer_type); 3];
                let returns = vec![AbiParam::new(types::I32)];
                let args = &[lhs_ref, rhs_ref, bytes_val];
                let cmp = fx.lib_call("memcmp", params, returns, args)[0];
                let eq = fx.bcx.ins().icmp_imm(IntCC::Equal, cmp, 0);
                fx.bcx.ins().bint(types::I8, eq)
            };
            ret.write_cvalue(fx, CValue::by_val(is_eq_value, ret.layout()));
        }

        sym::const_allocate => {
            intrinsic_args!(fx, args => (_size, _align); intrinsic);

            // returns a null pointer at runtime.
            let null = fx.bcx.ins().iconst(fx.pointer_type, 0);
            ret.write_cvalue(fx, CValue::by_val(null, ret.layout()));
        }

        sym::const_deallocate => {
            intrinsic_args!(fx, args => (_ptr, _size, _align); intrinsic);
            // nop at runtime.
        }

        sym::black_box => {
            intrinsic_args!(fx, args => (a); intrinsic);

            // FIXME implement black_box semantics
            ret.write_cvalue(fx, a);
        }

        // FIXME implement variadics in cranelift
        sym::va_copy | sym::va_arg | sym::va_end => {
            fx.tcx.sess.span_fatal(
                source_info.span,
                "Defining variadic functions is not yet supported by Cranelift",
            );
        }

        _ => {
            fx.tcx
                .sess
                .span_fatal(source_info.span, &format!("unsupported intrinsic {}", intrinsic));
        }
    }

    let ret_block = fx.get_block(destination.unwrap());
    fx.bcx.ins().jump(ret_block, &[]);
}