//! Codegen of intrinsics. This includes `extern "rust-intrinsic"`, `extern "platform-intrinsic"`
//! and LLVM intrinsics that have symbol names starting with `llvm.`.

mod cpuid;
mod llvm;
mod simd;

pub(crate) use cpuid::codegen_cpuid_call;
pub(crate) use llvm::codegen_llvm_intrinsic_call;

use crate::prelude::*;
use cranelift_codegen::ir::AtomicRmwOp;
use rustc_middle::ty::print::with_no_trimmed_paths;

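// Turns the name written in an `intrinsic_match!` arm into the string the
// intrinsic name is matched against. Dotted names are concatenated, so e.g.
// `intrinsic_pat!(llvm.x86.sse2)` expands to the pattern `"llvm.x86.sse2"`.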
macro intrinsic_pat {
    (_) => {
        _
    },
    ($name:ident) => {
        stringify!($name)
    },
    ($name:literal) => {
        stringify!($name)
    },
    ($x:ident . $($xs:tt).*) => {
        concat!(stringify!($x), ".", intrinsic_pat!($($xs).*))
    }
}

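// Lowers a single intrinsic argument according to its one-letter mode:
// `o` passes the `mir::Operand` through untouched, `c` codegens it to a
// `CValue`, and `v` additionally loads it as a scalar Cranelift `Value`.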
macro intrinsic_arg {
    (o $fx:expr, $arg:ident) => {
        $arg
    },
    (c $fx:expr, $arg:ident) => {
        codegen_operand($fx, $arg)
    },
    (v $fx:expr, $arg:ident) => {
        codegen_operand($fx, $arg).load_scalar($fx)
    }
}

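// Binds each listed identifier to the corresponding type parameter of the
// intrinsic, in order: `intrinsic_substs!(substs, 0, T, U)` expands to
// `let T = substs.type_at(0); let U = substs.type_at(1);`.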
macro intrinsic_substs {
    ($substs:expr, $index:expr,) => {},
    ($substs:expr, $index:expr, $first:ident $(,$rest:ident)*) => {
        let $first = $substs.type_at($index);
        intrinsic_substs!($substs, $index+1, $($rest),*);
    }
}

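// The main dispatch DSL used by `codegen_intrinsic_call` below. Each arm names
// one or more intrinsics, optionally binds their type parameters with `<T>`,
// and destructures their arguments with the `o`/`c`/`v` modes of
// `intrinsic_arg!`. An arm taken from the invocation below looks like this:
//
//     ctpop, <T> (v arg) {
//         let res = fx.bcx.ins().popcnt(arg);
//         let res = CValue::by_val(res, fx.layout_of(T));
//         ret.write_cvalue(fx, res);
//     };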
macro intrinsic_match {
    ($fx:expr, $intrinsic:expr, $substs:expr, $args:expr,
    _ => $unknown:block;
    $(
        $($($name:tt).*)|+ $(if $cond:expr)?, $(<$($subst:ident),*>)? ($($a:ident $arg:ident),*) $content:block;
    )*) => {
        let _ = $substs; // Silence warning when substs is unused.
        match $intrinsic {
            $(
                $(intrinsic_pat!($($name).*))|* $(if $cond)? => {
                    #[allow(unused_parens, non_snake_case)]
                    {
                        $(
                            intrinsic_substs!($substs, 0, $($subst),*);
                        )?
                        if let [$($arg),*] = $args {
                            let ($($arg,)*) = (
                                $(intrinsic_arg!($a $fx, $arg),)*
                            );
                            #[warn(unused_parens, non_snake_case)]
                            {
                                $content
                            }
                        } else {
                            bug!("wrong number of args for intrinsic {:?}", $intrinsic);
                        }
                    }
                }
            )*
            _ => $unknown,
        }
    }
}

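// Lowers intrinsics that are simple wrappers around C library functions: each
// `name(args) -> ty => func` line turns the `name` intrinsic into a call to
// the symbol `func` via `easy_call`, e.g. `sqrtf32(flt) -> f32 => sqrtf`
// forwards `sqrtf32` to libm's `sqrtf`.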
macro call_intrinsic_match {
    ($fx:expr, $intrinsic:expr, $substs:expr, $ret:expr, $destination:expr, $args:expr, $(
        $name:ident($($arg:ident),*) -> $ty:ident => $func:ident,
    )*) => {
        match $intrinsic {
            $(
                stringify!($name) => {
                    assert!($substs.is_noop());
                    if let [$(ref $arg),*] = *$args {
                        let ($($arg,)*) = (
                            $(codegen_operand($fx, $arg),)*
                        );
                        let res = $fx.easy_call(stringify!($func), &[$($arg),*], $fx.tcx.types.$ty);
                        $ret.write_cvalue($fx, res);

                        if let Some((_, dest)) = $destination {
                            let ret_block = $fx.get_block(dest);
                            $fx.bcx.ins().jump(ret_block, &[]);
                            return;
                        } else {
                            unreachable!();
                        }
                    } else {
                        bug!("wrong number of args for intrinsic {:?}", $intrinsic);
                    }
                }
            )*
            _ => {}
        }
    }
}

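// The two validators below report a monomorphization error for unsupported
// input types. They still emit a trap afterwards so the function ends in
// well-formed CLIF and the Cranelift verifier doesn't also complain.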
macro validate_atomic_type($fx:ident, $intrinsic:ident, $span:ident, $ty:expr) {
    match $ty.kind() {
        ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
        _ => {
            $fx.tcx.sess.span_err(
                $span,
                &format!(
                    "`{}` intrinsic: expected basic integer or raw pointer type, found `{:?}`",
                    $intrinsic, $ty
                ),
            );
            // Prevent verifier error
            crate::trap::trap_unreachable($fx, "compilation should not have succeeded");
            return;
        }
    }
}

macro validate_simd_type($fx:ident, $intrinsic:ident, $span:ident, $ty:expr) {
    if !$ty.is_simd() {
        $fx.tcx.sess.span_err($span, &format!("invalid monomorphization of `{}` intrinsic: expected SIMD input type, found non-SIMD `{}`", $intrinsic, $ty));
        // Prevent verifier error
        crate::trap::trap_unreachable($fx, "compilation should not have succeeded");
        return;
    }
}

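// Maps a SIMD layout to a native Cranelift vector type where one exists.
// Returning `None` makes the callers fall back to lane-by-lane codegen.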
pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> Option<Type> {
    let (element, count) = match &layout.abi {
        Abi::Vector { element, count } => (element.clone(), *count),
        _ => unreachable!(),
    };

    match scalar_to_clif_type(tcx, element).by(u16::try_from(count).unwrap()) {
        // Cranelift currently only implements icmp for 128-bit vectors.
        Some(vector_ty) if vector_ty.bits() == 128 => Some(vector_ty),
        _ => None,
    }
}

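// Applies `f` to every lane of `val`, writing the results into the matching
// lanes of `ret`. The pair and reduce variants below follow the same scheme
// for two inputs and for folding all lanes into a single scalar respectively.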
fn simd_for_each_lane<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    val: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: impl Fn(
        &mut FunctionCx<'_, '_, 'tcx>,
        TyAndLayout<'tcx>,
        TyAndLayout<'tcx>,
        Value,
    ) -> CValue<'tcx>,
) {
    let layout = val.layout();

    let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
    let lane_layout = fx.layout_of(lane_ty);
    let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
    let ret_lane_layout = fx.layout_of(ret_lane_ty);
    assert_eq!(lane_count, ret_lane_count);

    for lane_idx in 0..lane_count {
        let lane_idx = mir::Field::new(lane_idx.try_into().unwrap());
        let lane = val.value_field(fx, lane_idx).load_scalar(fx);

        let res_lane = f(fx, lane_layout, ret_lane_layout, lane);

        ret.place_field(fx, lane_idx).write_cvalue(fx, res_lane);
    }
}

fn simd_pair_for_each_lane<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    x: CValue<'tcx>,
    y: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: impl Fn(
        &mut FunctionCx<'_, '_, 'tcx>,
        TyAndLayout<'tcx>,
        TyAndLayout<'tcx>,
        Value,
        Value,
    ) -> CValue<'tcx>,
) {
    assert_eq!(x.layout(), y.layout());
    let layout = x.layout();

    let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
    let lane_layout = fx.layout_of(lane_ty);
    let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
    let ret_lane_layout = fx.layout_of(ret_lane_ty);
    assert_eq!(lane_count, ret_lane_count);

    for lane in 0..lane_count {
        let lane = mir::Field::new(lane.try_into().unwrap());
        let x_lane = x.value_field(fx, lane).load_scalar(fx);
        let y_lane = y.value_field(fx, lane).load_scalar(fx);

        let res_lane = f(fx, lane_layout, ret_lane_layout, x_lane, y_lane);

        ret.place_field(fx, lane).write_cvalue(fx, res_lane);
    }
}

fn simd_reduce<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    val: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: impl Fn(&mut FunctionCx<'_, '_, 'tcx>, TyAndLayout<'tcx>, Value, Value) -> Value,
) {
    let (lane_count, lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
    let lane_layout = fx.layout_of(lane_ty);
    assert_eq!(lane_layout, ret.layout());

    let mut res_val = val.value_field(fx, mir::Field::new(0)).load_scalar(fx);
    for lane_idx in 1..lane_count {
        let lane =
            val.value_field(fx, mir::Field::new(lane_idx.try_into().unwrap())).load_scalar(fx);
        res_val = f(fx, lane_layout, res_val, lane);
    }
    let res = CValue::by_val(res_val, lane_layout);
    ret.write_cvalue(fx, res);
}

fn simd_reduce_bool<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    val: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: impl Fn(&mut FunctionCx<'_, '_, 'tcx>, Value, Value) -> Value,
) {
    let (lane_count, _lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
    assert!(ret.layout().ty.is_bool());

    let res_val = val.value_field(fx, mir::Field::new(0)).load_scalar(fx);
    let mut res_val = fx.bcx.ins().band_imm(res_val, 1); // mask to boolean
    for lane_idx in 1..lane_count {
        let lane =
            val.value_field(fx, mir::Field::new(lane_idx.try_into().unwrap())).load_scalar(fx);
        let lane = fx.bcx.ins().band_imm(lane, 1); // mask to boolean
        res_val = f(fx, res_val, lane);
    }
    let res = CValue::by_val(res_val, ret.layout());
    ret.write_cvalue(fx, res);
}

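// Expands a boolean comparison result to the all-zeros/all-ones pattern that
// SIMD comparisons return: `bint` gives 0 or 1, and negating that yields 0 or
// -1 (all bits set). For float lanes the integer mask is bitcast back to the
// float type.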
fn bool_to_zero_or_max_uint<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    layout: TyAndLayout<'tcx>,
    val: Value,
) -> CValue<'tcx> {
    let ty = fx.clif_type(layout.ty).unwrap();

    let int_ty = match ty {
        types::F32 => types::I32,
        types::F64 => types::I64,
        ty => ty,
    };

    let val = fx.bcx.ins().bint(int_ty, val);
    let mut res = fx.bcx.ins().ineg(val);

    if ty.is_float() {
        res = fx.bcx.ins().bitcast(ty, res);
    }

    CValue::by_val(res, layout)
}

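// Lane-wise comparison. The two-condition form (`$cc|$cc_f`) is used when the
// signed and unsigned integer conditions coincide (e.g. equality); the
// three-condition form picks between the unsigned, signed, and float
// conditions per lane type.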
macro simd_cmp {
    ($fx:expr, $cc:ident|$cc_f:ident($x:ident, $y:ident) -> $ret:ident) => {
        let vector_ty = clif_vector_type($fx.tcx, $x.layout());

        if let Some(vector_ty) = vector_ty {
            let x = $x.load_scalar($fx);
            let y = $y.load_scalar($fx);
            let val = $fx.bcx.ins().icmp(IntCC::$cc, x, y);

            // HACK This depends on the fact that icmp for vectors represents bools as 0 and !0, not 0 and 1.
            let val = $fx.bcx.ins().raw_bitcast(vector_ty, val);

            $ret.write_cvalue($fx, CValue::by_val(val, $ret.layout()));
        } else {
            simd_pair_for_each_lane(
                $fx,
                $x,
                $y,
                $ret,
                |fx, lane_layout, res_lane_layout, x_lane, y_lane| {
                    let res_lane = match lane_layout.ty.kind() {
                        ty::Uint(_) | ty::Int(_) => fx.bcx.ins().icmp(IntCC::$cc, x_lane, y_lane),
                        ty::Float(_) => fx.bcx.ins().fcmp(FloatCC::$cc_f, x_lane, y_lane),
                        _ => unreachable!("{:?}", lane_layout.ty),
                    };
                    bool_to_zero_or_max_uint(fx, res_lane_layout, res_lane)
                },
            );
        }
    },
    ($fx:expr, $cc_u:ident|$cc_s:ident|$cc_f:ident($x:ident, $y:ident) -> $ret:ident) => {
        // FIXME use vector icmp when possible
        simd_pair_for_each_lane(
            $fx,
            $x,
            $y,
            $ret,
            |fx, lane_layout, res_lane_layout, x_lane, y_lane| {
                let res_lane = match lane_layout.ty.kind() {
                    ty::Uint(_) => fx.bcx.ins().icmp(IntCC::$cc_u, x_lane, y_lane),
                    ty::Int(_) => fx.bcx.ins().icmp(IntCC::$cc_s, x_lane, y_lane),
                    ty::Float(_) => fx.bcx.ins().fcmp(FloatCC::$cc_f, x_lane, y_lane),
                    _ => unreachable!("{:?}", lane_layout.ty),
                };
                bool_to_zero_or_max_uint(fx, res_lane_layout, res_lane)
            },
        );
    },
}

macro simd_int_binop {
    ($fx:expr, $op:ident($x:ident, $y:ident) -> $ret:ident) => {
        simd_int_binop!($fx, $op|$op($x, $y) -> $ret);
    },
    ($fx:expr, $op_u:ident|$op_s:ident($x:ident, $y:ident) -> $ret:ident) => {
        simd_pair_for_each_lane(
            $fx,
            $x,
            $y,
            $ret,
            |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
                let res_lane = match lane_layout.ty.kind() {
                    ty::Uint(_) => fx.bcx.ins().$op_u(x_lane, y_lane),
                    ty::Int(_) => fx.bcx.ins().$op_s(x_lane, y_lane),
                    _ => unreachable!("{:?}", lane_layout.ty),
                };
                CValue::by_val(res_lane, ret_lane_layout)
            },
        );
    },
}

macro simd_int_flt_binop {
    ($fx:expr, $op:ident|$op_f:ident($x:ident, $y:ident) -> $ret:ident) => {
        simd_int_flt_binop!($fx, $op|$op|$op_f($x, $y) -> $ret);
    },
    ($fx:expr, $op_u:ident|$op_s:ident|$op_f:ident($x:ident, $y:ident) -> $ret:ident) => {
        simd_pair_for_each_lane(
            $fx,
            $x,
            $y,
            $ret,
            |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
                let res_lane = match lane_layout.ty.kind() {
                    ty::Uint(_) => fx.bcx.ins().$op_u(x_lane, y_lane),
                    ty::Int(_) => fx.bcx.ins().$op_s(x_lane, y_lane),
                    ty::Float(_) => fx.bcx.ins().$op_f(x_lane, y_lane),
                    _ => unreachable!("{:?}", lane_layout.ty),
                };
                CValue::by_val(res_lane, ret_lane_layout)
            },
        );
    },
}

macro simd_flt_binop($fx:expr, $op:ident($x:ident, $y:ident) -> $ret:ident) {
    simd_pair_for_each_lane(
        $fx,
        $x,
        $y,
        $ret,
        |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
            let res_lane = match lane_layout.ty.kind() {
                ty::Float(_) => fx.bcx.ins().$op(x_lane, y_lane),
                _ => unreachable!("{:?}", lane_layout.ty),
            };
            CValue::by_val(res_lane, ret_lane_layout)
        },
    );
}

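// Entry point for all intrinsic calls. Dispatch happens in stages: intrinsics
// without a return place are handled first, then `simd_*` is forwarded to the
// simd module, then libcall wrappers via `call_intrinsic_match!`, and finally
// everything else via `intrinsic_match!`.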
pub(crate) fn codegen_intrinsic_call<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    instance: Instance<'tcx>,
    args: &[mir::Operand<'tcx>],
    destination: Option<(CPlace<'tcx>, BasicBlock)>,
    span: Span,
) {
    let def_id = instance.def_id();
    let substs = instance.substs;

    let intrinsic = fx.tcx.item_name(def_id).as_str();
    let intrinsic = &intrinsic[..];

    let ret = match destination {
        Some((place, _)) => place,
        None => {
            // Insert non-returning intrinsics here
            match intrinsic {
                "abort" => {
                    trap_abort(fx, "Called intrinsic::abort.");
                }
                "transmute" => {
                    crate::base::codegen_panic(fx, "Transmuting to uninhabited type.", span);
                }
                _ => unimplemented!("unsupported intrinsic {}", intrinsic),
            }
            return;
        }
    };

    if intrinsic.starts_with("simd_") {
        self::simd::codegen_simd_intrinsic_call(fx, instance, args, ret, span);
        let ret_block = fx.get_block(destination.expect("SIMD intrinsics don't diverge").1);
        fx.bcx.ins().jump(ret_block, &[]);
        return;
    }

    let usize_layout = fx.layout_of(fx.tcx.types.usize);

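    // These float math intrinsics are lowered to calls to the corresponding
    // libm functions (or compiler-builtins symbols for `powi`) instead of
    // being implemented inline.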
    call_intrinsic_match! {
        fx, intrinsic, substs, ret, destination, args,
        expf32(flt) -> f32 => expf,
        expf64(flt) -> f64 => exp,
        exp2f32(flt) -> f32 => exp2f,
        exp2f64(flt) -> f64 => exp2,
        sqrtf32(flt) -> f32 => sqrtf,
        sqrtf64(flt) -> f64 => sqrt,
        powif32(a, x) -> f32 => __powisf2, // compiler-builtins
        powif64(a, x) -> f64 => __powidf2, // compiler-builtins
        powf32(a, x) -> f32 => powf,
        powf64(a, x) -> f64 => pow,
        logf32(flt) -> f32 => logf,
        logf64(flt) -> f64 => log,
        log2f32(flt) -> f32 => log2f,
        log2f64(flt) -> f64 => log2,
        log10f32(flt) -> f32 => log10f,
        log10f64(flt) -> f64 => log10,
        fabsf32(flt) -> f32 => fabsf,
        fabsf64(flt) -> f64 => fabs,
        fmaf32(x, y, z) -> f32 => fmaf,
        fmaf64(x, y, z) -> f64 => fma,
        copysignf32(x, y) -> f32 => copysignf,
        copysignf64(x, y) -> f64 => copysign,

        // rounding variants
        // FIXME use clif insts
        floorf32(flt) -> f32 => floorf,
        floorf64(flt) -> f64 => floor,
        ceilf32(flt) -> f32 => ceilf,
        ceilf64(flt) -> f64 => ceil,
        truncf32(flt) -> f32 => truncf,
        truncf64(flt) -> f64 => trunc,
        roundf32(flt) -> f32 => roundf,
        roundf64(flt) -> f64 => round,

        // trigonometry
        sinf32(flt) -> f32 => sinf,
        sinf64(flt) -> f64 => sin,
        cosf32(flt) -> f32 => cosf,
        cosf64(flt) -> f64 => cos,
        tanf32(flt) -> f32 => tanf,
        tanf64(flt) -> f64 => tan,
    }

    intrinsic_match! {
        fx, intrinsic, substs, args,
        _ => {
            fx.tcx.sess.span_fatal(span, &format!("unsupported intrinsic {}", intrinsic));
        };

        assume, (c _a) {};
        likely | unlikely, (c a) {
            ret.write_cvalue(fx, a);
        };
        breakpoint, () {
            fx.bcx.ins().debugtrap();
        };
        copy | copy_nonoverlapping, <elem_ty> (v src, v dst, v count) {
            let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
            assert_eq!(args.len(), 3);
            let byte_amount = if elem_size != 1 {
                fx.bcx.ins().imul_imm(count, elem_size as i64)
            } else {
                count
            };

            if intrinsic.contains("nonoverlapping") {
                // FIXME emit_small_memcpy
                fx.bcx.call_memcpy(fx.module.target_config(), dst, src, byte_amount);
            } else {
                // FIXME emit_small_memmove
                fx.bcx.call_memmove(fx.module.target_config(), dst, src, byte_amount);
            }
        };
        // NOTE: the volatile variants have src and dst swapped
        volatile_copy_memory | volatile_copy_nonoverlapping_memory, <elem_ty> (v dst, v src, v count) {
            let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
            assert_eq!(args.len(), 3);
            let byte_amount = if elem_size != 1 {
                fx.bcx.ins().imul_imm(count, elem_size as i64)
            } else {
                count
            };

            // FIXME make the copy actually volatile when using emit_small_mem{cpy,move}
            if intrinsic.contains("nonoverlapping") {
                // FIXME emit_small_memcpy
                fx.bcx.call_memcpy(fx.module.target_config(), dst, src, byte_amount);
            } else {
                // FIXME emit_small_memmove
                fx.bcx.call_memmove(fx.module.target_config(), dst, src, byte_amount);
            }
        };
        size_of_val, <T> (c ptr) {
            let layout = fx.layout_of(T);
            let size = if layout.is_unsized() {
                let (_ptr, info) = ptr.load_scalar_pair(fx);
                let (size, _align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
                size
            } else {
                fx
                    .bcx
                    .ins()
                    .iconst(fx.pointer_type, layout.size.bytes() as i64)
            };
            ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
        };
        min_align_of_val, <T> (c ptr) {
            let layout = fx.layout_of(T);
            let align = if layout.is_unsized() {
                let (_ptr, info) = ptr.load_scalar_pair(fx);
                let (_size, align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
                align
            } else {
                fx
                    .bcx
                    .ins()
                    .iconst(fx.pointer_type, layout.align.abi.bytes() as i64)
            };
            ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
        };

        _ if intrinsic.starts_with("unchecked_") || intrinsic == "exact_div", (c x, c y) {
            // FIXME trap on overflow
            let bin_op = match intrinsic {
                "unchecked_add" => BinOp::Add,
                "unchecked_sub" => BinOp::Sub,
                "unchecked_div" | "exact_div" => BinOp::Div,
                "unchecked_rem" => BinOp::Rem,
                "unchecked_shl" => BinOp::Shl,
                "unchecked_shr" => BinOp::Shr,
                _ => unreachable!("intrinsic {}", intrinsic),
            };
            let res = crate::num::codegen_int_binop(fx, bin_op, x, y);
            ret.write_cvalue(fx, res);
        };
        _ if intrinsic.ends_with("_with_overflow"), (c x, c y) {
            assert_eq!(x.layout().ty, y.layout().ty);
            let bin_op = match intrinsic {
                "add_with_overflow" => BinOp::Add,
                "sub_with_overflow" => BinOp::Sub,
                "mul_with_overflow" => BinOp::Mul,
                _ => unreachable!("intrinsic {}", intrinsic),
            };

            let res = crate::num::codegen_checked_int_binop(
                fx,
                bin_op,
                x,
                y,
            );
            ret.write_cvalue(fx, res);
        };
        _ if intrinsic.starts_with("saturating_"), <T> (c lhs, c rhs) {
            assert_eq!(lhs.layout().ty, rhs.layout().ty);
            let bin_op = match intrinsic {
                "saturating_add" => BinOp::Add,
                "saturating_sub" => BinOp::Sub,
                _ => unreachable!("intrinsic {}", intrinsic),
            };

            let signed = type_sign(T);

            let checked_res = crate::num::codegen_checked_int_binop(
                fx,
                bin_op,
                lhs,
                rhs,
            );

            let (val, has_overflow) = checked_res.load_scalar_pair(fx);
            let clif_ty = fx.clif_type(T).unwrap();

            // `select.i8` is not implemented by Cranelift.
            let has_overflow = fx.bcx.ins().uextend(types::I32, has_overflow);

            let (min, max) = type_min_max_value(&mut fx.bcx, clif_ty, signed);

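            // On overflow, unsigned ops saturate to max (add) or min (sub).
            // For signed ops the saturation bound depends on the sign of rhs:
            // an overflowing `lhs + rhs` with rhs >= 0 can only overflow
            // upwards (clamp to max), with rhs < 0 only downwards (clamp to
            // min), and vice versa for subtraction.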
            let val = match (intrinsic, signed) {
                ("saturating_add", false) => fx.bcx.ins().select(has_overflow, max, val),
                ("saturating_sub", false) => fx.bcx.ins().select(has_overflow, min, val),
                ("saturating_add", true) => {
                    let rhs = rhs.load_scalar(fx);
                    let rhs_ge_zero = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
                    let sat_val = fx.bcx.ins().select(rhs_ge_zero, max, min);
                    fx.bcx.ins().select(has_overflow, sat_val, val)
                }
                ("saturating_sub", true) => {
                    let rhs = rhs.load_scalar(fx);
                    let rhs_ge_zero = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
                    let sat_val = fx.bcx.ins().select(rhs_ge_zero, min, max);
                    fx.bcx.ins().select(has_overflow, sat_val, val)
                }
                _ => unreachable!(),
            };

            let res = CValue::by_val(val, fx.layout_of(T));

            ret.write_cvalue(fx, res);
        };
        rotate_left, <T>(v x, v y) {
            let layout = fx.layout_of(T);
            let res = fx.bcx.ins().rotl(x, y);
            ret.write_cvalue(fx, CValue::by_val(res, layout));
        };
        rotate_right, <T>(v x, v y) {
            let layout = fx.layout_of(T);
            let res = fx.bcx.ins().rotr(x, y);
            ret.write_cvalue(fx, CValue::by_val(res, layout));
        };

        // The only difference between `offset` and `arith_offset` is their UB
        // rules. Because Cranelift has no concept of UB, both are codegen'ed
        // the same way.
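        // The byte offset is `offset * size_of::<T>()`: e.g. offsetting a
        // `*const u32` by `n` adds `n * 4` bytes to the pointer value.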
        offset | arith_offset, (c base, v offset) {
            let pointee_ty = base.layout().ty.builtin_deref(true).unwrap().ty;
            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
            let ptr_diff = if pointee_size != 1 {
                fx.bcx.ins().imul_imm(offset, pointee_size as i64)
            } else {
                offset
            };
            let base_val = base.load_scalar(fx);
            let res = fx.bcx.ins().iadd(base_val, ptr_diff);
            ret.write_cvalue(fx, CValue::by_val(res, base.layout()));
        };

        transmute, (c from) {
            ret.write_cvalue_transmute(fx, from);
        };
        write_bytes | volatile_set_memory, (c dst, v val, v count) {
            let pointee_ty = dst.layout().ty.builtin_deref(true).unwrap().ty;
            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
            let count = if pointee_size != 1 {
                fx.bcx.ins().imul_imm(count, pointee_size as i64)
            } else {
                count
            };
            let dst_ptr = dst.load_scalar(fx);
            // FIXME make the memset actually volatile when switching to emit_small_memset
            // FIXME use emit_small_memset
            fx.bcx.call_memset(fx.module.target_config(), dst_ptr, val, count);
        };
        ctlz | ctlz_nonzero, <T> (v arg) {
            // FIXME trap on `ctlz_nonzero` with zero arg.
            let res = if T == fx.tcx.types.u128 || T == fx.tcx.types.i128 {
                // FIXME verify this algorithm is correct
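                // Split the 128-bit value into two 64-bit halves. If the high
                // half is all zeros, the leading-zero count is 64 plus the
                // count of the low half; otherwise it is the count of the
                // high half alone.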
                let (lsb, msb) = fx.bcx.ins().isplit(arg);
                let lsb_lz = fx.bcx.ins().clz(lsb);
                let msb_lz = fx.bcx.ins().clz(msb);
                let msb_is_zero = fx.bcx.ins().icmp_imm(IntCC::Equal, msb, 0);
                let lsb_lz_plus_64 = fx.bcx.ins().iadd_imm(lsb_lz, 64);
                let res = fx.bcx.ins().select(msb_is_zero, lsb_lz_plus_64, msb_lz);
                fx.bcx.ins().uextend(types::I128, res)
            } else {
                fx.bcx.ins().clz(arg)
            };
            let res = CValue::by_val(res, fx.layout_of(T));
            ret.write_cvalue(fx, res);
        };
        cttz | cttz_nonzero, <T> (v arg) {
            // FIXME trap on `cttz_nonzero` with zero arg.
            let res = if T == fx.tcx.types.u128 || T == fx.tcx.types.i128 {
                // FIXME verify this algorithm is correct
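                // Mirror image of the ctlz case: if the low half is all
                // zeros, the trailing-zero count is 64 plus the count of the
                // high half; otherwise it is the count of the low half alone.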
                let (lsb, msb) = fx.bcx.ins().isplit(arg);
                let lsb_tz = fx.bcx.ins().ctz(lsb);
                let msb_tz = fx.bcx.ins().ctz(msb);
                let lsb_is_zero = fx.bcx.ins().icmp_imm(IntCC::Equal, lsb, 0);
                let msb_tz_plus_64 = fx.bcx.ins().iadd_imm(msb_tz, 64);
                let res = fx.bcx.ins().select(lsb_is_zero, msb_tz_plus_64, lsb_tz);
                fx.bcx.ins().uextend(types::I128, res)
            } else {
                fx.bcx.ins().ctz(arg)
            };
            let res = CValue::by_val(res, fx.layout_of(T));
            ret.write_cvalue(fx, res);
        };
        ctpop, <T> (v arg) {
            let res = fx.bcx.ins().popcnt(arg);
            let res = CValue::by_val(res, fx.layout_of(T));
            ret.write_cvalue(fx, res);
        };
        bitreverse, <T> (v arg) {
            let res = fx.bcx.ins().bitrev(arg);
            let res = CValue::by_val(res, fx.layout_of(T));
            ret.write_cvalue(fx, res);
        };
        bswap, <T> (v arg) {
            // FIXME(CraneStation/cranelift#794) add bswap instruction to cranelift
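            // Byte-swap by shifting every byte to its mirrored position,
            // masking it out, and OR-ing the pieces back together; 128-bit
            // values are handled by swapping each 64-bit half and
            // concatenating them in reverse order.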
            fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
                match bcx.func.dfg.value_type(v) {
                    types::I8 => v,

                    // https://code.woboq.org/gcc/include/bits/byteswap.h.html
                    types::I16 => {
                        let tmp1 = bcx.ins().ishl_imm(v, 8);
                        let n1 = bcx.ins().band_imm(tmp1, 0xFF00);

                        let tmp2 = bcx.ins().ushr_imm(v, 8);
                        let n2 = bcx.ins().band_imm(tmp2, 0x00FF);

                        bcx.ins().bor(n1, n2)
                    }
                    types::I32 => {
                        let tmp1 = bcx.ins().ishl_imm(v, 24);
                        let n1 = bcx.ins().band_imm(tmp1, 0xFF00_0000);

                        let tmp2 = bcx.ins().ishl_imm(v, 8);
                        let n2 = bcx.ins().band_imm(tmp2, 0x00FF_0000);

                        let tmp3 = bcx.ins().ushr_imm(v, 8);
                        let n3 = bcx.ins().band_imm(tmp3, 0x0000_FF00);

                        let tmp4 = bcx.ins().ushr_imm(v, 24);
                        let n4 = bcx.ins().band_imm(tmp4, 0x0000_00FF);

                        let or_tmp1 = bcx.ins().bor(n1, n2);
                        let or_tmp2 = bcx.ins().bor(n3, n4);
                        bcx.ins().bor(or_tmp1, or_tmp2)
                    }
                    types::I64 => {
                        let tmp1 = bcx.ins().ishl_imm(v, 56);
                        let n1 = bcx.ins().band_imm(tmp1, 0xFF00_0000_0000_0000u64 as i64);

                        let tmp2 = bcx.ins().ishl_imm(v, 40);
                        let n2 = bcx.ins().band_imm(tmp2, 0x00FF_0000_0000_0000u64 as i64);

                        let tmp3 = bcx.ins().ishl_imm(v, 24);
                        let n3 = bcx.ins().band_imm(tmp3, 0x0000_FF00_0000_0000u64 as i64);

                        let tmp4 = bcx.ins().ishl_imm(v, 8);
                        let n4 = bcx.ins().band_imm(tmp4, 0x0000_00FF_0000_0000u64 as i64);

                        let tmp5 = bcx.ins().ushr_imm(v, 8);
                        let n5 = bcx.ins().band_imm(tmp5, 0x0000_0000_FF00_0000u64 as i64);

                        let tmp6 = bcx.ins().ushr_imm(v, 24);
                        let n6 = bcx.ins().band_imm(tmp6, 0x0000_0000_00FF_0000u64 as i64);

                        let tmp7 = bcx.ins().ushr_imm(v, 40);
                        let n7 = bcx.ins().band_imm(tmp7, 0x0000_0000_0000_FF00u64 as i64);

                        let tmp8 = bcx.ins().ushr_imm(v, 56);
                        let n8 = bcx.ins().band_imm(tmp8, 0x0000_0000_0000_00FFu64 as i64);

                        let or_tmp1 = bcx.ins().bor(n1, n2);
                        let or_tmp2 = bcx.ins().bor(n3, n4);
                        let or_tmp3 = bcx.ins().bor(n5, n6);
                        let or_tmp4 = bcx.ins().bor(n7, n8);

                        let or_tmp5 = bcx.ins().bor(or_tmp1, or_tmp2);
                        let or_tmp6 = bcx.ins().bor(or_tmp3, or_tmp4);
                        bcx.ins().bor(or_tmp5, or_tmp6)
                    }
                    types::I128 => {
                        let (lo, hi) = bcx.ins().isplit(v);
                        let lo = swap(bcx, lo);
                        let hi = swap(bcx, hi);
                        bcx.ins().iconcat(hi, lo)
                    }
                    ty => unreachable!("bswap {}", ty),
                }
            }
            let res = CValue::by_val(swap(&mut fx.bcx, arg), fx.layout_of(T));
            ret.write_cvalue(fx, res);
        };
        assert_inhabited | assert_zero_valid | assert_uninit_valid, <T> () {
            let layout = fx.layout_of(T);
            if layout.abi.is_uninhabited() {
                with_no_trimmed_paths(|| crate::base::codegen_panic(
                    fx,
                    &format!("attempted to instantiate uninhabited type `{}`", T),
                    span,
                ));
                return;
            }

            if intrinsic == "assert_zero_valid" && !layout.might_permit_raw_init(fx, /*zero:*/ true).unwrap() {
                with_no_trimmed_paths(|| crate::base::codegen_panic(
                    fx,
                    &format!("attempted to zero-initialize type `{}`, which is invalid", T),
                    span,
                ));
                return;
            }

            if intrinsic == "assert_uninit_valid" && !layout.might_permit_raw_init(fx, /*zero:*/ false).unwrap() {
                with_no_trimmed_paths(|| crate::base::codegen_panic(
                    fx,
                    &format!("attempted to leave type `{}` uninitialized, which is invalid", T),
                    span,
                ));
                return;
            }
        };

        volatile_load | unaligned_volatile_load, (c ptr) {
            // Cranelift treats loads as volatile by default
            // FIXME correctly handle unaligned_volatile_load
            let inner_layout =
                fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
            let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
            ret.write_cvalue(fx, val);
        };
        volatile_store | unaligned_volatile_store, (v ptr, c val) {
            // Cranelift treats stores as volatile by default
            // FIXME correctly handle unaligned_volatile_store
            let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
            dest.write_cvalue(fx, val);
        };

        pref_align_of | min_align_of | needs_drop | type_id | type_name | variant_count, () {
            let const_val =
                fx.tcx.const_eval_instance(ParamEnv::reveal_all(), instance, None).unwrap();
            let val = crate::constant::codegen_const_value(
                fx,
                const_val,
                ret.layout().ty,
            );
            ret.write_cvalue(fx, val);
        };

        ptr_offset_from, <T> (v ptr, v base) {
            let isize_layout = fx.layout_of(fx.tcx.types.isize);

            let pointee_size: u64 = fx.layout_of(T).size.bytes();
            let diff = fx.bcx.ins().isub(ptr, base);
            // FIXME this can be an exact division.
            let val = CValue::by_val(fx.bcx.ins().sdiv_imm(diff, pointee_size as i64), isize_layout);
            ret.write_cvalue(fx, val);
        };

        ptr_guaranteed_eq, (c a, c b) {
            let val = crate::num::codegen_ptr_binop(fx, BinOp::Eq, a, b);
            ret.write_cvalue(fx, val);
        };

        ptr_guaranteed_ne, (c a, c b) {
            let val = crate::num::codegen_ptr_binop(fx, BinOp::Ne, a, b);
            ret.write_cvalue(fx, val);
        };

        caller_location, () {
            let caller_location = fx.get_caller_location(span);
            ret.write_cvalue(fx, caller_location);
        };

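        // The atomic intrinsics are matched with `starts_with`, so the
        // ordering suffix (`_acq`, `_rel`, ...) is deliberately ignored:
        // every ordering is lowered to the same Cranelift instruction.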
        _ if intrinsic.starts_with("atomic_fence"), () {
            fx.bcx.ins().fence();
        };
        _ if intrinsic.starts_with("atomic_singlethreadfence"), () {
            // FIXME use a compiler fence once Cranelift supports it
            fx.bcx.ins().fence();
        };
        _ if intrinsic.starts_with("atomic_load"), <T> (v ptr) {
            validate_atomic_type!(fx, intrinsic, span, T);
            let ty = fx.clif_type(T).unwrap();

            let val = fx.bcx.ins().atomic_load(ty, MemFlags::trusted(), ptr);

            let val = CValue::by_val(val, fx.layout_of(T));
            ret.write_cvalue(fx, val);
        };
        _ if intrinsic.starts_with("atomic_store"), (v ptr, c val) {
            validate_atomic_type!(fx, intrinsic, span, val.layout().ty);

            let val = val.load_scalar(fx);

            fx.bcx.ins().atomic_store(MemFlags::trusted(), val, ptr);
        };
        _ if intrinsic.starts_with("atomic_xchg"), (v ptr, c new) {
            let layout = new.layout();
            validate_atomic_type!(fx, intrinsic, span, layout.ty);
            let ty = fx.clif_type(layout.ty).unwrap();

            let new = new.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xchg, ptr, new);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        };
        _ if intrinsic.starts_with("atomic_cxchg"), (v ptr, c test_old, c new) { // both atomic_cxchg_* and atomic_cxchgweak_*
            let layout = new.layout();
            validate_atomic_type!(fx, intrinsic, span, layout.ty);

            let test_old = test_old.load_scalar(fx);
            let new = new.load_scalar(fx);

            let old = fx.bcx.ins().atomic_cas(MemFlags::trusted(), ptr, test_old, new);
            let is_eq = fx.bcx.ins().icmp(IntCC::Equal, old, test_old);

            let ret_val = CValue::by_val_pair(old, fx.bcx.ins().bint(types::I8, is_eq), ret.layout());
            ret.write_cvalue(fx, ret_val)
        };

        _ if intrinsic.starts_with("atomic_xadd"), (v ptr, c amount) {
            let layout = amount.layout();
            validate_atomic_type!(fx, intrinsic, span, layout.ty);
            let ty = fx.clif_type(layout.ty).unwrap();

            let amount = amount.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Add, ptr, amount);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        };
        _ if intrinsic.starts_with("atomic_xsub"), (v ptr, c amount) {
            let layout = amount.layout();
            validate_atomic_type!(fx, intrinsic, span, layout.ty);
            let ty = fx.clif_type(layout.ty).unwrap();

            let amount = amount.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Sub, ptr, amount);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        };
        _ if intrinsic.starts_with("atomic_and"), (v ptr, c src) {
            let layout = src.layout();
            validate_atomic_type!(fx, intrinsic, span, layout.ty);
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::And, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        };
        _ if intrinsic.starts_with("atomic_or"), (v ptr, c src) {
            let layout = src.layout();
            validate_atomic_type!(fx, intrinsic, span, layout.ty);
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Or, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        };
        _ if intrinsic.starts_with("atomic_xor"), (v ptr, c src) {
            let layout = src.layout();
            validate_atomic_type!(fx, intrinsic, span, layout.ty);
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xor, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        };

        // FIXME https://github.com/bytecodealliance/wasmtime/issues/2647
        _ if intrinsic.starts_with("atomic_nand"), (v ptr, c src) {
            let layout = src.layout();
            validate_atomic_type!(fx, intrinsic, span, layout.ty);
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Nand, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        };
        _ if intrinsic.starts_with("atomic_max"), (v ptr, c src) {
            let layout = src.layout();
            validate_atomic_type!(fx, intrinsic, span, layout.ty);
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smax, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        };
        _ if intrinsic.starts_with("atomic_umax"), (v ptr, c src) {
            let layout = src.layout();
            validate_atomic_type!(fx, intrinsic, span, layout.ty);
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umax, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        };
        _ if intrinsic.starts_with("atomic_min"), (v ptr, c src) {
            let layout = src.layout();
            validate_atomic_type!(fx, intrinsic, span, layout.ty);
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smin, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        };
        _ if intrinsic.starts_with("atomic_umin"), (v ptr, c src) {
            let layout = src.layout();
            validate_atomic_type!(fx, intrinsic, span, layout.ty);
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umin, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        };

        minnumf32, (v a, v b) {
            let val = fx.bcx.ins().fmin(a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
            ret.write_cvalue(fx, val);
        };
        minnumf64, (v a, v b) {
            let val = fx.bcx.ins().fmin(a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
            ret.write_cvalue(fx, val);
        };
        maxnumf32, (v a, v b) {
            let val = fx.bcx.ins().fmax(a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
            ret.write_cvalue(fx, val);
        };
        maxnumf64, (v a, v b) {
            let val = fx.bcx.ins().fmax(a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
            ret.write_cvalue(fx, val);
        };

        try, (v f, v data, v _catch_fn) {
            // FIXME once unwinding is supported, change this to actually catch panics
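            // For now `f` is simply called directly; the zero return value
            // written below means "no panic occurred".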
            let f_sig = fx.bcx.func.import_signature(Signature {
                call_conv: CallConv::triple_default(fx.triple()),
                params: vec![AbiParam::new(fx.bcx.func.dfg.value_type(data))],
                returns: vec![],
            });

            fx.bcx.ins().call_indirect(f_sig, f, &[data]);

            let layout = ret.layout();
            let ret_val = CValue::const_val(fx, layout, ty::ScalarInt::null(layout.size));
            ret.write_cvalue(fx, ret_val);
        };

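        // The `_fast` variants only differ from the plain float operators in
        // the optimizations they allow, so they are currently codegen'ed
        // exactly like the ordinary operations.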
        fadd_fast | fsub_fast | fmul_fast | fdiv_fast | frem_fast, (c x, c y) {
            let res = crate::num::codegen_float_binop(fx, match intrinsic {
                "fadd_fast" => BinOp::Add,
                "fsub_fast" => BinOp::Sub,
                "fmul_fast" => BinOp::Mul,
                "fdiv_fast" => BinOp::Div,
                "frem_fast" => BinOp::Rem,
                _ => unreachable!(),
            }, x, y);
            ret.write_cvalue(fx, res);
        };
        float_to_int_unchecked, (v f) {
            let res = crate::cast::clif_int_or_float_cast(
                fx,
                f,
                false,
                fx.clif_type(ret.layout().ty).unwrap(),
                type_sign(ret.layout().ty),
            );
            ret.write_cvalue(fx, CValue::by_val(res, ret.layout()));
        };
    }

    if let Some((_, dest)) = destination {
        let ret_block = fx.get_block(dest);
        fx.bcx.ins().jump(ret_block, &[]);
    } else {
        trap_unreachable(fx, "[corruption] Diverging intrinsic returned.");
    }
}