//! Codegen of intrinsics. This includes `extern "rust-intrinsic"`, `extern "platform-intrinsic"`
//! and LLVM intrinsics that have symbol names starting with `llvm.`.

pub(crate) use cpuid::codegen_cpuid_call;
pub(crate) use llvm::codegen_llvm_intrinsic_call;

use rustc_middle::ty::print::with_no_trimmed_paths;
use rustc_span::symbol::{kw, sym};

use crate::prelude::*;
use cranelift_codegen::ir::AtomicRmwOp;
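
// Argument-binding modes used by `intrinsic_arg!` in the match arms below:
// `o` forwards the MIR operand unchanged, `c` lowers it to a `CValue`, and
// `v` lowers it and immediately loads it as a scalar `Value`.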
macro intrinsic_arg {
    (o $fx:expr, $arg:ident) => {
        $arg
    };
    (c $fx:expr, $arg:ident) => {
        codegen_operand($fx, $arg)
    };
    (v $fx:expr, $arg:ident) => {
        codegen_operand($fx, $arg).load_scalar($fx)
    };
}
macro intrinsic_substs {
    ($substs:expr, $index:expr,) => {},
    ($substs:expr, $index:expr, $first:ident $(,$rest:ident)*) => {
        let $first = $substs.type_at($index);
        intrinsic_substs!($substs, $index + 1, $($rest),*);
    },
}
macro intrinsic_match {
    ($fx:expr, $intrinsic:expr, $substs:expr, $args:expr,
    _ => $unknown:block;
    $(
        $($($name:tt).*)|+ $(if $cond:expr)?, $(<$($subst:ident),*>)? ($($a:ident $arg:ident),*) $content:block;
    )*) => {
        let _ = $substs; // Silence warning when substs is unused.
        match $intrinsic {
            $(
                $(intrinsic_pat!($($name).*))|* $(if $cond)? => {
                    #[allow(unused_parens, non_snake_case)]
                    {
                        $(intrinsic_substs!($substs, 0, $($subst),*);)?
                        if let [$($arg),*] = $args {
                            let ($($arg,)*) = (
                                $(intrinsic_arg!($a $fx, $arg),)*
                            );
                            #[warn(unused_parens, non_snake_case)]
                            $content
                        } else {
                            bug!("wrong number of args for intrinsic {:?}", $intrinsic);
                        }
                    }
                }
            )*
            _ => $unknown,
        }
    }
}
macro call_intrinsic_match {
    ($fx:expr, $intrinsic:expr, $substs:expr, $ret:expr, $destination:expr, $args:expr, $(
        $name:ident($($arg:ident),*) -> $ty:ident => $func:ident,
    )*) => {
        match $intrinsic {
            $(
                sym::$name => {
                    assert!($substs.is_noop());
                    if let [$(ref $arg),*] = *$args {
                        let ($($arg,)*) = (
                            $(codegen_operand($fx, $arg),)*
                        );
                        let res = $fx.easy_call(stringify!($func), &[$($arg),*], $fx.tcx.types.$ty);
                        $ret.write_cvalue($fx, res);

                        if let Some((_, dest)) = $destination {
                            let ret_block = $fx.get_block(dest);
                            $fx.bcx.ins().jump(ret_block, &[]);
                            return;
                        }
                    } else {
                        bug!("wrong number of args for intrinsic {:?}", $intrinsic);
                    }
                }
            )*
            _ => {}
        }
    }
}
macro validate_atomic_type($fx:ident, $intrinsic:ident, $span:ident, $ty:expr) {
    match $ty.kind() {
        ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
        _ => {
            $fx.tcx.sess.span_err($span, &format!(
                "`{}` intrinsic: expected basic integer or raw pointer type, found `{:?}`",
                $intrinsic, $ty,
            ));
            // Prevent verifier error
            crate::trap::trap_unreachable($fx, "compilation should not have succeeded");
            return;
        }
    }
}
macro validate_simd_type($fx:ident, $intrinsic:ident, $span:ident, $ty:expr) {
    if !$ty.is_simd() {
        $fx.tcx.sess.span_err($span, &format!("invalid monomorphization of `{}` intrinsic: expected SIMD input type, found non-SIMD `{}`", $intrinsic, $ty));
        // Prevent verifier error
        crate::trap::trap_unreachable($fx, "compilation should not have succeeded");
        return;
    }
}
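
/// Returns the Cranelift vector type corresponding to a SIMD layout, if there
/// is one. Cranelift currently only implements `icmp` for 128-bit vectors, so
/// everything else returns `None` and is handled lane by lane instead.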
pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> Option<Type> {
    let (element, count) = match &layout.abi {
        Abi::Vector { element, count } => (element.clone(), *count),
        _ => unreachable!(),
    };

    match scalar_to_clif_type(tcx, element).by(u16::try_from(count).unwrap()) {
        // Cranelift currently only implements icmp for 128-bit vectors.
        Some(vector_ty) if vector_ty.bits() == 128 => Some(vector_ty),
        _ => None,
    }
}
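
/// Applies `f` to every lane of `val` and writes each result to the matching
/// lane of `ret`. Input and output vectors must have the same lane count.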
fn simd_for_each_lane<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    val: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: impl Fn(
        &mut FunctionCx<'_, '_, 'tcx>,
        TyAndLayout<'tcx>,
        TyAndLayout<'tcx>,
        Value,
    ) -> CValue<'tcx>,
) {
    let layout = val.layout();

    let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
    let lane_layout = fx.layout_of(lane_ty);
    let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
    let ret_lane_layout = fx.layout_of(ret_lane_ty);
    assert_eq!(lane_count, ret_lane_count);

    for lane_idx in 0..lane_count {
        let lane_idx = mir::Field::new(lane_idx.try_into().unwrap());
        let lane = val.value_field(fx, lane_idx).load_scalar(fx);

        let res_lane = f(fx, lane_layout, ret_lane_layout, lane);

        ret.place_field(fx, lane_idx).write_cvalue(fx, res_lane);
    }
}
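
/// Like `simd_for_each_lane`, but zips the lanes of two vectors with the same
/// layout and passes each `(x_lane, y_lane)` pair to `f`.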
fn simd_pair_for_each_lane<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    x: CValue<'tcx>,
    y: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: impl Fn(
        &mut FunctionCx<'_, '_, 'tcx>,
        TyAndLayout<'tcx>,
        TyAndLayout<'tcx>,
        Value,
        Value,
    ) -> CValue<'tcx>,
) {
    assert_eq!(x.layout(), y.layout());
    let layout = x.layout();

    let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
    let lane_layout = fx.layout_of(lane_ty);
    let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
    let ret_lane_layout = fx.layout_of(ret_lane_ty);
    assert_eq!(lane_count, ret_lane_count);

    for lane in 0..lane_count {
        let lane = mir::Field::new(lane.try_into().unwrap());
        let x_lane = x.value_field(fx, lane).load_scalar(fx);
        let y_lane = y.value_field(fx, lane).load_scalar(fx);

        let res_lane = f(fx, lane_layout, ret_lane_layout, x_lane, y_lane);

        ret.place_field(fx, lane).write_cvalue(fx, res_lane);
    }
}
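
/// Folds all lanes of `val` into a single scalar by repeatedly applying `f`,
/// starting from lane 0. The result has the lane type, not the vector type.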
fn simd_reduce<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    val: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: impl Fn(&mut FunctionCx<'_, '_, 'tcx>, TyAndLayout<'tcx>, Value, Value) -> Value,
) {
    let (lane_count, lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
    let lane_layout = fx.layout_of(lane_ty);
    assert_eq!(lane_layout, ret.layout());

    let mut res_val = val.value_field(fx, mir::Field::new(0)).load_scalar(fx);
    for lane_idx in 1..lane_count {
        let lane =
            val.value_field(fx, mir::Field::new(lane_idx.try_into().unwrap())).load_scalar(fx);
        res_val = f(fx, lane_layout, res_val, lane);
    }
    let res = CValue::by_val(res_val, lane_layout);
    ret.write_cvalue(fx, res);
}
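
/// Boolean reduction: masks every lane down to its low bit and combines the
/// lanes pairwise with `f` into a single `bool`.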
fn simd_reduce_bool<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    val: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: impl Fn(&mut FunctionCx<'_, '_, 'tcx>, Value, Value) -> Value,
) {
    let (lane_count, _lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
    assert!(ret.layout().ty.is_bool());

    let res_val = val.value_field(fx, mir::Field::new(0)).load_scalar(fx);
    let mut res_val = fx.bcx.ins().band_imm(res_val, 1); // mask to boolean
    for lane_idx in 1..lane_count {
        let lane =
            val.value_field(fx, mir::Field::new(lane_idx.try_into().unwrap())).load_scalar(fx);
        let lane = fx.bcx.ins().band_imm(lane, 1); // mask to boolean
        res_val = f(fx, res_val, lane);
    }
    let res = CValue::by_val(res_val, ret.layout());
    ret.write_cvalue(fx, res);
}
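
/// Expands a boolean `Value` to all-zeros or all-ones of the given layout's
/// type. Float results are produced by negating in the same-width integer
/// type and bitcasting back, matching the 0/!0 masks SIMD comparisons return.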
fn bool_to_zero_or_max_uint<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    layout: TyAndLayout<'tcx>,
    val: Value,
) -> CValue<'tcx> {
    let ty = fx.clif_type(layout.ty).unwrap();

    let int_ty = match ty {
        types::F32 => types::I32,
        types::F64 => types::I64,
        ty => ty,
    };

    let val = fx.bcx.ins().bint(int_ty, val);
    let mut res = fx.bcx.ins().ineg(val);

    if ty.is_float() {
        res = fx.bcx.ins().bitcast(ty, res);
    }

    CValue::by_val(res, layout)
}
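
// The comparison macro below emits a single vector `icmp` when
// `clif_vector_type` finds a native vector type; otherwise (and for the
// unsigned/signed/float triple form) it falls back to a per-lane comparison.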
    ($fx:expr, $cc:ident|$cc_f:ident($x:ident, $y:ident) -> $ret:ident) => {
        let vector_ty = clif_vector_type($fx.tcx, $x.layout());

        if let Some(vector_ty) = vector_ty {
            let x = $x.load_scalar($fx);
            let y = $y.load_scalar($fx);
            let val = $fx.bcx.ins().icmp(IntCC::$cc, x, y);

            // HACK This depends on the fact that icmp for vectors represents bools as 0 and !0, not 0 and 1.
            let val = $fx.bcx.ins().raw_bitcast(vector_ty, val);

            $ret.write_cvalue($fx, CValue::by_val(val, $ret.layout()));
        } else {
            simd_pair_for_each_lane(
                $fx,
                $x,
                $y,
                $ret,
                |fx, lane_layout, res_lane_layout, x_lane, y_lane| {
                    let res_lane = match lane_layout.ty.kind() {
                        ty::Uint(_) | ty::Int(_) => fx.bcx.ins().icmp(IntCC::$cc, x_lane, y_lane),
                        ty::Float(_) => fx.bcx.ins().fcmp(FloatCC::$cc_f, x_lane, y_lane),
                        _ => unreachable!("{:?}", lane_layout.ty),
                    };
                    bool_to_zero_or_max_uint(fx, res_lane_layout, res_lane)
                },
            );
        }
    },
    ($fx:expr, $cc_u:ident|$cc_s:ident|$cc_f:ident($x:ident, $y:ident) -> $ret:ident) => {
        // FIXME use vector icmp when possible
        simd_pair_for_each_lane(
            $fx,
            $x,
            $y,
            $ret,
            |fx, lane_layout, res_lane_layout, x_lane, y_lane| {
                let res_lane = match lane_layout.ty.kind() {
                    ty::Uint(_) => fx.bcx.ins().icmp(IntCC::$cc_u, x_lane, y_lane),
                    ty::Int(_) => fx.bcx.ins().icmp(IntCC::$cc_s, x_lane, y_lane),
                    ty::Float(_) => fx.bcx.ins().fcmp(FloatCC::$cc_f, x_lane, y_lane),
                    _ => unreachable!("{:?}", lane_layout.ty),
                };
                bool_to_zero_or_max_uint(fx, res_lane_layout, res_lane)
            },
        );
    },
}
macro simd_int_binop {
    ($fx:expr, $op:ident($x:ident, $y:ident) -> $ret:ident) => {
        simd_int_binop!($fx, $op|$op($x, $y) -> $ret);
    },
    ($fx:expr, $op_u:ident|$op_s:ident($x:ident, $y:ident) -> $ret:ident) => {
        simd_pair_for_each_lane(
            $fx,
            $x,
            $y,
            $ret,
            |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
                let res_lane = match lane_layout.ty.kind() {
                    ty::Uint(_) => fx.bcx.ins().$op_u(x_lane, y_lane),
                    ty::Int(_) => fx.bcx.ins().$op_s(x_lane, y_lane),
                    _ => unreachable!("{:?}", lane_layout.ty),
                };
                CValue::by_val(res_lane, ret_lane_layout)
            },
        );
    },
}
macro simd_int_flt_binop {
    ($fx:expr, $op:ident|$op_f:ident($x:ident, $y:ident) -> $ret:ident) => {
        simd_int_flt_binop!($fx, $op|$op|$op_f($x, $y) -> $ret);
    },
    ($fx:expr, $op_u:ident|$op_s:ident|$op_f:ident($x:ident, $y:ident) -> $ret:ident) => {
        simd_pair_for_each_lane(
            $fx,
            $x,
            $y,
            $ret,
            |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
                let res_lane = match lane_layout.ty.kind() {
                    ty::Uint(_) => fx.bcx.ins().$op_u(x_lane, y_lane),
                    ty::Int(_) => fx.bcx.ins().$op_s(x_lane, y_lane),
                    ty::Float(_) => fx.bcx.ins().$op_f(x_lane, y_lane),
                    _ => unreachable!("{:?}", lane_layout.ty),
                };
                CValue::by_val(res_lane, ret_lane_layout)
            },
        );
    },
}
macro simd_flt_binop($fx:expr, $op:ident($x:ident, $y:ident) -> $ret:ident) {
    simd_pair_for_each_lane(
        $fx,
        $x,
        $y,
        $ret,
        |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
            let res_lane = match lane_layout.ty.kind() {
                ty::Float(_) => fx.bcx.ins().$op(x_lane, y_lane),
                _ => unreachable!("{:?}", lane_layout.ty),
            };
            CValue::by_val(res_lane, ret_lane_layout)
        },
    );
}
pub(crate) fn codegen_intrinsic_call<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    instance: Instance<'tcx>,
    args: &[mir::Operand<'tcx>],
    destination: Option<(CPlace<'tcx>, BasicBlock)>,
    span: Span,
) {
    let def_id = instance.def_id();
    let substs = instance.substs;

    let intrinsic = fx.tcx.item_name(def_id);

    let ret = match destination {
        Some((place, _)) => place,
        None => {
            // Insert non-returning intrinsics here
            match intrinsic {
                sym::abort => {
                    trap_abort(fx, "Called intrinsic::abort.");
                }
                sym::transmute => {
                    crate::base::codegen_panic(fx, "Transmuting to uninhabited type.", span);
                }
                _ => unimplemented!("unsupported intrinsic {}", intrinsic),
            }
            return;
        }
    };
    if intrinsic.as_str().starts_with("simd_") {
        self::simd::codegen_simd_intrinsic_call(fx, instance, args, ret, span);
        let ret_block = fx.get_block(destination.expect("SIMD intrinsics don't diverge").1);
        fx.bcx.ins().jump(ret_block, &[]);
        return;
    }

    let usize_layout = fx.layout_of(fx.tcx.types.usize);
    call_intrinsic_match! {
        fx, intrinsic, substs, ret, destination, args,
        expf32(flt) -> f32 => expf,
        expf64(flt) -> f64 => exp,
        exp2f32(flt) -> f32 => exp2f,
        exp2f64(flt) -> f64 => exp2,
        sqrtf32(flt) -> f32 => sqrtf,
        sqrtf64(flt) -> f64 => sqrt,
        powif32(a, x) -> f32 => __powisf2, // compiler-builtins
        powif64(a, x) -> f64 => __powidf2, // compiler-builtins
        powf32(a, x) -> f32 => powf,
        powf64(a, x) -> f64 => pow,
        logf32(flt) -> f32 => logf,
        logf64(flt) -> f64 => log,
        log2f32(flt) -> f32 => log2f,
        log2f64(flt) -> f64 => log2,
        log10f32(flt) -> f32 => log10f,
        log10f64(flt) -> f64 => log10,
        fabsf32(flt) -> f32 => fabsf,
        fabsf64(flt) -> f64 => fabs,
        fmaf32(x, y, z) -> f32 => fmaf,
        fmaf64(x, y, z) -> f64 => fma,
        copysignf32(x, y) -> f32 => copysignf,
        copysignf64(x, y) -> f64 => copysign,

        // FIXME use clif insts
        floorf32(flt) -> f32 => floorf,
        floorf64(flt) -> f64 => floor,
        ceilf32(flt) -> f32 => ceilf,
        ceilf64(flt) -> f64 => ceil,
        truncf32(flt) -> f32 => truncf,
        truncf64(flt) -> f64 => trunc,
        roundf32(flt) -> f32 => roundf,
        roundf64(flt) -> f64 => round,

        sinf32(flt) -> f32 => sinf,
        sinf64(flt) -> f64 => sin,
        cosf32(flt) -> f32 => cosf,
        cosf64(flt) -> f64 => cos,
    }
    intrinsic_match! {
        fx, intrinsic, substs, args,
        _ => {
            fx.tcx.sess.span_fatal(span, &format!("unsupported intrinsic {}", intrinsic));
        };
        likely | unlikely, (c a) {
            ret.write_cvalue(fx, a);
        };
        breakpoint, () {
            fx.bcx.ins().debugtrap();
        };
        copy | copy_nonoverlapping, <elem_ty> (v src, v dst, v count) {
            let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
            assert_eq!(args.len(), 3);
            let byte_amount = if elem_size != 1 {
                fx.bcx.ins().imul_imm(count, elem_size as i64)
            } else {
                count
            };

            if intrinsic == sym::copy_nonoverlapping {
                // FIXME emit_small_memcpy
                fx.bcx.call_memcpy(fx.module.target_config(), dst, src, byte_amount);
            } else {
                // FIXME emit_small_memmove
                fx.bcx.call_memmove(fx.module.target_config(), dst, src, byte_amount);
            }
        };
        // NOTE: the volatile variants have src and dst swapped
        volatile_copy_memory | volatile_copy_nonoverlapping_memory, <elem_ty> (v dst, v src, v count) {
            let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
            assert_eq!(args.len(), 3);
            let byte_amount = if elem_size != 1 {
                fx.bcx.ins().imul_imm(count, elem_size as i64)
            } else {
                count
            };

            // FIXME make the copy actually volatile when using emit_small_mem{cpy,move}
            if intrinsic == sym::volatile_copy_nonoverlapping_memory {
                // FIXME emit_small_memcpy
                fx.bcx.call_memcpy(fx.module.target_config(), dst, src, byte_amount);
            } else {
                // FIXME emit_small_memmove
                fx.bcx.call_memmove(fx.module.target_config(), dst, src, byte_amount);
            }
        };
        size_of_val, <T> (c ptr) {
            let layout = fx.layout_of(T);
            let size = if layout.is_unsized() {
                let (_ptr, info) = ptr.load_scalar_pair(fx);
                let (size, _align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
                size
            } else {
                fx.bcx.ins().iconst(fx.pointer_type, layout.size.bytes() as i64)
            };
            ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
        };
        min_align_of_val, <T> (c ptr) {
            let layout = fx.layout_of(T);
            let align = if layout.is_unsized() {
                let (_ptr, info) = ptr.load_scalar_pair(fx);
                let (_size, align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
                align
            } else {
                fx.bcx.ins().iconst(fx.pointer_type, layout.align.abi.bytes() as i64)
            };
            ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
        };
        unchecked_add | unchecked_sub | unchecked_div | exact_div | unchecked_rem
        | unchecked_shl | unchecked_shr, (c x, c y) {
            // FIXME trap on overflow
            let bin_op = match intrinsic {
                sym::unchecked_add => BinOp::Add,
                sym::unchecked_sub => BinOp::Sub,
                sym::unchecked_div | sym::exact_div => BinOp::Div,
                sym::unchecked_rem => BinOp::Rem,
                sym::unchecked_shl => BinOp::Shl,
                sym::unchecked_shr => BinOp::Shr,
                _ => unreachable!(),
            };
            let res = crate::num::codegen_int_binop(fx, bin_op, x, y);
            ret.write_cvalue(fx, res);
        };
        add_with_overflow | sub_with_overflow | mul_with_overflow, (c x, c y) {
            assert_eq!(x.layout().ty, y.layout().ty);
            let bin_op = match intrinsic {
                sym::add_with_overflow => BinOp::Add,
                sym::sub_with_overflow => BinOp::Sub,
                sym::mul_with_overflow => BinOp::Mul,
                _ => unreachable!(),
            };

            let res = crate::num::codegen_checked_int_binop(fx, bin_op, x, y);
            ret.write_cvalue(fx, res);
        };
        saturating_add | saturating_sub, <T> (c lhs, c rhs) {
            assert_eq!(lhs.layout().ty, rhs.layout().ty);
            let bin_op = match intrinsic {
                sym::saturating_add => BinOp::Add,
                sym::saturating_sub => BinOp::Sub,
                _ => unreachable!(),
            };

            let signed = type_sign(T);

            let checked_res = crate::num::codegen_checked_int_binop(fx, bin_op, lhs, rhs);

            let (val, has_overflow) = checked_res.load_scalar_pair(fx);
            let clif_ty = fx.clif_type(T).unwrap();

            // `select.i8` is not implemented by Cranelift.
            let has_overflow = fx.bcx.ins().uextend(types::I32, has_overflow);

            let (min, max) = type_min_max_value(&mut fx.bcx, clif_ty, signed);
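
            // On overflow, unsigned saturating_add clamps to `max` and
            // unsigned saturating_sub to `min`. For signed operations the
            // clamp bound depends on the sign of `rhs`: an overflowing
            // `lhs + rhs` saturates towards `max` iff `rhs >= 0`, and an
            // overflowing `lhs - rhs` saturates towards `min` iff `rhs >= 0`.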
            let val = match (intrinsic, signed) {
                (sym::saturating_add, false) => fx.bcx.ins().select(has_overflow, max, val),
                (sym::saturating_sub, false) => fx.bcx.ins().select(has_overflow, min, val),
                (sym::saturating_add, true) => {
                    let rhs = rhs.load_scalar(fx);
                    let rhs_ge_zero = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
                    let sat_val = fx.bcx.ins().select(rhs_ge_zero, max, min);
                    fx.bcx.ins().select(has_overflow, sat_val, val)
                }
                (sym::saturating_sub, true) => {
                    let rhs = rhs.load_scalar(fx);
                    let rhs_ge_zero = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
                    let sat_val = fx.bcx.ins().select(rhs_ge_zero, min, max);
                    fx.bcx.ins().select(has_overflow, sat_val, val)
                }
                _ => unreachable!(),
            };

            let res = CValue::by_val(val, fx.layout_of(T));

            ret.write_cvalue(fx, res);
        };
        rotate_left, <T>(v x, v y) {
            let layout = fx.layout_of(T);
            let y = if fx.bcx.func.dfg.value_type(y) == types::I128 {
                fx.bcx.ins().ireduce(types::I64, y)
            } else {
                y
            };
            let res = fx.bcx.ins().rotl(x, y);
            ret.write_cvalue(fx, CValue::by_val(res, layout));
        };
        rotate_right, <T>(v x, v y) {
            let layout = fx.layout_of(T);
            let y = if fx.bcx.func.dfg.value_type(y) == types::I128 {
                fx.bcx.ins().ireduce(types::I64, y)
            } else {
                y
            };
            let res = fx.bcx.ins().rotr(x, y);
            ret.write_cvalue(fx, CValue::by_val(res, layout));
        };
        // The only difference between offset and arith_offset is regarding UB. Because Cranelift
        // doesn't have UB, both are codegen'ed the same way.
        offset | arith_offset, (c base, v offset) {
            let pointee_ty = base.layout().ty.builtin_deref(true).unwrap().ty;
            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
            let ptr_diff = if pointee_size != 1 {
                fx.bcx.ins().imul_imm(offset, pointee_size as i64)
            } else {
                offset
            };
            let base_val = base.load_scalar(fx);
            let res = fx.bcx.ins().iadd(base_val, ptr_diff);
            ret.write_cvalue(fx, CValue::by_val(res, base.layout()));
        };
        transmute, (c from) {
            ret.write_cvalue_transmute(fx, from);
        };
        write_bytes | volatile_set_memory, (c dst, v val, v count) {
            let pointee_ty = dst.layout().ty.builtin_deref(true).unwrap().ty;
            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
            let count = if pointee_size != 1 {
                fx.bcx.ins().imul_imm(count, pointee_size as i64)
            } else {
                count
            };
            let dst_ptr = dst.load_scalar(fx);
            // FIXME make the memset actually volatile when switching to emit_small_memset
            // FIXME use emit_small_memset
            fx.bcx.call_memset(fx.module.target_config(), dst_ptr, val, count);
        };
        ctlz | ctlz_nonzero, <T> (v arg) {
            // FIXME trap on `ctlz_nonzero` with zero arg.
            let res = if T == fx.tcx.types.u128 || T == fx.tcx.types.i128 {
                // FIXME verify this algorithm is correct
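                // Split the value into two 64-bit halves: if the high half is
                // zero, the result is 64 plus the leading zeros of the low
                // half; otherwise it is the leading zeros of the high half.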
                let (lsb, msb) = fx.bcx.ins().isplit(arg);
                let lsb_lz = fx.bcx.ins().clz(lsb);
                let msb_lz = fx.bcx.ins().clz(msb);
                let msb_is_zero = fx.bcx.ins().icmp_imm(IntCC::Equal, msb, 0);
                let lsb_lz_plus_64 = fx.bcx.ins().iadd_imm(lsb_lz, 64);
                let res = fx.bcx.ins().select(msb_is_zero, lsb_lz_plus_64, msb_lz);
                fx.bcx.ins().uextend(types::I128, res)
            } else {
                fx.bcx.ins().clz(arg)
            };
            let res = CValue::by_val(res, fx.layout_of(T));
            ret.write_cvalue(fx, res);
        };
        cttz | cttz_nonzero, <T> (v arg) {
            // FIXME trap on `cttz_nonzero` with zero arg.
            let res = if T == fx.tcx.types.u128 || T == fx.tcx.types.i128 {
                // FIXME verify this algorithm is correct
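                // Mirror image of `ctlz`: if the low half is zero, the result
                // is 64 plus the trailing zeros of the high half; otherwise it
                // is the trailing zeros of the low half.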
                let (lsb, msb) = fx.bcx.ins().isplit(arg);
                let lsb_tz = fx.bcx.ins().ctz(lsb);
                let msb_tz = fx.bcx.ins().ctz(msb);
                let lsb_is_zero = fx.bcx.ins().icmp_imm(IntCC::Equal, lsb, 0);
                let msb_tz_plus_64 = fx.bcx.ins().iadd_imm(msb_tz, 64);
                let res = fx.bcx.ins().select(lsb_is_zero, msb_tz_plus_64, lsb_tz);
                fx.bcx.ins().uextend(types::I128, res)
            } else {
                fx.bcx.ins().ctz(arg)
            };
            let res = CValue::by_val(res, fx.layout_of(T));
            ret.write_cvalue(fx, res);
        };
        ctpop, <T> (v arg) {
            let res = fx.bcx.ins().popcnt(arg);
            let res = CValue::by_val(res, fx.layout_of(T));
            ret.write_cvalue(fx, res);
        };
        bitreverse, <T> (v arg) {
            let res = fx.bcx.ins().bitrev(arg);
            let res = CValue::by_val(res, fx.layout_of(T));
            ret.write_cvalue(fx, res);
        };
        bswap, <T> (v arg) {
            // FIXME(CraneStation/cranelift#794) add bswap instruction to cranelift
            fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
                match bcx.func.dfg.value_type(v) {
                    types::I8 => v,

                    // https://code.woboq.org/gcc/include/bits/byteswap.h.html
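                    // Each arm shifts every byte into its mirrored position,
                    // masks it out, and ORs the pieces back together; the
                    // 128-bit case byteswaps each half and swaps the halves.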
                    types::I16 => {
                        let tmp1 = bcx.ins().ishl_imm(v, 8);
                        let n1 = bcx.ins().band_imm(tmp1, 0xFF00);

                        let tmp2 = bcx.ins().ushr_imm(v, 8);
                        let n2 = bcx.ins().band_imm(tmp2, 0x00FF);

                        bcx.ins().bor(n1, n2)
                    }
                    types::I32 => {
                        let tmp1 = bcx.ins().ishl_imm(v, 24);
                        let n1 = bcx.ins().band_imm(tmp1, 0xFF00_0000);

                        let tmp2 = bcx.ins().ishl_imm(v, 8);
                        let n2 = bcx.ins().band_imm(tmp2, 0x00FF_0000);

                        let tmp3 = bcx.ins().ushr_imm(v, 8);
                        let n3 = bcx.ins().band_imm(tmp3, 0x0000_FF00);

                        let tmp4 = bcx.ins().ushr_imm(v, 24);
                        let n4 = bcx.ins().band_imm(tmp4, 0x0000_00FF);

                        let or_tmp1 = bcx.ins().bor(n1, n2);
                        let or_tmp2 = bcx.ins().bor(n3, n4);
                        bcx.ins().bor(or_tmp1, or_tmp2)
                    }
                    types::I64 => {
                        let tmp1 = bcx.ins().ishl_imm(v, 56);
                        let n1 = bcx.ins().band_imm(tmp1, 0xFF00_0000_0000_0000u64 as i64);

                        let tmp2 = bcx.ins().ishl_imm(v, 40);
                        let n2 = bcx.ins().band_imm(tmp2, 0x00FF_0000_0000_0000u64 as i64);

                        let tmp3 = bcx.ins().ishl_imm(v, 24);
                        let n3 = bcx.ins().band_imm(tmp3, 0x0000_FF00_0000_0000u64 as i64);

                        let tmp4 = bcx.ins().ishl_imm(v, 8);
                        let n4 = bcx.ins().band_imm(tmp4, 0x0000_00FF_0000_0000u64 as i64);

                        let tmp5 = bcx.ins().ushr_imm(v, 8);
                        let n5 = bcx.ins().band_imm(tmp5, 0x0000_0000_FF00_0000u64 as i64);

                        let tmp6 = bcx.ins().ushr_imm(v, 24);
                        let n6 = bcx.ins().band_imm(tmp6, 0x0000_0000_00FF_0000u64 as i64);

                        let tmp7 = bcx.ins().ushr_imm(v, 40);
                        let n7 = bcx.ins().band_imm(tmp7, 0x0000_0000_0000_FF00u64 as i64);

                        let tmp8 = bcx.ins().ushr_imm(v, 56);
                        let n8 = bcx.ins().band_imm(tmp8, 0x0000_0000_0000_00FFu64 as i64);

                        let or_tmp1 = bcx.ins().bor(n1, n2);
                        let or_tmp2 = bcx.ins().bor(n3, n4);
                        let or_tmp3 = bcx.ins().bor(n5, n6);
                        let or_tmp4 = bcx.ins().bor(n7, n8);

                        let or_tmp5 = bcx.ins().bor(or_tmp1, or_tmp2);
                        let or_tmp6 = bcx.ins().bor(or_tmp3, or_tmp4);
                        bcx.ins().bor(or_tmp5, or_tmp6)
                    }
                    types::I128 => {
                        let (lo, hi) = bcx.ins().isplit(v);
                        let lo = swap(bcx, lo);
                        let hi = swap(bcx, hi);
                        bcx.ins().iconcat(hi, lo)
                    }
                    ty => unreachable!("bswap {}", ty),
                }
            }
            let res = CValue::by_val(swap(&mut fx.bcx, arg), fx.layout_of(T));
            ret.write_cvalue(fx, res);
        };
        assert_inhabited | assert_zero_valid | assert_uninit_valid, <T> () {
            let layout = fx.layout_of(T);
            if layout.abi.is_uninhabited() {
                with_no_trimmed_paths(|| crate::base::codegen_panic(
                    fx, &format!("attempted to instantiate uninhabited type `{}`", T), span,
                ));
                return;
            }
            if intrinsic == sym::assert_zero_valid && !layout.might_permit_raw_init(fx, /*zero:*/ true).unwrap() {
                with_no_trimmed_paths(|| crate::base::codegen_panic(
                    fx, &format!("attempted to zero-initialize type `{}`, which is invalid", T), span,
                ));
                return;
            }
            if intrinsic == sym::assert_uninit_valid && !layout.might_permit_raw_init(fx, /*zero:*/ false).unwrap() {
                with_no_trimmed_paths(|| crate::base::codegen_panic(
                    fx, &format!("attempted to leave type `{}` uninitialized, which is invalid", T), span,
                ));
                return;
            }
        };
        volatile_load | unaligned_volatile_load, (c ptr) {
            // Cranelift treats loads as volatile by default
            // FIXME correctly handle unaligned_volatile_load
            let inner_layout =
                fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
            let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
            ret.write_cvalue(fx, val);
        };
        volatile_store | unaligned_volatile_store, (v ptr, c val) {
            // Cranelift treats stores as volatile by default
            // FIXME correctly handle unaligned_volatile_store
            let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
            dest.write_cvalue(fx, val);
        };
        pref_align_of | min_align_of | needs_drop | type_id | type_name | variant_count, () {
            let const_val =
                fx.tcx.const_eval_instance(ParamEnv::reveal_all(), instance, None).unwrap();
            let val = crate::constant::codegen_const_value(fx, const_val, ret.layout().ty);
            ret.write_cvalue(fx, val);
        };
        ptr_offset_from, <T> (v ptr, v base) {
            let isize_layout = fx.layout_of(fx.tcx.types.isize);

            let pointee_size: u64 = fx.layout_of(T).size.bytes();
            let diff = fx.bcx.ins().isub(ptr, base);
            // FIXME this can be an exact division.
            let val = CValue::by_val(fx.bcx.ins().sdiv_imm(diff, pointee_size as i64), isize_layout);
            ret.write_cvalue(fx, val);
        };
        ptr_guaranteed_eq, (c a, c b) {
            let val = crate::num::codegen_ptr_binop(fx, BinOp::Eq, a, b);
            ret.write_cvalue(fx, val);
        };

        ptr_guaranteed_ne, (c a, c b) {
            let val = crate::num::codegen_ptr_binop(fx, BinOp::Ne, a, b);
            ret.write_cvalue(fx, val);
        };

        caller_location, () {
            let caller_location = fx.get_caller_location(span);
            ret.write_cvalue(fx, caller_location);
        };
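
        // The `atomic_*` families below all lower to Cranelift's atomic
        // instructions, which at the time of writing only provide sequentially
        // consistent semantics; the ordering suffix on the intrinsic name is
        // effectively ignored, since the arms match on the name prefix only.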
        _ if intrinsic.as_str().starts_with("atomic_fence"), () {
            fx.bcx.ins().fence();
        };
        _ if intrinsic.as_str().starts_with("atomic_singlethreadfence"), () {
            // FIXME use a compiler fence once Cranelift supports it
            fx.bcx.ins().fence();
        };
        _ if intrinsic.as_str().starts_with("atomic_load"), <T> (v ptr) {
            validate_atomic_type!(fx, intrinsic, span, T);
            let ty = fx.clif_type(T).unwrap();

            let val = fx.bcx.ins().atomic_load(ty, MemFlags::trusted(), ptr);

            let val = CValue::by_val(val, fx.layout_of(T));
            ret.write_cvalue(fx, val);
        };
        _ if intrinsic.as_str().starts_with("atomic_store"), (v ptr, c val) {
            validate_atomic_type!(fx, intrinsic, span, val.layout().ty);

            let val = val.load_scalar(fx);

            fx.bcx.ins().atomic_store(MemFlags::trusted(), val, ptr);
        };
        _ if intrinsic.as_str().starts_with("atomic_xchg"), (v ptr, c new) {
            let layout = new.layout();
            validate_atomic_type!(fx, intrinsic, span, layout.ty);
            let ty = fx.clif_type(layout.ty).unwrap();

            let new = new.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xchg, ptr, new);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        };
        _ if intrinsic.as_str().starts_with("atomic_cxchg"), (v ptr, c test_old, c new) { // both atomic_cxchg_* and atomic_cxchgweak_*
            let layout = new.layout();
            validate_atomic_type!(fx, intrinsic, span, layout.ty);

            let test_old = test_old.load_scalar(fx);
            let new = new.load_scalar(fx);

            let old = fx.bcx.ins().atomic_cas(MemFlags::trusted(), ptr, test_old, new);
            let is_eq = fx.bcx.ins().icmp(IntCC::Equal, old, test_old);

            let ret_val = CValue::by_val_pair(old, fx.bcx.ins().bint(types::I8, is_eq), ret.layout());
            ret.write_cvalue(fx, ret_val)
        };
        _ if intrinsic.as_str().starts_with("atomic_xadd"), (v ptr, c amount) {
            let layout = amount.layout();
            validate_atomic_type!(fx, intrinsic, span, layout.ty);
            let ty = fx.clif_type(layout.ty).unwrap();

            let amount = amount.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Add, ptr, amount);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        };
        _ if intrinsic.as_str().starts_with("atomic_xsub"), (v ptr, c amount) {
            let layout = amount.layout();
            validate_atomic_type!(fx, intrinsic, span, layout.ty);
            let ty = fx.clif_type(layout.ty).unwrap();

            let amount = amount.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Sub, ptr, amount);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        };
        _ if intrinsic.as_str().starts_with("atomic_and"), (v ptr, c src) {
            let layout = src.layout();
            validate_atomic_type!(fx, intrinsic, span, layout.ty);
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::And, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        };
        _ if intrinsic.as_str().starts_with("atomic_or"), (v ptr, c src) {
            let layout = src.layout();
            validate_atomic_type!(fx, intrinsic, span, layout.ty);
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Or, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        };
        _ if intrinsic.as_str().starts_with("atomic_xor"), (v ptr, c src) {
            let layout = src.layout();
            validate_atomic_type!(fx, intrinsic, span, layout.ty);
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xor, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        };

        // FIXME https://github.com/bytecodealliance/wasmtime/issues/2647
        _ if intrinsic.as_str().starts_with("atomic_nand"), (v ptr, c src) {
            let layout = src.layout();
            validate_atomic_type!(fx, intrinsic, span, layout.ty);
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Nand, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        };
        _ if intrinsic.as_str().starts_with("atomic_max"), (v ptr, c src) {
            let layout = src.layout();
            validate_atomic_type!(fx, intrinsic, span, layout.ty);
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smax, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        };
        _ if intrinsic.as_str().starts_with("atomic_umax"), (v ptr, c src) {
            let layout = src.layout();
            validate_atomic_type!(fx, intrinsic, span, layout.ty);
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umax, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        };
        _ if intrinsic.as_str().starts_with("atomic_min"), (v ptr, c src) {
            let layout = src.layout();
            validate_atomic_type!(fx, intrinsic, span, layout.ty);
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smin, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        };
        _ if intrinsic.as_str().starts_with("atomic_umin"), (v ptr, c src) {
            let layout = src.layout();
            validate_atomic_type!(fx, intrinsic, span, layout.ty);
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umin, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        };
        minnumf32, (v a, v b) {
            let val = fx.bcx.ins().fmin(a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
            ret.write_cvalue(fx, val);
        };
        minnumf64, (v a, v b) {
            let val = fx.bcx.ins().fmin(a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
            ret.write_cvalue(fx, val);
        };
        maxnumf32, (v a, v b) {
            let val = fx.bcx.ins().fmax(a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
            ret.write_cvalue(fx, val);
        };
        maxnumf64, (v a, v b) {
            let val = fx.bcx.ins().fmax(a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
            ret.write_cvalue(fx, val);
        };
        kw.Try, (v f, v data, v _catch_fn) {
            // FIXME once unwinding is supported, change this to actually catch panics
            let f_sig = fx.bcx.func.import_signature(Signature {
                call_conv: CallConv::triple_default(fx.triple()),
                params: vec![AbiParam::new(fx.bcx.func.dfg.value_type(data))],
                returns: vec![],
            });

            fx.bcx.ins().call_indirect(f_sig, f, &[data]);

            let layout = ret.layout();
            let ret_val = CValue::const_val(fx, layout, ty::ScalarInt::null(layout.size));
            ret.write_cvalue(fx, ret_val);
        };
        fadd_fast | fsub_fast | fmul_fast | fdiv_fast | frem_fast, (c x, c y) {
            let res = crate::num::codegen_float_binop(fx, match intrinsic {
                sym::fadd_fast => BinOp::Add,
                sym::fsub_fast => BinOp::Sub,
                sym::fmul_fast => BinOp::Mul,
                sym::fdiv_fast => BinOp::Div,
                sym::frem_fast => BinOp::Rem,
                _ => unreachable!(),
            }, x, y);
            ret.write_cvalue(fx, res);
        };
        float_to_int_unchecked, (v f) {
            let res = crate::cast::clif_int_or_float_cast(
                fx,
                f,
                false,
                fx.clif_type(ret.layout().ty).unwrap(),
                type_sign(ret.layout().ty),
            );
            ret.write_cvalue(fx, CValue::by_val(res, ret.layout()));
        };
    }
    if let Some((_, dest)) = destination {
        let ret_block = fx.get_block(dest);
        fx.bcx.ins().jump(ret_block, &[]);
    } else {
        trap_unreachable(fx, "[corruption] Diverging intrinsic returned.");
    }
}