//! Codegen of intrinsics. This includes `extern "rust-intrinsic"`, `extern "platform-intrinsic"`
//! and LLVM intrinsics that have symbol names starting with `llvm.`.

pub(crate) use cpuid::codegen_cpuid_call;
pub(crate) use llvm::codegen_llvm_intrinsic_call;

use rustc_middle::ty::print::with_no_trimmed_paths;
use rustc_span::symbol::{kw, sym};

use crate::prelude::*;
use cranelift_codegen::ir::AtomicRmwOp;

    (o $fx:expr, $arg:ident) => {
    (c $fx:expr, $arg:ident) => {
        codegen_operand($fx, $arg)
    (v $fx:expr, $arg:ident) => {
        codegen_operand($fx, $arg).load_scalar($fx)
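
// Recursively binds one generic argument per identifier: for example,
// `intrinsic_substs!(substs, 0, T, U)` expands to
// `let T = substs.type_at(0);` followed by `let U = substs.type_at(1);`.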
macro intrinsic_substs {
    ($substs:expr, $index:expr,) => {},
    ($substs:expr, $index:expr, $first:ident $(,$rest:ident)*) => {
        let $first = $substs.type_at($index);
        intrinsic_substs!($substs, $index+1, $($rest),*);
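
// Dispatches on the intrinsic symbol. Each rule lists the intrinsic name(s),
// optional generic parameters to bind in `<...>`, and arguments with a mode
// prefix: `o` passes the MIR operand through unchanged, `c` codegens it to a
// `CValue`, and `v` additionally loads it as a scalar `Value`, e.g.
// `bitreverse, <T> (v arg) { ... }`.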
macro intrinsic_match {
    ($fx:expr, $intrinsic:expr, $substs:expr, $args:expr,
        $($($name:tt).*)|+ $(if $cond:expr)?, $(<$($subst:ident),*>)? ($($a:ident $arg:ident),*) $content:block;
        let _ = $substs; // Silence warning when substs is unused.
            $(intrinsic_pat!($($name).*))|* $(if $cond)? => {
                #[allow(unused_parens, non_snake_case)]
                    intrinsic_substs!($substs, 0, $($subst),*);
                    if let [$($arg),*] = $args {
                        $(intrinsic_arg!($a $fx, $arg),)*
                        #[warn(unused_parens, non_snake_case)]
                        bug!("wrong number of args for intrinsic {:?}", $intrinsic);
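
// Lowers the listed intrinsics to calls to external functions (libm and
// compiler-builtins style libcalls) via `easy_call`, then jumps to the return
// block.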
macro call_intrinsic_match {
    ($fx:expr, $intrinsic:expr, $substs:expr, $ret:expr, $destination:expr, $args:expr, $(
        $name:ident($($arg:ident),*) -> $ty:ident => $func:ident,
                assert!($substs.is_noop());
                if let [$(ref $arg),*] = *$args {
                        $(codegen_operand($fx, $arg),)*
                    let res = $fx.easy_call(stringify!($func), &[$($arg),*], $fx.tcx.types.$ty);
                    $ret.write_cvalue($fx, res);

                    if let Some((_, dest)) = $destination {
                        let ret_block = $fx.get_block(dest);
                        $fx.bcx.ins().jump(ret_block, &[]);

                    bug!("wrong number of args for intrinsic {:?}", $intrinsic);

macro validate_atomic_type($fx:ident, $intrinsic:ident, $span:ident, $ty:expr) {
        ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
            $fx.tcx.sess.span_err(
                    "`{}` intrinsic: expected basic integer or raw pointer type, found `{:?}`",
            // Prevent verifier error
            crate::trap::trap_unreachable($fx, "compilation should not have succeeded");

macro validate_simd_type($fx:ident, $intrinsic:ident, $span:ident, $ty:expr) {
        $fx.tcx.sess.span_err($span, &format!("invalid monomorphization of `{}` intrinsic: expected SIMD input type, found non-SIMD `{}`", $intrinsic, $ty));
        // Prevent verifier error
        crate::trap::trap_unreachable($fx, "compilation should not have succeeded");
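
// Returns the Cranelift vector type corresponding to a SIMD layout, if the
// whole vector can be held in a single Cranelift value.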
pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> Option<Type> {
    let (element, count) = match &layout.abi {
        Abi::Vector { element, count } => (element.clone(), *count),

    match scalar_to_clif_type(tcx, element).by(u16::try_from(count).unwrap()) {
        // Cranelift currently only implements icmp for 128-bit vectors.
        Some(vector_ty) if vector_ty.bits() == 128 => Some(vector_ty),
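
// Applies `f` to every lane of `val`, writing each result lane into `ret`.
// The input and output vectors must have the same lane count.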
fn simd_for_each_lane<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
        &mut FunctionCx<'_, '_, 'tcx>,
    let layout = val.layout();

    let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
    let lane_layout = fx.layout_of(lane_ty);
    let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
    let ret_lane_layout = fx.layout_of(ret_lane_ty);
    assert_eq!(lane_count, ret_lane_count);

    for lane_idx in 0..lane_count {
        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);

        let res_lane = f(fx, lane_layout, ret_lane_layout, lane);

        ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
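
// Like `simd_for_each_lane`, but zips the lanes of two vectors of identical
// layout through the binary operation `f`.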
fn simd_pair_for_each_lane<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
        &mut FunctionCx<'_, '_, 'tcx>,
    assert_eq!(x.layout(), y.layout());
    let layout = x.layout();

    let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
    let lane_layout = fx.layout_of(lane_ty);
    let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
    let ret_lane_layout = fx.layout_of(ret_lane_ty);
    assert_eq!(lane_count, ret_lane_count);

    for lane_idx in 0..lane_count {
        let x_lane = x.value_lane(fx, lane_idx).load_scalar(fx);
        let y_lane = y.value_lane(fx, lane_idx).load_scalar(fx);

        let res_lane = f(fx, lane_layout, ret_lane_layout, x_lane, y_lane);

        ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
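
// Folds all lanes of `val` into a single scalar with `f`, seeded by `acc` when
// provided and by lane 0 otherwise.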
fn simd_reduce<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    f: impl Fn(&mut FunctionCx<'_, '_, 'tcx>, TyAndLayout<'tcx>, Value, Value) -> Value,
    let (lane_count, lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
    let lane_layout = fx.layout_of(lane_ty);
    assert_eq!(lane_layout, ret.layout());

    let (mut res_val, start_lane) =
        if let Some(acc) = acc { (acc, 0) } else { (val.value_lane(fx, 0).load_scalar(fx), 1) };
    for lane_idx in start_lane..lane_count {
        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
        res_val = f(fx, lane_layout, res_val, lane);
    let res = CValue::by_val(res_val, lane_layout);
    ret.write_cvalue(fx, res);

// FIXME move all uses to `simd_reduce`
fn simd_reduce_bool<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    f: impl Fn(&mut FunctionCx<'_, '_, 'tcx>, Value, Value) -> Value,
    let (lane_count, _lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
    assert!(ret.layout().ty.is_bool());

    let res_val = val.value_lane(fx, 0).load_scalar(fx);
    let mut res_val = fx.bcx.ins().band_imm(res_val, 1); // mask to boolean
    for lane_idx in 1..lane_count {
        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
        let lane = fx.bcx.ins().band_imm(lane, 1); // mask to boolean
        res_val = f(fx, res_val, lane);
    let res_val = if fx.bcx.func.dfg.value_type(res_val) != types::I8 {
        fx.bcx.ins().ireduce(types::I8, res_val)
    let res = CValue::by_val(res_val, ret.layout());
    ret.write_cvalue(fx, res);
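
// Expands a Cranelift bool into the 0 / !0 integer pattern used for SIMD
// comparison results: `bint` yields 0 or 1, and negating 1 gives all ones.
// Float lanes get the same bit pattern via a bitcast.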
fn bool_to_zero_or_max_uint<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    layout: TyAndLayout<'tcx>,
    let ty = fx.clif_type(layout.ty).unwrap();

    let int_ty = match ty {
        types::F32 => types::I32,
        types::F64 => types::I64,

    let val = fx.bcx.ins().bint(int_ty, val);
    let mut res = fx.bcx.ins().ineg(val);

        res = fx.bcx.ins().bitcast(ty, res);

    CValue::by_val(res, layout)
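
// Lane-wise comparison: uses a single vector icmp/fcmp when the whole vector
// fits a native Cranelift vector type, otherwise falls back to per-lane
// compares via `simd_pair_for_each_lane`.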
    ($fx:expr, $cc:ident|$cc_f:ident($x:ident, $y:ident) -> $ret:ident) => {
        let vector_ty = clif_vector_type($fx.tcx, $x.layout());

        if let Some(vector_ty) = vector_ty {
            let x = $x.load_scalar($fx);
            let y = $y.load_scalar($fx);
            let val = if vector_ty.lane_type().is_float() {
                $fx.bcx.ins().fcmp(FloatCC::$cc_f, x, y)
                $fx.bcx.ins().icmp(IntCC::$cc, x, y)

            // HACK This depends on the fact that icmp for vectors represents bools as 0 and !0, not 0 and 1.
            let val = $fx.bcx.ins().raw_bitcast(vector_ty, val);

            $ret.write_cvalue($fx, CValue::by_val(val, $ret.layout()));
            simd_pair_for_each_lane(
                |fx, lane_layout, res_lane_layout, x_lane, y_lane| {
                    let res_lane = match lane_layout.ty.kind() {
                        ty::Uint(_) | ty::Int(_) => fx.bcx.ins().icmp(IntCC::$cc, x_lane, y_lane),
                        ty::Float(_) => fx.bcx.ins().fcmp(FloatCC::$cc_f, x_lane, y_lane),
                        _ => unreachable!("{:?}", lane_layout.ty),
                    bool_to_zero_or_max_uint(fx, res_lane_layout, res_lane)
    ($fx:expr, $cc_u:ident|$cc_s:ident|$cc_f:ident($x:ident, $y:ident) -> $ret:ident) => {
        // FIXME use vector icmp when possible
        simd_pair_for_each_lane(
            |fx, lane_layout, res_lane_layout, x_lane, y_lane| {
                let res_lane = match lane_layout.ty.kind() {
                    ty::Uint(_) => fx.bcx.ins().icmp(IntCC::$cc_u, x_lane, y_lane),
                    ty::Int(_) => fx.bcx.ins().icmp(IntCC::$cc_s, x_lane, y_lane),
                    ty::Float(_) => fx.bcx.ins().fcmp(FloatCC::$cc_f, x_lane, y_lane),
                    _ => unreachable!("{:?}", lane_layout.ty),
                bool_to_zero_or_max_uint(fx, res_lane_layout, res_lane)
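
// Lane-wise integer binop; the two-instruction form picks `$op_u` for unsigned
// and `$op_s` for signed lane types.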
macro simd_int_binop {
    ($fx:expr, $op:ident($x:ident, $y:ident) -> $ret:ident) => {
        simd_int_binop!($fx, $op|$op($x, $y) -> $ret);
    ($fx:expr, $op_u:ident|$op_s:ident($x:ident, $y:ident) -> $ret:ident) => {
        simd_pair_for_each_lane(
            |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
                let res_lane = match lane_layout.ty.kind() {
                    ty::Uint(_) => fx.bcx.ins().$op_u(x_lane, y_lane),
                    ty::Int(_) => fx.bcx.ins().$op_s(x_lane, y_lane),
                    _ => unreachable!("{:?}", lane_layout.ty),
                CValue::by_val(res_lane, ret_lane_layout)

macro simd_int_flt_binop {
    ($fx:expr, $op:ident|$op_f:ident($x:ident, $y:ident) -> $ret:ident) => {
        simd_int_flt_binop!($fx, $op|$op|$op_f($x, $y) -> $ret);
    ($fx:expr, $op_u:ident|$op_s:ident|$op_f:ident($x:ident, $y:ident) -> $ret:ident) => {
        simd_pair_for_each_lane(
            |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
                let res_lane = match lane_layout.ty.kind() {
                    ty::Uint(_) => fx.bcx.ins().$op_u(x_lane, y_lane),
                    ty::Int(_) => fx.bcx.ins().$op_s(x_lane, y_lane),
                    ty::Float(_) => fx.bcx.ins().$op_f(x_lane, y_lane),
                    _ => unreachable!("{:?}", lane_layout.ty),
                CValue::by_val(res_lane, ret_lane_layout)

macro simd_flt_binop($fx:expr, $op:ident($x:ident, $y:ident) -> $ret:ident) {
    simd_pair_for_each_lane(
        |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
            let res_lane = match lane_layout.ty.kind() {
                ty::Float(_) => fx.bcx.ins().$op(x_lane, y_lane),
                _ => unreachable!("{:?}", lane_layout.ty),
            CValue::by_val(res_lane, ret_lane_layout)
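
// Entry point for codegen of intrinsic calls. `destination` is `None` for
// intrinsics that diverge.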
pub(crate) fn codegen_intrinsic_call<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    instance: Instance<'tcx>,
    args: &[mir::Operand<'tcx>],
    destination: Option<(CPlace<'tcx>, BasicBlock)>,
    let def_id = instance.def_id();
    let substs = instance.substs;

    let intrinsic = fx.tcx.item_name(def_id);

    let ret = match destination {
        Some((place, _)) => place,
            // Insert non-returning intrinsics here
                    trap_abort(fx, "Called intrinsic::abort.");
                    crate::base::codegen_panic(fx, "Transmuting to uninhabited type.", span);
                _ => unimplemented!("unsupported intrinsic {}", intrinsic),

    if intrinsic.as_str().starts_with("simd_") {
        self::simd::codegen_simd_intrinsic_call(fx, instance, args, ret, span);
        let ret_block = fx.get_block(destination.expect("SIMD intrinsics don't diverge").1);
        fx.bcx.ins().jump(ret_block, &[]);

    let usize_layout = fx.layout_of(fx.tcx.types.usize);
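
    // Intrinsics lowered to plain libcalls (libm and compiler-builtins).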
    call_intrinsic_match! {
        fx, intrinsic, substs, ret, destination, args,
        expf32(flt) -> f32 => expf,
        expf64(flt) -> f64 => exp,
        exp2f32(flt) -> f32 => exp2f,
        exp2f64(flt) -> f64 => exp2,
        sqrtf32(flt) -> f32 => sqrtf,
        sqrtf64(flt) -> f64 => sqrt,
        powif32(a, x) -> f32 => __powisf2, // compiler-builtins
        powif64(a, x) -> f64 => __powidf2, // compiler-builtins
        powf32(a, x) -> f32 => powf,
        powf64(a, x) -> f64 => pow,
        logf32(flt) -> f32 => logf,
        logf64(flt) -> f64 => log,
        log2f32(flt) -> f32 => log2f,
        log2f64(flt) -> f64 => log2,
        log10f32(flt) -> f32 => log10f,
        log10f64(flt) -> f64 => log10,
        fabsf32(flt) -> f32 => fabsf,
        fabsf64(flt) -> f64 => fabs,
        fmaf32(x, y, z) -> f32 => fmaf,
        fmaf64(x, y, z) -> f64 => fma,
        copysignf32(x, y) -> f32 => copysignf,
        copysignf64(x, y) -> f64 => copysign,

        // FIXME use clif insts
        floorf32(flt) -> f32 => floorf,
        floorf64(flt) -> f64 => floor,
        ceilf32(flt) -> f32 => ceilf,
        ceilf64(flt) -> f64 => ceil,
        truncf32(flt) -> f32 => truncf,
        truncf64(flt) -> f64 => trunc,
        roundf32(flt) -> f32 => roundf,
        roundf64(flt) -> f64 => round,

        sinf32(flt) -> f32 => sinf,
        sinf64(flt) -> f64 => sin,
        cosf32(flt) -> f32 => cosf,
        cosf64(flt) -> f64 => cos,

        fx, intrinsic, substs, args,
            fx.tcx.sess.span_fatal(span, &format!("unsupported intrinsic {}", intrinsic));

        likely | unlikely, (c a) {
            ret.write_cvalue(fx, a);
            fx.bcx.ins().debugtrap();
        copy | copy_nonoverlapping, <elem_ty> (v src, v dst, v count) {
            let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
            assert_eq!(args.len(), 3);
            let byte_amount = if elem_size != 1 {
                fx.bcx.ins().imul_imm(count, elem_size as i64)

            if intrinsic == sym::copy_nonoverlapping {
                // FIXME emit_small_memcpy
                fx.bcx.call_memcpy(fx.module.target_config(), dst, src, byte_amount);
                // FIXME emit_small_memmove
                fx.bcx.call_memmove(fx.module.target_config(), dst, src, byte_amount);
        // NOTE: the volatile variants have src and dst swapped
        volatile_copy_memory | volatile_copy_nonoverlapping_memory, <elem_ty> (v dst, v src, v count) {
            let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
            assert_eq!(args.len(), 3);
            let byte_amount = if elem_size != 1 {
                fx.bcx.ins().imul_imm(count, elem_size as i64)

            // FIXME make the copy actually volatile when using emit_small_mem{cpy,move}
            if intrinsic == sym::volatile_copy_nonoverlapping_memory {
                // FIXME emit_small_memcpy
                fx.bcx.call_memcpy(fx.module.target_config(), dst, src, byte_amount);
                // FIXME emit_small_memmove
                fx.bcx.call_memmove(fx.module.target_config(), dst, src, byte_amount);
        size_of_val, <T> (c ptr) {
            let layout = fx.layout_of(T);
            let size = if layout.is_unsized() {
                let (_ptr, info) = ptr.load_scalar_pair(fx);
                let (size, _align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
                    .iconst(fx.pointer_type, layout.size.bytes() as i64)

            ret.write_cvalue(fx, CValue::by_val(size, usize_layout));

        min_align_of_val, <T> (c ptr) {
            let layout = fx.layout_of(T);
            let align = if layout.is_unsized() {
                let (_ptr, info) = ptr.load_scalar_pair(fx);
                let (_size, align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
                    .iconst(fx.pointer_type, layout.align.abi.bytes() as i64)

            ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
        unchecked_add | unchecked_sub | unchecked_div | exact_div | unchecked_rem
        | unchecked_shl | unchecked_shr, (c x, c y) {
            // FIXME trap on overflow
            let bin_op = match intrinsic {
                sym::unchecked_add => BinOp::Add,
                sym::unchecked_sub => BinOp::Sub,
                sym::unchecked_div | sym::exact_div => BinOp::Div,
                sym::unchecked_rem => BinOp::Rem,
                sym::unchecked_shl => BinOp::Shl,
                sym::unchecked_shr => BinOp::Shr,

            let res = crate::num::codegen_int_binop(fx, bin_op, x, y);
            ret.write_cvalue(fx, res);
        add_with_overflow | sub_with_overflow | mul_with_overflow, (c x, c y) {
            assert_eq!(x.layout().ty, y.layout().ty);
            let bin_op = match intrinsic {
                sym::add_with_overflow => BinOp::Add,
                sym::sub_with_overflow => BinOp::Sub,
                sym::mul_with_overflow => BinOp::Mul,

            let res = crate::num::codegen_checked_int_binop(

            ret.write_cvalue(fx, res);
        saturating_add | saturating_sub, <T> (c lhs, c rhs) {
            assert_eq!(lhs.layout().ty, rhs.layout().ty);
            let bin_op = match intrinsic {
                sym::saturating_add => BinOp::Add,
                sym::saturating_sub => BinOp::Sub,

            let signed = type_sign(T);

            let checked_res = crate::num::codegen_checked_int_binop(

            let (val, has_overflow) = checked_res.load_scalar_pair(fx);
            let clif_ty = fx.clif_type(T).unwrap();

            let (min, max) = type_min_max_value(&mut fx.bcx, clif_ty, signed);
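
            // On overflow, unsigned saturating add clamps to `max` and
            // unsigned saturating sub to `min`; for signed operations the
            // bound is chosen by the sign of `rhs`.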
            let val = match (intrinsic, signed) {
                (sym::saturating_add, false) => fx.bcx.ins().select(has_overflow, max, val),
                (sym::saturating_sub, false) => fx.bcx.ins().select(has_overflow, min, val),
                (sym::saturating_add, true) => {
                    let rhs = rhs.load_scalar(fx);
                    let rhs_ge_zero = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
                    let sat_val = fx.bcx.ins().select(rhs_ge_zero, max, min);
                    fx.bcx.ins().select(has_overflow, sat_val, val)
                (sym::saturating_sub, true) => {
                    let rhs = rhs.load_scalar(fx);
                    let rhs_ge_zero = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
                    let sat_val = fx.bcx.ins().select(rhs_ge_zero, min, max);
                    fx.bcx.ins().select(has_overflow, sat_val, val)

            let res = CValue::by_val(val, fx.layout_of(T));

            ret.write_cvalue(fx, res);
        rotate_left, <T>(v x, v y) {
            let layout = fx.layout_of(T);
            let res = fx.bcx.ins().rotl(x, y);
            ret.write_cvalue(fx, CValue::by_val(res, layout));
        rotate_right, <T>(v x, v y) {
            let layout = fx.layout_of(T);
            let res = fx.bcx.ins().rotr(x, y);
            ret.write_cvalue(fx, CValue::by_val(res, layout));
        // The only difference between offset and arith_offset is regarding UB. Because Cranelift
        // doesn't have UB, both are codegen'ed the same way.
        offset | arith_offset, (c base, v offset) {
            let pointee_ty = base.layout().ty.builtin_deref(true).unwrap().ty;
            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
            let ptr_diff = if pointee_size != 1 {
                fx.bcx.ins().imul_imm(offset, pointee_size as i64)

            let base_val = base.load_scalar(fx);
            let res = fx.bcx.ins().iadd(base_val, ptr_diff);
            ret.write_cvalue(fx, CValue::by_val(res, base.layout()));
        transmute, (c from) {
            ret.write_cvalue_transmute(fx, from);
        write_bytes | volatile_set_memory, (c dst, v val, v count) {
            let pointee_ty = dst.layout().ty.builtin_deref(true).unwrap().ty;
            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
            let count = if pointee_size != 1 {
                fx.bcx.ins().imul_imm(count, pointee_size as i64)

            let dst_ptr = dst.load_scalar(fx);
            // FIXME make the memset actually volatile when switching to emit_small_memset
            // FIXME use emit_small_memset
            fx.bcx.call_memset(fx.module.target_config(), dst_ptr, val, count);
        ctlz | ctlz_nonzero, <T> (v arg) {
            // FIXME trap on `ctlz_nonzero` with zero arg.
            let res = fx.bcx.ins().clz(arg);
            let res = CValue::by_val(res, fx.layout_of(T));
            ret.write_cvalue(fx, res);
        cttz | cttz_nonzero, <T> (v arg) {
            // FIXME trap on `cttz_nonzero` with zero arg.
            let res = fx.bcx.ins().ctz(arg);
            let res = CValue::by_val(res, fx.layout_of(T));
            ret.write_cvalue(fx, res);
            let res = fx.bcx.ins().popcnt(arg);
            let res = CValue::by_val(res, fx.layout_of(T));
            ret.write_cvalue(fx, res);
        bitreverse, <T> (v arg) {
            let res = fx.bcx.ins().bitrev(arg);
            let res = CValue::by_val(res, fx.layout_of(T));
            ret.write_cvalue(fx, res);
        // FIXME(CraneStation/cranelift#794) add bswap instruction to cranelift
        fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
            match bcx.func.dfg.value_type(v) {
                // https://code.woboq.org/gcc/include/bits/byteswap.h.html
                    let tmp1 = bcx.ins().ishl_imm(v, 8);
                    let n1 = bcx.ins().band_imm(tmp1, 0xFF00);

                    let tmp2 = bcx.ins().ushr_imm(v, 8);
                    let n2 = bcx.ins().band_imm(tmp2, 0x00FF);

                    bcx.ins().bor(n1, n2)

                    let tmp1 = bcx.ins().ishl_imm(v, 24);
                    let n1 = bcx.ins().band_imm(tmp1, 0xFF00_0000);

                    let tmp2 = bcx.ins().ishl_imm(v, 8);
                    let n2 = bcx.ins().band_imm(tmp2, 0x00FF_0000);

                    let tmp3 = bcx.ins().ushr_imm(v, 8);
                    let n3 = bcx.ins().band_imm(tmp3, 0x0000_FF00);

                    let tmp4 = bcx.ins().ushr_imm(v, 24);
                    let n4 = bcx.ins().band_imm(tmp4, 0x0000_00FF);

                    let or_tmp1 = bcx.ins().bor(n1, n2);
                    let or_tmp2 = bcx.ins().bor(n3, n4);
                    bcx.ins().bor(or_tmp1, or_tmp2)

                    let tmp1 = bcx.ins().ishl_imm(v, 56);
                    let n1 = bcx.ins().band_imm(tmp1, 0xFF00_0000_0000_0000u64 as i64);

                    let tmp2 = bcx.ins().ishl_imm(v, 40);
                    let n2 = bcx.ins().band_imm(tmp2, 0x00FF_0000_0000_0000u64 as i64);

                    let tmp3 = bcx.ins().ishl_imm(v, 24);
                    let n3 = bcx.ins().band_imm(tmp3, 0x0000_FF00_0000_0000u64 as i64);

                    let tmp4 = bcx.ins().ishl_imm(v, 8);
                    let n4 = bcx.ins().band_imm(tmp4, 0x0000_00FF_0000_0000u64 as i64);

                    let tmp5 = bcx.ins().ushr_imm(v, 8);
                    let n5 = bcx.ins().band_imm(tmp5, 0x0000_0000_FF00_0000u64 as i64);

                    let tmp6 = bcx.ins().ushr_imm(v, 24);
                    let n6 = bcx.ins().band_imm(tmp6, 0x0000_0000_00FF_0000u64 as i64);

                    let tmp7 = bcx.ins().ushr_imm(v, 40);
                    let n7 = bcx.ins().band_imm(tmp7, 0x0000_0000_0000_FF00u64 as i64);

                    let tmp8 = bcx.ins().ushr_imm(v, 56);
                    let n8 = bcx.ins().band_imm(tmp8, 0x0000_0000_0000_00FFu64 as i64);

                    let or_tmp1 = bcx.ins().bor(n1, n2);
                    let or_tmp2 = bcx.ins().bor(n3, n4);
                    let or_tmp3 = bcx.ins().bor(n5, n6);
                    let or_tmp4 = bcx.ins().bor(n7, n8);

                    let or_tmp5 = bcx.ins().bor(or_tmp1, or_tmp2);
                    let or_tmp6 = bcx.ins().bor(or_tmp3, or_tmp4);
                    bcx.ins().bor(or_tmp5, or_tmp6)
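                // 128-bit: byte-swap each 64-bit half, then swap the halves.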
                    let (lo, hi) = bcx.ins().isplit(v);
                    let lo = swap(bcx, lo);
                    let hi = swap(bcx, hi);
                    bcx.ins().iconcat(hi, lo)
                ty => unreachable!("bswap {}", ty),

            let res = CValue::by_val(swap(&mut fx.bcx, arg), fx.layout_of(T));
            ret.write_cvalue(fx, res);
        assert_inhabited | assert_zero_valid | assert_uninit_valid, <T> () {
            let layout = fx.layout_of(T);
            if layout.abi.is_uninhabited() {
                with_no_trimmed_paths(|| crate::base::codegen_panic(
                    &format!("attempted to instantiate uninhabited type `{}`", T),

            if intrinsic == sym::assert_zero_valid && !layout.might_permit_raw_init(fx, /*zero:*/ true).unwrap() {
                with_no_trimmed_paths(|| crate::base::codegen_panic(
                    &format!("attempted to zero-initialize type `{}`, which is invalid", T),

            if intrinsic == sym::assert_uninit_valid && !layout.might_permit_raw_init(fx, /*zero:*/ false).unwrap() {
                with_no_trimmed_paths(|| crate::base::codegen_panic(
                    &format!("attempted to leave type `{}` uninitialized, which is invalid", T),
        volatile_load | unaligned_volatile_load, (c ptr) {
            // Cranelift treats loads as volatile by default
            // FIXME correctly handle unaligned_volatile_load
            let inner_layout =
                fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
            let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
            ret.write_cvalue(fx, val);
        volatile_store | unaligned_volatile_store, (v ptr, c val) {
            // Cranelift treats stores as volatile by default
            // FIXME correctly handle unaligned_volatile_store
            let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
            dest.write_cvalue(fx, val);
        pref_align_of | min_align_of | needs_drop | type_id | type_name | variant_count, () {
            let const_val =
                fx.tcx.const_eval_instance(ParamEnv::reveal_all(), instance, None).unwrap();
            let val = crate::constant::codegen_const_value(
            ret.write_cvalue(fx, val);
        ptr_offset_from, <T> (v ptr, v base) {
            let isize_layout = fx.layout_of(fx.tcx.types.isize);

            let pointee_size: u64 = fx.layout_of(T).size.bytes();
            let diff = fx.bcx.ins().isub(ptr, base);
            // FIXME this can be an exact division.
            let val = CValue::by_val(fx.bcx.ins().sdiv_imm(diff, pointee_size as i64), isize_layout);
            ret.write_cvalue(fx, val);

        ptr_guaranteed_eq, (c a, c b) {
            let val = crate::num::codegen_ptr_binop(fx, BinOp::Eq, a, b);
            ret.write_cvalue(fx, val);

        ptr_guaranteed_ne, (c a, c b) {
            let val = crate::num::codegen_ptr_binop(fx, BinOp::Ne, a, b);
            ret.write_cvalue(fx, val);

        caller_location, () {
            let caller_location = fx.get_caller_location(span);
            ret.write_cvalue(fx, caller_location);
        _ if intrinsic.as_str().starts_with("atomic_fence"), () {
            fx.bcx.ins().fence();
        _ if intrinsic.as_str().starts_with("atomic_singlethreadfence"), () {
            // FIXME use a compiler fence once Cranelift supports it
            fx.bcx.ins().fence();
        _ if intrinsic.as_str().starts_with("atomic_load"), <T> (v ptr) {
            validate_atomic_type!(fx, intrinsic, span, T);
            let ty = fx.clif_type(T).unwrap();

            let val = fx.bcx.ins().atomic_load(ty, MemFlags::trusted(), ptr);

            let val = CValue::by_val(val, fx.layout_of(T));
            ret.write_cvalue(fx, val);
        _ if intrinsic.as_str().starts_with("atomic_store"), (v ptr, c val) {
            validate_atomic_type!(fx, intrinsic, span, val.layout().ty);

            let val = val.load_scalar(fx);

            fx.bcx.ins().atomic_store(MemFlags::trusted(), val, ptr);
        _ if intrinsic.as_str().starts_with("atomic_xchg"), (v ptr, c new) {
            let layout = new.layout();
            validate_atomic_type!(fx, intrinsic, span, layout.ty);
            let ty = fx.clif_type(layout.ty).unwrap();

            let new = new.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xchg, ptr, new);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        _ if intrinsic.as_str().starts_with("atomic_cxchg"), (v ptr, c test_old, c new) { // both atomic_cxchg_* and atomic_cxchgweak_*
            let layout = new.layout();
            validate_atomic_type!(fx, intrinsic, span, layout.ty);

            let test_old = test_old.load_scalar(fx);
            let new = new.load_scalar(fx);

            let old = fx.bcx.ins().atomic_cas(MemFlags::trusted(), ptr, test_old, new);
            let is_eq = fx.bcx.ins().icmp(IntCC::Equal, old, test_old);

            let ret_val = CValue::by_val_pair(old, fx.bcx.ins().bint(types::I8, is_eq), ret.layout());
            ret.write_cvalue(fx, ret_val)
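
        // Each remaining atomic_* RMW intrinsic lowers to a single Cranelift
        // `atomic_rmw` instruction with the matching `AtomicRmwOp` and returns
        // the old value.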
        _ if intrinsic.as_str().starts_with("atomic_xadd"), (v ptr, c amount) {
            let layout = amount.layout();
            validate_atomic_type!(fx, intrinsic, span, layout.ty);
            let ty = fx.clif_type(layout.ty).unwrap();

            let amount = amount.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Add, ptr, amount);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        _ if intrinsic.as_str().starts_with("atomic_xsub"), (v ptr, c amount) {
            let layout = amount.layout();
            validate_atomic_type!(fx, intrinsic, span, layout.ty);
            let ty = fx.clif_type(layout.ty).unwrap();

            let amount = amount.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Sub, ptr, amount);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        _ if intrinsic.as_str().starts_with("atomic_and"), (v ptr, c src) {
            let layout = src.layout();
            validate_atomic_type!(fx, intrinsic, span, layout.ty);
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::And, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        _ if intrinsic.as_str().starts_with("atomic_or"), (v ptr, c src) {
            let layout = src.layout();
            validate_atomic_type!(fx, intrinsic, span, layout.ty);
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Or, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        _ if intrinsic.as_str().starts_with("atomic_xor"), (v ptr, c src) {
            let layout = src.layout();
            validate_atomic_type!(fx, intrinsic, span, layout.ty);
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xor, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        _ if intrinsic.as_str().starts_with("atomic_nand"), (v ptr, c src) {
            let layout = src.layout();
            validate_atomic_type!(fx, intrinsic, span, layout.ty);
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Nand, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        _ if intrinsic.as_str().starts_with("atomic_max"), (v ptr, c src) {
            let layout = src.layout();
            validate_atomic_type!(fx, intrinsic, span, layout.ty);
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smax, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        _ if intrinsic.as_str().starts_with("atomic_umax"), (v ptr, c src) {
            let layout = src.layout();
            validate_atomic_type!(fx, intrinsic, span, layout.ty);
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umax, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        _ if intrinsic.as_str().starts_with("atomic_min"), (v ptr, c src) {
            let layout = src.layout();
            validate_atomic_type!(fx, intrinsic, span, layout.ty);
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smin, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        _ if intrinsic.as_str().starts_with("atomic_umin"), (v ptr, c src) {
            let layout = src.layout();
            validate_atomic_type!(fx, intrinsic, span, layout.ty);
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umin, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        // In Rust, floating point min and max don't propagate NaN; in Cranelift they do.
        // For this reason it is necessary to use `a.is_nan() ? b : (a >= b ? b : a)` for `minnumf*`
        // and `a.is_nan() ? b : (a <= b ? b : a)` for `maxnumf*`. NaN checks are done by comparing
        // a float against itself: only NaN is not equal to itself.
        minnumf32, (v a, v b) {
            let a_is_nan = fx.bcx.ins().fcmp(FloatCC::NotEqual, a, a);
            let a_ge_b = fx.bcx.ins().fcmp(FloatCC::GreaterThanOrEqual, a, b);
            let temp = fx.bcx.ins().select(a_ge_b, b, a);
            let val = fx.bcx.ins().select(a_is_nan, b, temp);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
            ret.write_cvalue(fx, val);
        minnumf64, (v a, v b) {
            let a_is_nan = fx.bcx.ins().fcmp(FloatCC::NotEqual, a, a);
            let a_ge_b = fx.bcx.ins().fcmp(FloatCC::GreaterThanOrEqual, a, b);
            let temp = fx.bcx.ins().select(a_ge_b, b, a);
            let val = fx.bcx.ins().select(a_is_nan, b, temp);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
            ret.write_cvalue(fx, val);
        maxnumf32, (v a, v b) {
            let a_is_nan = fx.bcx.ins().fcmp(FloatCC::NotEqual, a, a);
            let a_le_b = fx.bcx.ins().fcmp(FloatCC::LessThanOrEqual, a, b);
            let temp = fx.bcx.ins().select(a_le_b, b, a);
            let val = fx.bcx.ins().select(a_is_nan, b, temp);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
            ret.write_cvalue(fx, val);
        maxnumf64, (v a, v b) {
            let a_is_nan = fx.bcx.ins().fcmp(FloatCC::NotEqual, a, a);
            let a_le_b = fx.bcx.ins().fcmp(FloatCC::LessThanOrEqual, a, b);
            let temp = fx.bcx.ins().select(a_le_b, b, a);
            let val = fx.bcx.ins().select(a_is_nan, b, temp);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
            ret.write_cvalue(fx, val);
        kw.Try, (v f, v data, v _catch_fn) {
            // FIXME once unwinding is supported, change this to actually catch panics
            let f_sig = fx.bcx.func.import_signature(Signature {
                call_conv: CallConv::triple_default(fx.triple()),
                params: vec![AbiParam::new(fx.bcx.func.dfg.value_type(data))],

            fx.bcx.ins().call_indirect(f_sig, f, &[data]);

            let layout = ret.layout();
            let ret_val = CValue::const_val(fx, layout, ty::ScalarInt::null(layout.size));
            ret.write_cvalue(fx, ret_val);
        fadd_fast | fsub_fast | fmul_fast | fdiv_fast | frem_fast, (c x, c y) {
            let res = crate::num::codegen_float_binop(fx, match intrinsic {
                sym::fadd_fast => BinOp::Add,
                sym::fsub_fast => BinOp::Sub,
                sym::fmul_fast => BinOp::Mul,
                sym::fdiv_fast => BinOp::Div,
                sym::frem_fast => BinOp::Rem,
                _ => unreachable!(),
            ret.write_cvalue(fx, res);
        float_to_int_unchecked, (v f) {
            let res = crate::cast::clif_int_or_float_cast(
                fx.clif_type(ret.layout().ty).unwrap(),
                type_sign(ret.layout().ty),
            ret.write_cvalue(fx, CValue::by_val(res, ret.layout()));
        raw_eq, <T>(v lhs_ref, v rhs_ref) {
            fn type_by_size(size: Size) -> Option<Type> {
                Type::int(size.bits().try_into().ok()?)

            let size = fx.layout_of(T).layout.size;
            // FIXME add and use emit_small_memcmp
            let is_eq_value =
                if size == Size::ZERO {
                    // No bytes means they're trivially equal
                    fx.bcx.ins().iconst(types::I8, 1)
                } else if let Some(clty) = type_by_size(size) {
                    // Can't use `trusted` for these loads; they could be unaligned.
                    let mut flags = MemFlags::new();
                    let lhs_val = fx.bcx.ins().load(clty, flags, lhs_ref, 0);
                    let rhs_val = fx.bcx.ins().load(clty, flags, rhs_ref, 0);
                    let eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_val, rhs_val);
                    fx.bcx.ins().bint(types::I8, eq)
                } else {
                    // Just call `memcmp` (like slices do in core) when the
                    // size is too large or it's not a power-of-two.
                    let signed_bytes = i64::try_from(size.bytes()).unwrap();
                    let bytes_val = fx.bcx.ins().iconst(fx.pointer_type, signed_bytes);
                    let params = vec![AbiParam::new(fx.pointer_type); 3];
                    let returns = vec![AbiParam::new(types::I32)];
                    let args = &[lhs_ref, rhs_ref, bytes_val];
                    let cmp = fx.lib_call("memcmp", params, returns, args)[0];
                    let eq = fx.bcx.ins().icmp_imm(IntCC::Equal, cmp, 0);
                    fx.bcx.ins().bint(types::I8, eq)
                };
            ret.write_cvalue(fx, CValue::by_val(is_eq_value, ret.layout()));

    if let Some((_, dest)) = destination {
        let ret_block = fx.get_block(dest);
        fx.bcx.ins().jump(ret_block, &[]);
        trap_unreachable(fx, "[corruption] Diverging intrinsic returned.");