//! Codegen of intrinsics. This includes `extern "rust-intrinsic"`, `extern "platform-intrinsic"`
//! and LLVM intrinsics that have symbol names starting with `llvm.`.
pub(crate) use cpuid::codegen_cpuid_call;
pub(crate) use llvm::codegen_llvm_intrinsic_call;
use rustc_middle::ty::print::with_no_trimmed_paths;
use rustc_middle::ty::subst::SubstsRef;
use rustc_span::symbol::{kw, sym, Symbol};
use crate::prelude::*;
use cranelift_codegen::ir::AtomicRmwOp;
    (o $fx:expr, $arg:ident) => {
    (c $fx:expr, $arg:ident) => {
        codegen_operand($fx, $arg)
    (v $fx:expr, $arg:ident) => {
        codegen_operand($fx, $arg).load_scalar($fx)
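// The three argument codings above: `o` leaves the `mir::Operand` untouched,
// `c` lowers it to a `CValue`, and `v` additionally loads it as a single
// Cranelift `Value`. For example, the `rotate_left, (c x, v y)` arm further
// down binds `x` as a `CValue` and `y` as a raw `Value`.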
macro intrinsic_match {
    ($fx:expr, $intrinsic:expr, $substs:expr, $args:expr,
        $($($name:tt).*)|+ $(if $cond:expr)?, ($($a:ident $arg:ident),*) $content:block;
        let _ = $substs; // Silence warning when substs is unused.
                $(intrinsic_pat!($($name).*))|* $(if $cond)? => {
                    #[allow(unused_parens, non_snake_case)]
                        if let [$($arg),*] = $args {
                                $(intrinsic_arg!($a $fx, $arg),)*
                            #[warn(unused_parens, non_snake_case)]
                            bug!("wrong number of args for intrinsic {:?}", $intrinsic);
macro validate_atomic_type($fx:ident, $intrinsic:ident, $span:ident, $ty:expr) {
        ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
            $fx.tcx.sess.span_err(
                    "`{}` intrinsic: expected basic integer or raw pointer type, found `{:?}`",
            // Prevent verifier error
            crate::trap::trap_unreachable($fx, "compilation should not have succeeded");
fn validate_simd_type(fx: &mut FunctionCx<'_, '_, '_>, intrinsic: Symbol, span: Span, ty: Ty<'_>) {
        fx.tcx.sess.span_err(span, &format!("invalid monomorphization of `{}` intrinsic: expected SIMD input type, found non-SIMD `{}`", intrinsic, ty));
        // Prevent verifier error
        crate::trap::trap_unreachable(fx, "compilation should not have succeeded");
pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> Option<Type> {
    let (element, count) = match layout.abi {
        Abi::Vector { element, count } => (element, count),
    match scalar_to_clif_type(tcx, element).by(u16::try_from(count).unwrap()) {
        // Cranelift currently only implements icmp for 128-bit vectors.
        Some(vector_ty) if vector_ty.bits() == 128 => Some(vector_ty),
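/// Applies `f` to every lane of `val` and writes each result to the matching
/// lane of `ret`. Input and output lane counts must match; the lane *types*
/// may differ (e.g. a lane-wise cast or a comparison producing a mask).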
fn simd_for_each_lane<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
        &mut FunctionCx<'_, '_, 'tcx>,
    let layout = val.layout();
    let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
    let lane_layout = fx.layout_of(lane_ty);
    let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
    let ret_lane_layout = fx.layout_of(ret_lane_ty);
    assert_eq!(lane_count, ret_lane_count);
    for lane_idx in 0..lane_count {
        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
        let res_lane = f(fx, lane_layout, ret_lane_layout, lane);
        ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
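/// Like `simd_for_each_lane`, but zips two vectors of identical layout and
/// applies `f` to each pair of corresponding lanes.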
fn simd_pair_for_each_lane<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
        &mut FunctionCx<'_, '_, 'tcx>,
    assert_eq!(x.layout(), y.layout());
    let layout = x.layout();
    let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
    let lane_layout = fx.layout_of(lane_ty);
    let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
    let ret_lane_layout = fx.layout_of(ret_lane_ty);
    assert_eq!(lane_count, ret_lane_count);
    for lane_idx in 0..lane_count {
        let x_lane = x.value_lane(fx, lane_idx).load_scalar(fx);
        let y_lane = y.value_lane(fx, lane_idx).load_scalar(fx);
        let res_lane = f(fx, lane_layout, ret_lane_layout, x_lane, y_lane);
        ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
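/// Folds all lanes of `val` into a single scalar using `f`. If `acc` is
/// `Some`, it seeds the reduction; otherwise lane 0 is the initial value and
/// the fold starts at lane 1.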
fn simd_reduce<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    f: impl Fn(&mut FunctionCx<'_, '_, 'tcx>, TyAndLayout<'tcx>, Value, Value) -> Value,
    let (lane_count, lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
    let lane_layout = fx.layout_of(lane_ty);
    assert_eq!(lane_layout, ret.layout());
    let (mut res_val, start_lane) =
        if let Some(acc) = acc { (acc, 0) } else { (val.value_lane(fx, 0).load_scalar(fx), 1) };
    for lane_idx in start_lane..lane_count {
        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
        res_val = f(fx, lane_layout, res_val, lane);
    let res = CValue::by_val(res_val, lane_layout);
    ret.write_cvalue(fx, res);
// FIXME move all uses to `simd_reduce`
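/// Reduces a vector of masks to a single `i8` bool. Each lane is masked to
/// its low bit first, since vector comparisons produce all-ones (!0) lanes
/// rather than 0/1.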
fn simd_reduce_bool<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    f: impl Fn(&mut FunctionCx<'_, '_, 'tcx>, Value, Value) -> Value,
    let (lane_count, _lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
    assert!(ret.layout().ty.is_bool());
    let res_val = val.value_lane(fx, 0).load_scalar(fx);
    let mut res_val = fx.bcx.ins().band_imm(res_val, 1); // mask to boolean
    for lane_idx in 1..lane_count {
        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
        let lane = fx.bcx.ins().band_imm(lane, 1); // mask to boolean
        res_val = f(fx, res_val, lane);
    let res_val = if fx.bcx.func.dfg.value_type(res_val) != types::I8 {
        fx.bcx.ins().ireduce(types::I8, res_val)
    let res = CValue::by_val(res_val, ret.layout());
    ret.write_cvalue(fx, res);
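/// Widens a Cranelift boolean to an all-zeros/all-ones value of `layout`'s
/// type: `bint` yields 0/1, `ineg` maps 1 to !0, and float lanes are produced
/// by bitcasting the integer mask.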
fn bool_to_zero_or_max_uint<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    layout: TyAndLayout<'tcx>,
    let ty = fx.clif_type(layout.ty).unwrap();
    let int_ty = match ty {
        types::F32 => types::I32,
        types::F64 => types::I64,
    let val = fx.bcx.ins().bint(int_ty, val);
    let mut res = fx.bcx.ins().ineg(val);
        res = fx.bcx.ins().bitcast(ty, res);
    CValue::by_val(res, layout)
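// The first macro arm below takes a single integer condition code (shared by
// signed and unsigned lanes) plus a float one; the second takes separate
// unsigned/signed/float condition codes.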
    ($fx:expr, $cc:ident|$cc_f:ident($x:ident, $y:ident) -> $ret:ident) => {
        let vector_ty = clif_vector_type($fx.tcx, $x.layout());
        if let Some(vector_ty) = vector_ty {
            let x = $x.load_scalar($fx);
            let y = $y.load_scalar($fx);
            let val = if vector_ty.lane_type().is_float() {
                $fx.bcx.ins().fcmp(FloatCC::$cc_f, x, y)
                $fx.bcx.ins().icmp(IntCC::$cc, x, y)
            // HACK: this depends on `icmp` for vectors representing bools as 0 and !0, not 0 and 1.
            let val = $fx.bcx.ins().raw_bitcast(vector_ty, val);
            $ret.write_cvalue($fx, CValue::by_val(val, $ret.layout()));
            simd_pair_for_each_lane(
                |fx, lane_layout, res_lane_layout, x_lane, y_lane| {
                    let res_lane = match lane_layout.ty.kind() {
                        ty::Uint(_) | ty::Int(_) => fx.bcx.ins().icmp(IntCC::$cc, x_lane, y_lane),
                        ty::Float(_) => fx.bcx.ins().fcmp(FloatCC::$cc_f, x_lane, y_lane),
                        _ => unreachable!("{:?}", lane_layout.ty),
                    bool_to_zero_or_max_uint(fx, res_lane_layout, res_lane)
    ($fx:expr, $cc_u:ident|$cc_s:ident|$cc_f:ident($x:ident, $y:ident) -> $ret:ident) => {
        // FIXME use vector icmp when possible
        simd_pair_for_each_lane(
            |fx, lane_layout, res_lane_layout, x_lane, y_lane| {
                let res_lane = match lane_layout.ty.kind() {
                    ty::Uint(_) => fx.bcx.ins().icmp(IntCC::$cc_u, x_lane, y_lane),
                    ty::Int(_) => fx.bcx.ins().icmp(IntCC::$cc_s, x_lane, y_lane),
                    ty::Float(_) => fx.bcx.ins().fcmp(FloatCC::$cc_f, x_lane, y_lane),
                    _ => unreachable!("{:?}", lane_layout.ty),
                bool_to_zero_or_max_uint(fx, res_lane_layout, res_lane)
macro simd_int_binop {
    ($fx:expr, $op:ident($x:ident, $y:ident) -> $ret:ident) => {
        simd_int_binop!($fx, $op|$op($x, $y) -> $ret);
    ($fx:expr, $op_u:ident|$op_s:ident($x:ident, $y:ident) -> $ret:ident) => {
        simd_pair_for_each_lane(
            |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
                let res_lane = match lane_layout.ty.kind() {
                    ty::Uint(_) => fx.bcx.ins().$op_u(x_lane, y_lane),
                    ty::Int(_) => fx.bcx.ins().$op_s(x_lane, y_lane),
                    _ => unreachable!("{:?}", lane_layout.ty),
                CValue::by_val(res_lane, ret_lane_layout)
macro simd_int_flt_binop {
    ($fx:expr, $op:ident|$op_f:ident($x:ident, $y:ident) -> $ret:ident) => {
        simd_int_flt_binop!($fx, $op|$op|$op_f($x, $y) -> $ret);
    ($fx:expr, $op_u:ident|$op_s:ident|$op_f:ident($x:ident, $y:ident) -> $ret:ident) => {
        simd_pair_for_each_lane(
            |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
                let res_lane = match lane_layout.ty.kind() {
                    ty::Uint(_) => fx.bcx.ins().$op_u(x_lane, y_lane),
                    ty::Int(_) => fx.bcx.ins().$op_s(x_lane, y_lane),
                    ty::Float(_) => fx.bcx.ins().$op_f(x_lane, y_lane),
                    _ => unreachable!("{:?}", lane_layout.ty),
                CValue::by_val(res_lane, ret_lane_layout)
macro simd_flt_binop($fx:expr, $op:ident($x:ident, $y:ident) -> $ret:ident) {
    simd_pair_for_each_lane(
        |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
            let res_lane = match lane_layout.ty.kind() {
                ty::Float(_) => fx.bcx.ins().$op(x_lane, y_lane),
                _ => unreachable!("{:?}", lane_layout.ty),
            CValue::by_val(res_lane, ret_lane_layout)
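/// Main entry point for intrinsic calls: SIMD intrinsics and libm-style float
/// intrinsics are dispatched to dedicated helpers; everything else goes
/// through `codegen_regular_intrinsic_call`.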
pub(crate) fn codegen_intrinsic_call<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    instance: Instance<'tcx>,
    args: &[mir::Operand<'tcx>],
    destination: Option<(CPlace<'tcx>, BasicBlock)>,
    let intrinsic = fx.tcx.item_name(instance.def_id());
    let substs = instance.substs;
    let ret = match destination {
        Some((place, _)) => place,
            // Insert non-returning intrinsics here
                trap_abort(fx, "Called intrinsic::abort.");
                crate::base::codegen_panic(fx, "Transmuting to uninhabited type.", span);
                _ => unimplemented!("unsupported intrinsic {}", intrinsic),
388 if intrinsic.as_str().starts_with("simd_") {
389 self::simd::codegen_simd_intrinsic_call(fx, intrinsic, substs, args, ret, span);
390 let ret_block = fx.get_block(destination.expect("SIMD intrinsics don't diverge").1);
391 fx.bcx.ins().jump(ret_block, &[]);
392 } else if codegen_float_intrinsic_call(fx, intrinsic, args, ret) {
393 let ret_block = fx.get_block(destination.expect("Float intrinsics don't diverge").1);
394 fx.bcx.ins().jump(ret_block, &[]);
396 codegen_regular_intrinsic_call(
fn codegen_float_intrinsic_call<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    args: &[mir::Operand<'tcx>],
    let (name, arg_count, ty) = match intrinsic {
        sym::expf32 => ("expf", 1, fx.tcx.types.f32),
        sym::expf64 => ("exp", 1, fx.tcx.types.f64),
        sym::exp2f32 => ("exp2f", 1, fx.tcx.types.f32),
        sym::exp2f64 => ("exp2", 1, fx.tcx.types.f64),
        sym::sqrtf32 => ("sqrtf", 1, fx.tcx.types.f32),
        sym::sqrtf64 => ("sqrt", 1, fx.tcx.types.f64),
        sym::powif32 => ("__powisf2", 2, fx.tcx.types.f32), // compiler-builtins
        sym::powif64 => ("__powidf2", 2, fx.tcx.types.f64), // compiler-builtins
        sym::powf32 => ("powf", 2, fx.tcx.types.f32),
        sym::powf64 => ("pow", 2, fx.tcx.types.f64),
        sym::logf32 => ("logf", 1, fx.tcx.types.f32),
        sym::logf64 => ("log", 1, fx.tcx.types.f64),
        sym::log2f32 => ("log2f", 1, fx.tcx.types.f32),
        sym::log2f64 => ("log2", 1, fx.tcx.types.f64),
        sym::log10f32 => ("log10f", 1, fx.tcx.types.f32),
        sym::log10f64 => ("log10", 1, fx.tcx.types.f64),
        sym::fabsf32 => ("fabsf", 1, fx.tcx.types.f32),
        sym::fabsf64 => ("fabs", 1, fx.tcx.types.f64),
        sym::fmaf32 => ("fmaf", 3, fx.tcx.types.f32),
        sym::fmaf64 => ("fma", 3, fx.tcx.types.f64),
        sym::copysignf32 => ("copysignf", 2, fx.tcx.types.f32),
        sym::copysignf64 => ("copysign", 2, fx.tcx.types.f64),
        sym::floorf32 => ("floorf", 1, fx.tcx.types.f32),
        sym::floorf64 => ("floor", 1, fx.tcx.types.f64),
        sym::ceilf32 => ("ceilf", 1, fx.tcx.types.f32),
        sym::ceilf64 => ("ceil", 1, fx.tcx.types.f64),
        sym::truncf32 => ("truncf", 1, fx.tcx.types.f32),
        sym::truncf64 => ("trunc", 1, fx.tcx.types.f64),
        sym::roundf32 => ("roundf", 1, fx.tcx.types.f32),
        sym::roundf64 => ("round", 1, fx.tcx.types.f64),
        sym::sinf32 => ("sinf", 1, fx.tcx.types.f32),
        sym::sinf64 => ("sin", 1, fx.tcx.types.f64),
        sym::cosf32 => ("cosf", 1, fx.tcx.types.f32),
        sym::cosf64 => ("cos", 1, fx.tcx.types.f64),
    if args.len() != arg_count {
        bug!("wrong number of args for intrinsic {:?}", intrinsic);
    let args = match args {
            a = [codegen_operand(fx, x)];
            b = [codegen_operand(fx, x), codegen_operand(fx, y)];
            c = [codegen_operand(fx, x), codegen_operand(fx, y), codegen_operand(fx, z)];
    let res = fx.easy_call(name, &args, ty);
    ret.write_cvalue(fx, res);
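/// Codegens every intrinsic that is neither a SIMD nor a float intrinsic, via
/// one big `intrinsic_match!` over the intrinsic name.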
fn codegen_regular_intrinsic_call<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    instance: Instance<'tcx>,
    substs: SubstsRef<'tcx>,
    args: &[mir::Operand<'tcx>],
    destination: Option<(CPlace<'tcx>, BasicBlock)>,
    let usize_layout = fx.layout_of(fx.tcx.types.usize);
        fx, intrinsic, substs, args,
            fx.tcx.sess.span_fatal(span, &format!("unsupported intrinsic {}", intrinsic));
        likely | unlikely, (c a) {
            ret.write_cvalue(fx, a);
            fx.bcx.ins().debugtrap();
        copy | copy_nonoverlapping, (v src, v dst, v count) {
            let elem_ty = substs.type_at(0);
            let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
            assert_eq!(args.len(), 3);
            let byte_amount = if elem_size != 1 {
                fx.bcx.ins().imul_imm(count, elem_size as i64)
            if intrinsic == sym::copy_nonoverlapping {
                // FIXME emit_small_memcpy
                fx.bcx.call_memcpy(fx.target_config, dst, src, byte_amount);
                // FIXME emit_small_memmove
                fx.bcx.call_memmove(fx.target_config, dst, src, byte_amount);
        // NOTE: the volatile variants have src and dst swapped
        volatile_copy_memory | volatile_copy_nonoverlapping_memory, (v dst, v src, v count) {
            let elem_ty = substs.type_at(0);
            let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
            assert_eq!(args.len(), 3);
            let byte_amount = if elem_size != 1 {
                fx.bcx.ins().imul_imm(count, elem_size as i64)
            // FIXME make the copy actually volatile when using emit_small_mem{cpy,move}
            if intrinsic == sym::volatile_copy_nonoverlapping_memory {
                // FIXME emit_small_memcpy
                fx.bcx.call_memcpy(fx.target_config, dst, src, byte_amount);
                // FIXME emit_small_memmove
                fx.bcx.call_memmove(fx.target_config, dst, src, byte_amount);
        size_of_val, (c ptr) {
            let layout = fx.layout_of(substs.type_at(0));
            let size = if layout.is_unsized() {
                let (_ptr, info) = ptr.load_scalar_pair(fx);
                let (size, _align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
                    .iconst(fx.pointer_type, layout.size.bytes() as i64)
            ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
        min_align_of_val, (c ptr) {
            let layout = fx.layout_of(substs.type_at(0));
            let align = if layout.is_unsized() {
                let (_ptr, info) = ptr.load_scalar_pair(fx);
                let (_size, align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
                    .iconst(fx.pointer_type, layout.align.abi.bytes() as i64)
            ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
        unchecked_add | unchecked_sub | unchecked_div | exact_div | unchecked_rem
        | unchecked_shl | unchecked_shr, (c x, c y) {
            // FIXME trap on overflow
            let bin_op = match intrinsic {
                sym::unchecked_add => BinOp::Add,
                sym::unchecked_sub => BinOp::Sub,
                sym::unchecked_div | sym::exact_div => BinOp::Div,
                sym::unchecked_rem => BinOp::Rem,
                sym::unchecked_shl => BinOp::Shl,
                sym::unchecked_shr => BinOp::Shr,
            let res = crate::num::codegen_int_binop(fx, bin_op, x, y);
            ret.write_cvalue(fx, res);
        add_with_overflow | sub_with_overflow | mul_with_overflow, (c x, c y) {
            assert_eq!(x.layout().ty, y.layout().ty);
            let bin_op = match intrinsic {
                sym::add_with_overflow => BinOp::Add,
                sym::sub_with_overflow => BinOp::Sub,
                sym::mul_with_overflow => BinOp::Mul,
            let res = crate::num::codegen_checked_int_binop(
            ret.write_cvalue(fx, res);
        saturating_add | saturating_sub, (c lhs, c rhs) {
            assert_eq!(lhs.layout().ty, rhs.layout().ty);
            let bin_op = match intrinsic {
                sym::saturating_add => BinOp::Add,
                sym::saturating_sub => BinOp::Sub,
            let signed = type_sign(lhs.layout().ty);
            let checked_res = crate::num::codegen_checked_int_binop(
            let (val, has_overflow) = checked_res.load_scalar_pair(fx);
            let clif_ty = fx.clif_type(lhs.layout().ty).unwrap();
            let (min, max) = type_min_max_value(&mut fx.bcx, clif_ty, signed);
            let val = match (intrinsic, signed) {
                (sym::saturating_add, false) => fx.bcx.ins().select(has_overflow, max, val),
                (sym::saturating_sub, false) => fx.bcx.ins().select(has_overflow, min, val),
                (sym::saturating_add, true) => {
                    let rhs = rhs.load_scalar(fx);
                    let rhs_ge_zero = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
                    let sat_val = fx.bcx.ins().select(rhs_ge_zero, max, min);
                    fx.bcx.ins().select(has_overflow, sat_val, val)
                (sym::saturating_sub, true) => {
                    let rhs = rhs.load_scalar(fx);
                    let rhs_ge_zero = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
                    let sat_val = fx.bcx.ins().select(rhs_ge_zero, min, max);
                    fx.bcx.ins().select(has_overflow, sat_val, val)
            let res = CValue::by_val(val, lhs.layout());
            ret.write_cvalue(fx, res);
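            // Worked example: `i8::MAX.saturating_add(1)` overflows with
            // `rhs >= 0`, so `sat_val` is `max` (127) and the final `select`
            // returns 127 instead of the wrapped result.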
        rotate_left, (c x, v y) {
            let layout = x.layout();
            let x = x.load_scalar(fx);
            let res = fx.bcx.ins().rotl(x, y);
            ret.write_cvalue(fx, CValue::by_val(res, layout));
        rotate_right, (c x, v y) {
            let layout = x.layout();
            let x = x.load_scalar(fx);
            let res = fx.bcx.ins().rotr(x, y);
            ret.write_cvalue(fx, CValue::by_val(res, layout));
        // The only difference between offset and arith_offset is regarding UB. Because Cranelift
        // doesn't have UB, both are codegen'ed the same way.
        offset | arith_offset, (c base, v offset) {
            let pointee_ty = base.layout().ty.builtin_deref(true).unwrap().ty;
            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
            let ptr_diff = if pointee_size != 1 {
                fx.bcx.ins().imul_imm(offset, pointee_size as i64)
            let base_val = base.load_scalar(fx);
            let res = fx.bcx.ins().iadd(base_val, ptr_diff);
            ret.write_cvalue(fx, CValue::by_val(res, base.layout()));
        transmute, (c from) {
            ret.write_cvalue_transmute(fx, from);
        write_bytes | volatile_set_memory, (c dst, v val, v count) {
            let pointee_ty = dst.layout().ty.builtin_deref(true).unwrap().ty;
            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
            let count = if pointee_size != 1 {
                fx.bcx.ins().imul_imm(count, pointee_size as i64)
            let dst_ptr = dst.load_scalar(fx);
            // FIXME make the memset actually volatile when switching to emit_small_memset
            // FIXME use emit_small_memset
            fx.bcx.call_memset(fx.target_config, dst_ptr, val, count);
        ctlz | ctlz_nonzero, (c arg) {
            let val = arg.load_scalar(fx);
            // FIXME trap on `ctlz_nonzero` with zero arg.
            let res = fx.bcx.ins().clz(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        cttz | cttz_nonzero, (c arg) {
            let val = arg.load_scalar(fx);
            // FIXME trap on `cttz_nonzero` with zero arg.
            let res = fx.bcx.ins().ctz(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
            let val = arg.load_scalar(fx);
            let res = fx.bcx.ins().popcnt(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        bitreverse, (c arg) {
            let val = arg.load_scalar(fx);
            let res = fx.bcx.ins().bitrev(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
            // FIXME(CraneStation/cranelift#794) add bswap instruction to cranelift
            fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
                match bcx.func.dfg.value_type(v) {
                    // https://code.woboq.org/gcc/include/bits/byteswap.h.html
                        let tmp1 = bcx.ins().ishl_imm(v, 8);
                        let n1 = bcx.ins().band_imm(tmp1, 0xFF00);
                        let tmp2 = bcx.ins().ushr_imm(v, 8);
                        let n2 = bcx.ins().band_imm(tmp2, 0x00FF);
                        bcx.ins().bor(n1, n2)
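                        // e.g. the 16-bit swap turns 0x1234 into 0x3412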
                        let tmp1 = bcx.ins().ishl_imm(v, 24);
                        let n1 = bcx.ins().band_imm(tmp1, 0xFF00_0000);
                        let tmp2 = bcx.ins().ishl_imm(v, 8);
                        let n2 = bcx.ins().band_imm(tmp2, 0x00FF_0000);
                        let tmp3 = bcx.ins().ushr_imm(v, 8);
                        let n3 = bcx.ins().band_imm(tmp3, 0x0000_FF00);
                        let tmp4 = bcx.ins().ushr_imm(v, 24);
                        let n4 = bcx.ins().band_imm(tmp4, 0x0000_00FF);
                        let or_tmp1 = bcx.ins().bor(n1, n2);
                        let or_tmp2 = bcx.ins().bor(n3, n4);
                        bcx.ins().bor(or_tmp1, or_tmp2)
                        let tmp1 = bcx.ins().ishl_imm(v, 56);
                        let n1 = bcx.ins().band_imm(tmp1, 0xFF00_0000_0000_0000u64 as i64);
                        let tmp2 = bcx.ins().ishl_imm(v, 40);
                        let n2 = bcx.ins().band_imm(tmp2, 0x00FF_0000_0000_0000u64 as i64);
                        let tmp3 = bcx.ins().ishl_imm(v, 24);
                        let n3 = bcx.ins().band_imm(tmp3, 0x0000_FF00_0000_0000u64 as i64);
                        let tmp4 = bcx.ins().ishl_imm(v, 8);
                        let n4 = bcx.ins().band_imm(tmp4, 0x0000_00FF_0000_0000u64 as i64);
                        let tmp5 = bcx.ins().ushr_imm(v, 8);
                        let n5 = bcx.ins().band_imm(tmp5, 0x0000_0000_FF00_0000u64 as i64);
                        let tmp6 = bcx.ins().ushr_imm(v, 24);
                        let n6 = bcx.ins().band_imm(tmp6, 0x0000_0000_00FF_0000u64 as i64);
                        let tmp7 = bcx.ins().ushr_imm(v, 40);
                        let n7 = bcx.ins().band_imm(tmp7, 0x0000_0000_0000_FF00u64 as i64);
                        let tmp8 = bcx.ins().ushr_imm(v, 56);
                        let n8 = bcx.ins().band_imm(tmp8, 0x0000_0000_0000_00FFu64 as i64);
                        let or_tmp1 = bcx.ins().bor(n1, n2);
                        let or_tmp2 = bcx.ins().bor(n3, n4);
                        let or_tmp3 = bcx.ins().bor(n5, n6);
                        let or_tmp4 = bcx.ins().bor(n7, n8);
                        let or_tmp5 = bcx.ins().bor(or_tmp1, or_tmp2);
                        let or_tmp6 = bcx.ins().bor(or_tmp3, or_tmp4);
                        bcx.ins().bor(or_tmp5, or_tmp6)
                        let (lo, hi) = bcx.ins().isplit(v);
                        let lo = swap(bcx, lo);
                        let hi = swap(bcx, hi);
                        bcx.ins().iconcat(hi, lo)
                    ty => unreachable!("bswap {}", ty),
            let val = arg.load_scalar(fx);
            let res = CValue::by_val(swap(&mut fx.bcx, val), arg.layout());
            ret.write_cvalue(fx, res);
        assert_inhabited | assert_zero_valid | assert_uninit_valid, () {
            let layout = fx.layout_of(substs.type_at(0));
            if layout.abi.is_uninhabited() {
                with_no_trimmed_paths(|| crate::base::codegen_panic(
                    &format!("attempted to instantiate uninhabited type `{}`", layout.ty),
            if intrinsic == sym::assert_zero_valid && !layout.might_permit_raw_init(fx, /*zero:*/ true) {
                with_no_trimmed_paths(|| crate::base::codegen_panic(
                    &format!("attempted to zero-initialize type `{}`, which is invalid", layout.ty),
            if intrinsic == sym::assert_uninit_valid && !layout.might_permit_raw_init(fx, /*zero:*/ false) {
                with_no_trimmed_paths(|| crate::base::codegen_panic(
                    &format!("attempted to leave type `{}` uninitialized, which is invalid", layout.ty),
        volatile_load | unaligned_volatile_load, (c ptr) {
            // Cranelift treats loads as volatile by default
            // FIXME correctly handle unaligned_volatile_load
                fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
            let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
            ret.write_cvalue(fx, val);
        volatile_store | unaligned_volatile_store, (v ptr, c val) {
            // Cranelift treats stores as volatile by default
            // FIXME correctly handle unaligned_volatile_store
            let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
            dest.write_cvalue(fx, val);
        pref_align_of | needs_drop | type_id | type_name | variant_count, () {
                fx.tcx.const_eval_instance(ParamEnv::reveal_all(), instance, None).unwrap();
            let val = crate::constant::codegen_const_value(
            ret.write_cvalue(fx, val);
        ptr_offset_from, (v ptr, v base) {
            let ty = substs.type_at(0);
            let isize_layout = fx.layout_of(fx.tcx.types.isize);
            let pointee_size: u64 = fx.layout_of(ty).size.bytes();
            let diff = fx.bcx.ins().isub(ptr, base);
            // FIXME this can be an exact division.
            let val = CValue::by_val(fx.bcx.ins().sdiv_imm(diff, pointee_size as i64), isize_layout);
            ret.write_cvalue(fx, val);
        ptr_guaranteed_eq, (c a, c b) {
            let val = crate::num::codegen_ptr_binop(fx, BinOp::Eq, a, b);
            ret.write_cvalue(fx, val);
        ptr_guaranteed_ne, (c a, c b) {
            let val = crate::num::codegen_ptr_binop(fx, BinOp::Ne, a, b);
            ret.write_cvalue(fx, val);
        caller_location, () {
            let caller_location = fx.get_caller_location(span);
            ret.write_cvalue(fx, caller_location);
880 _ if intrinsic.as_str().starts_with("atomic_fence"), () {
881 fx.bcx.ins().fence();
883 _ if intrinsic.as_str().starts_with("atomic_singlethreadfence"), () {
884 // FIXME use a compiler fence once Cranelift supports it
885 fx.bcx.ins().fence();
887 _ if intrinsic.as_str().starts_with("atomic_load"), (v ptr) {
888 let ty = substs.type_at(0);
889 validate_atomic_type!(fx, intrinsic, span, ty);
890 let clif_ty = fx.clif_type(ty).unwrap();
892 let val = fx.bcx.ins().atomic_load(clif_ty, MemFlags::trusted(), ptr);
894 let val = CValue::by_val(val, fx.layout_of(ty));
895 ret.write_cvalue(fx, val);
897 _ if intrinsic.as_str().starts_with("atomic_store"), (v ptr, c val) {
898 validate_atomic_type!(fx, intrinsic, span, val.layout().ty);
900 let val = val.load_scalar(fx);
902 fx.bcx.ins().atomic_store(MemFlags::trusted(), val, ptr);
904 _ if intrinsic.as_str().starts_with("atomic_xchg"), (v ptr, c new) {
905 let layout = new.layout();
906 validate_atomic_type!(fx, intrinsic, span, layout.ty);
907 let ty = fx.clif_type(layout.ty).unwrap();
909 let new = new.load_scalar(fx);
911 let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xchg, ptr, new);
913 let old = CValue::by_val(old, layout);
914 ret.write_cvalue(fx, old);
916 _ if intrinsic.as_str().starts_with("atomic_cxchg"), (v ptr, c test_old, c new) { // both atomic_cxchg_* and atomic_cxchgweak_*
917 let layout = new.layout();
918 validate_atomic_type!(fx, intrinsic, span, layout.ty);
920 let test_old = test_old.load_scalar(fx);
921 let new = new.load_scalar(fx);
923 let old = fx.bcx.ins().atomic_cas(MemFlags::trusted(), ptr, test_old, new);
924 let is_eq = fx.bcx.ins().icmp(IntCC::Equal, old, test_old);
926 let ret_val = CValue::by_val_pair(old, fx.bcx.ins().bint(types::I8, is_eq), ret.layout());
927 ret.write_cvalue(fx, ret_val)
930 _ if intrinsic.as_str().starts_with("atomic_xadd"), (v ptr, c amount) {
931 let layout = amount.layout();
932 validate_atomic_type!(fx, intrinsic, span, layout.ty);
933 let ty = fx.clif_type(layout.ty).unwrap();
935 let amount = amount.load_scalar(fx);
937 let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Add, ptr, amount);
939 let old = CValue::by_val(old, layout);
940 ret.write_cvalue(fx, old);
942 _ if intrinsic.as_str().starts_with("atomic_xsub"), (v ptr, c amount) {
943 let layout = amount.layout();
944 validate_atomic_type!(fx, intrinsic, span, layout.ty);
945 let ty = fx.clif_type(layout.ty).unwrap();
947 let amount = amount.load_scalar(fx);
949 let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Sub, ptr, amount);
951 let old = CValue::by_val(old, layout);
952 ret.write_cvalue(fx, old);
954 _ if intrinsic.as_str().starts_with("atomic_and"), (v ptr, c src) {
955 let layout = src.layout();
956 validate_atomic_type!(fx, intrinsic, span, layout.ty);
957 let ty = fx.clif_type(layout.ty).unwrap();
959 let src = src.load_scalar(fx);
961 let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::And, ptr, src);
963 let old = CValue::by_val(old, layout);
964 ret.write_cvalue(fx, old);
966 _ if intrinsic.as_str().starts_with("atomic_or"), (v ptr, c src) {
967 let layout = src.layout();
968 validate_atomic_type!(fx, intrinsic, span, layout.ty);
969 let ty = fx.clif_type(layout.ty).unwrap();
971 let src = src.load_scalar(fx);
973 let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Or, ptr, src);
975 let old = CValue::by_val(old, layout);
976 ret.write_cvalue(fx, old);
978 _ if intrinsic.as_str().starts_with("atomic_xor"), (v ptr, c src) {
979 let layout = src.layout();
980 validate_atomic_type!(fx, intrinsic, span, layout.ty);
981 let ty = fx.clif_type(layout.ty).unwrap();
983 let src = src.load_scalar(fx);
985 let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xor, ptr, src);
987 let old = CValue::by_val(old, layout);
988 ret.write_cvalue(fx, old);
990 _ if intrinsic.as_str().starts_with("atomic_nand"), (v ptr, c src) {
991 let layout = src.layout();
992 validate_atomic_type!(fx, intrinsic, span, layout.ty);
993 let ty = fx.clif_type(layout.ty).unwrap();
995 let src = src.load_scalar(fx);
997 let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Nand, ptr, src);
999 let old = CValue::by_val(old, layout);
1000 ret.write_cvalue(fx, old);
1002 _ if intrinsic.as_str().starts_with("atomic_max"), (v ptr, c src) {
1003 let layout = src.layout();
1004 validate_atomic_type!(fx, intrinsic, span, layout.ty);
1005 let ty = fx.clif_type(layout.ty).unwrap();
1007 let src = src.load_scalar(fx);
1009 let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smax, ptr, src);
1011 let old = CValue::by_val(old, layout);
1012 ret.write_cvalue(fx, old);
1014 _ if intrinsic.as_str().starts_with("atomic_umax"), (v ptr, c src) {
1015 let layout = src.layout();
1016 validate_atomic_type!(fx, intrinsic, span, layout.ty);
1017 let ty = fx.clif_type(layout.ty).unwrap();
1019 let src = src.load_scalar(fx);
1021 let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umax, ptr, src);
1023 let old = CValue::by_val(old, layout);
1024 ret.write_cvalue(fx, old);
1026 _ if intrinsic.as_str().starts_with("atomic_min"), (v ptr, c src) {
1027 let layout = src.layout();
1028 validate_atomic_type!(fx, intrinsic, span, layout.ty);
1029 let ty = fx.clif_type(layout.ty).unwrap();
1031 let src = src.load_scalar(fx);
1033 let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smin, ptr, src);
1035 let old = CValue::by_val(old, layout);
1036 ret.write_cvalue(fx, old);
1038 _ if intrinsic.as_str().starts_with("atomic_umin"), (v ptr, c src) {
1039 let layout = src.layout();
1040 validate_atomic_type!(fx, intrinsic, span, layout.ty);
1041 let ty = fx.clif_type(layout.ty).unwrap();
1043 let src = src.load_scalar(fx);
1045 let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umin, ptr, src);
1047 let old = CValue::by_val(old, layout);
1048 ret.write_cvalue(fx, old);
        // In Rust, floating point min and max don't propagate NaN; in Cranelift they do.
        // For this reason it is necessary to use `a.is_nan() ? b : (a >= b ? b : a)` for `minnumf*`
        // and `a.is_nan() ? b : (a <= b ? b : a)` for `maxnumf*`. NaN checks are done by comparing
        // a float against itself: only NaN is not equal to itself.
        minnumf32, (v a, v b) {
            let a_is_nan = fx.bcx.ins().fcmp(FloatCC::NotEqual, a, a);
            let a_ge_b = fx.bcx.ins().fcmp(FloatCC::GreaterThanOrEqual, a, b);
            let temp = fx.bcx.ins().select(a_ge_b, b, a);
            let val = fx.bcx.ins().select(a_is_nan, b, temp);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
            ret.write_cvalue(fx, val);
        minnumf64, (v a, v b) {
            let a_is_nan = fx.bcx.ins().fcmp(FloatCC::NotEqual, a, a);
            let a_ge_b = fx.bcx.ins().fcmp(FloatCC::GreaterThanOrEqual, a, b);
            let temp = fx.bcx.ins().select(a_ge_b, b, a);
            let val = fx.bcx.ins().select(a_is_nan, b, temp);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
            ret.write_cvalue(fx, val);
        maxnumf32, (v a, v b) {
            let a_is_nan = fx.bcx.ins().fcmp(FloatCC::NotEqual, a, a);
            let a_le_b = fx.bcx.ins().fcmp(FloatCC::LessThanOrEqual, a, b);
            let temp = fx.bcx.ins().select(a_le_b, b, a);
            let val = fx.bcx.ins().select(a_is_nan, b, temp);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
            ret.write_cvalue(fx, val);
        maxnumf64, (v a, v b) {
            let a_is_nan = fx.bcx.ins().fcmp(FloatCC::NotEqual, a, a);
            let a_le_b = fx.bcx.ins().fcmp(FloatCC::LessThanOrEqual, a, b);
            let temp = fx.bcx.ins().select(a_le_b, b, a);
            let val = fx.bcx.ins().select(a_is_nan, b, temp);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
            ret.write_cvalue(fx, val);
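        // E.g. for `minnumf32(f32::NAN, 1.0)`: `a != a` holds only for NaN,
        // so the outer select picks `b` and the non-NaN operand 1.0 is
        // returned, matching Rust's semantics.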
        kw.Try, (v f, v data, v _catch_fn) {
            // FIXME once unwinding is supported, change this to actually catch panics
            let f_sig = fx.bcx.func.import_signature(Signature {
                call_conv: fx.target_config.default_call_conv,
                params: vec![AbiParam::new(fx.bcx.func.dfg.value_type(data))],
            fx.bcx.ins().call_indirect(f_sig, f, &[data]);
            let layout = ret.layout();
            let ret_val = CValue::const_val(fx, layout, ty::ScalarInt::null(layout.size));
            ret.write_cvalue(fx, ret_val);
        fadd_fast | fsub_fast | fmul_fast | fdiv_fast | frem_fast, (c x, c y) {
            let res = crate::num::codegen_float_binop(fx, match intrinsic {
                sym::fadd_fast => BinOp::Add,
                sym::fsub_fast => BinOp::Sub,
                sym::fmul_fast => BinOp::Mul,
                sym::fdiv_fast => BinOp::Div,
                sym::frem_fast => BinOp::Rem,
                _ => unreachable!(),
            ret.write_cvalue(fx, res);
        float_to_int_unchecked, (v f) {
            let res = crate::cast::clif_int_or_float_cast(
                fx.clif_type(ret.layout().ty).unwrap(),
                type_sign(ret.layout().ty),
            ret.write_cvalue(fx, CValue::by_val(res, ret.layout()));
        raw_eq, (v lhs_ref, v rhs_ref) {
            let size = fx.layout_of(substs.type_at(0)).layout.size;
            // FIXME add and use emit_small_memcmp
                if size == Size::ZERO {
                    // No bytes means they're trivially equal
                    fx.bcx.ins().iconst(types::I8, 1)
                } else if let Some(clty) = size.bits().try_into().ok().and_then(Type::int) {
                    // Can't use `trusted` for these loads; they could be unaligned.
                    let mut flags = MemFlags::new();
                    let lhs_val = fx.bcx.ins().load(clty, flags, lhs_ref, 0);
                    let rhs_val = fx.bcx.ins().load(clty, flags, rhs_ref, 0);
                    let eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_val, rhs_val);
                    fx.bcx.ins().bint(types::I8, eq)
                    // Just call `memcmp` (like slices do in core) when the
                    // size is too large or it's not a power-of-two.
                    let signed_bytes = i64::try_from(size.bytes()).unwrap();
                    let bytes_val = fx.bcx.ins().iconst(fx.pointer_type, signed_bytes);
                    let params = vec![AbiParam::new(fx.pointer_type); 3];
                    let returns = vec![AbiParam::new(types::I32)];
                    let args = &[lhs_ref, rhs_ref, bytes_val];
                    let cmp = fx.lib_call("memcmp", params, returns, args)[0];
                    let eq = fx.bcx.ins().icmp_imm(IntCC::Equal, cmp, 0);
                    fx.bcx.ins().bint(types::I8, eq)
            ret.write_cvalue(fx, CValue::by_val(is_eq_value, ret.layout()));
            // FIXME implement black_box semantics
            ret.write_cvalue(fx, a);
    if let Some((_, dest)) = destination {
        let ret_block = fx.get_block(dest);
        fx.bcx.ins().jump(ret_block, &[]);
        trap_unreachable(fx, "[corruption] Diverging intrinsic returned.");