pub(crate) use cpuid::codegen_cpuid_call;
pub(crate) use llvm::codegen_llvm_intrinsic_call;
    ($x:ident . $($xs:tt).*) => {
        concat!(stringify!($x), ".", intrinsic_pat!($($xs).*))
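// `intrinsic_pat!` turns a dotted path like `llvm.x86.sse2` into the matching
// string literal, so match arms can be written without quotes.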
    (o $fx:expr, $arg:ident) => {
    (c $fx:expr, $arg:ident) => {
        trans_operand($fx, $arg)
    (v $fx:expr, $arg:ident) => {
        trans_operand($fx, $arg).load_scalar($fx)
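// The letter before each argument selects how it reaches the arm body:
// `o` keeps the raw operand, `c` lowers it to a `CValue`, and `v`
// additionally loads it as a scalar `Value`.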
macro intrinsic_substs {
    ($substs:expr, $index:expr,) => {},
    ($substs:expr, $index:expr, $first:ident $(,$rest:ident)*) => {
        let $first = $substs.type_at($index);
        intrinsic_substs!($substs, $index+1, $($rest),*);
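// E.g. `intrinsic_substs!(substs, 0, T, U)` expands to
// `let T = substs.type_at(0); let U = substs.type_at(1);`.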
macro intrinsic_match {
    ($fx:expr, $intrinsic:expr, $substs:expr, $args:expr,
        $($($name:tt).*)|+ $(if $cond:expr)?, $(<$($subst:ident),*>)? ($($a:ident $arg:ident),*) $content:block;
        let _ = $substs; // Silence warning when substs is unused.
            $(intrinsic_pat!($($name).*))|* $(if $cond)? => {
                #[allow(unused_parens, non_snake_case)]
                intrinsic_substs!($substs, 0, $($subst),*);
                if let [$($arg),*] = $args {
                    $(intrinsic_arg!($a $fx, $arg),)*
                    #[warn(unused_parens, non_snake_case)]
                    bug!("wrong number of args for intrinsic {:?}", $intrinsic);
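// Arms of `intrinsic_match!` take the form
// `name | alt_name $(if cond)?, <T> (v arg, c arg2) { body };`; see the
// large invocation inside `codegen_intrinsic_call` below.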
macro call_intrinsic_match {
    ($fx:expr, $intrinsic:expr, $substs:expr, $ret:expr, $destination:expr, $args:expr, $(
        $name:ident($($arg:ident),*) -> $ty:ident => $func:ident,
            stringify!($name) => {
                assert!($substs.is_noop());
                if let [$(ref $arg),*] = *$args {
                    $(trans_operand($fx, $arg),)*
                    let res = $fx.easy_call(stringify!($func), &[$($arg),*], $fx.tcx.types.$ty);
                    $ret.write_cvalue($fx, res);
                    if let Some((_, dest)) = $destination {
                        let ret_block = $fx.get_block(dest);
                        $fx.bcx.ins().jump(ret_block, &[]);
                    bug!("wrong number of args for intrinsic {:?}", $intrinsic);
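// `call_intrinsic_match!` forwards each listed intrinsic to a libcall of the
// given name via `easy_call` (e.g. `sqrtf64(flt) -> f64 => sqrt` becomes a
// call to the C `sqrt` function) and then jumps to the destination block.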
macro atomic_binop_return_old($fx:expr, $op:ident<$T:ident>($ptr:ident, $src:ident) -> $ret:ident) {
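    // Atomic ops are emulated under a single global lock (see `atomic_shim`)
    // rather than with native atomic instructions: take the lock, load the
    // old value, apply `$op`, store the result, and return the old value.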
    crate::atomic_shim::lock_global_lock($fx);
    let clif_ty = $fx.clif_type($T).unwrap();
    let old = $fx.bcx.ins().load(clif_ty, MemFlags::new(), $ptr, 0);
    let new = $fx.bcx.ins().$op(old, $src);
    $fx.bcx.ins().store(MemFlags::new(), new, $ptr, 0);
    $ret.write_cvalue($fx, CValue::by_val(old, $fx.layout_of($T)));
    crate::atomic_shim::unlock_global_lock($fx);
macro atomic_minmax($fx:expr, $cc:expr, <$T:ident> ($ptr:ident, $src:ident) -> $ret:ident) {
    crate::atomic_shim::lock_global_lock($fx);
    let clif_ty = $fx.clif_type($T).unwrap();
    let old = $fx.bcx.ins().load(clif_ty, MemFlags::new(), $ptr, 0);
    let cc_holds = $fx.bcx.ins().icmp($cc, old, $src);
    let new = $fx.bcx.ins().select(cc_holds, old, $src);
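    // `select` keeps `old` when `$cc(old, $src)` holds and takes `$src`
    // otherwise, so e.g. `IntCC::SignedGreaterThan` yields a signed max.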
    $fx.bcx.ins().store(MemFlags::new(), new, $ptr, 0);
    let ret_val = CValue::by_val(old, $ret.layout());
    $ret.write_cvalue($fx, ret_val);
    crate::atomic_shim::unlock_global_lock($fx);
macro validate_atomic_type($fx:ident, $intrinsic:ident, $span:ident, $ty:expr) {
        ty::Uint(_) | ty::Int(_) => {}
            $fx.tcx.sess.span_err($span, &format!("`{}` intrinsic: expected basic integer type, found `{:?}`", $intrinsic, $ty));
            // Prevent verifier error
            crate::trap::trap_unreachable($fx, "compilation should not have succeeded");
macro validate_simd_type($fx:ident, $intrinsic:ident, $span:ident, $ty:expr) {
        $fx.tcx.sess.span_err($span, &format!("invalid monomorphization of `{}` intrinsic: expected SIMD input type, found non-SIMD `{}`", $intrinsic, $ty));
        // Prevent verifier error
        crate::trap::trap_unreachable($fx, "compilation should not have succeeded");
fn lane_type_and_count<'tcx>(
    layout: TyAndLayout<'tcx>,
) -> (TyAndLayout<'tcx>, u16) {
    assert!(layout.ty.is_simd());
    let lane_count = match layout.fields {
        rustc_target::abi::FieldsShape::Array { stride: _, count } => u16::try_from(count).unwrap(),
        _ => unreachable!("lane_type_and_count({:?})", layout),
    let lane_layout = layout.field(&ty::layout::LayoutCx {
        param_env: ParamEnv::reveal_all(),
    (lane_layout, lane_count)
pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> Option<Type> {
    let (element, count) = match &layout.abi {
        Abi::Vector { element, count } => (element.clone(), *count),
    match scalar_to_clif_type(tcx, element).by(u16::try_from(count).unwrap()) {
        // Cranelift currently only implements icmp for 128-bit vectors.
        Some(vector_ty) if vector_ty.bits() == 128 => Some(vector_ty),
fn simd_for_each_lane<'tcx, B: Backend>(
    fx: &mut FunctionCx<'_, 'tcx, B>,
        &mut FunctionCx<'_, 'tcx, B>,
    let layout = val.layout();
    let (lane_layout, lane_count) = lane_type_and_count(fx.tcx, layout);
    let (ret_lane_layout, ret_lane_count) = lane_type_and_count(fx.tcx, ret.layout());
    assert_eq!(lane_count, ret_lane_count);
    for lane_idx in 0..lane_count {
        let lane_idx = mir::Field::new(lane_idx.try_into().unwrap());
        let lane = val.value_field(fx, lane_idx).load_scalar(fx);
        let res_lane = f(fx, lane_layout, ret_lane_layout, lane);
        ret.place_field(fx, lane_idx).write_cvalue(fx, res_lane);
fn simd_pair_for_each_lane<'tcx, B: Backend>(
    fx: &mut FunctionCx<'_, 'tcx, B>,
        &mut FunctionCx<'_, 'tcx, B>,
    assert_eq!(x.layout(), y.layout());
    let layout = x.layout();
    let (lane_layout, lane_count) = lane_type_and_count(fx.tcx, layout);
    let (ret_lane_layout, ret_lane_count) = lane_type_and_count(fx.tcx, ret.layout());
    assert_eq!(lane_count, ret_lane_count);
    for lane in 0..lane_count {
        let lane = mir::Field::new(lane.try_into().unwrap());
        let x_lane = x.value_field(fx, lane).load_scalar(fx);
        let y_lane = y.value_field(fx, lane).load_scalar(fx);
        let res_lane = f(fx, lane_layout, ret_lane_layout, x_lane, y_lane);
        ret.place_field(fx, lane).write_cvalue(fx, res_lane);
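// Expands a boolean comparison result to `0` or `!0` (all bits set) in the
// given lane layout; float lanes go through the matching integer type and are
// bitcast back.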
fn bool_to_zero_or_max_uint<'tcx>(
    fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
    layout: TyAndLayout<'tcx>,
    let ty = fx.clif_type(layout.ty).unwrap();
    let int_ty = match ty {
        types::F32 => types::I32,
        types::F64 => types::I64,
    let val = fx.bcx.ins().bint(int_ty, val);
    let mut res = fx.bcx.ins().ineg(val);
        res = fx.bcx.ins().bitcast(ty, res);
    CValue::by_val(res, layout)
    ($fx:expr, $cc:ident($x:ident, $y:ident) -> $ret:ident) => {
        let vector_ty = clif_vector_type($fx.tcx, $x.layout());
        if let Some(vector_ty) = vector_ty {
            let x = $x.load_scalar($fx);
            let y = $y.load_scalar($fx);
            let val = $fx.bcx.ins().icmp(IntCC::$cc, x, y);
            // HACK This depends on the fact that icmp for vectors represents bools as 0 and !0, not 0 and 1.
            let val = $fx.bcx.ins().raw_bitcast(vector_ty, val);
            $ret.write_cvalue($fx, CValue::by_val(val, $ret.layout()));
            simd_pair_for_each_lane(
                |fx, lane_layout, res_lane_layout, x_lane, y_lane| {
                    let res_lane = match lane_layout.ty.kind {
                        ty::Uint(_) | ty::Int(_) => fx.bcx.ins().icmp(IntCC::$cc, x_lane, y_lane),
                        _ => unreachable!("{:?}", lane_layout.ty),
                    bool_to_zero_or_max_uint(fx, res_lane_layout, res_lane)
    ($fx:expr, $cc_u:ident|$cc_s:ident($x:ident, $y:ident) -> $ret:ident) => {
        // FIXME use vector icmp when possible
        simd_pair_for_each_lane(
            |fx, lane_layout, res_lane_layout, x_lane, y_lane| {
                let res_lane = match lane_layout.ty.kind {
                    ty::Uint(_) => fx.bcx.ins().icmp(IntCC::$cc_u, x_lane, y_lane),
                    ty::Int(_) => fx.bcx.ins().icmp(IntCC::$cc_s, x_lane, y_lane),
                    _ => unreachable!("{:?}", lane_layout.ty),
                bool_to_zero_or_max_uint(fx, res_lane_layout, res_lane)
macro simd_int_binop {
    ($fx:expr, $op:ident($x:ident, $y:ident) -> $ret:ident) => {
        simd_int_binop!($fx, $op|$op($x, $y) -> $ret);
    ($fx:expr, $op_u:ident|$op_s:ident($x:ident, $y:ident) -> $ret:ident) => {
        simd_pair_for_each_lane(
            |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
                let res_lane = match lane_layout.ty.kind {
                    ty::Uint(_) => fx.bcx.ins().$op_u(x_lane, y_lane),
                    ty::Int(_) => fx.bcx.ins().$op_s(x_lane, y_lane),
                    _ => unreachable!("{:?}", lane_layout.ty),
                CValue::by_val(res_lane, ret_lane_layout)
macro simd_int_flt_binop {
    ($fx:expr, $op:ident|$op_f:ident($x:ident, $y:ident) -> $ret:ident) => {
        simd_int_flt_binop!($fx, $op|$op|$op_f($x, $y) -> $ret);
    ($fx:expr, $op_u:ident|$op_s:ident|$op_f:ident($x:ident, $y:ident) -> $ret:ident) => {
        simd_pair_for_each_lane(
            |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
                let res_lane = match lane_layout.ty.kind {
                    ty::Uint(_) => fx.bcx.ins().$op_u(x_lane, y_lane),
                    ty::Int(_) => fx.bcx.ins().$op_s(x_lane, y_lane),
                    ty::Float(_) => fx.bcx.ins().$op_f(x_lane, y_lane),
                    _ => unreachable!("{:?}", lane_layout.ty),
                CValue::by_val(res_lane, ret_lane_layout)
macro simd_flt_binop($fx:expr, $op:ident($x:ident, $y:ident) -> $ret:ident) {
    simd_pair_for_each_lane(
        |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
            let res_lane = match lane_layout.ty.kind {
                ty::Float(_) => fx.bcx.ins().$op(x_lane, y_lane),
                _ => unreachable!("{:?}", lane_layout.ty),
            CValue::by_val(res_lane, ret_lane_layout)
pub(crate) fn codegen_intrinsic_call<'tcx>(
    fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
    instance: Instance<'tcx>,
    args: &[mir::Operand<'tcx>],
    destination: Option<(CPlace<'tcx>, BasicBlock)>,
    let def_id = instance.def_id();
    let substs = instance.substs;
    let intrinsic = fx.tcx.item_name(def_id).as_str();
    let intrinsic = &intrinsic[..];
    let ret = match destination {
        Some((place, _)) => place,
            // Insert non-returning intrinsics here
                trap_panic(fx, "Called intrinsic::abort.");
                trap_unreachable(fx, "[corruption] Called intrinsic::unreachable.");
                    "[corruption] Transmuting to uninhabited type.",
                _ => unimplemented!("unsupported intrinsic {}", intrinsic),
    if intrinsic.starts_with("simd_") {
        self::simd::codegen_simd_intrinsic_call(fx, instance, args, ret, span);
        let ret_block = fx.get_block(destination.expect("SIMD intrinsics don't diverge").1);
        fx.bcx.ins().jump(ret_block, &[]);
    let usize_layout = fx.layout_of(fx.tcx.types.usize);
    call_intrinsic_match! {
        fx, intrinsic, substs, ret, destination, args,
        expf32(flt) -> f32 => expf,
        expf64(flt) -> f64 => exp,
        exp2f32(flt) -> f32 => exp2f,
        exp2f64(flt) -> f64 => exp2,
        sqrtf32(flt) -> f32 => sqrtf,
        sqrtf64(flt) -> f64 => sqrt,
        powif32(a, x) -> f32 => __powisf2, // compiler-builtins
        powif64(a, x) -> f64 => __powidf2, // compiler-builtins
        powf32(a, x) -> f32 => powf,
        powf64(a, x) -> f64 => pow,
        logf32(flt) -> f32 => logf,
        logf64(flt) -> f64 => log,
        log2f32(flt) -> f32 => log2f,
        log2f64(flt) -> f64 => log2,
        log10f32(flt) -> f32 => log10f,
        log10f64(flt) -> f64 => log10,
        fabsf32(flt) -> f32 => fabsf,
        fabsf64(flt) -> f64 => fabs,
        fmaf32(x, y, z) -> f32 => fmaf,
        fmaf64(x, y, z) -> f64 => fma,
        copysignf32(x, y) -> f32 => copysignf,
        copysignf64(x, y) -> f64 => copysign,
        // FIXME use clif insts
        floorf32(flt) -> f32 => floorf,
        floorf64(flt) -> f64 => floor,
        ceilf32(flt) -> f32 => ceilf,
        ceilf64(flt) -> f64 => ceil,
        truncf32(flt) -> f32 => truncf,
        truncf64(flt) -> f64 => trunc,
        roundf32(flt) -> f32 => roundf,
        roundf64(flt) -> f64 => round,
        sinf32(flt) -> f32 => sinf,
        sinf64(flt) -> f64 => sin,
        cosf32(flt) -> f32 => cosf,
        cosf64(flt) -> f64 => cos,
        tanf32(flt) -> f32 => tanf,
        tanf64(flt) -> f64 => tan,
        fx, intrinsic, substs, args,
            fx.tcx.sess.span_fatal(span, &format!("unsupported intrinsic {}", intrinsic));
        likely | unlikely, (c a) {
            ret.write_cvalue(fx, a);
            fx.bcx.ins().debugtrap();
        copy | copy_nonoverlapping, <elem_ty> (v src, v dst, v count) {
            let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
                .iconst(fx.pointer_type, elem_size as i64);
            assert_eq!(args.len(), 3);
            let byte_amount = fx.bcx.ins().imul(count, elem_size);
            if intrinsic.contains("nonoverlapping") {
                // FIXME emit_small_memcpy
                fx.bcx.call_memcpy(fx.module.target_config(), dst, src, byte_amount);
                // FIXME emit_small_memmove
                fx.bcx.call_memmove(fx.module.target_config(), dst, src, byte_amount);
        // NOTE: the volatile variants have src and dst swapped
        volatile_copy_memory | volatile_copy_nonoverlapping_memory, <elem_ty> (v dst, v src, v count) {
            let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
                .iconst(fx.pointer_type, elem_size as i64);
            assert_eq!(args.len(), 3);
            let byte_amount = fx.bcx.ins().imul(count, elem_size);
            // FIXME make the copy actually volatile when using emit_small_mem{cpy,move}
            if intrinsic.contains("nonoverlapping") {
                // FIXME emit_small_memcpy
                fx.bcx.call_memcpy(fx.module.target_config(), dst, src, byte_amount);
                // FIXME emit_small_memmove
                fx.bcx.call_memmove(fx.module.target_config(), dst, src, byte_amount);
        discriminant_value, (c ptr) {
            let pointee_layout = fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
            let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), pointee_layout);
            let discr = crate::discriminant::codegen_get_discriminant(fx, val, ret.layout());
            ret.write_cvalue(fx, discr);
        size_of_val, <T> (c ptr) {
            let layout = fx.layout_of(T);
            let size = if layout.is_unsized() {
                let (_ptr, info) = ptr.load_scalar_pair(fx);
                let (size, _align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
                    .iconst(fx.pointer_type, layout.size.bytes() as i64)
            ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
        min_align_of_val, <T> (c ptr) {
            let layout = fx.layout_of(T);
            let align = if layout.is_unsized() {
                let (_ptr, info) = ptr.load_scalar_pair(fx);
                let (_size, align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
                    .iconst(fx.pointer_type, layout.align.abi.bytes() as i64)
            ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
        _ if intrinsic.starts_with("unchecked_") || intrinsic == "exact_div", (c x, c y) {
            // FIXME trap on overflow
            let bin_op = match intrinsic {
                "unchecked_add" => BinOp::Add,
                "unchecked_sub" => BinOp::Sub,
                "unchecked_div" | "exact_div" => BinOp::Div,
                "unchecked_rem" => BinOp::Rem,
                "unchecked_shl" => BinOp::Shl,
                "unchecked_shr" => BinOp::Shr,
                _ => unreachable!("intrinsic {}", intrinsic),
            let res = crate::num::trans_int_binop(fx, bin_op, x, y);
            ret.write_cvalue(fx, res);
        _ if intrinsic.ends_with("_with_overflow"), (c x, c y) {
            assert_eq!(x.layout().ty, y.layout().ty);
            let bin_op = match intrinsic {
                "add_with_overflow" => BinOp::Add,
                "sub_with_overflow" => BinOp::Sub,
                "mul_with_overflow" => BinOp::Mul,
                _ => unreachable!("intrinsic {}", intrinsic),
            let res = crate::num::trans_checked_int_binop(
            ret.write_cvalue(fx, res);
        _ if intrinsic.starts_with("wrapping_"), (c x, c y) {
            assert_eq!(x.layout().ty, y.layout().ty);
            let bin_op = match intrinsic {
                "wrapping_add" => BinOp::Add,
                "wrapping_sub" => BinOp::Sub,
                "wrapping_mul" => BinOp::Mul,
                _ => unreachable!("intrinsic {}", intrinsic),
            let res = crate::num::trans_int_binop(
            ret.write_cvalue(fx, res);
        _ if intrinsic.starts_with("saturating_"), <T> (c lhs, c rhs) {
            assert_eq!(lhs.layout().ty, rhs.layout().ty);
            let bin_op = match intrinsic {
                "saturating_add" => BinOp::Add,
                "saturating_sub" => BinOp::Sub,
                _ => unreachable!("intrinsic {}", intrinsic),
            let signed = type_sign(T);
            let checked_res = crate::num::trans_checked_int_binop(
            let (val, has_overflow) = checked_res.load_scalar_pair(fx);
            let clif_ty = fx.clif_type(T).unwrap();
            // `select.i8` is not implemented by Cranelift.
            let has_overflow = fx.bcx.ins().uextend(types::I32, has_overflow);
            let (min, max) = type_min_max_value(&mut fx.bcx, clif_ty, signed);
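            // On overflow, unsigned ops saturate to the fixed bound, while
            // signed ops saturate toward the sign of `rhs`: adding a
            // non-negative rhs overflows toward `max`, subtracting one
            // overflows toward `min`, and vice versa.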
            let val = match (intrinsic, signed) {
                ("saturating_add", false) => fx.bcx.ins().select(has_overflow, max, val),
                ("saturating_sub", false) => fx.bcx.ins().select(has_overflow, min, val),
                ("saturating_add", true) => {
                    let rhs = rhs.load_scalar(fx);
                    let rhs_ge_zero = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
                    let sat_val = fx.bcx.ins().select(rhs_ge_zero, max, min);
                    fx.bcx.ins().select(has_overflow, sat_val, val)
                ("saturating_sub", true) => {
                    let rhs = rhs.load_scalar(fx);
                    let rhs_ge_zero = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
                    let sat_val = fx.bcx.ins().select(rhs_ge_zero, min, max);
                    fx.bcx.ins().select(has_overflow, sat_val, val)
            let res = CValue::by_val(val, fx.layout_of(T));
            ret.write_cvalue(fx, res);
        rotate_left, <T>(v x, v y) {
            let layout = fx.layout_of(T);
            let res = fx.bcx.ins().rotl(x, y);
            ret.write_cvalue(fx, CValue::by_val(res, layout));
        rotate_right, <T>(v x, v y) {
            let layout = fx.layout_of(T);
            let res = fx.bcx.ins().rotr(x, y);
            ret.write_cvalue(fx, CValue::by_val(res, layout));
        // The only difference between offset and arith_offset is regarding UB. Because Cranelift
        // doesn't have UB, both are codegen'ed the same way.
        offset | arith_offset, (c base, v offset) {
            let pointee_ty = base.layout().ty.builtin_deref(true).unwrap().ty;
            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
            let ptr_diff = fx.bcx.ins().imul_imm(offset, pointee_size as i64);
            let base_val = base.load_scalar(fx);
            let res = fx.bcx.ins().iadd(base_val, ptr_diff);
            ret.write_cvalue(fx, CValue::by_val(res, base.layout()));
        transmute, (c from) {
            ret.write_cvalue_transmute(fx, from);
        write_bytes | volatile_set_memory, (c dst, v val, v count) {
            let pointee_ty = dst.layout().ty.builtin_deref(true).unwrap().ty;
            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
            let count = fx.bcx.ins().imul_imm(count, pointee_size as i64);
            let dst_ptr = dst.load_scalar(fx);
            // FIXME make the memset actually volatile when switching to emit_small_memset
            // FIXME use emit_small_memset
            fx.bcx.call_memset(fx.module.target_config(), dst_ptr, val, count);
        ctlz | ctlz_nonzero, <T> (v arg) {
            // FIXME trap on `ctlz_nonzero` with zero arg.
            let res = if T == fx.tcx.types.u128 || T == fx.tcx.types.i128 {
                // FIXME verify this algorithm is correct
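                // For 128-bit ints: if the high half is zero, the result is
                // 64 plus the leading zeros of the low half; otherwise it is
                // the leading zeros of the high half.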
                let (lsb, msb) = fx.bcx.ins().isplit(arg);
                let lsb_lz = fx.bcx.ins().clz(lsb);
                let msb_lz = fx.bcx.ins().clz(msb);
                let msb_is_zero = fx.bcx.ins().icmp_imm(IntCC::Equal, msb, 0);
                let lsb_lz_plus_64 = fx.bcx.ins().iadd_imm(lsb_lz, 64);
                let res = fx.bcx.ins().select(msb_is_zero, lsb_lz_plus_64, msb_lz);
                fx.bcx.ins().uextend(types::I128, res)
                fx.bcx.ins().clz(arg)
            let res = CValue::by_val(res, fx.layout_of(T));
            ret.write_cvalue(fx, res);
        cttz | cttz_nonzero, <T> (v arg) {
            // FIXME trap on `cttz_nonzero` with zero arg.
            let res = if T == fx.tcx.types.u128 || T == fx.tcx.types.i128 {
                // FIXME verify this algorithm is correct
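                // Mirror image of the ctlz case: if the low half is zero, the
                // result is 64 plus the trailing zeros of the high half;
                // otherwise it is the trailing zeros of the low half.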
                let (lsb, msb) = fx.bcx.ins().isplit(arg);
                let lsb_tz = fx.bcx.ins().ctz(lsb);
                let msb_tz = fx.bcx.ins().ctz(msb);
                let lsb_is_zero = fx.bcx.ins().icmp_imm(IntCC::Equal, lsb, 0);
                let msb_tz_plus_64 = fx.bcx.ins().iadd_imm(msb_tz, 64);
                let res = fx.bcx.ins().select(lsb_is_zero, msb_tz_plus_64, lsb_tz);
                fx.bcx.ins().uextend(types::I128, res)
                fx.bcx.ins().ctz(arg)
            let res = CValue::by_val(res, fx.layout_of(T));
            ret.write_cvalue(fx, res);
            let res = fx.bcx.ins().popcnt(arg);
            let res = CValue::by_val(res, fx.layout_of(T));
            ret.write_cvalue(fx, res);
        bitreverse, <T> (v arg) {
            let res = fx.bcx.ins().bitrev(arg);
            let res = CValue::by_val(res, fx.layout_of(T));
            ret.write_cvalue(fx, res);
            // FIXME(CraneStation/cranelift#794) add bswap instruction to cranelift
            fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
                match bcx.func.dfg.value_type(v) {
                    // https://code.woboq.org/gcc/include/bits/byteswap.h.html
                        let tmp1 = bcx.ins().ishl_imm(v, 8);
                        let n1 = bcx.ins().band_imm(tmp1, 0xFF00);
                        let tmp2 = bcx.ins().ushr_imm(v, 8);
                        let n2 = bcx.ins().band_imm(tmp2, 0x00FF);
                        bcx.ins().bor(n1, n2)
                        let tmp1 = bcx.ins().ishl_imm(v, 24);
                        let n1 = bcx.ins().band_imm(tmp1, 0xFF00_0000);
                        let tmp2 = bcx.ins().ishl_imm(v, 8);
                        let n2 = bcx.ins().band_imm(tmp2, 0x00FF_0000);
                        let tmp3 = bcx.ins().ushr_imm(v, 8);
                        let n3 = bcx.ins().band_imm(tmp3, 0x0000_FF00);
                        let tmp4 = bcx.ins().ushr_imm(v, 24);
                        let n4 = bcx.ins().band_imm(tmp4, 0x0000_00FF);
                        let or_tmp1 = bcx.ins().bor(n1, n2);
                        let or_tmp2 = bcx.ins().bor(n3, n4);
                        bcx.ins().bor(or_tmp1, or_tmp2)
                        let tmp1 = bcx.ins().ishl_imm(v, 56);
                        let n1 = bcx.ins().band_imm(tmp1, 0xFF00_0000_0000_0000u64 as i64);
                        let tmp2 = bcx.ins().ishl_imm(v, 40);
                        let n2 = bcx.ins().band_imm(tmp2, 0x00FF_0000_0000_0000u64 as i64);
                        let tmp3 = bcx.ins().ishl_imm(v, 24);
                        let n3 = bcx.ins().band_imm(tmp3, 0x0000_FF00_0000_0000u64 as i64);
                        let tmp4 = bcx.ins().ishl_imm(v, 8);
                        let n4 = bcx.ins().band_imm(tmp4, 0x0000_00FF_0000_0000u64 as i64);
                        let tmp5 = bcx.ins().ushr_imm(v, 8);
                        let n5 = bcx.ins().band_imm(tmp5, 0x0000_0000_FF00_0000u64 as i64);
                        let tmp6 = bcx.ins().ushr_imm(v, 24);
                        let n6 = bcx.ins().band_imm(tmp6, 0x0000_0000_00FF_0000u64 as i64);
                        let tmp7 = bcx.ins().ushr_imm(v, 40);
                        let n7 = bcx.ins().band_imm(tmp7, 0x0000_0000_0000_FF00u64 as i64);
                        let tmp8 = bcx.ins().ushr_imm(v, 56);
                        let n8 = bcx.ins().band_imm(tmp8, 0x0000_0000_0000_00FFu64 as i64);
                        let or_tmp1 = bcx.ins().bor(n1, n2);
                        let or_tmp2 = bcx.ins().bor(n3, n4);
                        let or_tmp3 = bcx.ins().bor(n5, n6);
                        let or_tmp4 = bcx.ins().bor(n7, n8);
                        let or_tmp5 = bcx.ins().bor(or_tmp1, or_tmp2);
                        let or_tmp6 = bcx.ins().bor(or_tmp3, or_tmp4);
                        bcx.ins().bor(or_tmp5, or_tmp6)
                        let (lo, hi) = bcx.ins().isplit(v);
                        let lo = swap(bcx, lo);
                        let hi = swap(bcx, hi);
                        bcx.ins().iconcat(hi, lo)
                    ty => unreachable!("bswap {}", ty),
            let res = CValue::by_val(swap(&mut fx.bcx, arg), fx.layout_of(T));
            ret.write_cvalue(fx, res);
        assert_inhabited | assert_zero_valid | assert_uninit_valid, <T> () {
            let layout = fx.layout_of(T);
            if layout.abi.is_uninhabited() {
                crate::trap::trap_panic(fx, &format!("attempted to instantiate uninhabited type `{}`", T));
            if intrinsic == "assert_zero_valid" && !layout.might_permit_raw_init(fx, /*zero:*/ true).unwrap() {
                crate::trap::trap_panic(fx, &format!("attempted to zero-initialize type `{}`, which is invalid", T));
            if intrinsic == "assert_uninit_valid" && !layout.might_permit_raw_init(fx, /*zero:*/ false).unwrap() {
                crate::trap::trap_panic(fx, &format!("attempted to leave type `{}` uninitialized, which is invalid", T));
        volatile_load | unaligned_volatile_load, (c ptr) {
            // Cranelift treats loads as volatile by default
            // FIXME ignore during stack2reg optimization
            // FIXME correctly handle unaligned_volatile_load
                fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
            let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
            ret.write_cvalue(fx, val);
        volatile_store | unaligned_volatile_store, (v ptr, c val) {
            // Cranelift treats stores as volatile by default
            // FIXME ignore during stack2reg optimization
            // FIXME correctly handle unaligned_volatile_store
            let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
            dest.write_cvalue(fx, val);
        size_of | pref_align_of | min_align_of | needs_drop | type_id | type_name | variant_count, () {
                fx.tcx.const_eval_instance(ParamEnv::reveal_all(), instance, None).unwrap();
            let val = crate::constant::trans_const_value(
            ret.write_cvalue(fx, val);
        ptr_offset_from, <T> (v ptr, v base) {
            let isize_layout = fx.layout_of(fx.tcx.types.isize);
            let pointee_size: u64 = fx.layout_of(T).size.bytes();
            let diff = fx.bcx.ins().isub(ptr, base);
            // FIXME this can be an exact division.
            let val = CValue::by_val(fx.bcx.ins().sdiv_imm(diff, pointee_size as i64), isize_layout);
            ret.write_cvalue(fx, val);
        ptr_guaranteed_eq, (c a, c b) {
            let val = crate::num::trans_ptr_binop(fx, BinOp::Eq, a, b);
            ret.write_cvalue(fx, val);
        ptr_guaranteed_ne, (c a, c b) {
            let val = crate::num::trans_ptr_binop(fx, BinOp::Ne, a, b);
            ret.write_cvalue(fx, val);
        caller_location, () {
            let caller_location = fx.get_caller_location(span);
            ret.write_cvalue(fx, caller_location);
        _ if intrinsic.starts_with("atomic_fence"), () {
            crate::atomic_shim::lock_global_lock(fx);
            crate::atomic_shim::unlock_global_lock(fx);
        _ if intrinsic.starts_with("atomic_singlethreadfence"), () {
            crate::atomic_shim::lock_global_lock(fx);
            crate::atomic_shim::unlock_global_lock(fx);
        _ if intrinsic.starts_with("atomic_load"), (c ptr) {
            crate::atomic_shim::lock_global_lock(fx);
                fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
            validate_atomic_type!(fx, intrinsic, span, inner_layout.ty);
            let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
            ret.write_cvalue(fx, val);
            crate::atomic_shim::unlock_global_lock(fx);
        _ if intrinsic.starts_with("atomic_store"), (v ptr, c val) {
            validate_atomic_type!(fx, intrinsic, span, val.layout().ty);
            crate::atomic_shim::lock_global_lock(fx);
            let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
            dest.write_cvalue(fx, val);
            crate::atomic_shim::unlock_global_lock(fx);
        _ if intrinsic.starts_with("atomic_xchg"), <T> (v ptr, c src) {
            validate_atomic_type!(fx, intrinsic, span, T);
            crate::atomic_shim::lock_global_lock(fx);
            let clif_ty = fx.clif_type(T).unwrap();
            let old = fx.bcx.ins().load(clif_ty, MemFlags::new(), ptr, 0);
            ret.write_cvalue(fx, CValue::by_val(old, fx.layout_of(T)));
            let dest = CPlace::for_ptr(Pointer::new(ptr), src.layout());
            dest.write_cvalue(fx, src);
            crate::atomic_shim::unlock_global_lock(fx);
        _ if intrinsic.starts_with("atomic_cxchg"), <T> (v ptr, c test_old, c new) { // both atomic_cxchg_* and atomic_cxchgweak_*
            validate_atomic_type!(fx, intrinsic, span, T);
            let test_old = test_old.load_scalar(fx);
            let new = new.load_scalar(fx);
            crate::atomic_shim::lock_global_lock(fx);
            let clif_ty = fx.clif_type(T).unwrap();
            let old = fx.bcx.ins().load(clif_ty, MemFlags::new(), ptr, 0);
            let is_eq = fx.bcx.ins().icmp(IntCC::Equal, old, test_old);
            let new = fx.bcx.ins().select(is_eq, new, old); // Keep old if not equal to test_old
            fx.bcx.ins().store(MemFlags::new(), new, ptr, 0);
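            // cxchg returns a pair of the old value and an `i8` success flag.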
            let ret_val = CValue::by_val_pair(old, fx.bcx.ins().bint(types::I8, is_eq), ret.layout());
            ret.write_cvalue(fx, ret_val);
            crate::atomic_shim::unlock_global_lock(fx);
        _ if intrinsic.starts_with("atomic_xadd"), <T> (v ptr, c amount) {
            validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
            let amount = amount.load_scalar(fx);
            atomic_binop_return_old! (fx, iadd<T>(ptr, amount) -> ret);
        _ if intrinsic.starts_with("atomic_xsub"), <T> (v ptr, c amount) {
            validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
            let amount = amount.load_scalar(fx);
            atomic_binop_return_old! (fx, isub<T>(ptr, amount) -> ret);
        _ if intrinsic.starts_with("atomic_and"), <T> (v ptr, c src) {
            validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
            let src = src.load_scalar(fx);
            atomic_binop_return_old! (fx, band<T>(ptr, src) -> ret);
        _ if intrinsic.starts_with("atomic_nand"), <T> (v ptr, c src) {
            validate_atomic_type!(fx, intrinsic, span, T);
            let src = src.load_scalar(fx);
            crate::atomic_shim::lock_global_lock(fx);
            let clif_ty = fx.clif_type(T).unwrap();
            let old = fx.bcx.ins().load(clif_ty, MemFlags::new(), ptr, 0);
            let and = fx.bcx.ins().band(old, src);
            let new = fx.bcx.ins().bnot(and);
            fx.bcx.ins().store(MemFlags::new(), new, ptr, 0);
            ret.write_cvalue(fx, CValue::by_val(old, fx.layout_of(T)));
            crate::atomic_shim::unlock_global_lock(fx);
        _ if intrinsic.starts_with("atomic_or"), <T> (v ptr, c src) {
            validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
            let src = src.load_scalar(fx);
            atomic_binop_return_old! (fx, bor<T>(ptr, src) -> ret);
        _ if intrinsic.starts_with("atomic_xor"), <T> (v ptr, c src) {
            validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
            let src = src.load_scalar(fx);
            atomic_binop_return_old! (fx, bxor<T>(ptr, src) -> ret);
        _ if intrinsic.starts_with("atomic_max"), <T> (v ptr, c src) {
            validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
            let src = src.load_scalar(fx);
            atomic_minmax!(fx, IntCC::SignedGreaterThan, <T> (ptr, src) -> ret);
        _ if intrinsic.starts_with("atomic_umax"), <T> (v ptr, c src) {
            validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
            let src = src.load_scalar(fx);
            atomic_minmax!(fx, IntCC::UnsignedGreaterThan, <T> (ptr, src) -> ret);
        _ if intrinsic.starts_with("atomic_min"), <T> (v ptr, c src) {
            validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
            let src = src.load_scalar(fx);
            atomic_minmax!(fx, IntCC::SignedLessThan, <T> (ptr, src) -> ret);
        _ if intrinsic.starts_with("atomic_umin"), <T> (v ptr, c src) {
            validate_atomic_type!(fx, intrinsic, span, ret.layout().ty);
            let src = src.load_scalar(fx);
            atomic_minmax!(fx, IntCC::UnsignedLessThan, <T> (ptr, src) -> ret);
        minnumf32, (v a, v b) {
            let val = fx.bcx.ins().fmin(a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
            ret.write_cvalue(fx, val);
        minnumf64, (v a, v b) {
            let val = fx.bcx.ins().fmin(a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
            ret.write_cvalue(fx, val);
        maxnumf32, (v a, v b) {
            let val = fx.bcx.ins().fmax(a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
            ret.write_cvalue(fx, val);
        maxnumf64, (v a, v b) {
            let val = fx.bcx.ins().fmax(a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
            ret.write_cvalue(fx, val);
        try, (v f, v data, v _catch_fn) {
            // FIXME once unwinding is supported, change this to actually catch panics
            let f_sig = fx.bcx.func.import_signature(Signature {
                call_conv: CallConv::triple_default(fx.triple()),
                params: vec![AbiParam::new(fx.bcx.func.dfg.value_type(data))],
            fx.bcx.ins().call_indirect(f_sig, f, &[data]);
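            // Without unwinding support nothing can be caught, so
            // unconditionally report success by returning 0.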
            let ret_val = CValue::const_val(fx, ret.layout(), 0);
            ret.write_cvalue(fx, ret_val);
        fadd_fast | fsub_fast | fmul_fast | fdiv_fast | frem_fast, (c x, c y) {
            let res = crate::num::trans_float_binop(fx, match intrinsic {
                "fadd_fast" => BinOp::Add,
                "fsub_fast" => BinOp::Sub,
                "fmul_fast" => BinOp::Mul,
                "fdiv_fast" => BinOp::Div,
                "frem_fast" => BinOp::Rem,
                _ => unreachable!(),
            ret.write_cvalue(fx, res);
        float_to_int_unchecked, (v f) {
            let res = crate::cast::clif_int_or_float_cast(
                fx.clif_type(ret.layout().ty).unwrap(),
                type_sign(ret.layout().ty),
            ret.write_cvalue(fx, CValue::by_val(res, ret.layout()));
    if let Some((_, dest)) = destination {
        let ret_block = fx.get_block(dest);
        fx.bcx.ins().jump(ret_block, &[]);
        trap_unreachable(fx, "[corruption] Diverging intrinsic returned.");