//! Codegen of intrinsics. This includes `extern "rust-intrinsic"`, `extern "platform-intrinsic"`
//! and LLVM intrinsics that have symbol names starting with `llvm.`.
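//!
//! A few illustrative examples of names that end up here (not exhaustive):
//!
//! ```text
//! core::intrinsics::transmute     // extern "rust-intrinsic"
//! simd_add                        // extern "platform-intrinsic"
//! llvm.x86.sse2.pmovmskb.128      // LLVM intrinsic, handled by the `llvm` submodule
//! ```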
macro_rules! intrinsic_pat {
    (_) => {
        _
    };
    ($name:ident) => {
        sym::$name
    };
    (kw.$name:ident) => {
        kw::$name
    };
    ($name:literal) => {
        $name
    };
}

macro_rules! intrinsic_arg {
    (o $fx:expr, $arg:ident) => {};
    (c $fx:expr, $arg:ident) => {
        let $arg = codegen_operand($fx, $arg);
    };
    (v $fx:expr, $arg:ident) => {
        let $arg = codegen_operand($fx, $arg).load_scalar($fx);
    };
}
macro_rules! intrinsic_match {
    ($fx:expr, $intrinsic:expr, $args:expr,
    _ => $unknown:block;
    $(
        $($($name:tt).*)|+ $(if $cond:expr)?, ($($a:ident $arg:ident),*) $content:block;
    )*) => {
        match $intrinsic {
            $(
                $(intrinsic_pat!($($name).*))|* $(if $cond)? => {
                    if let [$($arg),*] = $args {
                        $(intrinsic_arg!($a $fx, $arg);)*
                        $content
                    } else {
                        bug!("wrong number of args for intrinsic {:?}", $intrinsic);
                    }
                }
            )*
            _ => $unknown,
        }
    }
}
mod cpuid;
mod llvm;
mod simd;

pub(crate) use cpuid::codegen_cpuid_call;
pub(crate) use llvm::codegen_llvm_intrinsic_call;

use rustc_middle::ty::print::with_no_trimmed_paths;
use rustc_middle::ty::subst::SubstsRef;
use rustc_span::symbol::{kw, sym, Symbol};
use rustc_target::abi::InitKind;

use crate::prelude::*;
use cranelift_codegen::ir::AtomicRmwOp;

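/// Reports an error when an atomic intrinsic is instantiated with a type other
/// than a basic integer or raw pointer, then traps so that the (unreachable)
/// generated code still passes the Cranelift verifier.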
fn report_atomic_type_validation_error<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    intrinsic: Symbol,
    span: Span,
    ty: Ty<'tcx>,
) {
    fx.tcx.sess.span_err(
        span,
        &format!(
            "`{}` intrinsic: expected basic integer or raw pointer type, found `{:?}`",
            intrinsic, ty
        ),
    );
    // Prevent verifier error
    crate::trap::trap_unreachable(fx, "compilation should not have succeeded");
}

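/// Returns the Cranelift vector [`Type`] corresponding to `layout`, or `None`
/// if the layout has no vector ABI or the vector size is one Cranelift cannot
/// handle yet (everything except 128-bit vectors at the moment).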
pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> Option<Type> {
    let (element, count) = match layout.abi {
        Abi::Vector { element, count } => (element, count),
        _ => return None,
    };

    match scalar_to_clif_type(tcx, element).by(u16::try_from(count).unwrap()) {
        // Cranelift currently only implements icmp for 128bit vectors.
        Some(vector_ty) if vector_ty.bits() == 128 => Some(vector_ty),
        _ => None,
    }
}

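/// Applies `f` to every lane of `val`, writing each result lane into the
/// corresponding lane of `ret`; the lane counts of `val` and `ret` must match.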
fn simd_for_each_lane<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    val: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Ty<'tcx>, Value) -> Value,
) {
    let layout = val.layout();

    let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
    let lane_layout = fx.layout_of(lane_ty);
    let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
    let ret_lane_layout = fx.layout_of(ret_lane_ty);
    assert_eq!(lane_count, ret_lane_count);

    for lane_idx in 0..lane_count {
        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);

        let res_lane = f(fx, lane_layout.ty, ret_lane_layout.ty, lane);
        let res_lane = CValue::by_val(res_lane, ret_lane_layout);

        ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
    }
}

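/// Binary lane-wise counterpart of `simd_for_each_lane`: applies `f` to
/// matching lanes of `x` and `y` (which must share a layout) and writes each
/// result lane into `ret`.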
fn simd_pair_for_each_lane<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    x: CValue<'tcx>,
    y: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Ty<'tcx>, Value, Value) -> Value,
) {
    assert_eq!(x.layout(), y.layout());
    let layout = x.layout();

    let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
    let lane_layout = fx.layout_of(lane_ty);
    let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
    let ret_lane_layout = fx.layout_of(ret_lane_ty);
    assert_eq!(lane_count, ret_lane_count);

    for lane_idx in 0..lane_count {
        let x_lane = x.value_lane(fx, lane_idx).load_scalar(fx);
        let y_lane = y.value_lane(fx, lane_idx).load_scalar(fx);

        let res_lane = f(fx, lane_layout.ty, ret_lane_layout.ty, x_lane, y_lane);
        let res_lane = CValue::by_val(res_lane, ret_lane_layout);

        ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
    }
}

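/// Horizontally folds all lanes of `val` with `f`, seeding the fold with `acc`
/// if provided and with lane 0 otherwise; e.g. folding with an `iadd` yields a
/// horizontal sum. The scalar result is written to `ret`.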
fn simd_reduce<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    val: CValue<'tcx>,
    acc: Option<Value>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Value, Value) -> Value,
) {
    let (lane_count, lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
    let lane_layout = fx.layout_of(lane_ty);
    assert_eq!(lane_layout, ret.layout());

    let (mut res_val, start_lane) =
        if let Some(acc) = acc { (acc, 0) } else { (val.value_lane(fx, 0).load_scalar(fx), 1) };
    for lane_idx in start_lane..lane_count {
        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
        res_val = f(fx, lane_layout.ty, res_val, lane);
    }
    let res = CValue::by_val(res_val, lane_layout);
    ret.write_cvalue(fx, res);
}

// FIXME move all uses to `simd_reduce`
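/// Like `simd_reduce`, but treats lanes as booleans: each lane is masked to its
/// low bit before `f` is applied, and the final value is narrowed to `i8`.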
fn simd_reduce_bool<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    val: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Value, Value) -> Value,
) {
    let (lane_count, _lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
    assert!(ret.layout().ty.is_bool());

    let res_val = val.value_lane(fx, 0).load_scalar(fx);
    let mut res_val = fx.bcx.ins().band_imm(res_val, 1); // mask to boolean
    for lane_idx in 1..lane_count {
        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
        let lane = fx.bcx.ins().band_imm(lane, 1); // mask to boolean
        res_val = f(fx, res_val, lane);
    }
    let res_val = if fx.bcx.func.dfg.value_type(res_val) != types::I8 {
        fx.bcx.ins().ireduce(types::I8, res_val)
    } else {
        res_val
    };
    let res = CValue::by_val(res_val, ret.layout());
    ret.write_cvalue(fx, res);
}

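/// Widens a boolean (0 or 1) to a value of `ty` that is all zeros or all ones,
/// e.g. `true` becomes `0xFFFF_FFFF` for a 32-bit integer: `bint` gives 0/1 and
/// `ineg` turns the 1 into -1 (all ones). Float types get the mask built in the
/// same-sized integer type and bitcast over.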
fn bool_to_zero_or_max_uint<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    ty: Ty<'tcx>,
    val: Value,
) -> Value {
    let ty = fx.clif_type(ty).unwrap();

    let int_ty = match ty {
        types::F32 => types::I32,
        types::F64 => types::I64,
        ty => ty,
    };

    let val = fx.bcx.ins().bint(int_ty, val);
    let mut res = fx.bcx.ins().ineg(val);

    if ty.is_float() {
        res = fx.bcx.ins().bitcast(ty, res);
    }

    res
}

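/// Main entry point for intrinsic calls: non-returning intrinsics are lowered
/// inline, everything else is dispatched to the SIMD, float, or regular
/// intrinsic handler, followed by a jump to the return block.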
pub(crate) fn codegen_intrinsic_call<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    instance: Instance<'tcx>,
    args: &[mir::Operand<'tcx>],
    destination: CPlace<'tcx>,
    target: Option<BasicBlock>,
    source_info: mir::SourceInfo,
) {
    let intrinsic = fx.tcx.item_name(instance.def_id());
    let substs = instance.substs;

    let target = if let Some(target) = target {
        target
    } else {
        // Insert non-returning intrinsics here
        match intrinsic {
            sym::abort => {
                fx.bcx.ins().trap(TrapCode::User(0));
            }
            sym::transmute => {
                crate::base::codegen_panic(fx, "Transmuting to uninhabited type.", source_info);
            }
            _ => unimplemented!("unsupported intrinsic {}", intrinsic),
        }
        return;
    };
    if intrinsic.as_str().starts_with("simd_") {
        self::simd::codegen_simd_intrinsic_call(

        let ret_block = fx.get_block(target);
        fx.bcx.ins().jump(ret_block, &[]);
    } else if codegen_float_intrinsic_call(fx, intrinsic, args, destination) {
        let ret_block = fx.get_block(target);
        fx.bcx.ins().jump(ret_block, &[]);
    } else {
        codegen_regular_intrinsic_call(
            fx, instance, intrinsic, substs, args, destination, Some(target), source_info,
        );
    }
}

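/// Lowers float intrinsics to calls into libm (or compiler-builtins for the
/// `powi` family); returns `false` if the intrinsic is not a known float
/// intrinsic so the caller can fall back to the regular handler.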
fn codegen_float_intrinsic_call<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    intrinsic: Symbol,
    args: &[mir::Operand<'tcx>],
    ret: CPlace<'tcx>,
) -> bool {
    let (name, arg_count, ty) = match intrinsic {
        sym::expf32 => ("expf", 1, fx.tcx.types.f32),
        sym::expf64 => ("exp", 1, fx.tcx.types.f64),
        sym::exp2f32 => ("exp2f", 1, fx.tcx.types.f32),
        sym::exp2f64 => ("exp2", 1, fx.tcx.types.f64),
        sym::sqrtf32 => ("sqrtf", 1, fx.tcx.types.f32),
        sym::sqrtf64 => ("sqrt", 1, fx.tcx.types.f64),
        sym::powif32 => ("__powisf2", 2, fx.tcx.types.f32), // compiler-builtins
        sym::powif64 => ("__powidf2", 2, fx.tcx.types.f64), // compiler-builtins
        sym::powf32 => ("powf", 2, fx.tcx.types.f32),
        sym::powf64 => ("pow", 2, fx.tcx.types.f64),
        sym::logf32 => ("logf", 1, fx.tcx.types.f32),
        sym::logf64 => ("log", 1, fx.tcx.types.f64),
        sym::log2f32 => ("log2f", 1, fx.tcx.types.f32),
        sym::log2f64 => ("log2", 1, fx.tcx.types.f64),
        sym::log10f32 => ("log10f", 1, fx.tcx.types.f32),
        sym::log10f64 => ("log10", 1, fx.tcx.types.f64),
        sym::fabsf32 => ("fabsf", 1, fx.tcx.types.f32),
        sym::fabsf64 => ("fabs", 1, fx.tcx.types.f64),
        sym::fmaf32 => ("fmaf", 3, fx.tcx.types.f32),
        sym::fmaf64 => ("fma", 3, fx.tcx.types.f64),
        sym::copysignf32 => ("copysignf", 2, fx.tcx.types.f32),
        sym::copysignf64 => ("copysign", 2, fx.tcx.types.f64),
        sym::floorf32 => ("floorf", 1, fx.tcx.types.f32),
        sym::floorf64 => ("floor", 1, fx.tcx.types.f64),
        sym::ceilf32 => ("ceilf", 1, fx.tcx.types.f32),
        sym::ceilf64 => ("ceil", 1, fx.tcx.types.f64),
        sym::truncf32 => ("truncf", 1, fx.tcx.types.f32),
        sym::truncf64 => ("trunc", 1, fx.tcx.types.f64),
        sym::roundf32 => ("roundf", 1, fx.tcx.types.f32),
        sym::roundf64 => ("round", 1, fx.tcx.types.f64),
        sym::sinf32 => ("sinf", 1, fx.tcx.types.f32),
        sym::sinf64 => ("sin", 1, fx.tcx.types.f64),
        sym::cosf32 => ("cosf", 1, fx.tcx.types.f32),
        sym::cosf64 => ("cos", 1, fx.tcx.types.f64),
        _ => return false,
    };

    if args.len() != arg_count {
        bug!("wrong number of args for intrinsic {:?}", intrinsic);
    }

    let (a, b, c);
    let args = match args {
        [x] => {
            a = [codegen_operand(fx, x)];
            &a as &[_]
        }
        [x, y] => {
            b = [codegen_operand(fx, x), codegen_operand(fx, y)];
            &b
        }
        [x, y, z] => {
            c = [codegen_operand(fx, x), codegen_operand(fx, y), codegen_operand(fx, z)];
            &c
        }
        _ => unreachable!(),
    };

    let res = fx.easy_call(name, &args, ty);
    ret.write_cvalue(fx, res);

    true
}

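/// Lowers all remaining intrinsics through `intrinsic_match!`; anything not
/// matched by an arm is reported as an unsupported intrinsic via `span_fatal`.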
fn codegen_regular_intrinsic_call<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    instance: Instance<'tcx>,
    intrinsic: Symbol,
    substs: SubstsRef<'tcx>,
    args: &[mir::Operand<'tcx>],
    ret: CPlace<'tcx>,
    destination: Option<BasicBlock>,
    source_info: mir::SourceInfo,
) {
    let usize_layout = fx.layout_of(fx.tcx.types.usize);

    intrinsic_match! {
        fx, intrinsic, args,
        _ => {
            fx.tcx.sess.span_fatal(source_info.span, &format!("unsupported intrinsic {}", intrinsic));
        };

        assume, (c _a) {};
        likely | unlikely, (c a) {
            ret.write_cvalue(fx, a);
        };
        breakpoint, () {
            fx.bcx.ins().debugtrap();
        };
        copy | copy_nonoverlapping, (v src, v dst, v count) {
            let elem_ty = substs.type_at(0);
            let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
            assert_eq!(args.len(), 3);
            let byte_amount = if elem_size != 1 {
                fx.bcx.ins().imul_imm(count, elem_size as i64)
            } else {
                count
            };

            if intrinsic == sym::copy_nonoverlapping {
                // FIXME emit_small_memcpy
                fx.bcx.call_memcpy(fx.target_config, dst, src, byte_amount);
            } else {
                // FIXME emit_small_memmove
                fx.bcx.call_memmove(fx.target_config, dst, src, byte_amount);
            }
        };
        // NOTE: the volatile variants have src and dst swapped
        volatile_copy_memory | volatile_copy_nonoverlapping_memory, (v dst, v src, v count) {
            let elem_ty = substs.type_at(0);
            let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
            assert_eq!(args.len(), 3);
            let byte_amount = if elem_size != 1 {
                fx.bcx.ins().imul_imm(count, elem_size as i64)
            } else {
                count
            };

            // FIXME make the copy actually volatile when using emit_small_mem{cpy,move}
            if intrinsic == sym::volatile_copy_nonoverlapping_memory {
                // FIXME emit_small_memcpy
                fx.bcx.call_memcpy(fx.target_config, dst, src, byte_amount);
            } else {
                // FIXME emit_small_memmove
                fx.bcx.call_memmove(fx.target_config, dst, src, byte_amount);
            }
        };
        size_of_val, (c ptr) {
            let layout = fx.layout_of(substs.type_at(0));
            let size = if layout.is_unsized() {
                let (_ptr, info) = ptr.load_scalar_pair(fx);
                let (size, _align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
                size
            } else {
                fx.bcx.ins().iconst(fx.pointer_type, layout.size.bytes() as i64)
            };
            ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
        };
        min_align_of_val, (c ptr) {
            let layout = fx.layout_of(substs.type_at(0));
            let align = if layout.is_unsized() {
                let (_ptr, info) = ptr.load_scalar_pair(fx);
                let (_size, align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
                align
            } else {
                fx.bcx.ins().iconst(fx.pointer_type, layout.align.abi.bytes() as i64)
            };
            ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
        };

        unchecked_add | unchecked_sub | unchecked_mul | unchecked_div | exact_div | unchecked_rem
        | unchecked_shl | unchecked_shr, (c x, c y) {
            // FIXME trap on overflow
            let bin_op = match intrinsic {
                sym::unchecked_add => BinOp::Add,
                sym::unchecked_sub => BinOp::Sub,
                sym::unchecked_mul => BinOp::Mul,
                sym::unchecked_div | sym::exact_div => BinOp::Div,
                sym::unchecked_rem => BinOp::Rem,
                sym::unchecked_shl => BinOp::Shl,
                sym::unchecked_shr => BinOp::Shr,
                _ => unreachable!(),
            };
            let res = crate::num::codegen_int_binop(fx, bin_op, x, y);
            ret.write_cvalue(fx, res);
        };
        add_with_overflow | sub_with_overflow | mul_with_overflow, (c x, c y) {
            assert_eq!(x.layout().ty, y.layout().ty);
            let bin_op = match intrinsic {
                sym::add_with_overflow => BinOp::Add,
                sym::sub_with_overflow => BinOp::Sub,
                sym::mul_with_overflow => BinOp::Mul,
                _ => unreachable!(),
            };

            let res = crate::num::codegen_checked_int_binop(
                fx,
                bin_op,
                x,
                y,
            );
            ret.write_cvalue(fx, res);
        };
        saturating_add | saturating_sub, (c lhs, c rhs) {
            assert_eq!(lhs.layout().ty, rhs.layout().ty);
            let bin_op = match intrinsic {
                sym::saturating_add => BinOp::Add,
                sym::saturating_sub => BinOp::Sub,
                _ => unreachable!(),
            };

            let signed = type_sign(lhs.layout().ty);

            let checked_res = crate::num::codegen_checked_int_binop(
                fx,
                bin_op,
                lhs,
                rhs,
            );

            let (val, has_overflow) = checked_res.load_scalar_pair(fx);
            let clif_ty = fx.clif_type(lhs.layout().ty).unwrap();

            let (min, max) = type_min_max_value(&mut fx.bcx, clif_ty, signed);

            let val = match (intrinsic, signed) {
                (sym::saturating_add, false) => fx.bcx.ins().select(has_overflow, max, val),
                (sym::saturating_sub, false) => fx.bcx.ins().select(has_overflow, min, val),
                (sym::saturating_add, true) => {
                    let rhs = rhs.load_scalar(fx);
                    let rhs_ge_zero = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
                    let sat_val = fx.bcx.ins().select(rhs_ge_zero, max, min);
                    fx.bcx.ins().select(has_overflow, sat_val, val)
                }
                (sym::saturating_sub, true) => {
                    let rhs = rhs.load_scalar(fx);
                    let rhs_ge_zero = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
                    let sat_val = fx.bcx.ins().select(rhs_ge_zero, min, max);
                    fx.bcx.ins().select(has_overflow, sat_val, val)
                }
                _ => unreachable!(),
            };

            let res = CValue::by_val(val, lhs.layout());

            ret.write_cvalue(fx, res);
        };
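        // For the `saturating_add` arm above: e.g. `100u8 + 200u8` overflows, so
        // `has_overflow` selects `u8::MAX` (255); for signed types the sign of
        // `rhs` first chooses between `min` and `max` as the saturation value.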
        rotate_left, (c x, v y) {
            let layout = x.layout();
            let x = x.load_scalar(fx);
            let res = fx.bcx.ins().rotl(x, y);
            ret.write_cvalue(fx, CValue::by_val(res, layout));
        };
        rotate_right, (c x, v y) {
            let layout = x.layout();
            let x = x.load_scalar(fx);
            let res = fx.bcx.ins().rotr(x, y);
            ret.write_cvalue(fx, CValue::by_val(res, layout));
        };

        // The only difference between offset and arith_offset is regarding UB. Because Cranelift
        // doesn't have UB, both are codegen'ed the same way.
        offset | arith_offset, (c base, v offset) {
            let pointee_ty = base.layout().ty.builtin_deref(true).unwrap().ty;
            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
            let ptr_diff = if pointee_size != 1 {
                fx.bcx.ins().imul_imm(offset, pointee_size as i64)
            } else {
                offset
            };
            let base_val = base.load_scalar(fx);
            let res = fx.bcx.ins().iadd(base_val, ptr_diff);
            ret.write_cvalue(fx, CValue::by_val(res, base.layout()));
        };

        transmute, (c from) {
            ret.write_cvalue_transmute(fx, from);
        };
        write_bytes | volatile_set_memory, (c dst, v val, v count) {
            let pointee_ty = dst.layout().ty.builtin_deref(true).unwrap().ty;
            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
            let count = if pointee_size != 1 {
                fx.bcx.ins().imul_imm(count, pointee_size as i64)
            } else {
                count
            };
            let dst_ptr = dst.load_scalar(fx);
            // FIXME make the memset actually volatile when switching to emit_small_memset
            // FIXME use emit_small_memset
            fx.bcx.call_memset(fx.target_config, dst_ptr, val, count);
        };
        ctlz | ctlz_nonzero, (c arg) {
            let val = arg.load_scalar(fx);
            // FIXME trap on `ctlz_nonzero` with zero arg.
            let res = fx.bcx.ins().clz(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        };
        cttz | cttz_nonzero, (c arg) {
            let val = arg.load_scalar(fx);
            // FIXME trap on `cttz_nonzero` with zero arg.
            let res = fx.bcx.ins().ctz(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        };
        ctpop, (c arg) {
            let val = arg.load_scalar(fx);
            let res = fx.bcx.ins().popcnt(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        };
        bitreverse, (c arg) {
            let val = arg.load_scalar(fx);
            let res = fx.bcx.ins().bitrev(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        };
        bswap, (c arg) {
            // FIXME(CraneStation/cranelift#794) add bswap instruction to cranelift
            fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
                match bcx.func.dfg.value_type(v) {
                    types::I8 => v,

                    // https://code.woboq.org/gcc/include/bits/byteswap.h.html
                    types::I16 => {
                        let tmp1 = bcx.ins().ishl_imm(v, 8);
                        let n1 = bcx.ins().band_imm(tmp1, 0xFF00);

                        let tmp2 = bcx.ins().ushr_imm(v, 8);
                        let n2 = bcx.ins().band_imm(tmp2, 0x00FF);

                        bcx.ins().bor(n1, n2)
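                        // e.g. v = 0xAABB: (v << 8) & 0xFF00 = 0xBB00 and
                        // (v >> 8) & 0x00FF = 0x00AA, which or to 0xBBAA.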
                    }
                    types::I32 => {
                        let tmp1 = bcx.ins().ishl_imm(v, 24);
                        let n1 = bcx.ins().band_imm(tmp1, 0xFF00_0000);

                        let tmp2 = bcx.ins().ishl_imm(v, 8);
                        let n2 = bcx.ins().band_imm(tmp2, 0x00FF_0000);

                        let tmp3 = bcx.ins().ushr_imm(v, 8);
                        let n3 = bcx.ins().band_imm(tmp3, 0x0000_FF00);

                        let tmp4 = bcx.ins().ushr_imm(v, 24);
                        let n4 = bcx.ins().band_imm(tmp4, 0x0000_00FF);

                        let or_tmp1 = bcx.ins().bor(n1, n2);
                        let or_tmp2 = bcx.ins().bor(n3, n4);
                        bcx.ins().bor(or_tmp1, or_tmp2)
                    }
                    types::I64 => {
                        let tmp1 = bcx.ins().ishl_imm(v, 56);
                        let n1 = bcx.ins().band_imm(tmp1, 0xFF00_0000_0000_0000u64 as i64);

                        let tmp2 = bcx.ins().ishl_imm(v, 40);
                        let n2 = bcx.ins().band_imm(tmp2, 0x00FF_0000_0000_0000u64 as i64);

                        let tmp3 = bcx.ins().ishl_imm(v, 24);
                        let n3 = bcx.ins().band_imm(tmp3, 0x0000_FF00_0000_0000u64 as i64);

                        let tmp4 = bcx.ins().ishl_imm(v, 8);
                        let n4 = bcx.ins().band_imm(tmp4, 0x0000_00FF_0000_0000u64 as i64);

                        let tmp5 = bcx.ins().ushr_imm(v, 8);
                        let n5 = bcx.ins().band_imm(tmp5, 0x0000_0000_FF00_0000u64 as i64);

                        let tmp6 = bcx.ins().ushr_imm(v, 24);
                        let n6 = bcx.ins().band_imm(tmp6, 0x0000_0000_00FF_0000u64 as i64);

                        let tmp7 = bcx.ins().ushr_imm(v, 40);
                        let n7 = bcx.ins().band_imm(tmp7, 0x0000_0000_0000_FF00u64 as i64);

                        let tmp8 = bcx.ins().ushr_imm(v, 56);
                        let n8 = bcx.ins().band_imm(tmp8, 0x0000_0000_0000_00FFu64 as i64);

                        let or_tmp1 = bcx.ins().bor(n1, n2);
                        let or_tmp2 = bcx.ins().bor(n3, n4);
                        let or_tmp3 = bcx.ins().bor(n5, n6);
                        let or_tmp4 = bcx.ins().bor(n7, n8);

                        let or_tmp5 = bcx.ins().bor(or_tmp1, or_tmp2);
                        let or_tmp6 = bcx.ins().bor(or_tmp3, or_tmp4);
                        bcx.ins().bor(or_tmp5, or_tmp6)
                    }
                    types::I128 => {
                        let (lo, hi) = bcx.ins().isplit(v);
                        let lo = swap(bcx, lo);
                        let hi = swap(bcx, hi);
                        bcx.ins().iconcat(hi, lo)
                    }
                    ty => unreachable!("bswap {}", ty),
                }
            }
            let val = arg.load_scalar(fx);
            let res = CValue::by_val(swap(&mut fx.bcx, val), arg.layout());
            ret.write_cvalue(fx, res);
        };
        assert_inhabited | assert_zero_valid | assert_uninit_valid, () {
            let layout = fx.layout_of(substs.type_at(0));
            if layout.abi.is_uninhabited() {
                with_no_trimmed_paths!({
                    crate::base::codegen_panic(
                        fx,
                        &format!("attempted to instantiate uninhabited type `{}`", layout.ty),
                        source_info,
                    )
                });
                return;
            }

            if intrinsic == sym::assert_zero_valid
                && !layout.might_permit_raw_init(
                    fx,
                    InitKind::Zero,
                    fx.tcx.sess.opts.debugging_opts.strict_init_checks) {

                with_no_trimmed_paths!({
                    crate::base::codegen_panic(
                        fx,
                        &format!("attempted to zero-initialize type `{}`, which is invalid", layout.ty),
                        source_info,
                    )
                });
                return;
            }

            if intrinsic == sym::assert_uninit_valid
                && !layout.might_permit_raw_init(
                    fx,
                    InitKind::Uninit,
                    fx.tcx.sess.opts.debugging_opts.strict_init_checks) {

                with_no_trimmed_paths!({
                    crate::base::codegen_panic(
                        fx,
                        &format!("attempted to leave type `{}` uninitialized, which is invalid", layout.ty),
                        source_info,
                    )
                });
                return;
            }
        };
        volatile_load | unaligned_volatile_load, (c ptr) {
            // Cranelift treats loads as volatile by default
            // FIXME correctly handle unaligned_volatile_load
            let inner_layout =
                fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
            let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
            ret.write_cvalue(fx, val);
        };
        volatile_store | unaligned_volatile_store, (v ptr, c val) {
            // Cranelift treats stores as volatile by default
            // FIXME correctly handle unaligned_volatile_store
            let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
            dest.write_cvalue(fx, val);
        };

        pref_align_of | needs_drop | type_id | type_name | variant_count, () {
            let const_val =
                fx.tcx.const_eval_instance(ParamEnv::reveal_all(), instance, None).unwrap();
            let val = crate::constant::codegen_const_value(
                fx,
                const_val,
                ret.layout().ty,
            );
            ret.write_cvalue(fx, val);
        };
        ptr_offset_from | ptr_offset_from_unsigned, (v ptr, v base) {
            let ty = substs.type_at(0);

            let pointee_size: u64 = fx.layout_of(ty).size.bytes();
            let diff_bytes = fx.bcx.ins().isub(ptr, base);
            // FIXME this can be an exact division.
            let val = if intrinsic == sym::ptr_offset_from_unsigned {
                let usize_layout = fx.layout_of(fx.tcx.types.usize);
                // Because diff_bytes ULE isize::MAX, this would be fine as signed,
                // but unsigned is slightly easier to codegen, so might as well.
                CValue::by_val(fx.bcx.ins().udiv_imm(diff_bytes, pointee_size as i64), usize_layout)
            } else {
                let isize_layout = fx.layout_of(fx.tcx.types.isize);
                CValue::by_val(fx.bcx.ins().sdiv_imm(diff_bytes, pointee_size as i64), isize_layout)
            };
            ret.write_cvalue(fx, val);
        };
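        // For the `ptr_offset_from` arms above: e.g. two `*const u16` pointers
        // 8 bytes apart yield 8 / size_of::<u16>() = 4 (illustrative values).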
        ptr_guaranteed_eq, (c a, c b) {
            let val = crate::num::codegen_ptr_binop(fx, BinOp::Eq, a, b);
            ret.write_cvalue(fx, val);
        };

        ptr_guaranteed_ne, (c a, c b) {
            let val = crate::num::codegen_ptr_binop(fx, BinOp::Ne, a, b);
            ret.write_cvalue(fx, val);
        };

        caller_location, () {
            let caller_location = fx.get_caller_location(source_info);
            ret.write_cvalue(fx, caller_location);
        };

        _ if intrinsic.as_str().starts_with("atomic_fence"), () {
            fx.bcx.ins().fence();
        };
        _ if intrinsic.as_str().starts_with("atomic_singlethreadfence"), () {
            // FIXME use a compiler fence once Cranelift supports it
            fx.bcx.ins().fence();
        };
774 _ if intrinsic.as_str().starts_with("atomic_load"), (v ptr) {
775 let ty = substs.type_at(0);
777 ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
778 // FIXME implement 128bit atomics
779 if fx.tcx.is_compiler_builtins(LOCAL_CRATE) {
780 // special case for compiler-builtins to avoid having to patch it
781 crate::trap::trap_unimplemented(fx, "128bit atomics not yet supported");
782 let ret_block = fx.get_block(destination.unwrap());
783 fx.bcx.ins().jump(ret_block, &[]);
786 fx.tcx.sess.span_fatal(source_info.span, "128bit atomics not yet supported");
789 ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
791 report_atomic_type_validation_error(fx, intrinsic, source_info.span, ty);
795 let clif_ty = fx.clif_type(ty).unwrap();
797 let val = fx.bcx.ins().atomic_load(clif_ty, MemFlags::trusted(), ptr);
799 let val = CValue::by_val(val, fx.layout_of(ty));
800 ret.write_cvalue(fx, val);
        _ if intrinsic.as_str().starts_with("atomic_store"), (v ptr, c val) {
            let ty = substs.type_at(0);
            match ty.kind() {
                ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
                    // FIXME implement 128bit atomics
                    if fx.tcx.is_compiler_builtins(LOCAL_CRATE) {
                        // special case for compiler-builtins to avoid having to patch it
                        crate::trap::trap_unimplemented(fx, "128bit atomics not yet supported");
                        let ret_block = fx.get_block(destination.unwrap());
                        fx.bcx.ins().jump(ret_block, &[]);
                        return;
                    } else {
                        fx.tcx.sess.span_fatal(source_info.span, "128bit atomics not yet supported");
                    }
                }
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, ty);
                    return;
                }
            }

            let val = val.load_scalar(fx);

            fx.bcx.ins().atomic_store(MemFlags::trusted(), val, ptr);
        };
        _ if intrinsic.as_str().starts_with("atomic_xchg"), (v ptr, c new) {
            let layout = new.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let new = new.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xchg, ptr, new);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        };
        _ if intrinsic.as_str().starts_with("atomic_cxchg"), (v ptr, c test_old, c new) { // both atomic_cxchg_* and atomic_cxchgweak_*
            let layout = new.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }

            let test_old = test_old.load_scalar(fx);
            let new = new.load_scalar(fx);

            let old = fx.bcx.ins().atomic_cas(MemFlags::trusted(), ptr, test_old, new);
            let is_eq = fx.bcx.ins().icmp(IntCC::Equal, old, test_old);

            let ret_val = CValue::by_val_pair(old, fx.bcx.ins().bint(types::I8, is_eq), ret.layout());
            ret.write_cvalue(fx, ret_val)
        };
        _ if intrinsic.as_str().starts_with("atomic_xadd"), (v ptr, c amount) {
            let layout = amount.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let amount = amount.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Add, ptr, amount);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        };
        _ if intrinsic.as_str().starts_with("atomic_xsub"), (v ptr, c amount) {
            let layout = amount.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let amount = amount.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Sub, ptr, amount);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        };
        _ if intrinsic.as_str().starts_with("atomic_and"), (v ptr, c src) {
            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::And, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        };
        _ if intrinsic.as_str().starts_with("atomic_or"), (v ptr, c src) {
            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Or, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        };
        _ if intrinsic.as_str().starts_with("atomic_xor"), (v ptr, c src) {
            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xor, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        };
        _ if intrinsic.as_str().starts_with("atomic_nand"), (v ptr, c src) {
            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Nand, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        };
        _ if intrinsic.as_str().starts_with("atomic_max"), (v ptr, c src) {
            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smax, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        };
        _ if intrinsic.as_str().starts_with("atomic_umax"), (v ptr, c src) {
            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umax, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        };
        _ if intrinsic.as_str().starts_with("atomic_min"), (v ptr, c src) {
            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smin, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        };
        _ if intrinsic.as_str().starts_with("atomic_umin"), (v ptr, c src) {
            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umin, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        };
        minnumf32, (v a, v b) {
            let val = crate::num::codegen_float_min(fx, a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
            ret.write_cvalue(fx, val);
        };
        minnumf64, (v a, v b) {
            let val = crate::num::codegen_float_min(fx, a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
            ret.write_cvalue(fx, val);
        };
        maxnumf32, (v a, v b) {
            let val = crate::num::codegen_float_max(fx, a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
            ret.write_cvalue(fx, val);
        };
        maxnumf64, (v a, v b) {
            let val = crate::num::codegen_float_max(fx, a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
            ret.write_cvalue(fx, val);
        };
        kw.Try, (v f, v data, v _catch_fn) {
            // FIXME once unwinding is supported, change this to actually catch panics
            let f_sig = fx.bcx.func.import_signature(Signature {
                call_conv: fx.target_config.default_call_conv,
                params: vec![AbiParam::new(fx.bcx.func.dfg.value_type(data))],
                returns: vec![],
            });

            fx.bcx.ins().call_indirect(f_sig, f, &[data]);

            let layout = ret.layout();
            let ret_val = CValue::const_val(fx, layout, ty::ScalarInt::null(layout.size));
            ret.write_cvalue(fx, ret_val);
        };
        fadd_fast | fsub_fast | fmul_fast | fdiv_fast | frem_fast, (c x, c y) {
            let res = crate::num::codegen_float_binop(fx, match intrinsic {
                sym::fadd_fast => BinOp::Add,
                sym::fsub_fast => BinOp::Sub,
                sym::fmul_fast => BinOp::Mul,
                sym::fdiv_fast => BinOp::Div,
                sym::frem_fast => BinOp::Rem,
                _ => unreachable!(),
            }, x, y);
            ret.write_cvalue(fx, res);
        };
        float_to_int_unchecked, (v f) {
            let res = crate::cast::clif_int_or_float_cast(
                fx,
                f,
                false,
                fx.clif_type(ret.layout().ty).unwrap(),
                type_sign(ret.layout().ty),
            );
            ret.write_cvalue(fx, CValue::by_val(res, ret.layout()));
        };
        raw_eq, (v lhs_ref, v rhs_ref) {
            let size = fx.layout_of(substs.type_at(0)).layout.size();
            // FIXME add and use emit_small_memcmp
            let is_eq_value =
                if size == Size::ZERO {
                    // No bytes means they're trivially equal
                    fx.bcx.ins().iconst(types::I8, 1)
                } else if let Some(clty) = size.bits().try_into().ok().and_then(Type::int) {
                    // Can't use `trusted` for these loads; they could be unaligned.
                    let mut flags = MemFlags::new();
                    flags.set_notrap();
                    let lhs_val = fx.bcx.ins().load(clty, flags, lhs_ref, 0);
                    let rhs_val = fx.bcx.ins().load(clty, flags, rhs_ref, 0);
                    let eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_val, rhs_val);
                    fx.bcx.ins().bint(types::I8, eq)
                } else {
                    // Just call `memcmp` (like slices do in core) when the
                    // size is too large or it's not a power-of-two.
                    let signed_bytes = i64::try_from(size.bytes()).unwrap();
                    let bytes_val = fx.bcx.ins().iconst(fx.pointer_type, signed_bytes);
                    let params = vec![AbiParam::new(fx.pointer_type); 3];
                    let returns = vec![AbiParam::new(types::I32)];
                    let args = &[lhs_ref, rhs_ref, bytes_val];
                    let cmp = fx.lib_call("memcmp", params, returns, args)[0];
                    let eq = fx.bcx.ins().icmp_imm(IntCC::Equal, cmp, 0);
                    fx.bcx.ins().bint(types::I8, eq)
                };
            ret.write_cvalue(fx, CValue::by_val(is_eq_value, ret.layout()));
        };
        const_allocate, (c _size, c _align) {
            // returns a null pointer at runtime.
            let null = fx.bcx.ins().iconst(fx.pointer_type, 0);
            ret.write_cvalue(fx, CValue::by_val(null, ret.layout()));
        };

        const_deallocate, (c _ptr, c _size, c _align) {
            // nop at runtime.
        };

        black_box, (c a) {
            // FIXME implement black_box semantics
            ret.write_cvalue(fx, a);
        };
    }

    let ret_block = fx.get_block(destination.unwrap());
    fx.bcx.ins().jump(ret_block, &[]);
}