1 //! Codegen of intrinsics. This includes `extern "rust-intrinsic"`, `extern "platform-intrinsic"`
2 //! and LLVM intrinsics that have symbol names starting with `llvm.`.
8 pub(crate) use cpuid::codegen_cpuid_call;
9 pub(crate) use llvm::codegen_llvm_intrinsic_call;
11 use rustc_middle::ty::print::with_no_trimmed_paths;
12 use rustc_middle::ty::subst::SubstsRef;
13 use rustc_span::symbol::{kw, sym, Symbol};
15 use crate::prelude::*;
16 use cranelift_codegen::ir::AtomicRmwOp;
34 (o $fx:expr, $arg:ident) => {},
35 (c $fx:expr, $arg:ident) => {
36 let $arg = codegen_operand($fx, $arg);
38 (v $fx:expr, $arg:ident) => {
39 let $arg = codegen_operand($fx, $arg).load_scalar($fx);
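// Usage sketch for the matcher below: an arm such as
//     ctlz | ctlz_nonzero, (c arg) { ... };
// matches either symbol and binds `arg` according to its prefix: `c` yields a `CValue`
// via `codegen_operand`, `v` additionally loads it to a scalar `Value`, and `o` leaves
// the raw `mir::Operand` untouched.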
43 macro intrinsic_match {
44 ($fx:expr, $intrinsic:expr, $args:expr,
47 $($($name:tt).*)|+ $(if $cond:expr)?, ($($a:ident $arg:ident),*) $content:block;
51 $(intrinsic_pat!($($name).*))|* $(if $cond)? => {
52 if let [$($arg),*] = $args {
53 $(intrinsic_arg!($a $fx, $arg);)*
56 bug!("wrong number of args for intrinsic {:?}", $intrinsic);
65 fn report_atomic_type_validation_error<'tcx>(
66 fx: &mut FunctionCx<'_, '_, 'tcx>,
74 "`{}` intrinsic: expected basic integer or raw pointer type, found `{:?}`",
78 // Prevent verifier error
79 crate::trap::trap_unreachable(fx, "compilation should not have succeeded");
82 pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> Option<Type> {
83 let (element, count) = match layout.abi {
84 Abi::Vector { element, count } => (element, count),
88 match scalar_to_clif_type(tcx, element).by(u16::try_from(count).unwrap()) {
// Cranelift currently only implements icmp for 128-bit vectors.
90 Some(vector_ty) if vector_ty.bits() == 128 => Some(vector_ty),
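/// Call `f` once per lane of `val`, writing each result into the corresponding lane of
/// `ret`. Input and output must have the same lane count; the lane types may differ.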
95 fn simd_for_each_lane<'tcx>(
96 fx: &mut FunctionCx<'_, '_, 'tcx>,
99 f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Ty<'tcx>, Value) -> Value,
101 let layout = val.layout();
103 let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
104 let lane_layout = fx.layout_of(lane_ty);
105 let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
106 let ret_lane_layout = fx.layout_of(ret_lane_ty);
107 assert_eq!(lane_count, ret_lane_count);
109 for lane_idx in 0..lane_count {
110 let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
112 let res_lane = f(fx, lane_layout.ty, ret_lane_layout.ty, lane);
113 let res_lane = CValue::by_val(res_lane, ret_lane_layout);
115 ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
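/// Like [`simd_for_each_lane`], but `f` receives the matching lanes of the two input
/// vectors `x` and `y`, which must have identical layouts.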
119 fn simd_pair_for_each_lane<'tcx>(
120 fx: &mut FunctionCx<'_, '_, 'tcx>,
124 f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Ty<'tcx>, Value, Value) -> Value,
126 assert_eq!(x.layout(), y.layout());
127 let layout = x.layout();
129 let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
130 let lane_layout = fx.layout_of(lane_ty);
131 let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
132 let ret_lane_layout = fx.layout_of(ret_lane_ty);
133 assert_eq!(lane_count, ret_lane_count);
135 for lane_idx in 0..lane_count {
136 let x_lane = x.value_lane(fx, lane_idx).load_scalar(fx);
137 let y_lane = y.value_lane(fx, lane_idx).load_scalar(fx);
139 let res_lane = f(fx, lane_layout.ty, ret_lane_layout.ty, x_lane, y_lane);
140 let res_lane = CValue::by_val(res_lane, ret_lane_layout);
142 ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
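/// Fold all lanes of `val` into a single scalar using `f`, seeding the fold with `acc`
/// when provided and with lane 0 otherwise. `ret` must have the layout of a single lane.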
146 fn simd_reduce<'tcx>(
147 fx: &mut FunctionCx<'_, '_, 'tcx>,
151 f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Value, Value) -> Value,
153 let (lane_count, lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
154 let lane_layout = fx.layout_of(lane_ty);
155 assert_eq!(lane_layout, ret.layout());
157 let (mut res_val, start_lane) =
158 if let Some(acc) = acc { (acc, 0) } else { (val.value_lane(fx, 0).load_scalar(fx), 1) };
159 for lane_idx in start_lane..lane_count {
160 let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
161 res_val = f(fx, lane_layout.ty, res_val, lane);
163 let res = CValue::by_val(res_val, lane_layout);
164 ret.write_cvalue(fx, res);
167 // FIXME move all uses to `simd_reduce`
168 fn simd_reduce_bool<'tcx>(
169 fx: &mut FunctionCx<'_, '_, 'tcx>,
172 f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Value, Value) -> Value,
174 let (lane_count, _lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
175 assert!(ret.layout().ty.is_bool());
177 let res_val = val.value_lane(fx, 0).load_scalar(fx);
178 let mut res_val = fx.bcx.ins().band_imm(res_val, 1); // mask to boolean
179 for lane_idx in 1..lane_count {
180 let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
181 let lane = fx.bcx.ins().band_imm(lane, 1); // mask to boolean
182 res_val = f(fx, res_val, lane);
184 let res_val = if fx.bcx.func.dfg.value_type(res_val) != types::I8 {
185 fx.bcx.ins().ireduce(types::I8, res_val)
189 let res = CValue::by_val(res_val, ret.layout());
190 ret.write_cvalue(fx, res);
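/// Expand a boolean into `0`/`!0` of type `ty`: `bint` maps the flag to `0`/`1`, `ineg`
/// turns that into `0`/`-1` (all bits set), and for float types the all-ones integer is
/// bitcast back to the requested float type.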
193 fn bool_to_zero_or_max_uint<'tcx>(
194 fx: &mut FunctionCx<'_, '_, 'tcx>,
198 let ty = fx.clif_type(ty).unwrap();
200 let int_ty = match ty {
201 types::F32 => types::I32,
202 types::F64 => types::I64,
206 let val = fx.bcx.ins().bint(int_ty, val);
207 let mut res = fx.bcx.ins().ineg(val);
210 res = fx.bcx.ins().bitcast(ty, res);
216 pub(crate) fn codegen_intrinsic_call<'tcx>(
217 fx: &mut FunctionCx<'_, '_, 'tcx>,
218 instance: Instance<'tcx>,
219 args: &[mir::Operand<'tcx>],
220 destination: Option<(CPlace<'tcx>, BasicBlock)>,
223 let intrinsic = fx.tcx.item_name(instance.def_id());
224 let substs = instance.substs;
226 let ret = match destination {
227 Some((place, _)) => place,
// Insert non-returning intrinsics here
232 trap_abort(fx, "Called intrinsic::abort.");
235 crate::base::codegen_panic(fx, "Transmuting to uninhabited type.", span);
_ => unimplemented!("unsupported intrinsic {}", intrinsic),
243 if intrinsic.as_str().starts_with("simd_") {
244 self::simd::codegen_simd_intrinsic_call(fx, intrinsic, substs, args, ret, span);
245 let ret_block = fx.get_block(destination.expect("SIMD intrinsics don't diverge").1);
246 fx.bcx.ins().jump(ret_block, &[]);
247 } else if codegen_float_intrinsic_call(fx, intrinsic, args, ret) {
248 let ret_block = fx.get_block(destination.expect("Float intrinsics don't diverge").1);
249 fx.bcx.ins().jump(ret_block, &[]);
251 codegen_regular_intrinsic_call(
264 fn codegen_float_intrinsic_call<'tcx>(
265 fx: &mut FunctionCx<'_, '_, 'tcx>,
267 args: &[mir::Operand<'tcx>],
270 let (name, arg_count, ty) = match intrinsic {
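// Map each float intrinsic to (libcall name, argument count, float type). The names are
// the C math library symbols (`f`-suffixed for the `f32` variants), except `powif*`,
// which lowers to compiler-builtins helpers.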
271 sym::expf32 => ("expf", 1, fx.tcx.types.f32),
272 sym::expf64 => ("exp", 1, fx.tcx.types.f64),
273 sym::exp2f32 => ("exp2f", 1, fx.tcx.types.f32),
274 sym::exp2f64 => ("exp2", 1, fx.tcx.types.f64),
275 sym::sqrtf32 => ("sqrtf", 1, fx.tcx.types.f32),
276 sym::sqrtf64 => ("sqrt", 1, fx.tcx.types.f64),
277 sym::powif32 => ("__powisf2", 2, fx.tcx.types.f32), // compiler-builtins
278 sym::powif64 => ("__powidf2", 2, fx.tcx.types.f64), // compiler-builtins
279 sym::powf32 => ("powf", 2, fx.tcx.types.f32),
280 sym::powf64 => ("pow", 2, fx.tcx.types.f64),
281 sym::logf32 => ("logf", 1, fx.tcx.types.f32),
282 sym::logf64 => ("log", 1, fx.tcx.types.f64),
283 sym::log2f32 => ("log2f", 1, fx.tcx.types.f32),
284 sym::log2f64 => ("log2", 1, fx.tcx.types.f64),
285 sym::log10f32 => ("log10f", 1, fx.tcx.types.f32),
286 sym::log10f64 => ("log10", 1, fx.tcx.types.f64),
287 sym::fabsf32 => ("fabsf", 1, fx.tcx.types.f32),
288 sym::fabsf64 => ("fabs", 1, fx.tcx.types.f64),
289 sym::fmaf32 => ("fmaf", 3, fx.tcx.types.f32),
290 sym::fmaf64 => ("fma", 3, fx.tcx.types.f64),
291 sym::copysignf32 => ("copysignf", 2, fx.tcx.types.f32),
292 sym::copysignf64 => ("copysign", 2, fx.tcx.types.f64),
293 sym::floorf32 => ("floorf", 1, fx.tcx.types.f32),
294 sym::floorf64 => ("floor", 1, fx.tcx.types.f64),
295 sym::ceilf32 => ("ceilf", 1, fx.tcx.types.f32),
296 sym::ceilf64 => ("ceil", 1, fx.tcx.types.f64),
297 sym::truncf32 => ("truncf", 1, fx.tcx.types.f32),
298 sym::truncf64 => ("trunc", 1, fx.tcx.types.f64),
299 sym::roundf32 => ("roundf", 1, fx.tcx.types.f32),
300 sym::roundf64 => ("round", 1, fx.tcx.types.f64),
301 sym::sinf32 => ("sinf", 1, fx.tcx.types.f32),
302 sym::sinf64 => ("sin", 1, fx.tcx.types.f64),
303 sym::cosf32 => ("cosf", 1, fx.tcx.types.f32),
304 sym::cosf64 => ("cos", 1, fx.tcx.types.f64),
308 if args.len() != arg_count {
309 bug!("wrong number of args for intrinsic {:?}", intrinsic);
313 let args = match args {
315 a = [codegen_operand(fx, x)];
319 b = [codegen_operand(fx, x), codegen_operand(fx, y)];
323 c = [codegen_operand(fx, x), codegen_operand(fx, y), codegen_operand(fx, z)];
329 let res = fx.easy_call(name, &args, ty);
330 ret.write_cvalue(fx, res);
335 fn codegen_regular_intrinsic_call<'tcx>(
336 fx: &mut FunctionCx<'_, '_, 'tcx>,
337 instance: Instance<'tcx>,
339 substs: SubstsRef<'tcx>,
340 args: &[mir::Operand<'tcx>],
343 destination: Option<(CPlace<'tcx>, BasicBlock)>,
345 let usize_layout = fx.layout_of(fx.tcx.types.usize);
350 fx.tcx.sess.span_fatal(span, &format!("unsupported intrinsic {}", intrinsic));
354 likely | unlikely, (c a) {
355 ret.write_cvalue(fx, a);
358 fx.bcx.ins().debugtrap();
360 copy | copy_nonoverlapping, (v src, v dst, v count) {
361 let elem_ty = substs.type_at(0);
362 let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
363 assert_eq!(args.len(), 3);
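// `count` is in units of `elem_ty`; scale it to a byte count for the memcpy/memmove call.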
364 let byte_amount = if elem_size != 1 {
365 fx.bcx.ins().imul_imm(count, elem_size as i64)
370 if intrinsic == sym::copy_nonoverlapping {
371 // FIXME emit_small_memcpy
372 fx.bcx.call_memcpy(fx.target_config, dst, src, byte_amount);
374 // FIXME emit_small_memmove
375 fx.bcx.call_memmove(fx.target_config, dst, src, byte_amount);
378 // NOTE: the volatile variants have src and dst swapped
379 volatile_copy_memory | volatile_copy_nonoverlapping_memory, (v dst, v src, v count) {
380 let elem_ty = substs.type_at(0);
381 let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
382 assert_eq!(args.len(), 3);
383 let byte_amount = if elem_size != 1 {
384 fx.bcx.ins().imul_imm(count, elem_size as i64)
389 // FIXME make the copy actually volatile when using emit_small_mem{cpy,move}
390 if intrinsic == sym::volatile_copy_nonoverlapping_memory {
391 // FIXME emit_small_memcpy
392 fx.bcx.call_memcpy(fx.target_config, dst, src, byte_amount);
394 // FIXME emit_small_memmove
395 fx.bcx.call_memmove(fx.target_config, dst, src, byte_amount);
398 size_of_val, (c ptr) {
399 let layout = fx.layout_of(substs.type_at(0));
400 let size = if layout.is_unsized() {
401 let (_ptr, info) = ptr.load_scalar_pair(fx);
402 let (size, _align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
408 .iconst(fx.pointer_type, layout.size.bytes() as i64)
410 ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
412 min_align_of_val, (c ptr) {
413 let layout = fx.layout_of(substs.type_at(0));
414 let align = if layout.is_unsized() {
415 let (_ptr, info) = ptr.load_scalar_pair(fx);
416 let (_size, align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
422 .iconst(fx.pointer_type, layout.align.abi.bytes() as i64)
424 ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
427 unchecked_add | unchecked_sub | unchecked_mul | unchecked_div | exact_div | unchecked_rem
428 | unchecked_shl | unchecked_shr, (c x, c y) {
429 // FIXME trap on overflow
430 let bin_op = match intrinsic {
431 sym::unchecked_add => BinOp::Add,
432 sym::unchecked_sub => BinOp::Sub,
433 sym::unchecked_mul => BinOp::Mul,
434 sym::unchecked_div | sym::exact_div => BinOp::Div,
435 sym::unchecked_rem => BinOp::Rem,
436 sym::unchecked_shl => BinOp::Shl,
437 sym::unchecked_shr => BinOp::Shr,
440 let res = crate::num::codegen_int_binop(fx, bin_op, x, y);
441 ret.write_cvalue(fx, res);
443 add_with_overflow | sub_with_overflow | mul_with_overflow, (c x, c y) {
444 assert_eq!(x.layout().ty, y.layout().ty);
445 let bin_op = match intrinsic {
446 sym::add_with_overflow => BinOp::Add,
447 sym::sub_with_overflow => BinOp::Sub,
448 sym::mul_with_overflow => BinOp::Mul,
452 let res = crate::num::codegen_checked_int_binop(
458 ret.write_cvalue(fx, res);
460 saturating_add | saturating_sub, (c lhs, c rhs) {
461 assert_eq!(lhs.layout().ty, rhs.layout().ty);
462 let bin_op = match intrinsic {
463 sym::saturating_add => BinOp::Add,
464 sym::saturating_sub => BinOp::Sub,
468 let signed = type_sign(lhs.layout().ty);
470 let checked_res = crate::num::codegen_checked_int_binop(
477 let (val, has_overflow) = checked_res.load_scalar_pair(fx);
478 let clif_ty = fx.clif_type(lhs.layout().ty).unwrap();
480 let (min, max) = type_min_max_value(&mut fx.bcx, clif_ty, signed);
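// On overflow, unsigned saturating_add clamps to `max` and saturating_sub to `min`. For
// the signed variants the clamp direction depends on the sign of `rhs`: e.g. a signed
// saturating_add can only overflow towards `max` when `rhs` is non-negative.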
482 let val = match (intrinsic, signed) {
483 (sym::saturating_add, false) => fx.bcx.ins().select(has_overflow, max, val),
484 (sym::saturating_sub, false) => fx.bcx.ins().select(has_overflow, min, val),
485 (sym::saturating_add, true) => {
486 let rhs = rhs.load_scalar(fx);
487 let rhs_ge_zero = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
488 let sat_val = fx.bcx.ins().select(rhs_ge_zero, max, min);
489 fx.bcx.ins().select(has_overflow, sat_val, val)
491 (sym::saturating_sub, true) => {
492 let rhs = rhs.load_scalar(fx);
493 let rhs_ge_zero = fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
494 let sat_val = fx.bcx.ins().select(rhs_ge_zero, min, max);
495 fx.bcx.ins().select(has_overflow, sat_val, val)
500 let res = CValue::by_val(val, lhs.layout());
502 ret.write_cvalue(fx, res);
504 rotate_left, (c x, v y) {
505 let layout = x.layout();
506 let x = x.load_scalar(fx);
507 let res = fx.bcx.ins().rotl(x, y);
508 ret.write_cvalue(fx, CValue::by_val(res, layout));
510 rotate_right, (c x, v y) {
511 let layout = x.layout();
512 let x = x.load_scalar(fx);
513 let res = fx.bcx.ins().rotr(x, y);
514 ret.write_cvalue(fx, CValue::by_val(res, layout));
// The only difference between `offset` and `arith_offset` is regarding UB. Because Cranelift
// doesn't have UB, both are codegen'ed the same way.
519 offset | arith_offset, (c base, v offset) {
520 let pointee_ty = base.layout().ty.builtin_deref(true).unwrap().ty;
521 let pointee_size = fx.layout_of(pointee_ty).size.bytes();
522 let ptr_diff = if pointee_size != 1 {
523 fx.bcx.ins().imul_imm(offset, pointee_size as i64)
527 let base_val = base.load_scalar(fx);
528 let res = fx.bcx.ins().iadd(base_val, ptr_diff);
529 ret.write_cvalue(fx, CValue::by_val(res, base.layout()));
532 transmute, (c from) {
533 ret.write_cvalue_transmute(fx, from);
535 write_bytes | volatile_set_memory, (c dst, v val, v count) {
536 let pointee_ty = dst.layout().ty.builtin_deref(true).unwrap().ty;
537 let pointee_size = fx.layout_of(pointee_ty).size.bytes();
538 let count = if pointee_size != 1 {
539 fx.bcx.ins().imul_imm(count, pointee_size as i64)
543 let dst_ptr = dst.load_scalar(fx);
544 // FIXME make the memset actually volatile when switching to emit_small_memset
545 // FIXME use emit_small_memset
546 fx.bcx.call_memset(fx.target_config, dst_ptr, val, count);
548 ctlz | ctlz_nonzero, (c arg) {
549 let val = arg.load_scalar(fx);
550 // FIXME trap on `ctlz_nonzero` with zero arg.
551 let res = fx.bcx.ins().clz(val);
552 let res = CValue::by_val(res, arg.layout());
553 ret.write_cvalue(fx, res);
555 cttz | cttz_nonzero, (c arg) {
556 let val = arg.load_scalar(fx);
557 // FIXME trap on `cttz_nonzero` with zero arg.
558 let res = fx.bcx.ins().ctz(val);
559 let res = CValue::by_val(res, arg.layout());
560 ret.write_cvalue(fx, res);
563 let val = arg.load_scalar(fx);
564 let res = fx.bcx.ins().popcnt(val);
565 let res = CValue::by_val(res, arg.layout());
566 ret.write_cvalue(fx, res);
568 bitreverse, (c arg) {
569 let val = arg.load_scalar(fx);
570 let res = fx.bcx.ins().bitrev(val);
571 let res = CValue::by_val(res, arg.layout());
572 ret.write_cvalue(fx, res);
575 // FIXME(CraneStation/cranelift#794) add bswap instruction to cranelift
576 fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
577 match bcx.func.dfg.value_type(v) {
580 // https://code.woboq.org/gcc/include/bits/byteswap.h.html
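// Every arm shifts each byte into its mirrored position, masks out the rest and ORs the
// partial results together; I16 is the minimal two-byte instance of the pattern.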
582 let tmp1 = bcx.ins().ishl_imm(v, 8);
583 let n1 = bcx.ins().band_imm(tmp1, 0xFF00);
585 let tmp2 = bcx.ins().ushr_imm(v, 8);
586 let n2 = bcx.ins().band_imm(tmp2, 0x00FF);
588 bcx.ins().bor(n1, n2)
591 let tmp1 = bcx.ins().ishl_imm(v, 24);
592 let n1 = bcx.ins().band_imm(tmp1, 0xFF00_0000);
594 let tmp2 = bcx.ins().ishl_imm(v, 8);
595 let n2 = bcx.ins().band_imm(tmp2, 0x00FF_0000);
597 let tmp3 = bcx.ins().ushr_imm(v, 8);
598 let n3 = bcx.ins().band_imm(tmp3, 0x0000_FF00);
600 let tmp4 = bcx.ins().ushr_imm(v, 24);
601 let n4 = bcx.ins().band_imm(tmp4, 0x0000_00FF);
603 let or_tmp1 = bcx.ins().bor(n1, n2);
604 let or_tmp2 = bcx.ins().bor(n3, n4);
605 bcx.ins().bor(or_tmp1, or_tmp2)
608 let tmp1 = bcx.ins().ishl_imm(v, 56);
609 let n1 = bcx.ins().band_imm(tmp1, 0xFF00_0000_0000_0000u64 as i64);
611 let tmp2 = bcx.ins().ishl_imm(v, 40);
612 let n2 = bcx.ins().band_imm(tmp2, 0x00FF_0000_0000_0000u64 as i64);
614 let tmp3 = bcx.ins().ishl_imm(v, 24);
615 let n3 = bcx.ins().band_imm(tmp3, 0x0000_FF00_0000_0000u64 as i64);
617 let tmp4 = bcx.ins().ishl_imm(v, 8);
618 let n4 = bcx.ins().band_imm(tmp4, 0x0000_00FF_0000_0000u64 as i64);
620 let tmp5 = bcx.ins().ushr_imm(v, 8);
621 let n5 = bcx.ins().band_imm(tmp5, 0x0000_0000_FF00_0000u64 as i64);
623 let tmp6 = bcx.ins().ushr_imm(v, 24);
624 let n6 = bcx.ins().band_imm(tmp6, 0x0000_0000_00FF_0000u64 as i64);
626 let tmp7 = bcx.ins().ushr_imm(v, 40);
627 let n7 = bcx.ins().band_imm(tmp7, 0x0000_0000_0000_FF00u64 as i64);
629 let tmp8 = bcx.ins().ushr_imm(v, 56);
630 let n8 = bcx.ins().band_imm(tmp8, 0x0000_0000_0000_00FFu64 as i64);
632 let or_tmp1 = bcx.ins().bor(n1, n2);
633 let or_tmp2 = bcx.ins().bor(n3, n4);
634 let or_tmp3 = bcx.ins().bor(n5, n6);
635 let or_tmp4 = bcx.ins().bor(n7, n8);
637 let or_tmp5 = bcx.ins().bor(or_tmp1, or_tmp2);
638 let or_tmp6 = bcx.ins().bor(or_tmp3, or_tmp4);
639 bcx.ins().bor(or_tmp5, or_tmp6)
642 let (lo, hi) = bcx.ins().isplit(v);
643 let lo = swap(bcx, lo);
644 let hi = swap(bcx, hi);
645 bcx.ins().iconcat(hi, lo)
647 ty => unreachable!("bswap {}", ty),
650 let val = arg.load_scalar(fx);
651 let res = CValue::by_val(swap(&mut fx.bcx, val), arg.layout());
652 ret.write_cvalue(fx, res);
654 assert_inhabited | assert_zero_valid | assert_uninit_valid, () {
655 let layout = fx.layout_of(substs.type_at(0));
656 if layout.abi.is_uninhabited() {
657 with_no_trimmed_paths(|| crate::base::codegen_panic(
659 &format!("attempted to instantiate uninhabited type `{}`", layout.ty),
665 if intrinsic == sym::assert_zero_valid && !layout.might_permit_raw_init(fx, /*zero:*/ true) {
666 with_no_trimmed_paths(|| crate::base::codegen_panic(
668 &format!("attempted to zero-initialize type `{}`, which is invalid", layout.ty),
674 if intrinsic == sym::assert_uninit_valid && !layout.might_permit_raw_init(fx, /*zero:*/ false) {
675 with_no_trimmed_paths(|| crate::base::codegen_panic(
677 &format!("attempted to leave type `{}` uninitialized, which is invalid", layout.ty),
684 volatile_load | unaligned_volatile_load, (c ptr) {
685 // Cranelift treats loads as volatile by default
686 // FIXME correctly handle unaligned_volatile_load
688 fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
689 let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
690 ret.write_cvalue(fx, val);
692 volatile_store | unaligned_volatile_store, (v ptr, c val) {
693 // Cranelift treats stores as volatile by default
694 // FIXME correctly handle unaligned_volatile_store
695 let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
696 dest.write_cvalue(fx, val);
699 pref_align_of | needs_drop | type_id | type_name | variant_count, () {
701 fx.tcx.const_eval_instance(ParamEnv::reveal_all(), instance, None).unwrap();
702 let val = crate::constant::codegen_const_value(
707 ret.write_cvalue(fx, val);
710 ptr_offset_from, (v ptr, v base) {
711 let ty = substs.type_at(0);
712 let isize_layout = fx.layout_of(fx.tcx.types.isize);
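// `ptr_offset_from` is the byte distance divided by the element size:
// `(ptr - base) / size_of::<T>()`.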
714 let pointee_size: u64 = fx.layout_of(ty).size.bytes();
715 let diff = fx.bcx.ins().isub(ptr, base);
716 // FIXME this can be an exact division.
717 let val = CValue::by_val(fx.bcx.ins().sdiv_imm(diff, pointee_size as i64), isize_layout);
718 ret.write_cvalue(fx, val);
721 ptr_guaranteed_eq, (c a, c b) {
722 let val = crate::num::codegen_ptr_binop(fx, BinOp::Eq, a, b);
723 ret.write_cvalue(fx, val);
726 ptr_guaranteed_ne, (c a, c b) {
727 let val = crate::num::codegen_ptr_binop(fx, BinOp::Ne, a, b);
728 ret.write_cvalue(fx, val);
731 caller_location, () {
732 let caller_location = fx.get_caller_location(span);
733 ret.write_cvalue(fx, caller_location);
736 _ if intrinsic.as_str().starts_with("atomic_fence"), () {
737 fx.bcx.ins().fence();
739 _ if intrinsic.as_str().starts_with("atomic_singlethreadfence"), () {
740 // FIXME use a compiler fence once Cranelift supports it
741 fx.bcx.ins().fence();
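// The atomic accesses below all use `MemFlags::trusted()`, i.e. aligned and non-trapping;
// pointer validity and alignment are the caller's contract for these intrinsics.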
743 _ if intrinsic.as_str().starts_with("atomic_load"), (v ptr) {
744 let ty = substs.type_at(0);
746 ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
748 report_atomic_type_validation_error(fx, intrinsic, span, ty);
752 let clif_ty = fx.clif_type(ty).unwrap();
754 let val = fx.bcx.ins().atomic_load(clif_ty, MemFlags::trusted(), ptr);
756 let val = CValue::by_val(val, fx.layout_of(ty));
757 ret.write_cvalue(fx, val);
759 _ if intrinsic.as_str().starts_with("atomic_store"), (v ptr, c val) {
760 let ty = substs.type_at(0);
762 ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
764 report_atomic_type_validation_error(fx, intrinsic, span, ty);
769 let val = val.load_scalar(fx);
771 fx.bcx.ins().atomic_store(MemFlags::trusted(), val, ptr);
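// Each atomic read-modify-write intrinsic below follows the same shape: validate that the
// operand type is a basic integer or raw pointer and return the previous value. Most lower
// to a single `atomic_rmw` with the matching `AtomicRmwOp`; compare-exchange uses
// `atomic_cas` instead.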
773 _ if intrinsic.as_str().starts_with("atomic_xchg"), (v ptr, c new) {
774 let layout = new.layout();
775 match layout.ty.kind() {
776 ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
778 report_atomic_type_validation_error(fx, intrinsic, span, layout.ty);
782 let ty = fx.clif_type(layout.ty).unwrap();
784 let new = new.load_scalar(fx);
786 let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xchg, ptr, new);
788 let old = CValue::by_val(old, layout);
789 ret.write_cvalue(fx, old);
// Both atomic_cxchg_* and atomic_cxchgweak_* lower here.
_ if intrinsic.as_str().starts_with("atomic_cxchg"), (v ptr, c test_old, c new) {
792 let layout = new.layout();
793 match layout.ty.kind() {
794 ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
796 report_atomic_type_validation_error(fx, intrinsic, span, layout.ty);
801 let test_old = test_old.load_scalar(fx);
802 let new = new.load_scalar(fx);
804 let old = fx.bcx.ins().atomic_cas(MemFlags::trusted(), ptr, test_old, new);
805 let is_eq = fx.bcx.ins().icmp(IntCC::Equal, old, test_old);
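// The intrinsic returns the pair (previous value, success flag); success is simply whether
// the previous value equalled `test_old`. Cranelift's `atomic_cas` is a strong CAS, so the
// weak variant cannot spuriously fail and lowers identically.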
807 let ret_val = CValue::by_val_pair(old, fx.bcx.ins().bint(types::I8, is_eq), ret.layout());
808 ret.write_cvalue(fx, ret_val)
811 _ if intrinsic.as_str().starts_with("atomic_xadd"), (v ptr, c amount) {
812 let layout = amount.layout();
813 match layout.ty.kind() {
814 ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
816 report_atomic_type_validation_error(fx, intrinsic, span, layout.ty);
820 let ty = fx.clif_type(layout.ty).unwrap();
822 let amount = amount.load_scalar(fx);
824 let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Add, ptr, amount);
826 let old = CValue::by_val(old, layout);
827 ret.write_cvalue(fx, old);
829 _ if intrinsic.as_str().starts_with("atomic_xsub"), (v ptr, c amount) {
830 let layout = amount.layout();
831 match layout.ty.kind() {
832 ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
834 report_atomic_type_validation_error(fx, intrinsic, span, layout.ty);
838 let ty = fx.clif_type(layout.ty).unwrap();
840 let amount = amount.load_scalar(fx);
842 let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Sub, ptr, amount);
844 let old = CValue::by_val(old, layout);
845 ret.write_cvalue(fx, old);
847 _ if intrinsic.as_str().starts_with("atomic_and"), (v ptr, c src) {
848 let layout = src.layout();
849 match layout.ty.kind() {
850 ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
852 report_atomic_type_validation_error(fx, intrinsic, span, layout.ty);
856 let ty = fx.clif_type(layout.ty).unwrap();
858 let src = src.load_scalar(fx);
860 let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::And, ptr, src);
862 let old = CValue::by_val(old, layout);
863 ret.write_cvalue(fx, old);
865 _ if intrinsic.as_str().starts_with("atomic_or"), (v ptr, c src) {
866 let layout = src.layout();
867 match layout.ty.kind() {
868 ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
870 report_atomic_type_validation_error(fx, intrinsic, span, layout.ty);
874 let ty = fx.clif_type(layout.ty).unwrap();
876 let src = src.load_scalar(fx);
878 let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Or, ptr, src);
880 let old = CValue::by_val(old, layout);
881 ret.write_cvalue(fx, old);
883 _ if intrinsic.as_str().starts_with("atomic_xor"), (v ptr, c src) {
884 let layout = src.layout();
885 match layout.ty.kind() {
886 ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
888 report_atomic_type_validation_error(fx, intrinsic, span, layout.ty);
892 let ty = fx.clif_type(layout.ty).unwrap();
894 let src = src.load_scalar(fx);
896 let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xor, ptr, src);
898 let old = CValue::by_val(old, layout);
899 ret.write_cvalue(fx, old);
901 _ if intrinsic.as_str().starts_with("atomic_nand"), (v ptr, c src) {
902 let layout = src.layout();
903 match layout.ty.kind() {
904 ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
906 report_atomic_type_validation_error(fx, intrinsic, span, layout.ty);
910 let ty = fx.clif_type(layout.ty).unwrap();
912 let src = src.load_scalar(fx);
914 let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Nand, ptr, src);
916 let old = CValue::by_val(old, layout);
917 ret.write_cvalue(fx, old);
919 _ if intrinsic.as_str().starts_with("atomic_max"), (v ptr, c src) {
920 let layout = src.layout();
921 match layout.ty.kind() {
922 ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
924 report_atomic_type_validation_error(fx, intrinsic, span, layout.ty);
928 let ty = fx.clif_type(layout.ty).unwrap();
930 let src = src.load_scalar(fx);
932 let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smax, ptr, src);
934 let old = CValue::by_val(old, layout);
935 ret.write_cvalue(fx, old);
937 _ if intrinsic.as_str().starts_with("atomic_umax"), (v ptr, c src) {
938 let layout = src.layout();
939 match layout.ty.kind() {
940 ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
942 report_atomic_type_validation_error(fx, intrinsic, span, layout.ty);
946 let ty = fx.clif_type(layout.ty).unwrap();
948 let src = src.load_scalar(fx);
950 let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umax, ptr, src);
952 let old = CValue::by_val(old, layout);
953 ret.write_cvalue(fx, old);
955 _ if intrinsic.as_str().starts_with("atomic_min"), (v ptr, c src) {
956 let layout = src.layout();
957 match layout.ty.kind() {
958 ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
960 report_atomic_type_validation_error(fx, intrinsic, span, layout.ty);
964 let ty = fx.clif_type(layout.ty).unwrap();
966 let src = src.load_scalar(fx);
968 let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smin, ptr, src);
970 let old = CValue::by_val(old, layout);
971 ret.write_cvalue(fx, old);
973 _ if intrinsic.as_str().starts_with("atomic_umin"), (v ptr, c src) {
974 let layout = src.layout();
975 match layout.ty.kind() {
976 ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
978 report_atomic_type_validation_error(fx, intrinsic, span, layout.ty);
982 let ty = fx.clif_type(layout.ty).unwrap();
984 let src = src.load_scalar(fx);
986 let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umin, ptr, src);
988 let old = CValue::by_val(old, layout);
989 ret.write_cvalue(fx, old);
// In Rust, floating point min and max don't propagate NaN; in Cranelift they do. For this
// reason it is necessary to use `a.is_nan() ? b : (a >= b ? b : a)` for `minnumf*` and
// `a.is_nan() ? b : (a <= b ? b : a)` for `maxnumf*`. The NaN check is done by comparing
// a float against itself: only NaN is not equal to itself.
996 minnumf32, (v a, v b) {
997 let a_is_nan = fx.bcx.ins().fcmp(FloatCC::NotEqual, a, a);
998 let a_ge_b = fx.bcx.ins().fcmp(FloatCC::GreaterThanOrEqual, a, b);
999 let temp = fx.bcx.ins().select(a_ge_b, b, a);
1000 let val = fx.bcx.ins().select(a_is_nan, b, temp);
1001 let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
1002 ret.write_cvalue(fx, val);
1004 minnumf64, (v a, v b) {
1005 let a_is_nan = fx.bcx.ins().fcmp(FloatCC::NotEqual, a, a);
1006 let a_ge_b = fx.bcx.ins().fcmp(FloatCC::GreaterThanOrEqual, a, b);
1007 let temp = fx.bcx.ins().select(a_ge_b, b, a);
1008 let val = fx.bcx.ins().select(a_is_nan, b, temp);
1009 let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
1010 ret.write_cvalue(fx, val);
1012 maxnumf32, (v a, v b) {
1013 let a_is_nan = fx.bcx.ins().fcmp(FloatCC::NotEqual, a, a);
1014 let a_le_b = fx.bcx.ins().fcmp(FloatCC::LessThanOrEqual, a, b);
1015 let temp = fx.bcx.ins().select(a_le_b, b, a);
1016 let val = fx.bcx.ins().select(a_is_nan, b, temp);
1017 let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
1018 ret.write_cvalue(fx, val);
1020 maxnumf64, (v a, v b) {
1021 let a_is_nan = fx.bcx.ins().fcmp(FloatCC::NotEqual, a, a);
1022 let a_le_b = fx.bcx.ins().fcmp(FloatCC::LessThanOrEqual, a, b);
1023 let temp = fx.bcx.ins().select(a_le_b, b, a);
1024 let val = fx.bcx.ins().select(a_is_nan, b, temp);
1025 let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
1026 ret.write_cvalue(fx, val);
1029 kw.Try, (v f, v data, v _catch_fn) {
1030 // FIXME once unwinding is supported, change this to actually catch panics
1031 let f_sig = fx.bcx.func.import_signature(Signature {
1032 call_conv: fx.target_config.default_call_conv,
1033 params: vec![AbiParam::new(fx.bcx.func.dfg.value_type(data))],
1037 fx.bcx.ins().call_indirect(f_sig, f, &[data]);
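// Without unwinding support the call can never be observed to panic, so unconditionally
// report success by writing zero, the value the `try` intrinsic uses for "no panic".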
1039 let layout = ret.layout();
1040 let ret_val = CValue::const_val(fx, layout, ty::ScalarInt::null(layout.size));
1041 ret.write_cvalue(fx, ret_val);
1044 fadd_fast | fsub_fast | fmul_fast | fdiv_fast | frem_fast, (c x, c y) {
1045 let res = crate::num::codegen_float_binop(fx, match intrinsic {
1046 sym::fadd_fast => BinOp::Add,
1047 sym::fsub_fast => BinOp::Sub,
1048 sym::fmul_fast => BinOp::Mul,
1049 sym::fdiv_fast => BinOp::Div,
1050 sym::frem_fast => BinOp::Rem,
1051 _ => unreachable!(),
1053 ret.write_cvalue(fx, res);
1055 float_to_int_unchecked, (v f) {
1056 let res = crate::cast::clif_int_or_float_cast(
1060 fx.clif_type(ret.layout().ty).unwrap(),
1061 type_sign(ret.layout().ty),
1063 ret.write_cvalue(fx, CValue::by_val(res, ret.layout()));
1066 raw_eq, (v lhs_ref, v rhs_ref) {
1067 let size = fx.layout_of(substs.type_at(0)).layout.size;
1068 // FIXME add and use emit_small_memcmp
let is_eq_value = if size == Size::ZERO {
1071 // No bytes means they're trivially equal
1072 fx.bcx.ins().iconst(types::I8, 1)
1073 } else if let Some(clty) = size.bits().try_into().ok().and_then(Type::int) {
1074 // Can't use `trusted` for these loads; they could be unaligned.
1075 let mut flags = MemFlags::new();
1077 let lhs_val = fx.bcx.ins().load(clty, flags, lhs_ref, 0);
1078 let rhs_val = fx.bcx.ins().load(clty, flags, rhs_ref, 0);
1079 let eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_val, rhs_val);
1080 fx.bcx.ins().bint(types::I8, eq)
1082 // Just call `memcmp` (like slices do in core) when the
1083 // size is too large or it's not a power-of-two.
1084 let signed_bytes = i64::try_from(size.bytes()).unwrap();
1085 let bytes_val = fx.bcx.ins().iconst(fx.pointer_type, signed_bytes);
1086 let params = vec![AbiParam::new(fx.pointer_type); 3];
1087 let returns = vec![AbiParam::new(types::I32)];
1088 let args = &[lhs_ref, rhs_ref, bytes_val];
1089 let cmp = fx.lib_call("memcmp", params, returns, args)[0];
1090 let eq = fx.bcx.ins().icmp_imm(IntCC::Equal, cmp, 0);
1091 fx.bcx.ins().bint(types::I8, eq)
1093 ret.write_cvalue(fx, CValue::by_val(is_eq_value, ret.layout()));
1096 const_allocate, (c _size, c _align) {
// `const_allocate` is only meaningful during CTFE; at runtime it returns a null pointer.
1098 let null = fx.bcx.ins().iconst(fx.pointer_type, 0);
1099 ret.write_cvalue(fx, CValue::by_val(null, ret.layout()));
1102 const_deallocate, (c _ptr, c _size, c _align) {
1107 // FIXME implement black_box semantics
1108 ret.write_cvalue(fx, a);
1112 if let Some((_, dest)) = destination {
1113 let ret_block = fx.get_block(dest);
1114 fx.bcx.ins().jump(ret_block, &[]);
1116 trap_unreachable(fx, "[corruption] Diverging intrinsic returned.");