//! Codegen of intrinsics. This includes `extern "rust-intrinsic"`, `extern "platform-intrinsic"`
//! and LLVM intrinsics that have symbol names starting with `llvm.`.

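/// Bind the operands of an intrinsic call to the given names, calling
/// `bug_on_incorrect_arg_count` when the number of arguments doesn't match.
///
/// A sketch of the usage pattern in the handlers below (`fx`, `args` and `intrinsic` are
/// locals those handlers already have in scope):
///
/// ```ignore (illustrative)
/// intrinsic_args!(fx, args => (base, offset); intrinsic);
/// let offset = offset.load_scalar(fx);
/// ```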
macro_rules! intrinsic_args {
    ($fx:expr, $args:expr => ($($arg:tt),*); $intrinsic:expr) => {
        #[allow(unused_parens)]
        let ($($arg),*) = if let [$($arg),*] = $args {
            ($(codegen_operand($fx, $arg)),*)
        } else {
            $crate::intrinsics::bug_on_incorrect_arg_count($intrinsic);
        };
    }
}

mod cpuid;
mod llvm;
mod llvm_aarch64;
mod llvm_x86;
mod simd;

pub(crate) use cpuid::codegen_cpuid_call;
pub(crate) use llvm::codegen_llvm_intrinsic_call;

use rustc_middle::ty::layout::HasParamEnv;
use rustc_middle::ty::print::with_no_trimmed_paths;
use rustc_middle::ty::subst::SubstsRef;
use rustc_span::symbol::{kw, sym, Symbol};

use crate::prelude::*;
use cranelift_codegen::ir::AtomicRmwOp;

fn bug_on_incorrect_arg_count(intrinsic: impl std::fmt::Display) -> ! {
    bug!("wrong number of args for intrinsic {}", intrinsic);
}

fn report_atomic_type_validation_error<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    intrinsic: Symbol,
    span: Span,
    ty: Ty<'tcx>,
) {
    fx.tcx.sess.span_err(
        span,
        &format!(
            "`{}` intrinsic: expected basic integer or raw pointer type, found `{:?}`",
            intrinsic, ty
        ),
    );
    // Prevent verifier error
    fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
}

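/// Returns the corresponding Cranelift vector type for a vector layout, or `None` when
/// Cranelift can't handle the vector natively.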
pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> Option<Type> {
    let (element, count) = match layout.abi {
        Abi::Vector { element, count } => (element, count),
        _ => unreachable!(),
    };

    match scalar_to_clif_type(tcx, element).by(u32::try_from(count).unwrap()) {
        // Cranelift currently only implements icmp for 128bit vectors.
        Some(vector_ty) if vector_ty.bits() == 128 => Some(vector_ty),
        _ => None,
    }
}

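/// Call `f` for each lane of `val`, writing the returned scalar to the matching lane of
/// `ret`. A sketch of how a lane-wise unary operation would use this (the closure body is
/// illustrative, not taken from a real intrinsic):
///
/// ```ignore (illustrative)
/// simd_for_each_lane(fx, val, ret, &|fx, _lane_ty, _ret_lane_ty, lane| {
///     fx.bcx.ins().ineg(lane)
/// });
/// ```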
fn simd_for_each_lane<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    val: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Ty<'tcx>, Value) -> Value,
) {
    let layout = val.layout();

    let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
    let lane_layout = fx.layout_of(lane_ty);
    let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
    let ret_lane_layout = fx.layout_of(ret_lane_ty);
    assert_eq!(lane_count, ret_lane_count);

    for lane_idx in 0..lane_count {
        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);

        let res_lane = f(fx, lane_layout.ty, ret_lane_layout.ty, lane);
        let res_lane = CValue::by_val(res_lane, ret_lane_layout);

        ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
    }
}

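/// Like `simd_pair_for_each_lane`, but `f` receives and returns whole `CValue`s rather
/// than raw scalars, so the lanes don't have to be simple scalar values.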
fn simd_pair_for_each_lane_typed<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    x: CValue<'tcx>,
    y: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, CValue<'tcx>, CValue<'tcx>) -> CValue<'tcx>,
) {
    assert_eq!(x.layout(), y.layout());
    let layout = x.layout();

    let (lane_count, _lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
    let (ret_lane_count, _ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
    assert_eq!(lane_count, ret_lane_count);

    for lane_idx in 0..lane_count {
        let x_lane = x.value_lane(fx, lane_idx);
        let y_lane = y.value_lane(fx, lane_idx);

        let res_lane = f(fx, x_lane, y_lane);

        ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
    }
}

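/// Call `f` for each pair of lanes of `x` and `y`, writing the returned scalar to the
/// matching lane of `ret`. A lane-wise binary operation would look like this (a sketch,
/// not a real caller):
///
/// ```ignore (illustrative)
/// simd_pair_for_each_lane(fx, x, y, ret, &|fx, _lane_ty, _ret_lane_ty, x_lane, y_lane| {
///     fx.bcx.ins().iadd(x_lane, y_lane)
/// });
/// ```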
fn simd_pair_for_each_lane<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    x: CValue<'tcx>,
    y: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Ty<'tcx>, Value, Value) -> Value,
) {
    assert_eq!(x.layout(), y.layout());
    let layout = x.layout();

    let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
    let lane_layout = fx.layout_of(lane_ty);
    let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
    let ret_lane_layout = fx.layout_of(ret_lane_ty);
    assert_eq!(lane_count, ret_lane_count);

    for lane_idx in 0..lane_count {
        let x_lane = x.value_lane(fx, lane_idx).load_scalar(fx);
        let y_lane = y.value_lane(fx, lane_idx).load_scalar(fx);

        let res_lane = f(fx, lane_layout.ty, ret_lane_layout.ty, x_lane, y_lane);
        let res_lane = CValue::by_val(res_lane, ret_lane_layout);

        ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
    }
}

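/// Fold all lanes of `val` into a single value with `f`, seeded with `acc` if provided
/// and with the first lane otherwise, and write the result to `ret`.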
fn simd_reduce<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    val: CValue<'tcx>,
    acc: Option<Value>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Value, Value) -> Value,
) {
    let (lane_count, lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
    let lane_layout = fx.layout_of(lane_ty);
    assert_eq!(lane_layout, ret.layout());

    let (mut res_val, start_lane) =
        if let Some(acc) = acc { (acc, 0) } else { (val.value_lane(fx, 0).load_scalar(fx), 1) };
    for lane_idx in start_lane..lane_count {
        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
        res_val = f(fx, lane_layout.ty, res_val, lane);
    }
    let res = CValue::by_val(res_val, lane_layout);
    ret.write_cvalue(fx, res);
}

// FIXME move all uses to `simd_reduce`
fn simd_reduce_bool<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    val: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Value, Value) -> Value,
) {
    let (lane_count, _lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
    assert!(ret.layout().ty.is_bool());

    let res_val = val.value_lane(fx, 0).load_scalar(fx);
    let mut res_val = fx.bcx.ins().band_imm(res_val, 1); // mask to boolean
    for lane_idx in 1..lane_count {
        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
        let lane = fx.bcx.ins().band_imm(lane, 1); // mask to boolean
        res_val = f(fx, res_val, lane);
    }
    let res_val = if fx.bcx.func.dfg.value_type(res_val) != types::I8 {
        fx.bcx.ins().ireduce(types::I8, res_val)
    } else {
        res_val
    };
    let res = CValue::by_val(res_val, ret.layout());
    ret.write_cvalue(fx, res);
}

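/// Widen a boolean (0 or 1) to all-zeros or all-ones of the given type, going through the
/// same-sized integer type for floats. This is the representation SIMD comparison results
/// use.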
fn bool_to_zero_or_max_uint<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    ty: Ty<'tcx>,
    val: Value,
) -> Value {
    let ty = fx.clif_type(ty).unwrap();

    let int_ty = match ty {
        types::F32 => types::I32,
        types::F64 => types::I64,
        ty => ty,
    };

    let mut res = fx.bcx.ins().bmask(int_ty, val);

    if ty.is_float() {
        res = fx.bcx.ins().bitcast(ty, res);
    }

    res
}

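/// Entry point for intrinsic codegen: handles non-returning intrinsics up front, then
/// dispatches to the SIMD, float and regular intrinsic handlers below and jumps to the
/// return block.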
pub(crate) fn codegen_intrinsic_call<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    instance: Instance<'tcx>,
    args: &[mir::Operand<'tcx>],
    destination: CPlace<'tcx>,
    target: Option<BasicBlock>,
    source_info: mir::SourceInfo,
) {
    let intrinsic = fx.tcx.item_name(instance.def_id());
    let substs = instance.substs;

    let target = if let Some(target) = target {
        target
    } else {
        // Insert non returning intrinsics here
        match intrinsic {
            sym::abort => {
                fx.bcx.ins().trap(TrapCode::User(0));
            }
            sym::transmute => {
                crate::base::codegen_panic(fx, "Transmuting to uninhabited type.", source_info);
            }
            _ => unimplemented!("unsupported intrinsic {}", intrinsic),
        }
        return;
    };

    if intrinsic.as_str().starts_with("simd_") {
        self::simd::codegen_simd_intrinsic_call(
            fx,
            intrinsic,
            substs,
            args,
            destination,
            source_info.span,
        );
        let ret_block = fx.get_block(target);
        fx.bcx.ins().jump(ret_block, &[]);
    } else if codegen_float_intrinsic_call(fx, intrinsic, args, destination) {
        let ret_block = fx.get_block(target);
        fx.bcx.ins().jump(ret_block, &[]);
    } else {
        codegen_regular_intrinsic_call(
            fx,
            instance,
            intrinsic,
            substs,
            args,
            destination,
            Some(target),
            source_info,
        );
    }
}

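/// Codegen float intrinsics, using native Cranelift instructions where available and
/// libcalls otherwise. Returns `false` if `intrinsic` is not a float intrinsic, so the
/// caller falls through to `codegen_regular_intrinsic_call`.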
fn codegen_float_intrinsic_call<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    intrinsic: Symbol,
    args: &[mir::Operand<'tcx>],
    ret: CPlace<'tcx>,
) -> bool {
    let (name, arg_count, ty) = match intrinsic {
        sym::expf32 => ("expf", 1, fx.tcx.types.f32),
        sym::expf64 => ("exp", 1, fx.tcx.types.f64),
        sym::exp2f32 => ("exp2f", 1, fx.tcx.types.f32),
        sym::exp2f64 => ("exp2", 1, fx.tcx.types.f64),
        sym::sqrtf32 => ("sqrtf", 1, fx.tcx.types.f32),
        sym::sqrtf64 => ("sqrt", 1, fx.tcx.types.f64),
        sym::powif32 => ("__powisf2", 2, fx.tcx.types.f32), // compiler-builtins
        sym::powif64 => ("__powidf2", 2, fx.tcx.types.f64), // compiler-builtins
        sym::powf32 => ("powf", 2, fx.tcx.types.f32),
        sym::powf64 => ("pow", 2, fx.tcx.types.f64),
        sym::logf32 => ("logf", 1, fx.tcx.types.f32),
        sym::logf64 => ("log", 1, fx.tcx.types.f64),
        sym::log2f32 => ("log2f", 1, fx.tcx.types.f32),
        sym::log2f64 => ("log2", 1, fx.tcx.types.f64),
        sym::log10f32 => ("log10f", 1, fx.tcx.types.f32),
        sym::log10f64 => ("log10", 1, fx.tcx.types.f64),
        sym::fabsf32 => ("fabsf", 1, fx.tcx.types.f32),
        sym::fabsf64 => ("fabs", 1, fx.tcx.types.f64),
        sym::fmaf32 => ("fmaf", 3, fx.tcx.types.f32),
        sym::fmaf64 => ("fma", 3, fx.tcx.types.f64),
        sym::copysignf32 => ("copysignf", 2, fx.tcx.types.f32),
        sym::copysignf64 => ("copysign", 2, fx.tcx.types.f64),
        sym::floorf32 => ("floorf", 1, fx.tcx.types.f32),
        sym::floorf64 => ("floor", 1, fx.tcx.types.f64),
        sym::ceilf32 => ("ceilf", 1, fx.tcx.types.f32),
        sym::ceilf64 => ("ceil", 1, fx.tcx.types.f64),
        sym::truncf32 => ("truncf", 1, fx.tcx.types.f32),
        sym::truncf64 => ("trunc", 1, fx.tcx.types.f64),
        sym::roundf32 => ("roundf", 1, fx.tcx.types.f32),
        sym::roundf64 => ("round", 1, fx.tcx.types.f64),
        sym::sinf32 => ("sinf", 1, fx.tcx.types.f32),
        sym::sinf64 => ("sin", 1, fx.tcx.types.f64),
        sym::cosf32 => ("cosf", 1, fx.tcx.types.f32),
        sym::cosf64 => ("cos", 1, fx.tcx.types.f64),
        _ => return false,
    };

    if args.len() != arg_count {
        bug!("wrong number of args for intrinsic {:?}", intrinsic);
    }

    let (a, b, c);
    let args = match args {
        [x] => {
            a = [codegen_operand(fx, x)];
            &a as &[_]
        }
        [x, y] => {
            b = [codegen_operand(fx, x), codegen_operand(fx, y)];
            &b
        }
        [x, y, z] => {
            c = [codegen_operand(fx, x), codegen_operand(fx, y), codegen_operand(fx, z)];
            &c
        }
        _ => unreachable!(),
    };

    let layout = fx.layout_of(ty);
    let res = match intrinsic {
        sym::fmaf32 | sym::fmaf64 => {
            let a = args[0].load_scalar(fx);
            let b = args[1].load_scalar(fx);
            let c = args[2].load_scalar(fx);
            CValue::by_val(fx.bcx.ins().fma(a, b, c), layout)
        }
        sym::copysignf32 | sym::copysignf64 => {
            let a = args[0].load_scalar(fx);
            let b = args[1].load_scalar(fx);
            CValue::by_val(fx.bcx.ins().fcopysign(a, b), layout)
        }
        sym::fabsf32
        | sym::fabsf64
        | sym::floorf32
        | sym::floorf64
        | sym::ceilf32
        | sym::ceilf64
        | sym::truncf32
        | sym::truncf64 => {
            let a = args[0].load_scalar(fx);

            let val = match intrinsic {
                sym::fabsf32 | sym::fabsf64 => fx.bcx.ins().fabs(a),
                sym::floorf32 | sym::floorf64 => fx.bcx.ins().floor(a),
                sym::ceilf32 | sym::ceilf64 => fx.bcx.ins().ceil(a),
                sym::truncf32 | sym::truncf64 => fx.bcx.ins().trunc(a),
                _ => unreachable!(),
            };

            CValue::by_val(val, layout)
        }

        // These intrinsics aren't supported natively by Cranelift.
        // Lower them to a libcall.
        _ => fx.easy_call(name, &args, ty),
    };

    ret.write_cvalue(fx, res);

    true
}

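/// Codegen all intrinsics that are neither SIMD nor float intrinsics.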
fn codegen_regular_intrinsic_call<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    instance: Instance<'tcx>,
    intrinsic: Symbol,
    substs: SubstsRef<'tcx>,
    args: &[mir::Operand<'tcx>],
    ret: CPlace<'tcx>,
    destination: Option<BasicBlock>,
    source_info: mir::SourceInfo,
) {
    let usize_layout = fx.layout_of(fx.tcx.types.usize);

    match intrinsic {
        sym::likely | sym::unlikely => {
            intrinsic_args!(fx, args => (a); intrinsic);

            ret.write_cvalue(fx, a);
        }
        sym::breakpoint => {
            intrinsic_args!(fx, args => (); intrinsic);

            fx.bcx.ins().debugtrap();
        }
        sym::copy | sym::copy_nonoverlapping => {
            intrinsic_args!(fx, args => (src, dst, count); intrinsic);
            let src = src.load_scalar(fx);
            let dst = dst.load_scalar(fx);
            let count = count.load_scalar(fx);

            let elem_ty = substs.type_at(0);
            let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
            assert_eq!(args.len(), 3);
            let byte_amount =
                if elem_size != 1 { fx.bcx.ins().imul_imm(count, elem_size as i64) } else { count };

            if intrinsic == sym::copy_nonoverlapping {
                // FIXME emit_small_memcpy
                fx.bcx.call_memcpy(fx.target_config, dst, src, byte_amount);
            } else {
                // FIXME emit_small_memmove
                fx.bcx.call_memmove(fx.target_config, dst, src, byte_amount);
            }
        }
        sym::volatile_copy_memory | sym::volatile_copy_nonoverlapping_memory => {
            // NOTE: the volatile variants have src and dst swapped
            intrinsic_args!(fx, args => (dst, src, count); intrinsic);
            let dst = dst.load_scalar(fx);
            let src = src.load_scalar(fx);
            let count = count.load_scalar(fx);

            let elem_ty = substs.type_at(0);
            let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
            assert_eq!(args.len(), 3);
            let byte_amount =
                if elem_size != 1 { fx.bcx.ins().imul_imm(count, elem_size as i64) } else { count };

            // FIXME make the copy actually volatile when using emit_small_mem{cpy,move}
            if intrinsic == sym::volatile_copy_nonoverlapping_memory {
                // FIXME emit_small_memcpy
                fx.bcx.call_memcpy(fx.target_config, dst, src, byte_amount);
            } else {
                // FIXME emit_small_memmove
                fx.bcx.call_memmove(fx.target_config, dst, src, byte_amount);
            }
        }
        sym::size_of_val => {
            intrinsic_args!(fx, args => (ptr); intrinsic);

            let layout = fx.layout_of(substs.type_at(0));
            // Note: Can't use is_unsized here as truly unsized types need to take the fixed size
            // branch
            let size = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
                let (_ptr, info) = ptr.load_scalar_pair(fx);
                let (size, _align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
                size
            } else {
                fx.bcx.ins().iconst(fx.pointer_type, layout.size.bytes() as i64)
            };
            ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
        }
        sym::min_align_of_val => {
            intrinsic_args!(fx, args => (ptr); intrinsic);

            let layout = fx.layout_of(substs.type_at(0));
            // Note: Can't use is_unsized here as truly unsized types need to take the fixed size
            // branch
            let align = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
                let (_ptr, info) = ptr.load_scalar_pair(fx);
                let (_size, align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
                align
            } else {
                fx.bcx.ins().iconst(fx.pointer_type, layout.align.abi.bytes() as i64)
            };
            ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
        }

        sym::vtable_size => {
            intrinsic_args!(fx, args => (vtable); intrinsic);
            let vtable = vtable.load_scalar(fx);

            let size = crate::vtable::size_of_obj(fx, vtable);
            ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
        }

        sym::vtable_align => {
            intrinsic_args!(fx, args => (vtable); intrinsic);
            let vtable = vtable.load_scalar(fx);

            let align = crate::vtable::min_align_of_obj(fx, vtable);
            ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
        }

        sym::unchecked_add
        | sym::unchecked_sub
        | sym::unchecked_mul
        | sym::unchecked_div
        | sym::exact_div
        | sym::unchecked_rem
        | sym::unchecked_shl
        | sym::unchecked_shr => {
            intrinsic_args!(fx, args => (x, y); intrinsic);

            // FIXME trap on overflow
            let bin_op = match intrinsic {
                sym::unchecked_add => BinOp::Add,
                sym::unchecked_sub => BinOp::Sub,
                sym::unchecked_mul => BinOp::Mul,
                sym::unchecked_div | sym::exact_div => BinOp::Div,
                sym::unchecked_rem => BinOp::Rem,
                sym::unchecked_shl => BinOp::Shl,
                sym::unchecked_shr => BinOp::Shr,
                _ => unreachable!(),
            };
            let res = crate::num::codegen_int_binop(fx, bin_op, x, y);
            ret.write_cvalue(fx, res);
        }
        sym::add_with_overflow | sym::sub_with_overflow | sym::mul_with_overflow => {
            intrinsic_args!(fx, args => (x, y); intrinsic);

            assert_eq!(x.layout().ty, y.layout().ty);
            let bin_op = match intrinsic {
                sym::add_with_overflow => BinOp::Add,
                sym::sub_with_overflow => BinOp::Sub,
                sym::mul_with_overflow => BinOp::Mul,
                _ => unreachable!(),
            };

            let res = crate::num::codegen_checked_int_binop(fx, bin_op, x, y);
            ret.write_cvalue(fx, res);
        }
        sym::saturating_add | sym::saturating_sub => {
            intrinsic_args!(fx, args => (lhs, rhs); intrinsic);

            assert_eq!(lhs.layout().ty, rhs.layout().ty);
            let bin_op = match intrinsic {
                sym::saturating_add => BinOp::Add,
                sym::saturating_sub => BinOp::Sub,
                _ => unreachable!(),
            };

            let res = crate::num::codegen_saturating_int_binop(fx, bin_op, lhs, rhs);
            ret.write_cvalue(fx, res);
        }
        sym::rotate_left => {
            intrinsic_args!(fx, args => (x, y); intrinsic);
            let y = y.load_scalar(fx);

            let layout = x.layout();
            let x = x.load_scalar(fx);
            let res = fx.bcx.ins().rotl(x, y);
            ret.write_cvalue(fx, CValue::by_val(res, layout));
        }
        sym::rotate_right => {
            intrinsic_args!(fx, args => (x, y); intrinsic);
            let y = y.load_scalar(fx);

            let layout = x.layout();
            let x = x.load_scalar(fx);
            let res = fx.bcx.ins().rotr(x, y);
            ret.write_cvalue(fx, CValue::by_val(res, layout));
        }

        // The only difference between offset and arith_offset is regarding UB. Because Cranelift
        // doesn't have UB both are codegen'ed the same way
        sym::offset | sym::arith_offset => {
            intrinsic_args!(fx, args => (base, offset); intrinsic);
            let offset = offset.load_scalar(fx);

            let pointee_ty = base.layout().ty.builtin_deref(true).unwrap().ty;
            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
            let ptr_diff = if pointee_size != 1 {
                fx.bcx.ins().imul_imm(offset, pointee_size as i64)
            } else {
                offset
            };
            let base_val = base.load_scalar(fx);
            let res = fx.bcx.ins().iadd(base_val, ptr_diff);
            ret.write_cvalue(fx, CValue::by_val(res, base.layout()));
        }

        sym::ptr_mask => {
            intrinsic_args!(fx, args => (ptr, mask); intrinsic);
            let ptr_layout = ptr.layout();
            let ptr = ptr.load_scalar(fx);
            let mask = mask.load_scalar(fx);
            // Write the masked pointer back; discarding the `band` result would make this a no-op.
            let res = fx.bcx.ins().band(ptr, mask);
            ret.write_cvalue(fx, CValue::by_val(res, ptr_layout));
        }

        sym::transmute => {
            intrinsic_args!(fx, args => (from); intrinsic);

            ret.write_cvalue_transmute(fx, from);
        }
        sym::write_bytes | sym::volatile_set_memory => {
            intrinsic_args!(fx, args => (dst, val, count); intrinsic);
            let val = val.load_scalar(fx);
            let count = count.load_scalar(fx);

            let pointee_ty = dst.layout().ty.builtin_deref(true).unwrap().ty;
            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
            let count = if pointee_size != 1 {
                fx.bcx.ins().imul_imm(count, pointee_size as i64)
            } else {
                count
            };
            let dst_ptr = dst.load_scalar(fx);
            // FIXME make the memset actually volatile when switching to emit_small_memset
            // FIXME use emit_small_memset
            fx.bcx.call_memset(fx.target_config, dst_ptr, val, count);
        }
        sym::ctlz | sym::ctlz_nonzero => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            // FIXME trap on `ctlz_nonzero` with zero arg.
            let res = fx.bcx.ins().clz(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::cttz | sym::cttz_nonzero => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            // FIXME trap on `cttz_nonzero` with zero arg.
            let res = fx.bcx.ins().ctz(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::ctpop => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            let res = fx.bcx.ins().popcnt(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::bitreverse => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            let res = fx.bcx.ins().bitrev(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::bswap => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            let res = if fx.bcx.func.dfg.value_type(val) == types::I8 {
                val
            } else {
                fx.bcx.ins().bswap(val)
            };
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::assert_inhabited | sym::assert_zero_valid | sym::assert_mem_uninitialized_valid => {
            intrinsic_args!(fx, args => (); intrinsic);

            let layout = fx.layout_of(substs.type_at(0));
            if layout.abi.is_uninhabited() {
                with_no_trimmed_paths!({
                    crate::base::codegen_panic(
                        fx,
                        &format!("attempted to instantiate uninhabited type `{}`", layout.ty),
                        source_info,
                    )
                });
                return;
            }

            if intrinsic == sym::assert_zero_valid
                && !fx.tcx.permits_zero_init(fx.param_env().and(layout))
            {
                with_no_trimmed_paths!({
                    crate::base::codegen_panic(
                        fx,
                        &format!(
                            "attempted to zero-initialize type `{}`, which is invalid",
                            layout.ty
                        ),
                        source_info,
                    );
                });
                return;
            }

            if intrinsic == sym::assert_mem_uninitialized_valid
                && !fx.tcx.permits_uninit_init(fx.param_env().and(layout))
            {
                with_no_trimmed_paths!({
                    crate::base::codegen_panic(
                        fx,
                        &format!(
                            "attempted to leave type `{}` uninitialized, which is invalid",
                            layout.ty
                        ),
                        source_info,
                    )
                });
                return;
            }
        }

        sym::volatile_load | sym::unaligned_volatile_load => {
            intrinsic_args!(fx, args => (ptr); intrinsic);

            // Cranelift treats loads as volatile by default
            // FIXME correctly handle unaligned_volatile_load
            let inner_layout = fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
            let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
            ret.write_cvalue(fx, val);
        }
        sym::volatile_store | sym::unaligned_volatile_store => {
            intrinsic_args!(fx, args => (ptr, val); intrinsic);
            let ptr = ptr.load_scalar(fx);

            // Cranelift treats stores as volatile by default
            // FIXME correctly handle unaligned_volatile_store
            let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
            dest.write_cvalue(fx, val);
        }

        sym::pref_align_of
        | sym::needs_drop
        | sym::type_id
        | sym::type_name
        | sym::variant_count => {
            intrinsic_args!(fx, args => (); intrinsic);

            let const_val =
                fx.tcx.const_eval_instance(ParamEnv::reveal_all(), instance, None).unwrap();
            let val = crate::constant::codegen_const_value(fx, const_val, ret.layout().ty);
            ret.write_cvalue(fx, val);
        }

        sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
            intrinsic_args!(fx, args => (ptr, base); intrinsic);
            let ptr = ptr.load_scalar(fx);
            let base = base.load_scalar(fx);
            let ty = substs.type_at(0);

            let pointee_size: u64 = fx.layout_of(ty).size.bytes();
            let diff_bytes = fx.bcx.ins().isub(ptr, base);
            // FIXME this can be an exact division.
            let val = if intrinsic == sym::ptr_offset_from_unsigned {
                let usize_layout = fx.layout_of(fx.tcx.types.usize);
                // Because diff_bytes ULE isize::MAX, this would be fine as signed,
                // but unsigned is slightly easier to codegen, so might as well.
                CValue::by_val(fx.bcx.ins().udiv_imm(diff_bytes, pointee_size as i64), usize_layout)
            } else {
                let isize_layout = fx.layout_of(fx.tcx.types.isize);
                CValue::by_val(fx.bcx.ins().sdiv_imm(diff_bytes, pointee_size as i64), isize_layout)
            };
            ret.write_cvalue(fx, val);
        }

        sym::ptr_guaranteed_cmp => {
            intrinsic_args!(fx, args => (a, b); intrinsic);

            let val = crate::num::codegen_ptr_binop(fx, BinOp::Eq, a, b).load_scalar(fx);
            ret.write_cvalue(fx, CValue::by_val(val, fx.layout_of(fx.tcx.types.u8)));
        }

        sym::caller_location => {
            intrinsic_args!(fx, args => (); intrinsic);

            let caller_location = fx.get_caller_location(source_info);
            ret.write_cvalue(fx, caller_location);
        }

        _ if intrinsic.as_str().starts_with("atomic_fence") => {
            intrinsic_args!(fx, args => (); intrinsic);

            fx.bcx.ins().fence();
        }
        _ if intrinsic.as_str().starts_with("atomic_singlethreadfence") => {
            intrinsic_args!(fx, args => (); intrinsic);

            // FIXME use a compiler fence once Cranelift supports it
            fx.bcx.ins().fence();
        }
        _ if intrinsic.as_str().starts_with("atomic_load") => {
            intrinsic_args!(fx, args => (ptr); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let ty = substs.type_at(0);
            match ty.kind() {
                ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
                    // FIXME implement 128bit atomics
                    if fx.tcx.is_compiler_builtins(LOCAL_CRATE) {
                        // special case for compiler-builtins to avoid having to patch it
                        crate::trap::trap_unimplemented(fx, "128bit atomics not yet supported");
                        return;
                    } else {
                        fx.tcx
                            .sess
                            .span_fatal(source_info.span, "128bit atomics not yet supported");
                    }
                }
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, ty);
                    return;
                }
            }
            let clif_ty = fx.clif_type(ty).unwrap();

            let val = fx.bcx.ins().atomic_load(clif_ty, MemFlags::trusted(), ptr);

            let val = CValue::by_val(val, fx.layout_of(ty));
            ret.write_cvalue(fx, val);
        }
        _ if intrinsic.as_str().starts_with("atomic_store") => {
            intrinsic_args!(fx, args => (ptr, val); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let ty = substs.type_at(0);
            match ty.kind() {
                ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
                    // FIXME implement 128bit atomics
                    if fx.tcx.is_compiler_builtins(LOCAL_CRATE) {
                        // special case for compiler-builtins to avoid having to patch it
                        crate::trap::trap_unimplemented(fx, "128bit atomics not yet supported");
                        return;
                    } else {
                        fx.tcx
                            .sess
                            .span_fatal(source_info.span, "128bit atomics not yet supported");
                    }
                }
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, ty);
                    return;
                }
            }

            let val = val.load_scalar(fx);

            fx.bcx.ins().atomic_store(MemFlags::trusted(), val, ptr);
        }
        _ if intrinsic.as_str().starts_with("atomic_xchg") => {
            intrinsic_args!(fx, args => (ptr, new); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = new.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let new = new.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xchg, ptr, new);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_cxchg") => {
            // both atomic_cxchg_* and atomic_cxchgweak_*
            intrinsic_args!(fx, args => (ptr, test_old, new); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = new.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }

            let test_old = test_old.load_scalar(fx);
            let new = new.load_scalar(fx);

            let old = fx.bcx.ins().atomic_cas(MemFlags::trusted(), ptr, test_old, new);
            let is_eq = fx.bcx.ins().icmp(IntCC::Equal, old, test_old);

            let ret_val = CValue::by_val_pair(old, is_eq, ret.layout());
            ret.write_cvalue(fx, ret_val)
        }

        _ if intrinsic.as_str().starts_with("atomic_xadd") => {
            intrinsic_args!(fx, args => (ptr, amount); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = amount.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let amount = amount.load_scalar(fx);

            let old =
                fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Add, ptr, amount);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_xsub") => {
            intrinsic_args!(fx, args => (ptr, amount); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = amount.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let amount = amount.load_scalar(fx);

            let old =
                fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Sub, ptr, amount);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_and") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::And, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_or") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Or, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_xor") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xor, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_nand") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Nand, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }

        _ if intrinsic.as_str().starts_with("atomic_max") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smax, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_umax") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umax, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_min") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smin, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_umin") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umin, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }

        sym::minnumf32 => {
            intrinsic_args!(fx, args => (a, b); intrinsic);
            let a = a.load_scalar(fx);
            let b = b.load_scalar(fx);

            let val = crate::num::codegen_float_min(fx, a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
            ret.write_cvalue(fx, val);
        }
        sym::minnumf64 => {
            intrinsic_args!(fx, args => (a, b); intrinsic);
            let a = a.load_scalar(fx);
            let b = b.load_scalar(fx);

            let val = crate::num::codegen_float_min(fx, a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
            ret.write_cvalue(fx, val);
        }
        sym::maxnumf32 => {
            intrinsic_args!(fx, args => (a, b); intrinsic);
            let a = a.load_scalar(fx);
            let b = b.load_scalar(fx);

            let val = crate::num::codegen_float_max(fx, a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
            ret.write_cvalue(fx, val);
        }
        sym::maxnumf64 => {
            intrinsic_args!(fx, args => (a, b); intrinsic);
            let a = a.load_scalar(fx);
            let b = b.load_scalar(fx);

            let val = crate::num::codegen_float_max(fx, a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
            ret.write_cvalue(fx, val);
        }

        kw::Try => {
            intrinsic_args!(fx, args => (f, data, catch_fn); intrinsic);
            let f = f.load_scalar(fx);
            let data = data.load_scalar(fx);
            let _catch_fn = catch_fn.load_scalar(fx);

            // FIXME once unwinding is supported, change this to actually catch panics
            let f_sig = fx.bcx.func.import_signature(Signature {
                call_conv: fx.target_config.default_call_conv,
                params: vec![AbiParam::new(pointer_ty(fx.tcx))],
                returns: vec![],
            });

            fx.bcx.ins().call_indirect(f_sig, f, &[data]);

            let layout = ret.layout();
            let ret_val = CValue::const_val(fx, layout, ty::ScalarInt::null(layout.size));
            ret.write_cvalue(fx, ret_val);
        }

        sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
            intrinsic_args!(fx, args => (x, y); intrinsic);

            let res = crate::num::codegen_float_binop(
                fx,
                match intrinsic {
                    sym::fadd_fast => BinOp::Add,
                    sym::fsub_fast => BinOp::Sub,
                    sym::fmul_fast => BinOp::Mul,
                    sym::fdiv_fast => BinOp::Div,
                    sym::frem_fast => BinOp::Rem,
                    _ => unreachable!(),
                },
                x,
                y,
            );
            ret.write_cvalue(fx, res);
        }
        sym::float_to_int_unchecked => {
            intrinsic_args!(fx, args => (f); intrinsic);
            let f = f.load_scalar(fx);

            let res = crate::cast::clif_int_or_float_cast(
                fx,
                f,
                false,
                fx.clif_type(ret.layout().ty).unwrap(),
                type_sign(ret.layout().ty),
            );
            ret.write_cvalue(fx, CValue::by_val(res, ret.layout()));
        }

        sym::raw_eq => {
            intrinsic_args!(fx, args => (lhs_ref, rhs_ref); intrinsic);
            let lhs_ref = lhs_ref.load_scalar(fx);
            let rhs_ref = rhs_ref.load_scalar(fx);

            let size = fx.layout_of(substs.type_at(0)).layout.size();
            // FIXME add and use emit_small_memcmp
            let is_eq_value = if size == Size::ZERO {
                // No bytes means they're trivially equal
                fx.bcx.ins().iconst(types::I8, 1)
            } else if let Some(clty) = size.bits().try_into().ok().and_then(Type::int) {
                // Can't use `trusted` for these loads; they could be unaligned.
                let mut flags = MemFlags::new();
                flags.set_notrap();
                let lhs_val = fx.bcx.ins().load(clty, flags, lhs_ref, 0);
                let rhs_val = fx.bcx.ins().load(clty, flags, rhs_ref, 0);
                fx.bcx.ins().icmp(IntCC::Equal, lhs_val, rhs_val)
            } else {
                // Just call `memcmp` (like slices do in core) when the
                // size is too large or it's not a power-of-two.
                let signed_bytes = i64::try_from(size.bytes()).unwrap();
                let bytes_val = fx.bcx.ins().iconst(fx.pointer_type, signed_bytes);
                let params = vec![AbiParam::new(fx.pointer_type); 3];
                let returns = vec![AbiParam::new(types::I32)];
                let args = &[lhs_ref, rhs_ref, bytes_val];
                let cmp = fx.lib_call("memcmp", params, returns, args)[0];
                fx.bcx.ins().icmp_imm(IntCC::Equal, cmp, 0)
            };
            ret.write_cvalue(fx, CValue::by_val(is_eq_value, ret.layout()));
        }

        sym::const_allocate => {
            intrinsic_args!(fx, args => (_size, _align); intrinsic);

            // returns a null pointer at runtime.
            let null = fx.bcx.ins().iconst(fx.pointer_type, 0);
            ret.write_cvalue(fx, CValue::by_val(null, ret.layout()));
        }

        sym::const_deallocate => {
            intrinsic_args!(fx, args => (_ptr, _size, _align); intrinsic);

            // nop at runtime.
        }

        sym::black_box => {
            intrinsic_args!(fx, args => (a); intrinsic);

            // FIXME implement black_box semantics
            ret.write_cvalue(fx, a);
        }

        // FIXME implement variadics in cranelift
        sym::va_copy | sym::va_arg | sym::va_end => {
            fx.tcx.sess.span_fatal(
                source_info.span,
                "Defining variadic functions is not yet supported by Cranelift",
            );
        }

        _ => {
            fx.tcx
                .sess
                .span_fatal(source_info.span, &format!("unsupported intrinsic {}", intrinsic));
        }
    }

    let ret_block = fx.get_block(destination.unwrap());
    fx.bcx.ins().jump(ret_block, &[]);
}