//! Codegen of intrinsics. This includes `extern "rust-intrinsic"`, `extern "platform-intrinsic"`
//! and LLVM intrinsics that have symbol names starting with `llvm.`.
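// `intrinsic_args!` destructures the operand slice of an intrinsic into named
// `CValue`s via `codegen_operand`, calling `bug_on_incorrect_arg_count` when the
// slice length doesn't match the pattern. Typical use, as in the match arms below:
//
//     intrinsic_args!(fx, args => (base, offset); intrinsic);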
macro_rules! intrinsic_args {
    ($fx:expr, $args:expr => ($($arg:tt),*); $intrinsic:expr) => {
        #[allow(unused_parens)]
        let ($($arg),*) = if let [$($arg),*] = $args {
            ($(codegen_operand($fx, $arg)),*)
        } else {
            $crate::intrinsics::bug_on_incorrect_arg_count($intrinsic);
        };
    }
}

mod cpuid;
mod llvm;
mod simd;

pub(crate) use cpuid::codegen_cpuid_call;
pub(crate) use llvm::codegen_llvm_intrinsic_call;
use rustc_middle::ty::print::with_no_trimmed_paths;
use rustc_middle::ty::subst::SubstsRef;
use rustc_span::symbol::{kw, sym, Symbol};

use crate::prelude::*;
use cranelift_codegen::ir::AtomicRmwOp;
fn bug_on_incorrect_arg_count(intrinsic: impl std::fmt::Display) -> ! {
    bug!("wrong number of args for intrinsic {}", intrinsic);
}
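/// Reports an invalid type for an atomic intrinsic (anything other than a basic
/// integer or raw pointer) and emits a trap so the block is still terminated,
/// keeping the Cranelift verifier happy.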
fn report_atomic_type_validation_error<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    intrinsic: Symbol,
    span: Span,
    ty: Ty<'tcx>,
) {
    fx.tcx.sess.span_err(
        span,
        &format!(
            "`{}` intrinsic: expected basic integer or raw pointer type, found `{:?}`",
            intrinsic, ty
        ),
    );
    // Prevent verifier error
    fx.bcx.ins().trap(TrapCode::UnreachableCodeReached);
}
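/// Returns the Cranelift vector type to use for a SIMD layout, or `None` when the
/// vector has to be lowered lane by lane instead.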
pub(crate) fn clif_vector_type<'tcx>(tcx: TyCtxt<'tcx>, layout: TyAndLayout<'tcx>) -> Option<Type> {
    let (element, count) = match layout.abi {
        Abi::Vector { element, count } => (element, count),
        _ => unreachable!(),
    };

    match scalar_to_clif_type(tcx, element).by(u32::try_from(count).unwrap()) {
        // Cranelift currently only implements icmp for 128bit vectors.
        Some(vector_ty) if vector_ty.bits() == 128 => Some(vector_ty),
        _ => None,
    }
}
fn simd_for_each_lane<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    val: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Ty<'tcx>, Value) -> Value,
) {
    let layout = val.layout();

    let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
    let lane_layout = fx.layout_of(lane_ty);
    let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
    let ret_lane_layout = fx.layout_of(ret_lane_ty);
    assert_eq!(lane_count, ret_lane_count);

    for lane_idx in 0..lane_count {
        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);

        let res_lane = f(fx, lane_layout.ty, ret_lane_layout.ty, lane);
        let res_lane = CValue::by_val(res_lane, ret_lane_layout);

        ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
    }
}
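/// Like `simd_for_each_lane`, but zips the lanes of two vectors `x` and `y`
/// (which must have identical layouts) through the binary operation `f`.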
fn simd_pair_for_each_lane<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    x: CValue<'tcx>,
    y: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Ty<'tcx>, Value, Value) -> Value,
) {
    assert_eq!(x.layout(), y.layout());
    let layout = x.layout();

    let (lane_count, lane_ty) = layout.ty.simd_size_and_type(fx.tcx);
    let lane_layout = fx.layout_of(lane_ty);
    let (ret_lane_count, ret_lane_ty) = ret.layout().ty.simd_size_and_type(fx.tcx);
    let ret_lane_layout = fx.layout_of(ret_lane_ty);
    assert_eq!(lane_count, ret_lane_count);

    for lane_idx in 0..lane_count {
        let x_lane = x.value_lane(fx, lane_idx).load_scalar(fx);
        let y_lane = y.value_lane(fx, lane_idx).load_scalar(fx);

        let res_lane = f(fx, lane_layout.ty, ret_lane_layout.ty, x_lane, y_lane);
        let res_lane = CValue::by_val(res_lane, ret_lane_layout);

        ret.place_lane(fx, lane_idx).write_cvalue(fx, res_lane);
    }
}
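/// Folds all lanes of `val` into a single scalar with `f`, seeding the fold with
/// `acc` when given and with the first lane otherwise, then writes the result to
/// `ret`.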
fn simd_reduce<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    val: CValue<'tcx>,
    acc: Option<Value>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Ty<'tcx>, Value, Value) -> Value,
) {
    let (lane_count, lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
    let lane_layout = fx.layout_of(lane_ty);
    assert_eq!(lane_layout, ret.layout());

    let (mut res_val, start_lane) =
        if let Some(acc) = acc { (acc, 0) } else { (val.value_lane(fx, 0).load_scalar(fx), 1) };
    for lane_idx in start_lane..lane_count {
        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
        res_val = f(fx, lane_layout.ty, res_val, lane);
    }
    let res = CValue::by_val(res_val, lane_layout);
    ret.write_cvalue(fx, res);
}
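/// Reduces a vector of booleans to a single `bool` by masking every lane to its
/// low bit and combining the lanes with `f`.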
// FIXME move all uses to `simd_reduce`
fn simd_reduce_bool<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    val: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: &dyn Fn(&mut FunctionCx<'_, '_, 'tcx>, Value, Value) -> Value,
) {
    let (lane_count, _lane_ty) = val.layout().ty.simd_size_and_type(fx.tcx);
    assert!(ret.layout().ty.is_bool());

    let res_val = val.value_lane(fx, 0).load_scalar(fx);
    let mut res_val = fx.bcx.ins().band_imm(res_val, 1); // mask to boolean
    for lane_idx in 1..lane_count {
        let lane = val.value_lane(fx, lane_idx).load_scalar(fx);
        let lane = fx.bcx.ins().band_imm(lane, 1); // mask to boolean
        res_val = f(fx, res_val, lane);
    }
    let res_val = if fx.bcx.func.dfg.value_type(res_val) != types::I8 {
        fx.bcx.ins().ireduce(types::I8, res_val)
    } else {
        res_val
    };
    let res = CValue::by_val(res_val, ret.layout());
    ret.write_cvalue(fx, res);
}
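/// Maps a boolean to all-zeros or all-ones in the given type: the bool is widened
/// to an integer and negated, so `true` (1) becomes -1, the all-ones bit pattern.
/// For `f32`/`f64` the pattern is built in the same-sized integer type and bitcast.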
fn bool_to_zero_or_max_uint<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    ty: Ty<'tcx>,
    val: Value,
) -> Value {
    let ty = fx.clif_type(ty).unwrap();

    let int_ty = match ty {
        types::F32 => types::I32,
        types::F64 => types::I64,
        ty => ty,
    };

    let val = fx.bcx.ins().bint(int_ty, val);
    let mut res = fx.bcx.ins().ineg(val);

    if ty.is_float() {
        res = fx.bcx.ins().bitcast(ty, res);
    }

    res
}
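/// Main entry point for intrinsic codegen. Intrinsics without a return target are
/// handled first; the rest are dispatched to the SIMD, float, or regular intrinsic
/// lowering, in that order.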
pub(crate) fn codegen_intrinsic_call<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    instance: Instance<'tcx>,
    args: &[mir::Operand<'tcx>],
    destination: CPlace<'tcx>,
    target: Option<BasicBlock>,
    source_info: mir::SourceInfo,
) {
    let intrinsic = fx.tcx.item_name(instance.def_id());
    let substs = instance.substs;

    let target = if let Some(target) = target {
        target
    } else {
        // Insert non returning intrinsics here
        match intrinsic {
            sym::abort => {
                fx.bcx.ins().trap(TrapCode::User(0));
            }
            sym::transmute => {
                crate::base::codegen_panic(fx, "Transmuting to uninhabited type.", source_info);
            }
            _ => unimplemented!("unsupported intrinsic {}", intrinsic),
        }
        return;
    };

    if intrinsic.as_str().starts_with("simd_") {
        self::simd::codegen_simd_intrinsic_call(
            fx,
            intrinsic,
            substs,
            args,
            destination,
            source_info.span,
        );
        let ret_block = fx.get_block(target);
        fx.bcx.ins().jump(ret_block, &[]);
    } else if codegen_float_intrinsic_call(fx, intrinsic, args, destination) {
        let ret_block = fx.get_block(target);
        fx.bcx.ins().jump(ret_block, &[]);
    } else {
        codegen_regular_intrinsic_call(
            fx,
            instance,
            intrinsic,
            substs,
            args,
            destination,
            Some(target),
            source_info,
        );
    }
}
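/// Lowers the float intrinsics, using a native Cranelift instruction where one
/// exists and otherwise emitting a libcall to the listed libm (or
/// compiler-builtins) symbol. Returns `false` when `intrinsic` is not a float
/// intrinsic so the caller can fall through to the regular lowering.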
fn codegen_float_intrinsic_call<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    intrinsic: Symbol,
    args: &[mir::Operand<'tcx>],
    ret: CPlace<'tcx>,
) -> bool {
    let (name, arg_count, ty) = match intrinsic {
        sym::expf32 => ("expf", 1, fx.tcx.types.f32),
        sym::expf64 => ("exp", 1, fx.tcx.types.f64),
        sym::exp2f32 => ("exp2f", 1, fx.tcx.types.f32),
        sym::exp2f64 => ("exp2", 1, fx.tcx.types.f64),
        sym::sqrtf32 => ("sqrtf", 1, fx.tcx.types.f32),
        sym::sqrtf64 => ("sqrt", 1, fx.tcx.types.f64),
        sym::powif32 => ("__powisf2", 2, fx.tcx.types.f32), // compiler-builtins
        sym::powif64 => ("__powidf2", 2, fx.tcx.types.f64), // compiler-builtins
        sym::powf32 => ("powf", 2, fx.tcx.types.f32),
        sym::powf64 => ("pow", 2, fx.tcx.types.f64),
        sym::logf32 => ("logf", 1, fx.tcx.types.f32),
        sym::logf64 => ("log", 1, fx.tcx.types.f64),
        sym::log2f32 => ("log2f", 1, fx.tcx.types.f32),
        sym::log2f64 => ("log2", 1, fx.tcx.types.f64),
        sym::log10f32 => ("log10f", 1, fx.tcx.types.f32),
        sym::log10f64 => ("log10", 1, fx.tcx.types.f64),
        sym::fabsf32 => ("fabsf", 1, fx.tcx.types.f32),
        sym::fabsf64 => ("fabs", 1, fx.tcx.types.f64),
        sym::fmaf32 => ("fmaf", 3, fx.tcx.types.f32),
        sym::fmaf64 => ("fma", 3, fx.tcx.types.f64),
        sym::copysignf32 => ("copysignf", 2, fx.tcx.types.f32),
        sym::copysignf64 => ("copysign", 2, fx.tcx.types.f64),
        sym::floorf32 => ("floorf", 1, fx.tcx.types.f32),
        sym::floorf64 => ("floor", 1, fx.tcx.types.f64),
        sym::ceilf32 => ("ceilf", 1, fx.tcx.types.f32),
        sym::ceilf64 => ("ceil", 1, fx.tcx.types.f64),
        sym::truncf32 => ("truncf", 1, fx.tcx.types.f32),
        sym::truncf64 => ("trunc", 1, fx.tcx.types.f64),
        sym::roundf32 => ("roundf", 1, fx.tcx.types.f32),
        sym::roundf64 => ("round", 1, fx.tcx.types.f64),
        sym::sinf32 => ("sinf", 1, fx.tcx.types.f32),
        sym::sinf64 => ("sin", 1, fx.tcx.types.f64),
        sym::cosf32 => ("cosf", 1, fx.tcx.types.f32),
        sym::cosf64 => ("cos", 1, fx.tcx.types.f64),
        _ => return false,
    };

    if args.len() != arg_count {
        bug!("wrong number of args for intrinsic {:?}", intrinsic);
    }

    let (a, b, c);
    let args = match args {
        [x] => {
            a = [codegen_operand(fx, x)];
            &a as &[_]
        }
        [x, y] => {
            b = [codegen_operand(fx, x), codegen_operand(fx, y)];
            &b
        }
        [x, y, z] => {
            c = [codegen_operand(fx, x), codegen_operand(fx, y), codegen_operand(fx, z)];
            &c
        }
        _ => unreachable!(),
    };

    let layout = fx.layout_of(ty);
    let res = match intrinsic {
        sym::fmaf32 | sym::fmaf64 => {
            let a = args[0].load_scalar(fx);
            let b = args[1].load_scalar(fx);
            let c = args[2].load_scalar(fx);
            CValue::by_val(fx.bcx.ins().fma(a, b, c), layout)
        }
        sym::copysignf32 | sym::copysignf64 => {
            let a = args[0].load_scalar(fx);
            let b = args[1].load_scalar(fx);
            CValue::by_val(fx.bcx.ins().fcopysign(a, b), layout)
        }
        sym::fabsf32
        | sym::fabsf64
        | sym::floorf32
        | sym::floorf64
        | sym::ceilf32
        | sym::ceilf64
        | sym::truncf32
        | sym::truncf64 => {
            let a = args[0].load_scalar(fx);

            let val = match intrinsic {
                sym::fabsf32 | sym::fabsf64 => fx.bcx.ins().fabs(a),
                sym::floorf32 | sym::floorf64 => fx.bcx.ins().floor(a),
                sym::ceilf32 | sym::ceilf64 => fx.bcx.ins().ceil(a),
                sym::truncf32 | sym::truncf64 => fx.bcx.ins().trunc(a),
                _ => unreachable!(),
            };

            CValue::by_val(val, layout)
        }
        // These intrinsics aren't supported natively by Cranelift.
        // Lower them to a libcall.
        _ => fx.easy_call(name, &args, ty),
    };

    ret.write_cvalue(fx, res);

    true
}
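/// Lowers all remaining non-SIMD, non-float intrinsics. `destination` is the block
/// to jump to afterwards; arms that diverge (panics, fatal errors) `return` before
/// the jump at the end of the function is emitted.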
fn codegen_regular_intrinsic_call<'tcx>(
    fx: &mut FunctionCx<'_, '_, 'tcx>,
    instance: Instance<'tcx>,
    intrinsic: Symbol,
    substs: SubstsRef<'tcx>,
    args: &[mir::Operand<'tcx>],
    ret: CPlace<'tcx>,
    destination: Option<BasicBlock>,
    source_info: mir::SourceInfo,
) {
    let usize_layout = fx.layout_of(fx.tcx.types.usize);

    match intrinsic {
        sym::likely | sym::unlikely => {
            intrinsic_args!(fx, args => (a); intrinsic);

            ret.write_cvalue(fx, a);
        }
        sym::breakpoint => {
            intrinsic_args!(fx, args => (); intrinsic);

            fx.bcx.ins().debugtrap();
        }
        sym::copy | sym::copy_nonoverlapping => {
            intrinsic_args!(fx, args => (src, dst, count); intrinsic);
            let src = src.load_scalar(fx);
            let dst = dst.load_scalar(fx);
            let count = count.load_scalar(fx);

            let elem_ty = substs.type_at(0);
            let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
            assert_eq!(args.len(), 3);
            let byte_amount =
                if elem_size != 1 { fx.bcx.ins().imul_imm(count, elem_size as i64) } else { count };

            if intrinsic == sym::copy_nonoverlapping {
                // FIXME emit_small_memcpy
                fx.bcx.call_memcpy(fx.target_config, dst, src, byte_amount);
            } else {
                // FIXME emit_small_memmove
                fx.bcx.call_memmove(fx.target_config, dst, src, byte_amount);
            }
        }
        sym::volatile_copy_memory | sym::volatile_copy_nonoverlapping_memory => {
            // NOTE: the volatile variants have src and dst swapped
            intrinsic_args!(fx, args => (dst, src, count); intrinsic);
            let dst = dst.load_scalar(fx);
            let src = src.load_scalar(fx);
            let count = count.load_scalar(fx);

            let elem_ty = substs.type_at(0);
            let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
            assert_eq!(args.len(), 3);
            let byte_amount =
                if elem_size != 1 { fx.bcx.ins().imul_imm(count, elem_size as i64) } else { count };

            // FIXME make the copy actually volatile when using emit_small_mem{cpy,move}
            if intrinsic == sym::volatile_copy_nonoverlapping_memory {
                // FIXME emit_small_memcpy
                fx.bcx.call_memcpy(fx.target_config, dst, src, byte_amount);
            } else {
                // FIXME emit_small_memmove
                fx.bcx.call_memmove(fx.target_config, dst, src, byte_amount);
            }
        }
        sym::size_of_val => {
            intrinsic_args!(fx, args => (ptr); intrinsic);

            let layout = fx.layout_of(substs.type_at(0));
            // Note: Can't use is_unsized here as truly unsized types need to take the fixed size
            // branch
            let size = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
                let (_ptr, info) = ptr.load_scalar_pair(fx);
                let (size, _align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
                size
            } else {
                fx.bcx.ins().iconst(fx.pointer_type, layout.size.bytes() as i64)
            };
            ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
        }
        sym::min_align_of_val => {
            intrinsic_args!(fx, args => (ptr); intrinsic);

            let layout = fx.layout_of(substs.type_at(0));
            // Note: Can't use is_unsized here as truly unsized types need to take the fixed size
            // branch
            let align = if let Abi::ScalarPair(_, _) = ptr.layout().abi {
                let (_ptr, info) = ptr.load_scalar_pair(fx);
                let (_size, align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
                align
            } else {
                fx.bcx.ins().iconst(fx.pointer_type, layout.align.abi.bytes() as i64)
            };
            ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
        }
        sym::vtable_size => {
            intrinsic_args!(fx, args => (vtable); intrinsic);
            let vtable = vtable.load_scalar(fx);

            let size = crate::vtable::size_of_obj(fx, vtable);
            ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
        }

        sym::vtable_align => {
            intrinsic_args!(fx, args => (vtable); intrinsic);
            let vtable = vtable.load_scalar(fx);

            let align = crate::vtable::min_align_of_obj(fx, vtable);
            ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
        }

        sym::unchecked_add
        | sym::unchecked_sub
        | sym::unchecked_mul
        | sym::unchecked_div
        | sym::exact_div
        | sym::unchecked_rem
        | sym::unchecked_shl
        | sym::unchecked_shr => {
            intrinsic_args!(fx, args => (x, y); intrinsic);

            // FIXME trap on overflow
            let bin_op = match intrinsic {
                sym::unchecked_add => BinOp::Add,
                sym::unchecked_sub => BinOp::Sub,
                sym::unchecked_mul => BinOp::Mul,
                sym::unchecked_div | sym::exact_div => BinOp::Div,
                sym::unchecked_rem => BinOp::Rem,
                sym::unchecked_shl => BinOp::Shl,
                sym::unchecked_shr => BinOp::Shr,
                _ => unreachable!(),
            };
            let res = crate::num::codegen_int_binop(fx, bin_op, x, y);
            ret.write_cvalue(fx, res);
        }
        sym::add_with_overflow | sym::sub_with_overflow | sym::mul_with_overflow => {
            intrinsic_args!(fx, args => (x, y); intrinsic);

            assert_eq!(x.layout().ty, y.layout().ty);
            let bin_op = match intrinsic {
                sym::add_with_overflow => BinOp::Add,
                sym::sub_with_overflow => BinOp::Sub,
                sym::mul_with_overflow => BinOp::Mul,
                _ => unreachable!(),
            };

            let res = crate::num::codegen_checked_int_binop(fx, bin_op, x, y);
            ret.write_cvalue(fx, res);
        }
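        // Saturating arithmetic is built on top of the checked operation: compute
        // the result plus an overflow flag, then `select` the type's min/max bound
        // when the flag is set. For signed types the sign of `rhs` determines which
        // bound was overrun, e.g. for i8 both 100 + 50 and 100 - (-50) saturate to 127.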
        sym::saturating_add | sym::saturating_sub => {
            intrinsic_args!(fx, args => (lhs, rhs); intrinsic);

            assert_eq!(lhs.layout().ty, rhs.layout().ty);
            let bin_op = match intrinsic {
                sym::saturating_add => BinOp::Add,
                sym::saturating_sub => BinOp::Sub,
                _ => unreachable!(),
            };

            let signed = type_sign(lhs.layout().ty);

            let checked_res = crate::num::codegen_checked_int_binop(fx, bin_op, lhs, rhs);

            let (val, has_overflow) = checked_res.load_scalar_pair(fx);
            let clif_ty = fx.clif_type(lhs.layout().ty).unwrap();

            let (min, max) = type_min_max_value(&mut fx.bcx, clif_ty, signed);

            let val = match (intrinsic, signed) {
                (sym::saturating_add, false) => fx.bcx.ins().select(has_overflow, max, val),
                (sym::saturating_sub, false) => fx.bcx.ins().select(has_overflow, min, val),
                (sym::saturating_add, true) => {
                    let rhs = rhs.load_scalar(fx);
                    let rhs_ge_zero =
                        fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
                    let sat_val = fx.bcx.ins().select(rhs_ge_zero, max, min);
                    fx.bcx.ins().select(has_overflow, sat_val, val)
                }
                (sym::saturating_sub, true) => {
                    let rhs = rhs.load_scalar(fx);
                    let rhs_ge_zero =
                        fx.bcx.ins().icmp_imm(IntCC::SignedGreaterThanOrEqual, rhs, 0);
                    let sat_val = fx.bcx.ins().select(rhs_ge_zero, min, max);
                    fx.bcx.ins().select(has_overflow, sat_val, val)
                }
                _ => unreachable!(),
            };

            let res = CValue::by_val(val, lhs.layout());

            ret.write_cvalue(fx, res);
        }
        sym::rotate_left => {
            intrinsic_args!(fx, args => (x, y); intrinsic);
            let y = y.load_scalar(fx);

            let layout = x.layout();
            let x = x.load_scalar(fx);
            let res = fx.bcx.ins().rotl(x, y);
            ret.write_cvalue(fx, CValue::by_val(res, layout));
        }
        sym::rotate_right => {
            intrinsic_args!(fx, args => (x, y); intrinsic);
            let y = y.load_scalar(fx);

            let layout = x.layout();
            let x = x.load_scalar(fx);
            let res = fx.bcx.ins().rotr(x, y);
            ret.write_cvalue(fx, CValue::by_val(res, layout));
        }

        // The only difference between offset and arith_offset is their UB rules.
        // Cranelift has no concept of UB, so both are codegen'ed the same way.
        sym::offset | sym::arith_offset => {
            intrinsic_args!(fx, args => (base, offset); intrinsic);
            let offset = offset.load_scalar(fx);

            let pointee_ty = base.layout().ty.builtin_deref(true).unwrap().ty;
            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
            let ptr_diff = if pointee_size != 1 {
                fx.bcx.ins().imul_imm(offset, pointee_size as i64)
            } else {
                offset
            };
            let base_val = base.load_scalar(fx);
            let res = fx.bcx.ins().iadd(base_val, ptr_diff);
            ret.write_cvalue(fx, CValue::by_val(res, base.layout()));
        }

        sym::ptr_mask => {
            intrinsic_args!(fx, args => (ptr, mask); intrinsic);
            let ptr_layout = ptr.layout();
            let ptr = ptr.load_scalar(fx);
            let mask = mask.load_scalar(fx);
            // Write the masked pointer back; previously the `band` result was
            // computed but never stored to the return place.
            let res = fx.bcx.ins().band(ptr, mask);
            ret.write_cvalue(fx, CValue::by_val(res, ptr_layout));
        }

        sym::transmute => {
            intrinsic_args!(fx, args => (from); intrinsic);

            ret.write_cvalue_transmute(fx, from);
        }
        sym::write_bytes | sym::volatile_set_memory => {
            intrinsic_args!(fx, args => (dst, val, count); intrinsic);
            let val = val.load_scalar(fx);
            let count = count.load_scalar(fx);

            let pointee_ty = dst.layout().ty.builtin_deref(true).unwrap().ty;
            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
            let count = if pointee_size != 1 {
                fx.bcx.ins().imul_imm(count, pointee_size as i64)
            } else {
                count
            };
            let dst_ptr = dst.load_scalar(fx);
            // FIXME make the memset actually volatile when switching to emit_small_memset
            // FIXME use emit_small_memset
            fx.bcx.call_memset(fx.target_config, dst_ptr, val, count);
        }
        sym::ctlz | sym::ctlz_nonzero => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            // FIXME trap on `ctlz_nonzero` with zero arg.
            let res = fx.bcx.ins().clz(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::cttz | sym::cttz_nonzero => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            // FIXME trap on `cttz_nonzero` with zero arg.
            let res = fx.bcx.ins().ctz(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::ctpop => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            let res = fx.bcx.ins().popcnt(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::bitreverse => {
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            let res = fx.bcx.ins().bitrev(val);
            let res = CValue::by_val(res, arg.layout());
            ret.write_cvalue(fx, res);
        }
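        // The byte swap below is the classic shift-and-mask expansion: each byte is
        // shifted to its mirrored position, masked out, and all partial results are
        // OR-ed together. For a 16bit value 0xAABB this yields
        // (0xAABB << 8) & 0xFF00 = 0xBB00 and (0xAABB >> 8) & 0x00FF = 0x00AA,
        // which OR to the swapped 0xBBAA.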
        sym::bswap => {
            // FIXME(CraneStation/cranelift#794) add bswap instruction to cranelift
            fn swap(bcx: &mut FunctionBuilder<'_>, v: Value) -> Value {
                match bcx.func.dfg.value_type(v) {
                    types::I8 => v,

                    // https://code.woboq.org/gcc/include/bits/byteswap.h.html
                    types::I16 => {
                        let tmp1 = bcx.ins().ishl_imm(v, 8);
                        let n1 = bcx.ins().band_imm(tmp1, 0xFF00);

                        let tmp2 = bcx.ins().ushr_imm(v, 8);
                        let n2 = bcx.ins().band_imm(tmp2, 0x00FF);

                        bcx.ins().bor(n1, n2)
                    }
                    types::I32 => {
                        let tmp1 = bcx.ins().ishl_imm(v, 24);
                        let n1 = bcx.ins().band_imm(tmp1, 0xFF00_0000);

                        let tmp2 = bcx.ins().ishl_imm(v, 8);
                        let n2 = bcx.ins().band_imm(tmp2, 0x00FF_0000);

                        let tmp3 = bcx.ins().ushr_imm(v, 8);
                        let n3 = bcx.ins().band_imm(tmp3, 0x0000_FF00);

                        let tmp4 = bcx.ins().ushr_imm(v, 24);
                        let n4 = bcx.ins().band_imm(tmp4, 0x0000_00FF);

                        let or_tmp1 = bcx.ins().bor(n1, n2);
                        let or_tmp2 = bcx.ins().bor(n3, n4);
                        bcx.ins().bor(or_tmp1, or_tmp2)
                    }
                    types::I64 => {
                        let tmp1 = bcx.ins().ishl_imm(v, 56);
                        let n1 = bcx.ins().band_imm(tmp1, 0xFF00_0000_0000_0000u64 as i64);

                        let tmp2 = bcx.ins().ishl_imm(v, 40);
                        let n2 = bcx.ins().band_imm(tmp2, 0x00FF_0000_0000_0000u64 as i64);

                        let tmp3 = bcx.ins().ishl_imm(v, 24);
                        let n3 = bcx.ins().band_imm(tmp3, 0x0000_FF00_0000_0000u64 as i64);

                        let tmp4 = bcx.ins().ishl_imm(v, 8);
                        let n4 = bcx.ins().band_imm(tmp4, 0x0000_00FF_0000_0000u64 as i64);

                        let tmp5 = bcx.ins().ushr_imm(v, 8);
                        let n5 = bcx.ins().band_imm(tmp5, 0x0000_0000_FF00_0000u64 as i64);

                        let tmp6 = bcx.ins().ushr_imm(v, 24);
                        let n6 = bcx.ins().band_imm(tmp6, 0x0000_0000_00FF_0000u64 as i64);

                        let tmp7 = bcx.ins().ushr_imm(v, 40);
                        let n7 = bcx.ins().band_imm(tmp7, 0x0000_0000_0000_FF00u64 as i64);

                        let tmp8 = bcx.ins().ushr_imm(v, 56);
                        let n8 = bcx.ins().band_imm(tmp8, 0x0000_0000_0000_00FFu64 as i64);

                        let or_tmp1 = bcx.ins().bor(n1, n2);
                        let or_tmp2 = bcx.ins().bor(n3, n4);
                        let or_tmp3 = bcx.ins().bor(n5, n6);
                        let or_tmp4 = bcx.ins().bor(n7, n8);

                        let or_tmp5 = bcx.ins().bor(or_tmp1, or_tmp2);
                        let or_tmp6 = bcx.ins().bor(or_tmp3, or_tmp4);
                        bcx.ins().bor(or_tmp5, or_tmp6)
                    }
                    types::I128 => {
                        let (lo, hi) = bcx.ins().isplit(v);
                        let lo = swap(bcx, lo);
                        let hi = swap(bcx, hi);
                        bcx.ins().iconcat(hi, lo)
                    }
                    ty => unreachable!("bswap {}", ty),
                }
            }
            intrinsic_args!(fx, args => (arg); intrinsic);
            let val = arg.load_scalar(fx);

            let res = CValue::by_val(swap(&mut fx.bcx, val), arg.layout());
            ret.write_cvalue(fx, res);
        }
        sym::assert_inhabited | sym::assert_zero_valid | sym::assert_uninit_valid => {
            intrinsic_args!(fx, args => (); intrinsic);

            let layout = fx.layout_of(substs.type_at(0));
            if layout.abi.is_uninhabited() {
                with_no_trimmed_paths!({
                    crate::base::codegen_panic(
                        fx,
                        &format!("attempted to instantiate uninhabited type `{}`", layout.ty),
                        source_info,
                    )
                });
                return;
            }

            if intrinsic == sym::assert_zero_valid && !fx.tcx.permits_zero_init(layout) {
                with_no_trimmed_paths!({
                    crate::base::codegen_panic(
                        fx,
                        &format!(
                            "attempted to zero-initialize type `{}`, which is invalid",
                            layout.ty
                        ),
                        source_info,
                    );
                });
                return;
            }

            if intrinsic == sym::assert_uninit_valid && !fx.tcx.permits_uninit_init(layout) {
                with_no_trimmed_paths!({
                    crate::base::codegen_panic(
                        fx,
                        &format!(
                            "attempted to leave type `{}` uninitialized, which is invalid",
                            layout.ty
                        ),
                        source_info,
                    )
                });
                return;
            }
        }
        sym::volatile_load | sym::unaligned_volatile_load => {
            intrinsic_args!(fx, args => (ptr); intrinsic);

            // Cranelift treats loads as volatile by default
            // FIXME correctly handle unaligned_volatile_load
            let inner_layout = fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
            let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
            ret.write_cvalue(fx, val);
        }
        sym::volatile_store | sym::unaligned_volatile_store => {
            intrinsic_args!(fx, args => (ptr, val); intrinsic);
            let ptr = ptr.load_scalar(fx);

            // Cranelift treats stores as volatile by default
            // FIXME correctly handle unaligned_volatile_store
            let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
            dest.write_cvalue(fx, val);
        }

        sym::pref_align_of
        | sym::needs_drop
        | sym::type_id
        | sym::type_name
        | sym::variant_count => {
            intrinsic_args!(fx, args => (); intrinsic);

            let const_val =
                fx.tcx.const_eval_instance(ParamEnv::reveal_all(), instance, None).unwrap();
            let val = crate::constant::codegen_const_value(fx, const_val, ret.layout().ty);
            ret.write_cvalue(fx, val);
        }
        sym::ptr_offset_from | sym::ptr_offset_from_unsigned => {
            intrinsic_args!(fx, args => (ptr, base); intrinsic);
            let ptr = ptr.load_scalar(fx);
            let base = base.load_scalar(fx);
            let ty = substs.type_at(0);

            let pointee_size: u64 = fx.layout_of(ty).size.bytes();
            let diff_bytes = fx.bcx.ins().isub(ptr, base);
            // FIXME this can be an exact division.
            let val = if intrinsic == sym::ptr_offset_from_unsigned {
                let usize_layout = fx.layout_of(fx.tcx.types.usize);
                // Because diff_bytes ULE isize::MAX, this would be fine as signed,
                // but unsigned is slightly easier to codegen, so might as well.
                CValue::by_val(fx.bcx.ins().udiv_imm(diff_bytes, pointee_size as i64), usize_layout)
            } else {
                let isize_layout = fx.layout_of(fx.tcx.types.isize);
                CValue::by_val(fx.bcx.ins().sdiv_imm(diff_bytes, pointee_size as i64), isize_layout)
            };
            ret.write_cvalue(fx, val);
        }

        sym::ptr_guaranteed_cmp => {
            intrinsic_args!(fx, args => (a, b); intrinsic);

            let val = crate::num::codegen_ptr_binop(fx, BinOp::Eq, a, b);
            ret.write_cvalue(fx, val);
        }

        sym::caller_location => {
            intrinsic_args!(fx, args => (); intrinsic);

            let caller_location = fx.get_caller_location(source_info);
            ret.write_cvalue(fx, caller_location);
        }
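        // The atomic intrinsics below lower to Cranelift's `fence`, `atomic_load`,
        // `atomic_store`, `atomic_cas` and `atomic_rmw` instructions. These provide
        // (at least) sequentially consistent semantics, so the ordering suffix of
        // the intrinsic name is not inspected here.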
        _ if intrinsic.as_str().starts_with("atomic_fence") => {
            intrinsic_args!(fx, args => (); intrinsic);

            fx.bcx.ins().fence();
        }
        _ if intrinsic.as_str().starts_with("atomic_singlethreadfence") => {
            intrinsic_args!(fx, args => (); intrinsic);

            // FIXME use a compiler fence once Cranelift supports it
            fx.bcx.ins().fence();
        }
        _ if intrinsic.as_str().starts_with("atomic_load") => {
            intrinsic_args!(fx, args => (ptr); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let ty = substs.type_at(0);
            match ty.kind() {
                ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
                    // FIXME implement 128bit atomics
                    if fx.tcx.is_compiler_builtins(LOCAL_CRATE) {
                        // special case for compiler-builtins to avoid having to patch it
                        crate::trap::trap_unimplemented(fx, "128bit atomics not yet supported");
                        return;
                    } else {
                        fx.tcx
                            .sess
                            .span_fatal(source_info.span, "128bit atomics not yet supported");
                    }
                }
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, ty);
                    return;
                }
            }
            let clif_ty = fx.clif_type(ty).unwrap();

            let val = fx.bcx.ins().atomic_load(clif_ty, MemFlags::trusted(), ptr);

            let val = CValue::by_val(val, fx.layout_of(ty));
            ret.write_cvalue(fx, val);
        }
        _ if intrinsic.as_str().starts_with("atomic_store") => {
            intrinsic_args!(fx, args => (ptr, val); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let ty = substs.type_at(0);
            match ty.kind() {
                ty::Uint(UintTy::U128) | ty::Int(IntTy::I128) => {
                    // FIXME implement 128bit atomics
                    if fx.tcx.is_compiler_builtins(LOCAL_CRATE) {
                        // special case for compiler-builtins to avoid having to patch it
                        crate::trap::trap_unimplemented(fx, "128bit atomics not yet supported");
                        return;
                    } else {
                        fx.tcx
                            .sess
                            .span_fatal(source_info.span, "128bit atomics not yet supported");
                    }
                }
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, ty);
                    return;
                }
            }

            let val = val.load_scalar(fx);

            fx.bcx.ins().atomic_store(MemFlags::trusted(), val, ptr);
        }
        _ if intrinsic.as_str().starts_with("atomic_xchg") => {
            intrinsic_args!(fx, args => (ptr, new); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = new.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let new = new.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xchg, ptr, new);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_cxchg") => {
            // both atomic_cxchg_* and atomic_cxchgweak_*
            intrinsic_args!(fx, args => (ptr, test_old, new); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = new.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }

            let test_old = test_old.load_scalar(fx);
            let new = new.load_scalar(fx);

            let old = fx.bcx.ins().atomic_cas(MemFlags::trusted(), ptr, test_old, new);
            let is_eq = fx.bcx.ins().icmp(IntCC::Equal, old, test_old);

            let ret_val =
                CValue::by_val_pair(old, fx.bcx.ins().bint(types::I8, is_eq), ret.layout());
            ret.write_cvalue(fx, ret_val)
        }
        _ if intrinsic.as_str().starts_with("atomic_xadd") => {
            intrinsic_args!(fx, args => (ptr, amount); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = amount.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let amount = amount.load_scalar(fx);

            let old =
                fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Add, ptr, amount);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_xsub") => {
            intrinsic_args!(fx, args => (ptr, amount); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = amount.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let amount = amount.load_scalar(fx);

            let old =
                fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Sub, ptr, amount);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_and") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::And, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_or") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Or, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_xor") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Xor, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_nand") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Nand, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_max") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smax, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_umax") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umax, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_min") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Smin, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        _ if intrinsic.as_str().starts_with("atomic_umin") => {
            intrinsic_args!(fx, args => (ptr, src); intrinsic);
            let ptr = ptr.load_scalar(fx);

            let layout = src.layout();
            match layout.ty.kind() {
                ty::Uint(_) | ty::Int(_) | ty::RawPtr(..) => {}
                _ => {
                    report_atomic_type_validation_error(fx, intrinsic, source_info.span, layout.ty);
                    return;
                }
            }
            let ty = fx.clif_type(layout.ty).unwrap();

            let src = src.load_scalar(fx);

            let old = fx.bcx.ins().atomic_rmw(ty, MemFlags::trusted(), AtomicRmwOp::Umin, ptr, src);

            let old = CValue::by_val(old, layout);
            ret.write_cvalue(fx, old);
        }
        sym::minnumf32 => {
            intrinsic_args!(fx, args => (a, b); intrinsic);
            let a = a.load_scalar(fx);
            let b = b.load_scalar(fx);

            let val = crate::num::codegen_float_min(fx, a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
            ret.write_cvalue(fx, val);
        }
        sym::minnumf64 => {
            intrinsic_args!(fx, args => (a, b); intrinsic);
            let a = a.load_scalar(fx);
            let b = b.load_scalar(fx);

            let val = crate::num::codegen_float_min(fx, a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
            ret.write_cvalue(fx, val);
        }
        sym::maxnumf32 => {
            intrinsic_args!(fx, args => (a, b); intrinsic);
            let a = a.load_scalar(fx);
            let b = b.load_scalar(fx);

            let val = crate::num::codegen_float_max(fx, a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
            ret.write_cvalue(fx, val);
        }
        sym::maxnumf64 => {
            intrinsic_args!(fx, args => (a, b); intrinsic);
            let a = a.load_scalar(fx);
            let b = b.load_scalar(fx);

            let val = crate::num::codegen_float_max(fx, a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
            ret.write_cvalue(fx, val);
        }
        kw::Try => {
            intrinsic_args!(fx, args => (f, data, catch_fn); intrinsic);
            let f = f.load_scalar(fx);
            let data = data.load_scalar(fx);
            let _catch_fn = catch_fn.load_scalar(fx);

            // FIXME once unwinding is supported, change this to actually catch panics
            let f_sig = fx.bcx.func.import_signature(Signature {
                call_conv: fx.target_config.default_call_conv,
                params: vec![AbiParam::new(fx.bcx.func.dfg.value_type(data))],
                returns: vec![],
            });

            fx.bcx.ins().call_indirect(f_sig, f, &[data]);

            let layout = ret.layout();
            let ret_val = CValue::const_val(fx, layout, ty::ScalarInt::null(layout.size));
            ret.write_cvalue(fx, ret_val);
        }
        sym::fadd_fast | sym::fsub_fast | sym::fmul_fast | sym::fdiv_fast | sym::frem_fast => {
            intrinsic_args!(fx, args => (x, y); intrinsic);

            let res = crate::num::codegen_float_binop(
                fx,
                match intrinsic {
                    sym::fadd_fast => BinOp::Add,
                    sym::fsub_fast => BinOp::Sub,
                    sym::fmul_fast => BinOp::Mul,
                    sym::fdiv_fast => BinOp::Div,
                    sym::frem_fast => BinOp::Rem,
                    _ => unreachable!(),
                },
                x,
                y,
            );
            ret.write_cvalue(fx, res);
        }
        sym::float_to_int_unchecked => {
            intrinsic_args!(fx, args => (f); intrinsic);
            let f = f.load_scalar(fx);

            let res = crate::cast::clif_int_or_float_cast(
                fx,
                f,
                false,
                fx.clif_type(ret.layout().ty).unwrap(),
                type_sign(ret.layout().ty),
            );
            ret.write_cvalue(fx, CValue::by_val(res, ret.layout()));
        }
        sym::raw_eq => {
            intrinsic_args!(fx, args => (lhs_ref, rhs_ref); intrinsic);
            let lhs_ref = lhs_ref.load_scalar(fx);
            let rhs_ref = rhs_ref.load_scalar(fx);

            let size = fx.layout_of(substs.type_at(0)).layout.size();
            // FIXME add and use emit_small_memcmp
            let is_eq_value = if size == Size::ZERO {
                // No bytes means they're trivially equal
                fx.bcx.ins().iconst(types::I8, 1)
            } else if let Some(clty) = size.bits().try_into().ok().and_then(Type::int) {
                // Can't use `trusted` for these loads; they could be unaligned.
                let mut flags = MemFlags::new();
                flags.set_notrap();
                let lhs_val = fx.bcx.ins().load(clty, flags, lhs_ref, 0);
                let rhs_val = fx.bcx.ins().load(clty, flags, rhs_ref, 0);
                let eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_val, rhs_val);
                fx.bcx.ins().bint(types::I8, eq)
            } else {
                // Just call `memcmp` (like slices do in core) when the
                // size is too large or it's not a power-of-two.
                let signed_bytes = i64::try_from(size.bytes()).unwrap();
                let bytes_val = fx.bcx.ins().iconst(fx.pointer_type, signed_bytes);
                let params = vec![AbiParam::new(fx.pointer_type); 3];
                let returns = vec![AbiParam::new(types::I32)];
                let args = &[lhs_ref, rhs_ref, bytes_val];
                let cmp = fx.lib_call("memcmp", params, returns, args)[0];
                let eq = fx.bcx.ins().icmp_imm(IntCC::Equal, cmp, 0);
                fx.bcx.ins().bint(types::I8, eq)
            };
            ret.write_cvalue(fx, CValue::by_val(is_eq_value, ret.layout()));
        }
        sym::const_allocate => {
            intrinsic_args!(fx, args => (_size, _align); intrinsic);

            // returns a null pointer at runtime.
            let null = fx.bcx.ins().iconst(fx.pointer_type, 0);
            ret.write_cvalue(fx, CValue::by_val(null, ret.layout()));
        }

        sym::const_deallocate => {
            intrinsic_args!(fx, args => (_ptr, _size, _align); intrinsic);

            // nop at runtime.
        }

        sym::black_box => {
            intrinsic_args!(fx, args => (a); intrinsic);

            // FIXME implement black_box semantics
            ret.write_cvalue(fx, a);
        }

        // FIXME implement variadics in cranelift
        sym::va_copy | sym::va_arg | sym::va_end => {
            fx.tcx.sess.span_fatal(
                source_info.span,
                "Defining variadic functions is not yet supported by Cranelift",
            );
        }

        _ => {
            fx.tcx
                .sess
                .span_fatal(source_info.span, &format!("unsupported intrinsic {}", intrinsic));
        }
    }

    let ret_block = fx.get_block(destination.unwrap());
    fx.bcx.ins().jump(ret_block, &[]);
}