use rustc::ty::subst::SubstsRef;
macro_rules! intrinsic_pat {

macro_rules! intrinsic_arg {
    (c $fx:expr, $arg:ident) => {
    (v $fx:expr, $arg:ident) => {
macro_rules! intrinsic_substs {
    ($substs:expr, $index:expr,) => {};
    ($substs:expr, $index:expr, $first:ident $(,$rest:ident)*) => {
        let $first = $substs.type_at($index);
        intrinsic_substs!($substs, $index+1, $($rest),*);
    };
}
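// `intrinsic_match!` is the dispatch table used below: each arm names one or more
// intrinsics (optionally with a guard), binds the monomorphized substs via
// `intrinsic_substs!`, destructures the argument slice with `intrinsic_arg!`, and
// then runs the arm body. Unknown intrinsics fall through to `unimpl!`.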
macro_rules! intrinsic_match {
    ($fx:expr, $intrinsic:expr, $substs:expr, $args:expr, $(
        $($name:tt)|+ $(if $cond:expr)?, $(<$($subst:ident),*>)? ($($a:ident $arg:ident),*) $content:block;
            $(intrinsic_pat!($name))|* $(if $cond)? => {
                #[allow(unused_parens, non_snake_case)]
                intrinsic_substs!($substs, 0, $($subst),*);
                if let [$($arg),*] = *$args {
                    $(intrinsic_arg!($a $fx, $arg)),*
                #[warn(unused_parens, non_snake_case)]
                    bug!("wrong number of args for intrinsic {:?}", $intrinsic);
            _ => unimpl!("unsupported intrinsic {}", $intrinsic),
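// `call_intrinsic_match!` maps an intrinsic directly onto a libc/compiler-builtins
// symbol: it asserts that there are no substs, forwards the arguments through
// `easy_call`, writes the result back and jumps to the return ebb.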
macro_rules! call_intrinsic_match {
    ($fx:expr, $intrinsic:expr, $substs:expr, $ret:expr, $destination:expr, $args:expr, $(
        $name:ident($($arg:ident),*) -> $ty:ident => $func:ident,
            stringify!($name) => {
                assert!($substs.is_noop());
                if let [$($arg),*] = *$args {
                    let res = $fx.easy_call(stringify!($func), &[$($arg),*], $fx.tcx.types.$ty);
                    $ret.write_cvalue($fx, res);

                    if let Some((_, dest)) = $destination {
                        let ret_ebb = $fx.get_ebb(dest);
                        $fx.bcx.ins().jump(ret_ebb, &[]);
                    bug!("wrong number of args for intrinsic {:?}", $intrinsic);
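// Lowers a read-modify-write atomic (e.g. `atomic_xadd`) as a plain load, ALU op
// and store, returning the previous value. No actual atomicity is provided.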
macro_rules! atomic_binop_return_old {
    ($fx:expr, $op:ident<$T:ident>($ptr:ident, $src:ident) -> $ret:ident) => {
        let clif_ty = $fx.clif_type($T).unwrap();
        let old = $fx.bcx.ins().load(clif_ty, MemFlags::new(), $ptr, 0);
        let new = $fx.bcx.ins().$op(old, $src);
        $fx.bcx.ins().store(MemFlags::new(), new, $ptr, 0);
        $ret.write_cvalue($fx, CValue::by_val(old, $fx.layout_of($T)));
    };
}
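// Like `atomic_binop_return_old!`, but the new value is chosen by comparing the
// current value against `$src` with the given `IntCC` condition code.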
macro_rules! atomic_minmax {
    ($fx:expr, $cc:expr, <$T:ident> ($ptr:ident, $src:ident) -> $ret:ident) => {
        // Read old
        let clif_ty = $fx.clif_type($T).unwrap();
        let old = $fx.bcx.ins().load(clif_ty, MemFlags::new(), $ptr, 0);

        // Compare with the caller-supplied condition code and keep `old` when it wins
        let keep_old = $fx.bcx.ins().icmp($cc, old, $src);
        let new = crate::common::codegen_select(&mut $fx.bcx, keep_old, old, $src);

        // Write new
        $fx.bcx.ins().store(MemFlags::new(), new, $ptr, 0);

        let ret_val = CValue::by_val(old, $ret.layout());
        $ret.write_cvalue($fx, ret_val);
    };
}
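// Returns the lane layout and lane count of a SIMD vector type. `intrinsic` is only
// used for the panic message.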
fn lane_type_and_count<'tcx>(
    fx: &FunctionCx<'_, 'tcx, impl Backend>,
    layout: TyLayout<'tcx>,
    intrinsic: &str,
) -> (TyLayout<'tcx>, usize) {
    assert!(layout.ty.is_simd());
    let lane_count = match layout.fields {
        layout::FieldPlacement::Array { stride: _, count } => usize::try_from(count).unwrap(),
        _ => panic!("Non-vector type {:?} passed to or returned from simd_* intrinsic {}", layout.ty, intrinsic),
    };
    let lane_layout = layout.field(fx, 0);
    (lane_layout, lane_count)
}
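// Applies `f` to every lane of `x` and `y`, writing each produced lane into `ret`.
// All simd_* comparison and arithmetic intrinsics below are built on this helper.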
fn simd_for_each_lane<'tcx, B: Backend>(
    fx: &mut FunctionCx<'_, 'tcx, B>,
    intrinsic: &str,
    x: CValue<'tcx>,
    y: CValue<'tcx>,
    ret: CPlace<'tcx>,
    f: impl Fn(&mut FunctionCx<'_, 'tcx, B>, TyLayout<'tcx>, TyLayout<'tcx>, Value, Value) -> CValue<'tcx>,
) {
    assert_eq!(x.layout(), y.layout());
    let layout = x.layout();

    let (lane_layout, lane_count) = lane_type_and_count(fx, layout, intrinsic);
    let (ret_lane_layout, ret_lane_count) = lane_type_and_count(fx, ret.layout(), intrinsic);
    assert_eq!(lane_count, ret_lane_count);

    for lane in 0..lane_count {
        let lane = mir::Field::new(lane);
        let x_lane = x.value_field(fx, lane).load_scalar(fx);
        let y_lane = y.value_field(fx, lane).load_scalar(fx);

        let res_lane = f(fx, lane_layout, ret_lane_layout, x_lane, y_lane);

        ret.place_field(fx, lane).write_cvalue(fx, res_lane);
    }
}
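// Turns a boolean comparison result into 0 or !0 of the lane's integer type, the
// per-lane mask representation the simd_* comparisons are expected to produce.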
fn bool_to_zero_or_max_uint<'tcx>(
    fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
    layout: TyLayout<'tcx>,
    val: Value,
) -> CValue<'tcx> {
    let ty = fx.clif_type(layout.ty).unwrap();

    let zero = fx.bcx.ins().iconst(ty, 0);
    let max = fx.bcx.ins().iconst(ty, (u64::max_value() >> (64 - ty.bits())) as i64);
    let res = crate::common::codegen_select(&mut fx.bcx, val, max, zero);
    CValue::by_val(res, layout)
}
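// Per-lane comparison. The second form picks an unsigned or signed condition code
// based on the lane type.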
macro_rules! simd_cmp {
    ($fx:expr, $intrinsic:expr, $cc:ident($x:ident, $y:ident) -> $ret:ident) => {
        simd_for_each_lane($fx, $intrinsic, $x, $y, $ret, |fx, _lane_layout, res_lane_layout, x_lane, y_lane| {
            let res_lane = fx.bcx.ins().icmp(IntCC::$cc, x_lane, y_lane);
            bool_to_zero_or_max_uint(fx, res_lane_layout, res_lane)
        });
    };
    ($fx:expr, $intrinsic:expr, $cc_u:ident|$cc_s:ident($x:ident, $y:ident) -> $ret:ident) => {
        simd_for_each_lane($fx, $intrinsic, $x, $y, $ret, |fx, lane_layout, res_lane_layout, x_lane, y_lane| {
            let res_lane = match lane_layout.ty.sty {
                ty::Uint(_) => fx.bcx.ins().icmp(IntCC::$cc_u, x_lane, y_lane),
                ty::Int(_) => fx.bcx.ins().icmp(IntCC::$cc_s, x_lane, y_lane),
                _ => unreachable!("{:?}", lane_layout.ty),
            };
            bool_to_zero_or_max_uint(fx, res_lane_layout, res_lane)
        });
    };
}
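// Per-lane arithmetic/bitwise op; the `$op_u|$op_s` form dispatches on the
// signedness of the lane type.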
macro_rules! simd_binop {
    ($fx:expr, $intrinsic:expr, $op:ident($x:ident, $y:ident) -> $ret:ident) => {
        simd_for_each_lane($fx, $intrinsic, $x, $y, $ret, |fx, _lane_layout, ret_lane_layout, x_lane, y_lane| {
            let res_lane = fx.bcx.ins().$op(x_lane, y_lane);
            CValue::by_val(res_lane, ret_lane_layout)
        });
    };
    ($fx:expr, $intrinsic:expr, $op_u:ident|$op_s:ident($x:ident, $y:ident) -> $ret:ident) => {
        simd_for_each_lane($fx, $intrinsic, $x, $y, $ret, |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
            let res_lane = match lane_layout.ty.sty {
                ty::Uint(_) => fx.bcx.ins().$op_u(x_lane, y_lane),
                ty::Int(_) => fx.bcx.ins().$op_s(x_lane, y_lane),
                _ => unreachable!("{:?}", lane_layout.ty),
            };
            CValue::by_val(res_lane, ret_lane_layout)
        });
    };
}
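/// Entry point for lowering a call to a Rust intrinsic. Intrinsics that map onto a
/// runtime function go through `call_intrinsic_match!`; everything else is lowered
/// inline through `intrinsic_match!`.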
pub fn codegen_intrinsic_call<'a, 'tcx: 'a>(
    fx: &mut FunctionCx<'a, 'tcx, impl Backend>,
    def_id: DefId,
    substs: SubstsRef<'tcx>,
    args: Vec<CValue<'tcx>>,
    destination: Option<(CPlace<'tcx>, BasicBlock)>,
) {
    let intrinsic = fx.tcx.item_name(def_id).as_str();
    let intrinsic = &intrinsic[..];

    let ret = match destination {
        Some((place, _)) => place,
        None => {
            // Insert non-returning intrinsics here
            match intrinsic {
                "abort" => {
                    trap_panic(fx, "Called intrinsic::abort.");
                }
                "unreachable" => {
                    trap_unreachable(fx, "[corruption] Called intrinsic::unreachable.");
                }
                _ => unimplemented!("unsupported intrinsic {}", intrinsic),
            }
            return;
        }
    };
    let u64_layout = fx.layout_of(fx.tcx.types.u64);
    let usize_layout = fx.layout_of(fx.tcx.types.usize);

    call_intrinsic_match! {
        fx, intrinsic, substs, ret, destination, args,
        expf32(flt) -> f32 => expf,
        expf64(flt) -> f64 => exp,
        exp2f32(flt) -> f32 => exp2f,
        exp2f64(flt) -> f64 => exp2,
        sqrtf32(flt) -> f32 => sqrtf,
        sqrtf64(flt) -> f64 => sqrt,
        powif32(a, x) -> f32 => __powisf2, // compiler-builtins
        powif64(a, x) -> f64 => __powidf2, // compiler-builtins
        logf32(flt) -> f32 => logf,
        logf64(flt) -> f64 => log,
        fabsf32(flt) -> f32 => fabsf,
        fabsf64(flt) -> f64 => fabs,
        fmaf32(x, y, z) -> f32 => fmaf,
        fmaf64(x, y, z) -> f64 => fma,

        floorf32(flt) -> f32 => floorf,
        floorf64(flt) -> f64 => floor,
        ceilf32(flt) -> f32 => ceilf,
        ceilf64(flt) -> f64 => ceil,
        truncf32(flt) -> f32 => truncf,
        truncf64(flt) -> f64 => trunc,
        roundf32(flt) -> f32 => roundf,
        roundf64(flt) -> f64 => round,

        sinf32(flt) -> f32 => sinf,
        sinf64(flt) -> f64 => sin,
        cosf32(flt) -> f32 => cosf,
        cosf64(flt) -> f64 => cos,
        tanf32(flt) -> f32 => tanf,
        tanf64(flt) -> f64 => tan,
    }
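    // The remaining intrinsics are lowered inline via `intrinsic_match!`.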
    intrinsic_match! {
        fx, intrinsic, substs, args,

        likely | unlikely, (c a) {
            ret.write_cvalue(fx, a);
        };
        breakpoint, () {
            fx.bcx.ins().debugtrap();
        };
        copy | copy_nonoverlapping, <elem_ty> (v src, v dst, v count) {
            let elem_size: u64 = fx.layout_of(elem_ty).size.bytes();
            let elem_size = fx
                .bcx
                .ins()
                .iconst(fx.pointer_type, elem_size as i64);
            assert_eq!(args.len(), 3);
            let byte_amount = fx.bcx.ins().imul(count, elem_size);

            if intrinsic.ends_with("_nonoverlapping") {
                fx.bcx.call_memcpy(fx.module.target_config(), dst, src, byte_amount);
            } else {
                fx.bcx.call_memmove(fx.module.target_config(), dst, src, byte_amount);
            }
        };
        discriminant_value, (c val) {
            let pointee_layout = fx.layout_of(val.layout().ty.builtin_deref(true).unwrap().ty);
            let place = CPlace::for_addr(val.load_scalar(fx), pointee_layout);
            let discr = crate::base::trans_get_discriminant(fx, place, ret.layout());
            ret.write_cvalue(fx, discr);
        };
        size_of, <T> () {
            let size_of = fx.layout_of(T).size.bytes();
            let size_of = CValue::const_val(fx, usize_layout.ty, size_of.into());
            ret.write_cvalue(fx, size_of);
        };
        size_of_val, <T> (c ptr) {
            let layout = fx.layout_of(T);
            let size = if layout.is_unsized() {
                let (_ptr, info) = ptr.load_scalar_pair(fx);
                let (size, _align) = crate::unsize::size_and_align_of_dst(fx, layout.ty, info);
                size
            } else {
                fx.bcx.ins().iconst(fx.pointer_type, layout.size.bytes() as i64)
            };
            ret.write_cvalue(fx, CValue::by_val(size, usize_layout));
        };
        min_align_of, <T> () {
            let min_align = fx.layout_of(T).align.abi.bytes();
            let min_align = CValue::const_val(fx, usize_layout.ty, min_align.into());
            ret.write_cvalue(fx, min_align);
        };
        min_align_of_val, <T> (c ptr) {
            let layout = fx.layout_of(T);
            let align = if layout.is_unsized() {
                let (_ptr, info) = ptr.load_scalar_pair(fx);
                let (_size, align) = crate::unsize::size_and_align_of_dst(fx, layout.ty, info);
                align
            } else {
                fx.bcx.ins().iconst(fx.pointer_type, layout.align.abi.bytes() as i64)
            };
            ret.write_cvalue(fx, CValue::by_val(align, usize_layout));
        };
        pref_align_of, <T> () {
            let pref_align = fx.layout_of(T).align.pref.bytes();
            let pref_align = CValue::const_val(fx, usize_layout.ty, pref_align.into());
            ret.write_cvalue(fx, pref_align);
        };
        type_id, <T> () {
            let type_id = fx.tcx.type_id_hash(T);
            let type_id = CValue::const_val(fx, u64_layout.ty, type_id.into());
            ret.write_cvalue(fx, type_id);
        };
        type_name, <T> () {
            let type_name = fx.tcx.type_name(T);
            let type_name = crate::constant::trans_const_value(fx, type_name);
            ret.write_cvalue(fx, type_name);
        };
364 _ if intrinsic.starts_with("unchecked_") || intrinsic == "exact_div", (c x, c y) {
365 // FIXME trap on overflow
366 let bin_op = match intrinsic {
367 "unchecked_sub" => BinOp::Sub,
368 "unchecked_div" | "exact_div" => BinOp::Div,
369 "unchecked_rem" => BinOp::Rem,
370 "unchecked_shl" => BinOp::Shl,
371 "unchecked_shr" => BinOp::Shr,
372 _ => unimplemented!("intrinsic {}", intrinsic),
374 let res = match ret.layout().ty.sty {
375 ty::Uint(_) => crate::base::trans_int_binop(
383 ty::Int(_) => crate::base::trans_int_binop(
393 ret.write_cvalue(fx, res);
        _ if intrinsic.ends_with("_with_overflow"), <T> (c x, c y) {
            assert_eq!(x.layout().ty, y.layout().ty);
            let bin_op = match intrinsic {
                "add_with_overflow" => BinOp::Add,
                "sub_with_overflow" => BinOp::Sub,
                "mul_with_overflow" => BinOp::Mul,
                _ => unimplemented!("intrinsic {}", intrinsic),
            };
            let res = match T.sty {
                ty::Uint(_) => crate::base::trans_checked_int_binop(
                ty::Int(_) => crate::base::trans_checked_int_binop(
            ret.write_cvalue(fx, res);
        };
        _ if intrinsic.starts_with("overflowing_"), <T> (c x, c y) {
            assert_eq!(x.layout().ty, y.layout().ty);
            let bin_op = match intrinsic {
                "overflowing_add" => BinOp::Add,
                "overflowing_sub" => BinOp::Sub,
                "overflowing_mul" => BinOp::Mul,
                _ => unimplemented!("intrinsic {}", intrinsic),
            };
            let res = match T.sty {
                ty::Uint(_) => crate::base::trans_int_binop(
                ty::Int(_) => crate::base::trans_int_binop(
            ret.write_cvalue(fx, res);
        };
        _ if intrinsic.starts_with("saturating_"), <T> (c x, c y) {
            // FIXME implement saturating behavior
            assert_eq!(x.layout().ty, y.layout().ty);
            let bin_op = match intrinsic {
                "saturating_add" => BinOp::Add,
                "saturating_sub" => BinOp::Sub,
                "saturating_mul" => BinOp::Mul,
                _ => unimplemented!("intrinsic {}", intrinsic),
            };
            let res = match T.sty {
                ty::Uint(_) => crate::base::trans_int_binop(
                ty::Int(_) => crate::base::trans_int_binop(
            ret.write_cvalue(fx, res);
        };
        rotate_left, <T>(v x, v y) {
            let layout = fx.layout_of(T);
            let res = fx.bcx.ins().rotl(x, y);
            ret.write_cvalue(fx, CValue::by_val(res, layout));
        };
        rotate_right, <T>(v x, v y) {
            let layout = fx.layout_of(T);
            let res = fx.bcx.ins().rotr(x, y);
            ret.write_cvalue(fx, CValue::by_val(res, layout));
        };

        // The only difference between offset and arith_offset is regarding UB. Because Cranelift
        // doesn't have UB, both are codegen'ed the same way.
        offset | arith_offset, (c base, v offset) {
            let pointee_ty = base.layout().ty.builtin_deref(true).unwrap().ty;
            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
            let ptr_diff = fx.bcx.ins().imul_imm(offset, pointee_size as i64);
            let base_val = base.load_scalar(fx);
            let res = fx.bcx.ins().iadd(base_val, ptr_diff);
            ret.write_cvalue(fx, CValue::by_val(res, args[0].layout()));
        };
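        // transmute is lowered by spilling the source to a stack slot and reading the
        // same bytes back at the destination layout.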
        transmute, <src_ty, dst_ty> (c from) {
            assert_eq!(from.layout().ty, src_ty);
            let addr = from.force_stack(fx);
            let dst_layout = fx.layout_of(dst_ty);
            ret.write_cvalue(fx, CValue::by_ref(addr, dst_layout))
        };
        init, () {
            if ret.layout().abi == Abi::Uninhabited {
                crate::trap::trap_panic(fx, "[panic] Called intrinsic::init for uninhabited type.");
                return;
            }

            match ret {
                CPlace::NoPlace(_layout) => {}
                CPlace::Var(var, layout) => {
                    let clif_ty = fx.clif_type(layout.ty).unwrap();
                    let val = match clif_ty {
                        types::I8 | types::I16 | types::I32 | types::I64 => fx.bcx.ins().iconst(clif_ty, 0),
                        types::F32 => {
                            let zero = fx.bcx.ins().iconst(types::I32, 0);
                            fx.bcx.ins().bitcast(types::F32, zero)
                        }
                        types::F64 => {
                            let zero = fx.bcx.ins().iconst(types::I64, 0);
                            fx.bcx.ins().bitcast(types::F64, zero)
                        }
                        _ => panic!("clif_type returned {}", clif_ty),
                    };
                    fx.bcx.def_var(mir_var(var), val);
                }
                _ => {
                    let addr = ret.to_addr(fx);
                    let layout = ret.layout();
                    fx.bcx.emit_small_memset(fx.module.target_config(), addr, 0, layout.size.bytes(), 1);
                }
            }
        };
        write_bytes, (c dst, v val, v count) {
            let pointee_ty = dst.layout().ty.builtin_deref(true).unwrap().ty;
            let pointee_size = fx.layout_of(pointee_ty).size.bytes();
            let count = fx.bcx.ins().imul_imm(count, pointee_size as i64);
            let dst_ptr = dst.load_scalar(fx);
            fx.bcx.call_memset(fx.module.target_config(), dst_ptr, val, count);
        };
        ctlz | ctlz_nonzero, <T> (v arg) {
            let res = if T == fx.tcx.types.u128 || T == fx.tcx.types.i128 {
                // FIXME verify this algorithm is correct
                let (lsb, msb) = fx.bcx.ins().isplit(arg);
                let lsb_lz = fx.bcx.ins().clz(lsb);
                let msb_lz = fx.bcx.ins().clz(msb);
                let msb_is_zero = fx.bcx.ins().icmp_imm(IntCC::Equal, msb, 0);
                let lsb_lz_plus_64 = fx.bcx.ins().iadd_imm(lsb_lz, 64);
                fx.bcx.ins().select(msb_is_zero, lsb_lz_plus_64, msb_lz)
            } else {
                fx.bcx.ins().clz(arg)
            };
            let res = CValue::by_val(res, fx.layout_of(T));
            ret.write_cvalue(fx, res);
        };
        cttz | cttz_nonzero, <T> (v arg) {
            let res = if T == fx.tcx.types.u128 || T == fx.tcx.types.i128 {
                // FIXME verify this algorithm is correct
                let (lsb, msb) = fx.bcx.ins().isplit(arg);
                let lsb_tz = fx.bcx.ins().ctz(lsb);
                let msb_tz = fx.bcx.ins().ctz(msb);
                let lsb_is_zero = fx.bcx.ins().icmp_imm(IntCC::Equal, lsb, 0);
                let msb_tz_plus_64 = fx.bcx.ins().iadd_imm(msb_tz, 64);
                fx.bcx.ins().select(lsb_is_zero, msb_tz_plus_64, lsb_tz)
            } else {
                fx.bcx.ins().ctz(arg)
            };
            let res = CValue::by_val(res, fx.layout_of(T));
            ret.write_cvalue(fx, res);
        };
        ctpop, <T> (v arg) {
            let res = CValue::by_val(fx.bcx.ins().popcnt(arg), fx.layout_of(T));
            ret.write_cvalue(fx, res);
        };
        bitreverse, <T> (v arg) {
            let res = CValue::by_val(fx.bcx.ins().bitrev(arg), fx.layout_of(T));
            ret.write_cvalue(fx, res);
        };
        bswap, <T> (v arg) {
            // FIXME(CraneStation/cranelift#794) add bswap instruction to cranelift
            fn swap(bcx: &mut FunctionBuilder, v: Value) -> Value {
                match bcx.func.dfg.value_type(v) {
                    // https://code.woboq.org/gcc/include/bits/byteswap.h.html
                    types::I16 => {
                        let tmp1 = bcx.ins().ishl_imm(v, 8);
                        let n1 = bcx.ins().band_imm(tmp1, 0xFF00);

                        let tmp2 = bcx.ins().ushr_imm(v, 8);
                        let n2 = bcx.ins().band_imm(tmp2, 0x00FF);

                        bcx.ins().bor(n1, n2)
                    }
                    types::I32 => {
                        let tmp1 = bcx.ins().ishl_imm(v, 24);
                        let n1 = bcx.ins().band_imm(tmp1, 0xFF00_0000);

                        let tmp2 = bcx.ins().ishl_imm(v, 8);
                        let n2 = bcx.ins().band_imm(tmp2, 0x00FF_0000);

                        let tmp3 = bcx.ins().ushr_imm(v, 8);
                        let n3 = bcx.ins().band_imm(tmp3, 0x0000_FF00);

                        let tmp4 = bcx.ins().ushr_imm(v, 24);
                        let n4 = bcx.ins().band_imm(tmp4, 0x0000_00FF);

                        let or_tmp1 = bcx.ins().bor(n1, n2);
                        let or_tmp2 = bcx.ins().bor(n3, n4);
                        bcx.ins().bor(or_tmp1, or_tmp2)
                    }
                    types::I64 => {
                        let tmp1 = bcx.ins().ishl_imm(v, 56);
                        let n1 = bcx.ins().band_imm(tmp1, 0xFF00_0000_0000_0000u64 as i64);

                        let tmp2 = bcx.ins().ishl_imm(v, 40);
                        let n2 = bcx.ins().band_imm(tmp2, 0x00FF_0000_0000_0000u64 as i64);

                        let tmp3 = bcx.ins().ishl_imm(v, 24);
                        let n3 = bcx.ins().band_imm(tmp3, 0x0000_FF00_0000_0000u64 as i64);

                        let tmp4 = bcx.ins().ishl_imm(v, 8);
                        let n4 = bcx.ins().band_imm(tmp4, 0x0000_00FF_0000_0000u64 as i64);

                        let tmp5 = bcx.ins().ushr_imm(v, 8);
                        let n5 = bcx.ins().band_imm(tmp5, 0x0000_0000_FF00_0000u64 as i64);

                        let tmp6 = bcx.ins().ushr_imm(v, 24);
                        let n6 = bcx.ins().band_imm(tmp6, 0x0000_0000_00FF_0000u64 as i64);

                        let tmp7 = bcx.ins().ushr_imm(v, 40);
                        let n7 = bcx.ins().band_imm(tmp7, 0x0000_0000_0000_FF00u64 as i64);

                        let tmp8 = bcx.ins().ushr_imm(v, 56);
                        let n8 = bcx.ins().band_imm(tmp8, 0x0000_0000_0000_00FFu64 as i64);

                        let or_tmp1 = bcx.ins().bor(n1, n2);
                        let or_tmp2 = bcx.ins().bor(n3, n4);
                        let or_tmp3 = bcx.ins().bor(n5, n6);
                        let or_tmp4 = bcx.ins().bor(n7, n8);

                        let or_tmp5 = bcx.ins().bor(or_tmp1, or_tmp2);
                        let or_tmp6 = bcx.ins().bor(or_tmp3, or_tmp4);
                        bcx.ins().bor(or_tmp5, or_tmp6)
                    }
                    types::I128 => {
                        let (lo, hi) = bcx.ins().isplit(v);
                        let lo = swap(bcx, lo);
                        let hi = swap(bcx, hi);
                        bcx.ins().iconcat(hi, lo)
                    }
                    ty => unimplemented!("bswap {}", ty),
                }
            }
            let res = CValue::by_val(swap(&mut fx.bcx, arg), fx.layout_of(T));
            ret.write_cvalue(fx, res);
        };
        needs_drop, <T> () {
            let needs_drop = if T.needs_drop(fx.tcx, ParamEnv::reveal_all()) {
                1
            } else {
                0
            };
            let needs_drop = CValue::const_val(fx, fx.tcx.types.bool, needs_drop);
            ret.write_cvalue(fx, needs_drop);
        };
        panic_if_uninhabited, <T> () {
            if fx.layout_of(T).abi.is_uninhabited() {
                crate::trap::trap_panic(fx, "[panic] Called intrinsic::panic_if_uninhabited for uninhabited type.");
            }
        };

        volatile_load, (c ptr) {
            // Cranelift treats loads as volatile by default
            let inner_layout =
                fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
            let val = CValue::by_ref(ptr.load_scalar(fx), inner_layout);
            ret.write_cvalue(fx, val);
        };
        volatile_store, (v ptr, c val) {
            // Cranelift treats stores as volatile by default
            let dest = CPlace::for_addr(ptr, val.layout());
            dest.write_cvalue(fx, val);
        };
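        // The atomic_* intrinsics below are lowered as plain (non-atomic) memory
        // accesses; fences are dropped entirely.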
695 _ if intrinsic.starts_with("atomic_fence"), () {};
696 _ if intrinsic.starts_with("atomic_singlethreadfence"), () {};
697 _ if intrinsic.starts_with("atomic_load"), (c ptr) {
699 fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
700 let val = CValue::by_ref(ptr.load_scalar(fx), inner_layout);
701 ret.write_cvalue(fx, val);
703 _ if intrinsic.starts_with("atomic_store"), (v ptr, c val) {
704 let dest = CPlace::for_addr(ptr, val.layout());
705 dest.write_cvalue(fx, val);
707 _ if intrinsic.starts_with("atomic_xchg"), <T> (v ptr, c src) {
709 let clif_ty = fx.clif_type(T).unwrap();
710 let old = fx.bcx.ins().load(clif_ty, MemFlags::new(), ptr, 0);
711 ret.write_cvalue(fx, CValue::by_val(old, fx.layout_of(T)));
714 let dest = CPlace::for_addr(ptr, src.layout());
715 dest.write_cvalue(fx, src);
717 _ if intrinsic.starts_with("atomic_cxchg"), <T> (v ptr, v test_old, v new) { // both atomic_cxchg_* and atomic_cxchgweak_*
719 let clif_ty = fx.clif_type(T).unwrap();
720 let old = fx.bcx.ins().load(clif_ty, MemFlags::new(), ptr, 0);
723 let is_eq = fx.bcx.ins().icmp(IntCC::Equal, old, test_old);
724 let new = crate::common::codegen_select(&mut fx.bcx, is_eq, new, old); // Keep old if not equal to test_old
727 fx.bcx.ins().store(MemFlags::new(), new, ptr, 0);
729 let ret_val = CValue::by_val_pair(old, fx.bcx.ins().bint(types::I8, is_eq), ret.layout());
730 ret.write_cvalue(fx, ret_val);
733 _ if intrinsic.starts_with("atomic_xadd"), <T> (v ptr, v amount) {
734 atomic_binop_return_old! (fx, iadd<T>(ptr, amount) -> ret);
736 _ if intrinsic.starts_with("atomic_xsub"), <T> (v ptr, v amount) {
737 atomic_binop_return_old! (fx, isub<T>(ptr, amount) -> ret);
739 _ if intrinsic.starts_with("atomic_and"), <T> (v ptr, v src) {
740 atomic_binop_return_old! (fx, band<T>(ptr, src) -> ret);
742 _ if intrinsic.starts_with("atomic_nand"), <T> (v ptr, v src) {
743 atomic_binop_return_old! (fx, band_not<T>(ptr, src) -> ret);
745 _ if intrinsic.starts_with("atomic_or"), <T> (v ptr, v src) {
746 atomic_binop_return_old! (fx, bor<T>(ptr, src) -> ret);
748 _ if intrinsic.starts_with("atomic_xor"), <T> (v ptr, v src) {
749 atomic_binop_return_old! (fx, bxor<T>(ptr, src) -> ret);
752 _ if intrinsic.starts_with("atomic_max"), <T> (v ptr, v src) {
753 atomic_minmax!(fx, IntCC::SignedGreaterThan, <T> (ptr, src) -> ret);
755 _ if intrinsic.starts_with("atomic_umax"), <T> (v ptr, v src) {
756 atomic_minmax!(fx, IntCC::UnsignedGreaterThan, <T> (ptr, src) -> ret);
758 _ if intrinsic.starts_with("atomic_min"), <T> (v ptr, v src) {
759 atomic_minmax!(fx, IntCC::SignedLessThan, <T> (ptr, src) -> ret);
761 _ if intrinsic.starts_with("atomic_umin"), <T> (v ptr, v src) {
762 atomic_minmax!(fx, IntCC::UnsignedLessThan, <T> (ptr, src) -> ret);
        minnumf32, (v a, v b) {
            let val = fx.bcx.ins().fmin(a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
            ret.write_cvalue(fx, val);
        };
        minnumf64, (v a, v b) {
            let val = fx.bcx.ins().fmin(a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
            ret.write_cvalue(fx, val);
        };
        maxnumf32, (v a, v b) {
            let val = fx.bcx.ins().fmax(a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f32));
            ret.write_cvalue(fx, val);
        };
        maxnumf64, (v a, v b) {
            let val = fx.bcx.ins().fmax(a, b);
            let val = CValue::by_val(val, fx.layout_of(fx.tcx.types.f64));
            ret.write_cvalue(fx, val);
        };
            ret.write_cvalue(fx, x.unchecked_cast_to(ret.layout()));
        simd_eq, (c x, c y) {
            simd_cmp!(fx, intrinsic, Equal(x, y) -> ret);
        };
        simd_ne, (c x, c y) {
            simd_cmp!(fx, intrinsic, NotEqual(x, y) -> ret);
        };
        simd_lt, (c x, c y) {
            simd_cmp!(fx, intrinsic, UnsignedLessThan|SignedLessThan(x, y) -> ret);
        };
        simd_le, (c x, c y) {
            simd_cmp!(fx, intrinsic, UnsignedLessThanOrEqual|SignedLessThanOrEqual(x, y) -> ret);
        };
        simd_gt, (c x, c y) {
            simd_cmp!(fx, intrinsic, UnsignedGreaterThan|SignedGreaterThan(x, y) -> ret);
        };
        simd_ge, (c x, c y) {
            simd_cmp!(fx, intrinsic, UnsignedGreaterThanOrEqual|SignedGreaterThanOrEqual(x, y) -> ret);
        };
        // simd_shuffle32<T, U>(x: T, y: T, idx: [u32; 32]) -> U
        _ if intrinsic.starts_with("simd_shuffle"), (c x, c y, c idx) {
            let n: usize = intrinsic["simd_shuffle".len()..].parse().unwrap();

            assert_eq!(x.layout(), y.layout());
            let layout = x.layout();

            let (lane_type, lane_count) = lane_type_and_count(fx, layout, intrinsic);
            let (ret_lane_type, ret_lane_count) = lane_type_and_count(fx, ret.layout(), intrinsic);

            assert_eq!(lane_type, ret_lane_type);
            assert_eq!(n, ret_lane_count);

            let total_len = lane_count * 2;

            // TODO get shuffle indices
            fx.tcx.sess.warn("simd_shuffle* not yet implemented");
            crate::trap::trap_unimplemented(fx, "simd_shuffle* not yet implemented");
        };
        simd_add, (c x, c y) {
            simd_binop!(fx, intrinsic, iadd(x, y) -> ret);
        };
        simd_sub, (c x, c y) {
            simd_binop!(fx, intrinsic, isub(x, y) -> ret);
        };
        simd_mul, (c x, c y) {
            simd_binop!(fx, intrinsic, imul(x, y) -> ret);
        };
        simd_div, (c x, c y) {
            simd_binop!(fx, intrinsic, udiv|sdiv(x, y) -> ret);
        };
        simd_rem, (c x, c y) {
            simd_binop!(fx, intrinsic, urem|srem(x, y) -> ret);
        };
        simd_shl, (c x, c y) {
            simd_binop!(fx, intrinsic, ishl(x, y) -> ret);
        };
        simd_shr, (c x, c y) {
            simd_binop!(fx, intrinsic, ushr|sshr(x, y) -> ret);
        };
        simd_and, (c x, c y) {
            simd_binop!(fx, intrinsic, band(x, y) -> ret);
        };
        simd_or, (c x, c y) {
            simd_binop!(fx, intrinsic, bor(x, y) -> ret);
        };
        simd_xor, (c x, c y) {
            simd_binop!(fx, intrinsic, bxor(x, y) -> ret);
        };

        simd_fmin, (c x, c y) {
            simd_binop!(fx, intrinsic, fmin(x, y) -> ret);
        };
        simd_fmax, (c x, c y) {
            simd_binop!(fx, intrinsic, fmax(x, y) -> ret);
        };
    }

    if let Some((_, dest)) = destination {
        let ret_ebb = fx.get_ebb(dest);
        fx.bcx.ins().jump(ret_ebb, &[]);
    } else {
        trap_unreachable(fx, "[corruption] Diverging intrinsic returned.");
    }
}