$(
$($($name:tt).*)|+ $(if $cond:expr)?, $(<$($subst:ident),*>)? ($($a:ident $arg:ident),*) $content:block;
)*) => {
+ let _ = $substs; // Silence warning when substs is unused.
match $intrinsic {
$(
$(intrinsic_pat!($($name).*))|* $(if $cond)? => {
$ret.write_cvalue($fx, res);
if let Some((_, dest)) = $destination {
- let ret_ebb = $fx.get_ebb(dest);
- $fx.bcx.ins().jump(ret_ebb, &[]);
+ let ret_block = $fx.get_block(dest);
+ $fx.bcx.ins().jump(ret_block, &[]);
return;
} else {
unreachable!();
}
macro atomic_binop_return_old($fx:expr, $op:ident<$T:ident>($ptr:ident, $src:ident) -> $ret:ident) {
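+ // No native atomic op is emitted here: the plain load/modify/store below is
+ // guarded by the process-wide lock from `crate::atomic_shim`, so it is only
+ // atomic with respect to other shimmed atomic operations.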
+ crate::atomic_shim::lock_global_lock($fx);
+
let clif_ty = $fx.clif_type($T).unwrap();
let old = $fx.bcx.ins().load(clif_ty, MemFlags::new(), $ptr, 0);
let new = $fx.bcx.ins().$op(old, $src);
$fx.bcx.ins().store(MemFlags::new(), new, $ptr, 0);
$ret.write_cvalue($fx, CValue::by_val(old, $fx.layout_of($T)));
+
+ crate::atomic_shim::unlock_global_lock($fx);
}
macro atomic_minmax($fx:expr, $cc:expr, <$T:ident> ($ptr:ident, $src:ident) -> $ret:ident) {
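+ // Same global-lock shim as `atomic_binop_return_old` above.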
+ crate::atomic_shim::lock_global_lock($fx);
+
// Read old
let clif_ty = $fx.clif_type($T).unwrap();
let old = $fx.bcx.ins().load(clif_ty, MemFlags::new(), $ptr, 0);
let ret_val = CValue::by_val(old, $ret.layout());
$ret.write_cvalue($fx, ret_val);
+
+ crate::atomic_shim::unlock_global_lock($fx);
}
-pub fn lane_type_and_count<'tcx>(
+fn lane_type_and_count<'tcx>(
tcx: TyCtxt<'tcx>,
layout: TyLayout<'tcx>,
) -> (TyLayout<'tcx>, u32) {
fn simd_for_each_lane<'tcx, B: Backend>(
fx: &mut FunctionCx<'_, 'tcx, B>,
- intrinsic: &str,
val: CValue<'tcx>,
ret: CPlace<'tcx>,
f: impl Fn(
fn simd_pair_for_each_lane<'tcx, B: Backend>(
fx: &mut FunctionCx<'_, 'tcx, B>,
- intrinsic: &str,
x: CValue<'tcx>,
y: CValue<'tcx>,
ret: CPlace<'tcx>,
}
macro simd_cmp {
- ($fx:expr, $intrinsic:expr, $cc:ident($x:ident, $y:ident) -> $ret:ident) => {
+ ($fx:expr, $cc:ident($x:ident, $y:ident) -> $ret:ident) => {
simd_pair_for_each_lane(
$fx,
- $intrinsic,
$x,
$y,
$ret,
},
);
},
- ($fx:expr, $intrinsic:expr, $cc_u:ident|$cc_s:ident($x:ident, $y:ident) -> $ret:ident) => {
+ ($fx:expr, $cc_u:ident|$cc_s:ident($x:ident, $y:ident) -> $ret:ident) => {
simd_pair_for_each_lane(
$fx,
- $intrinsic,
$x,
$y,
$ret,
}
macro simd_int_binop {
- ($fx:expr, $intrinsic:expr, $op:ident($x:ident, $y:ident) -> $ret:ident) => {
- simd_int_binop!($fx, $intrinsic, $op|$op($x, $y) -> $ret);
+ ($fx:expr, $op:ident($x:ident, $y:ident) -> $ret:ident) => {
+ simd_int_binop!($fx, $op|$op($x, $y) -> $ret);
},
- ($fx:expr, $intrinsic:expr, $op_u:ident|$op_s:ident($x:ident, $y:ident) -> $ret:ident) => {
- let (lane_layout, lane_count) = lane_type_and_count($fx.tcx, $x.layout());
- let x_val = $x.load_vector($fx);
- let y_val = $y.load_vector($fx);
-
- let res = match lane_layout.ty.kind {
- ty::Uint(_) => $fx.bcx.ins().$op_u(x_val, y_val),
- ty::Int(_) => $fx.bcx.ins().$op_s(x_val, y_val),
- _ => unreachable!("{:?}", lane_layout.ty),
- };
- $ret.write_cvalue($fx, CValue::by_val(res, $ret.layout()));
+ ($fx:expr, $op_u:ident|$op_s:ident($x:ident, $y:ident) -> $ret:ident) => {
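+ // Lower the binop lane by lane: `simd_pair_for_each_lane` calls the closure
+ // once per lane and writes each returned lane into `$ret`.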
+ simd_pair_for_each_lane(
+ $fx,
+ $x,
+ $y,
+ $ret,
+ |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
+ let res_lane = match lane_layout.ty.kind {
+ ty::Uint(_) => fx.bcx.ins().$op_u(x_lane, y_lane),
+ ty::Int(_) => fx.bcx.ins().$op_s(x_lane, y_lane),
+ _ => unreachable!("{:?}", lane_layout.ty),
+ };
+ CValue::by_val(res_lane, ret_lane_layout)
+ },
+ );
},
}
macro simd_int_flt_binop {
- ($fx:expr, $intrinsic:expr, $op:ident|$op_f:ident($x:ident, $y:ident) -> $ret:ident) => {
- simd_int_flt_binop!($fx, $intrinsic, $op|$op|$op_f($x, $y) -> $ret);
+ ($fx:expr, $op:ident|$op_f:ident($x:ident, $y:ident) -> $ret:ident) => {
+ simd_int_flt_binop!($fx, $op|$op|$op_f($x, $y) -> $ret);
},
- ($fx:expr, $intrinsic:expr, $op_u:ident|$op_s:ident|$op_f:ident($x:ident, $y:ident) -> $ret:ident) => {
- let (lane_layout, lane_count) = lane_type_and_count($fx.tcx, $x.layout());
- let x_val = $x.load_vector($fx);
- let y_val = $y.load_vector($fx);
-
- let res = match lane_layout.ty.kind {
- ty::Uint(_) => $fx.bcx.ins().$op_u(x_val, y_val),
- ty::Int(_) => $fx.bcx.ins().$op_s(x_val, y_val),
- ty::Float(_) => $fx.bcx.ins().$op_f(x_val, y_val),
- _ => unreachable!("{:?}", lane_layout.ty),
- };
- $ret.write_cvalue($fx, CValue::by_val(res, $ret.layout()));
+ ($fx:expr, $op_u:ident|$op_s:ident|$op_f:ident($x:ident, $y:ident) -> $ret:ident) => {
+ simd_pair_for_each_lane(
+ $fx,
+ $x,
+ $y,
+ $ret,
+ |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
+ let res_lane = match lane_layout.ty.kind {
+ ty::Uint(_) => fx.bcx.ins().$op_u(x_lane, y_lane),
+ ty::Int(_) => fx.bcx.ins().$op_s(x_lane, y_lane),
+ ty::Float(_) => fx.bcx.ins().$op_f(x_lane, y_lane),
+ _ => unreachable!("{:?}", lane_layout.ty),
+ };
+ CValue::by_val(res_lane, ret_lane_layout)
+ },
+ );
},
}
-macro simd_flt_binop($fx:expr, $intrinsic:expr, $op:ident($x:ident, $y:ident) -> $ret:ident) {
- let (lane_layout, lane_count) = lane_type_and_count($fx.tcx, $x.layout());
- let x_val = $x.load_vector($fx);
- let y_val = $y.load_vector($fx);
-
- let res = match lane_layout.ty.kind {
- ty::Float(_) => $fx.bcx.ins().$op(x_val, y_val),
- _ => unreachable!("{:?}", lane_layout.ty),
- };
- $ret.write_cvalue($fx, CValue::by_val(res, $ret.layout()));
+macro simd_flt_binop($fx:expr, $op:ident($x:ident, $y:ident) -> $ret:ident) {
+ simd_pair_for_each_lane(
+ $fx,
+ $x,
+ $y,
+ $ret,
+ |fx, lane_layout, ret_lane_layout, x_lane, y_lane| {
+ let res_lane = match lane_layout.ty.kind {
+ ty::Float(_) => fx.bcx.ins().$op(x_lane, y_lane),
+ _ => unreachable!("{:?}", lane_layout.ty),
+ };
+ CValue::by_val(res_lane, ret_lane_layout)
+ },
+ );
}
pub fn codegen_intrinsic_call<'tcx>(
"transmute" => {
trap_unreachable(
fx,
- "[corruption] Called intrinsic::transmute with uninhabited argument.",
+ "[corruption] Transmuting to uninhabited type.",
);
}
_ => unimplemented!("unsupported instrinsic {}", intrinsic),
if intrinsic.starts_with("simd_") {
self::simd::codegen_simd_intrinsic_call(fx, instance, args, ret, span);
- let ret_ebb = fx.get_ebb(destination.expect("SIMD intrinsics don't diverge").1);
- fx.bcx.ins().jump(ret_ebb, &[]);
+ let ret_block = fx.get_block(destination.expect("SIMD intrinsics don't diverge").1);
+ fx.bcx.ins().jump(ret_block, &[]);
return;
}
intrinsic_match! {
fx, intrinsic, substs, args,
_ => {
- unimpl!("unsupported intrinsic {}", intrinsic)
+ unimpl_fatal!(fx.tcx, span, "unsupported intrinsic {}", intrinsic);
};
assume, (c _a) {};
let byte_amount = fx.bcx.ins().imul(count, elem_size);
if intrinsic.ends_with("_nonoverlapping") {
+ // FIXME emit_small_memcpy
fx.bcx.call_memcpy(fx.module.target_config(), dst, src, byte_amount);
} else {
+ // FIXME emit_small_memmove
fx.bcx.call_memmove(fx.module.target_config(), dst, src, byte_amount);
}
};
let layout = fx.layout_of(T);
let size = if layout.is_unsized() {
let (_ptr, info) = ptr.load_scalar_pair(fx);
- let (size, _align) = crate::unsize::size_and_align_of_dst(fx, layout.ty, info);
+ let (size, _align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
size
} else {
fx
let layout = fx.layout_of(T);
let align = if layout.is_unsized() {
let (_ptr, info) = ptr.load_scalar_pair(fx);
- let (_size, align) = crate::unsize::size_and_align_of_dst(fx, layout.ty, info);
+ let (_size, align) = crate::unsize::size_and_align_of_dst(fx, layout, info);
align
} else {
fx
"unchecked_rem" => BinOp::Rem,
"unchecked_shl" => BinOp::Shl,
"unchecked_shr" => BinOp::Shr,
- _ => unimplemented!("intrinsic {}", intrinsic),
+ _ => unreachable!("intrinsic {}", intrinsic),
};
let res = crate::num::trans_int_binop(fx, bin_op, x, y);
ret.write_cvalue(fx, res);
"add_with_overflow" => BinOp::Add,
"sub_with_overflow" => BinOp::Sub,
"mul_with_overflow" => BinOp::Mul,
- _ => unimplemented!("intrinsic {}", intrinsic),
+ _ => unreachable!("intrinsic {}", intrinsic),
};
let res = crate::num::trans_checked_int_binop(
"wrapping_add" => BinOp::Add,
"wrapping_sub" => BinOp::Sub,
"wrapping_mul" => BinOp::Mul,
- _ => unimplemented!("intrinsic {}", intrinsic),
+ _ => unreachable!("intrinsic {}", intrinsic),
};
let res = crate::num::trans_int_binop(
fx,
let bin_op = match intrinsic {
"saturating_add" => BinOp::Add,
"saturating_sub" => BinOp::Sub,
- _ => unimplemented!("intrinsic {}", intrinsic),
+ _ => unreachable!("intrinsic {}", intrinsic),
};
let signed = type_sign(T);
transmute, <src_ty, dst_ty> (c from) {
assert_eq!(from.layout().ty, src_ty);
- let addr = from.force_stack(fx);
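+ // `force_stack` also returns the place's metadata; transmute operands are
+ // always sized, so there must be none.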
+ let (addr, meta) = from.force_stack(fx);
+ assert!(meta.is_none());
let dst_layout = fx.layout_of(dst_ty);
ret.write_cvalue(fx, CValue::by_ref(addr, dst_layout))
};
- init, () {
- let layout = ret.layout();
- if layout.abi == Abi::Uninhabited {
- crate::trap::trap_panic(fx, "[panic] Called intrinsic::init for uninhabited type.");
- return;
- }
-
- match *ret.inner() {
- CPlaceInner::NoPlace => {}
- CPlaceInner::Var(var) => {
- let clif_ty = fx.clif_type(layout.ty).unwrap();
- let val = match clif_ty {
- types::I8 | types::I16 | types::I32 | types::I64 => fx.bcx.ins().iconst(clif_ty, 0),
- types::I128 => {
- let zero = fx.bcx.ins().iconst(types::I64, 0);
- fx.bcx.ins().iconcat(zero, zero)
- }
- types::F32 => {
- let zero = fx.bcx.ins().iconst(types::I32, 0);
- fx.bcx.ins().bitcast(types::F32, zero)
- }
- types::F64 => {
- let zero = fx.bcx.ins().iconst(types::I64, 0);
- fx.bcx.ins().bitcast(types::F64, zero)
- }
- _ => panic!("clif_type returned {}", clif_ty),
- };
- fx.bcx.set_val_label(val, cranelift_codegen::ir::ValueLabel::from_u32(var.as_u32()));
- fx.bcx.def_var(mir_var(var), val);
- }
- _ => {
- let addr = ret.to_ptr(fx).get_addr(fx);
- let layout = ret.layout();
- fx.bcx.emit_small_memset(fx.module.target_config(), addr, 0, layout.size.bytes(), 1);
- }
- }
- };
- uninit, () {
- let layout = ret.layout();
- if layout.abi == Abi::Uninhabited {
- crate::trap::trap_panic(fx, "[panic] Called intrinsic::uninit for uninhabited type.");
- return;
- }
- match *ret.inner() {
- CPlaceInner::NoPlace => {},
- CPlaceInner::Var(var) => {
- let clif_ty = fx.clif_type(layout.ty).unwrap();
- let val = match clif_ty {
- types::I8 | types::I16 | types::I32 | types::I64 => fx.bcx.ins().iconst(clif_ty, 42),
- types::I128 => {
- let zero = fx.bcx.ins().iconst(types::I64, 0);
- let fourty_two = fx.bcx.ins().iconst(types::I64, 42);
- fx.bcx.ins().iconcat(fourty_two, zero)
- }
- types::F32 => {
- let zero = fx.bcx.ins().iconst(types::I32, 0xdeadbeef);
- fx.bcx.ins().bitcast(types::F32, zero)
- }
- types::F64 => {
- let zero = fx.bcx.ins().iconst(types::I64, 0xcafebabedeadbeefu64 as i64);
- fx.bcx.ins().bitcast(types::F64, zero)
- }
- _ => panic!("clif_type returned {}", clif_ty),
- };
- fx.bcx.set_val_label(val, cranelift_codegen::ir::ValueLabel::from_u32(var.as_u32()));
- fx.bcx.def_var(mir_var(var), val);
- }
- CPlaceInner::Addr(_, _) => {
- // Don't write to `ret`, as the destination memory is already uninitialized.
- }
- }
- };
write_bytes, (c dst, v val, v count) {
let pointee_ty = dst.layout().ty.builtin_deref(true).unwrap().ty;
let pointee_size = fx.layout_of(pointee_ty).size.bytes();
let hi = swap(bcx, hi);
bcx.ins().iconcat(hi, lo)
}
- ty => unimplemented!("bswap {}", ty),
+ ty => unreachable!("bswap {}", ty),
}
};
let res = CValue::by_val(swap(&mut fx.bcx, arg), fx.layout_of(T));
ret.write_cvalue(fx, res);
};
- panic_if_uninhabited, <T> () {
- if fx.layout_of(T).abi.is_uninhabited() {
- crate::trap::trap_panic(fx, "[panic] Called intrinsic::panic_if_uninhabited for uninhabited type.");
+ assert_inhabited | assert_zero_valid | assert_uninit_valid, <T> () {
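+ // For types that cannot be validly instantiated, zero-initialized or left
+ // uninitialized, emit a panic instead of the normal return.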
+ let layout = fx.layout_of(T);
+ if layout.abi.is_uninhabited() {
+ crate::trap::trap_panic(fx, &format!("attempted to instantiate uninhabited type `{}`", T));
+ return;
+ }
+
+ if intrinsic == "assert_zero_valid" && !layout.might_permit_raw_init(fx, /*zero:*/ true).unwrap() {
+ crate::trap::trap_panic(fx, &format!("attempted to zero-initialize type `{}`, which is invalid", T));
+ return;
+ }
+
+ if intrinsic == "assert_uninit_valid" && !layout.might_permit_raw_init(fx, /*zero:*/ false).unwrap() {
+ crate::trap::trap_panic(fx, &format!("attempted to leave type `{}` uninitialized, which is invalid", T));
return;
}
};
size_of | pref_align_of | min_align_of | needs_drop | type_id | type_name, () {
let const_val =
fx.tcx.const_eval_instance(ParamEnv::reveal_all(), instance, None).unwrap();
- let val = crate::constant::trans_const_value(fx, const_val);
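+ // `trans_const_value` expects a `ty::Const`, so pair the evaluated value
+ // with the return type first.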
+ let val = crate::constant::trans_const_value(
+ fx,
+ ty::Const::from_value(fx.tcx, const_val, ret.layout().ty),
+ );
ret.write_cvalue(fx, val);
};
ret.write_cvalue(fx, caller_location);
};
- _ if intrinsic.starts_with("atomic_fence"), () {};
- _ if intrinsic.starts_with("atomic_singlethreadfence"), () {};
+ _ if intrinsic.starts_with("atomic_fence"), () {
+ crate::atomic_shim::lock_global_lock(fx);
+ crate::atomic_shim::unlock_global_lock(fx);
+ };
+ _ if intrinsic.starts_with("atomic_singlethreadfence"), () {
+ crate::atomic_shim::lock_global_lock(fx);
+ crate::atomic_shim::unlock_global_lock(fx);
+ };
_ if intrinsic.starts_with("atomic_load"), (c ptr) {
+ crate::atomic_shim::lock_global_lock(fx);
+
let inner_layout =
fx.layout_of(ptr.layout().ty.builtin_deref(true).unwrap().ty);
let val = CValue::by_ref(Pointer::new(ptr.load_scalar(fx)), inner_layout);
ret.write_cvalue(fx, val);
+
+ crate::atomic_shim::unlock_global_lock(fx);
};
_ if intrinsic.starts_with("atomic_store"), (v ptr, c val) {
+ crate::atomic_shim::lock_global_lock(fx);
+
let dest = CPlace::for_ptr(Pointer::new(ptr), val.layout());
dest.write_cvalue(fx, val);
+
+ crate::atomic_shim::unlock_global_lock(fx);
};
_ if intrinsic.starts_with("atomic_xchg"), <T> (v ptr, c src) {
+ crate::atomic_shim::lock_global_lock(fx);
+
// Read old
let clif_ty = fx.clif_type(T).unwrap();
let old = fx.bcx.ins().load(clif_ty, MemFlags::new(), ptr, 0);
// Write new
let dest = CPlace::for_ptr(Pointer::new(ptr), src.layout());
dest.write_cvalue(fx, src);
+
+ crate::atomic_shim::unlock_global_lock(fx);
};
_ if intrinsic.starts_with("atomic_cxchg"), <T> (v ptr, v test_old, v new) { // both atomic_cxchg_* and atomic_cxchgweak_*
+ crate::atomic_shim::lock_global_lock(fx);
+
// Read old
let clif_ty = fx.clif_type(T).unwrap();
let old = fx.bcx.ins().load(clif_ty, MemFlags::new(), ptr, 0);
let ret_val = CValue::by_val_pair(old, fx.bcx.ins().bint(types::I8, is_eq), ret.layout());
ret.write_cvalue(fx, ret_val);
+
+ crate::atomic_shim::unlock_global_lock(fx);
};
_ if intrinsic.starts_with("atomic_xadd"), <T> (v ptr, v amount) {
atomic_binop_return_old! (fx, band<T>(ptr, src) -> ret);
};
_ if intrinsic.starts_with("atomic_nand"), <T> (v ptr, v src) {
+ crate::atomic_shim::lock_global_lock(fx);
+
let clif_ty = fx.clif_type(T).unwrap();
let old = fx.bcx.ins().load(clif_ty, MemFlags::new(), ptr, 0);
let and = fx.bcx.ins().band(old, src);
let new = fx.bcx.ins().bnot(and);
fx.bcx.ins().store(MemFlags::new(), new, ptr, 0);
ret.write_cvalue(fx, CValue::by_val(old, fx.layout_of(T)));
+
+ crate::atomic_shim::unlock_global_lock(fx);
};
_ if intrinsic.starts_with("atomic_or"), <T> (v ptr, v src) {
atomic_binop_return_old! (fx, bor<T>(ptr, src) -> ret);
ret.write_cvalue(fx, val);
};
- try, (v f, v data, v _local_ptr) {
+ try, (v f, v data, v _catch_fn) {
// FIXME once unwinding is supported, change this to actually catch panics
let f_sig = fx.bcx.func.import_signature(Signature {
call_conv: CallConv::triple_default(fx.triple()),
fx.bcx.ins().call_indirect(f_sig, f, &[data]);
- let ret_val = CValue::const_val(fx, ret.layout().ty, 0);
+ let ret_val = CValue::const_val(fx, ret.layout(), 0);
ret.write_cvalue(fx, ret_val);
};
}
if let Some((_, dest)) = destination {
- let ret_ebb = fx.get_ebb(dest);
- fx.bcx.ins().jump(ret_ebb, &[]);
+ let ret_block = fx.get_block(dest);
+ fx.bcx.ins().jump(ret_block, &[]);
} else {
trap_unreachable(fx, "[corruption] Diverging intrinsic returned.");
}