+//! Various operations on integer and floating-point numbers
+
use crate::prelude::*;
-pub fn bin_op_to_intcc(bin_op: BinOp, signed: bool) -> Option<IntCC> {
+pub(crate) fn bin_op_to_intcc(bin_op: BinOp, signed: bool) -> Option<IntCC> {
use BinOp::*;
use IntCC::*;
Some(match bin_op {
}
fn codegen_compare_bin_op<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
bin_op: BinOp,
signed: bool,
lhs: Value,
rhs: Value,
) -> CValue<'tcx> {
let intcc = crate::num::bin_op_to_intcc(bin_op, signed).unwrap();
- let val = codegen_icmp(fx, intcc, lhs, rhs);
+ let val = fx.bcx.ins().icmp(intcc, lhs, rhs);
let val = fx.bcx.ins().bint(types::I8, val);
CValue::by_val(val, fx.layout_of(fx.tcx.types.bool))
}
-pub fn codegen_binop<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
+pub(crate) fn codegen_binop<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
bin_op: BinOp,
in_lhs: CValue<'tcx>,
in_rhs: CValue<'tcx>,
) -> CValue<'tcx> {
match bin_op {
BinOp::Eq | BinOp::Lt | BinOp::Le | BinOp::Ne | BinOp::Ge | BinOp::Gt => {
- match in_lhs.layout().ty.sty {
+ match in_lhs.layout().ty.kind() {
ty::Bool | ty::Uint(_) | ty::Int(_) | ty::Char => {
let signed = type_sign(in_lhs.layout().ty);
let lhs = in_lhs.load_scalar(fx);
let rhs = in_rhs.load_scalar(fx);
- let (lhs, rhs) = if (bin_op == BinOp::Eq || bin_op == BinOp::Ne)
- && (in_lhs.layout().ty.sty == fx.tcx.types.i8.sty
- || in_lhs.layout().ty.sty == fx.tcx.types.i16.sty)
- {
- // FIXME(CraneStation/cranelift#896) icmp_imm.i8/i16 with eq/ne for signed ints is implemented wrong.
- (
- fx.bcx.ins().sextend(types::I32, lhs),
- fx.bcx.ins().sextend(types::I32, rhs),
- )
- } else {
- (lhs, rhs)
- };
-
return codegen_compare_bin_op(fx, bin_op, signed, lhs, rhs);
}
                _ => {}
}
- match in_lhs.layout().ty.sty {
- ty::Bool => crate::num::trans_bool_binop(fx, bin_op, in_lhs, in_rhs),
- ty::Uint(_) | ty::Int(_) => crate::num::trans_int_binop(fx, bin_op, in_lhs, in_rhs),
- ty::Float(_) => crate::num::trans_float_binop(fx, bin_op, in_lhs, in_rhs),
- ty::RawPtr(..) | ty::FnPtr(..) => crate::num::trans_ptr_binop(fx, bin_op, in_lhs, in_rhs),
- _ => unimplemented!(
- "{:?}({:?}, {:?})",
- bin_op,
- in_lhs.layout().ty,
- in_rhs.layout().ty
- ),
+ match in_lhs.layout().ty.kind() {
+ ty::Bool => crate::num::codegen_bool_binop(fx, bin_op, in_lhs, in_rhs),
+ ty::Uint(_) | ty::Int(_) => crate::num::codegen_int_binop(fx, bin_op, in_lhs, in_rhs),
+ ty::Float(_) => crate::num::codegen_float_binop(fx, bin_op, in_lhs, in_rhs),
+ ty::RawPtr(..) | ty::FnPtr(..) => crate::num::codegen_ptr_binop(fx, bin_op, in_lhs, in_rhs),
+ _ => unreachable!("{:?}({:?}, {:?})", bin_op, in_lhs.layout().ty, in_rhs.layout().ty),
}
}
-pub fn trans_bool_binop<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
+pub(crate) fn codegen_bool_binop<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
bin_op: BinOp,
in_lhs: CValue<'tcx>,
in_rhs: CValue<'tcx>,
CValue::by_val(res, fx.layout_of(fx.tcx.types.bool))
}
-pub fn trans_int_binop<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
+pub(crate) fn codegen_int_binop<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
bin_op: BinOp,
in_lhs: CValue<'tcx>,
in_rhs: CValue<'tcx>,
BinOp::BitOr => b.bor(lhs, rhs),
BinOp::Shl => {
let lhs_ty = fx.bcx.func.dfg.value_type(lhs);
- let rhs = clif_intcast(fx, rhs, lhs_ty, false);
- fx.bcx.ins().ishl(lhs, rhs)
+ let actual_shift = fx.bcx.ins().band_imm(rhs, i64::from(lhs_ty.bits() - 1));
+ fx.bcx.ins().ishl(lhs, actual_shift)
}
BinOp::Shr => {
let lhs_ty = fx.bcx.func.dfg.value_type(lhs);
- let rhs = clif_intcast(fx, rhs, lhs_ty, false);
+ let actual_shift = fx.bcx.ins().band_imm(rhs, i64::from(lhs_ty.bits() - 1));
if signed {
- fx.bcx.ins().sshr(lhs, rhs)
+ fx.bcx.ins().sshr(lhs, actual_shift)
} else {
- fx.bcx.ins().ushr(lhs, rhs)
+ fx.bcx.ins().ushr(lhs, actual_shift)
}
}
// Compare binops handles by `codegen_binop`.
- _ => unreachable!(
- "{:?}({:?}, {:?})",
- bin_op,
- in_lhs.layout().ty,
- in_rhs.layout().ty
- ),
+ _ => unreachable!("{:?}({:?}, {:?})", bin_op, in_lhs.layout().ty, in_rhs.layout().ty),
};
CValue::by_val(val, in_lhs.layout())
}
-pub fn trans_checked_int_binop<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
+pub(crate) fn codegen_checked_int_binop<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
bin_op: BinOp,
in_lhs: CValue<'tcx>,
in_rhs: CValue<'tcx>,
(val, has_overflow)
}
BinOp::Mul => {
- let val = fx.bcx.ins().imul(lhs, rhs);
- /*let val_hi = if !signed {
- fx.bcx.ins().umulhi(lhs, rhs)
- } else {
- fx.bcx.ins().smulhi(lhs, rhs)
- };
- let has_overflow = fx.bcx.ins().icmp_imm(IntCC::NotEqual, val_hi, 0);*/
- // TODO: check for overflow
- let has_overflow = fx.bcx.ins().bconst(types::B1, false);
- (val, has_overflow)
+ let ty = fx.bcx.func.dfg.value_type(lhs);
+ match ty {
+ types::I8 | types::I16 | types::I32 if !signed => {
+ let lhs = fx.bcx.ins().uextend(ty.double_width().unwrap(), lhs);
+ let rhs = fx.bcx.ins().uextend(ty.double_width().unwrap(), rhs);
+ let val = fx.bcx.ins().imul(lhs, rhs);
+ let has_overflow = fx.bcx.ins().icmp_imm(
+ IntCC::UnsignedGreaterThan,
+ val,
+ (1 << ty.bits()) - 1,
+ );
+ let val = fx.bcx.ins().ireduce(ty, val);
+ (val, has_overflow)
+ }
+ types::I8 | types::I16 | types::I32 if signed => {
+ let lhs = fx.bcx.ins().sextend(ty.double_width().unwrap(), lhs);
+ let rhs = fx.bcx.ins().sextend(ty.double_width().unwrap(), rhs);
+ let val = fx.bcx.ins().imul(lhs, rhs);
+ let has_underflow =
+ fx.bcx.ins().icmp_imm(IntCC::SignedLessThan, val, -(1 << (ty.bits() - 1)));
+ let has_overflow = fx.bcx.ins().icmp_imm(
+ IntCC::SignedGreaterThan,
+ val,
+ (1 << (ty.bits() - 1)) - 1,
+ );
+ let val = fx.bcx.ins().ireduce(ty, val);
+ (val, fx.bcx.ins().bor(has_underflow, has_overflow))
+ }
+ types::I64 => {
+ let val = fx.bcx.ins().imul(lhs, rhs);
+ let has_overflow = if !signed {
+ let val_hi = fx.bcx.ins().umulhi(lhs, rhs);
+ fx.bcx.ins().icmp_imm(IntCC::NotEqual, val_hi, 0)
+ } else {
+ // Based on LLVM's instruction sequence for compiling
+ // a.checked_mul(b).is_some() to riscv64gc:
+ // mulh a2, a0, a1
+ // mul a0, a0, a1
+ // srai a0, a0, 63
+ // xor a0, a0, a2
+ // snez a0, a0
+ let val_hi = fx.bcx.ins().smulhi(lhs, rhs);
+ let val_sign = fx.bcx.ins().sshr_imm(val, i64::from(ty.bits() - 1));
+ let xor = fx.bcx.ins().bxor(val_hi, val_sign);
+ fx.bcx.ins().icmp_imm(IntCC::NotEqual, xor, 0)
+ };
+ (val, has_overflow)
+ }
+ types::I128 => {
+ unreachable!("i128 should have been handled by codegen_i128::maybe_codegen")
+ }
+ _ => unreachable!("invalid non-integer type {}", ty),
+ }
}
BinOp::Shl => {
- let val = fx.bcx.ins().ishl(lhs, rhs);
+ let lhs_ty = fx.bcx.func.dfg.value_type(lhs);
+ let masked_shift = fx.bcx.ins().band_imm(rhs, i64::from(lhs_ty.bits() - 1));
+ let val = fx.bcx.ins().ishl(lhs, masked_shift);
let ty = fx.bcx.func.dfg.value_type(val);
let max_shift = i64::from(ty.bits()) - 1;
- let has_overflow =
- fx.bcx.ins().icmp_imm(IntCC::UnsignedGreaterThan, rhs, max_shift);
+ let has_overflow = fx.bcx.ins().icmp_imm(IntCC::UnsignedGreaterThan, rhs, max_shift);
(val, has_overflow)
}
BinOp::Shr => {
+ let lhs_ty = fx.bcx.func.dfg.value_type(lhs);
+ let masked_shift = fx.bcx.ins().band_imm(rhs, i64::from(lhs_ty.bits() - 1));
let val = if !signed {
- fx.bcx.ins().ushr(lhs, rhs)
+ fx.bcx.ins().ushr(lhs, masked_shift)
} else {
- fx.bcx.ins().sshr(lhs, rhs)
+ fx.bcx.ins().sshr(lhs, masked_shift)
};
let ty = fx.bcx.func.dfg.value_type(val);
let max_shift = i64::from(ty.bits()) - 1;
- let has_overflow =
- fx.bcx.ins().icmp_imm(IntCC::UnsignedGreaterThan, rhs, max_shift);
+ let has_overflow = fx.bcx.ins().icmp_imm(IntCC::UnsignedGreaterThan, rhs, max_shift);
(val, has_overflow)
}
- _ => bug!(
- "binop {:?} on checked int/uint lhs: {:?} rhs: {:?}",
- bin_op,
- in_lhs,
- in_rhs
- ),
+ _ => bug!("binop {:?} on checked int/uint lhs: {:?} rhs: {:?}", bin_op, in_lhs, in_rhs),
};
let has_overflow = fx.bcx.ins().bint(types::I8, has_overflow);
- let out_place = CPlace::new_stack_slot(
- fx,
- fx.tcx
- .mk_tup([in_lhs.layout().ty, fx.tcx.types.bool].iter()),
- );
- let out_layout = out_place.layout();
- out_place.write_cvalue(fx, CValue::by_val_pair(res, has_overflow, out_layout));
-
- out_place.to_cvalue(fx)
+
+ let out_layout = fx.layout_of(fx.tcx.mk_tup([in_lhs.layout().ty, fx.tcx.types.bool].iter()));
+ CValue::by_val_pair(res, has_overflow, out_layout)
}
-pub fn trans_float_binop<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
+pub(crate) fn codegen_float_binop<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
bin_op: BinOp,
in_lhs: CValue<'tcx>,
in_rhs: CValue<'tcx>,
BinOp::Mul => b.fmul(lhs, rhs),
BinOp::Div => b.fdiv(lhs, rhs),
BinOp::Rem => {
- let name = match in_lhs.layout().ty.sty {
+ let name = match in_lhs.layout().ty.kind() {
ty::Float(FloatTy::F32) => "fmodf",
ty::Float(FloatTy::F64) => "fmod",
_ => bug!(),
CValue::by_val(res, in_lhs.layout())
}
-pub fn trans_ptr_binop<'tcx>(
- fx: &mut FunctionCx<'_, 'tcx, impl Backend>,
+pub(crate) fn codegen_ptr_binop<'tcx>(
+ fx: &mut FunctionCx<'_, '_, 'tcx>,
bin_op: BinOp,
in_lhs: CValue<'tcx>,
in_rhs: CValue<'tcx>,
) -> CValue<'tcx> {
- let is_thin_ptr = in_lhs.layout().ty.builtin_deref(true).map(|TypeAndMut { ty, mutbl: _}| {
- !has_ptr_meta(fx.tcx, ty)
- }).unwrap_or(true);
+ let is_thin_ptr = in_lhs
+ .layout()
+ .ty
+ .builtin_deref(true)
+ .map(|TypeAndMut { ty, mutbl: _ }| !has_ptr_meta(fx.tcx, ty))
+ .unwrap_or(true);
if is_thin_ptr {
match bin_op {
let lhs = in_lhs.load_scalar(fx);
let rhs = in_rhs.load_scalar(fx);
- return codegen_compare_bin_op(fx, bin_op, false, lhs, rhs);
+ codegen_compare_bin_op(fx, bin_op, false, lhs, rhs)
}
BinOp::Offset => {
let pointee_ty = in_lhs.layout().ty.builtin_deref(true).unwrap().ty;
let ptr_diff = fx.bcx.ins().imul_imm(offset, pointee_size as i64);
let base_val = base.load_scalar(fx);
let res = fx.bcx.ins().iadd(base_val, ptr_diff);
- return CValue::by_val(res, base.layout());
+ CValue::by_val(res, base.layout())
}
_ => unreachable!("{:?}({:?}, {:?})", bin_op, in_lhs, in_rhs),
- };
+ }
} else {
let (lhs_ptr, lhs_extra) = in_lhs.load_scalar_pair(fx);
let (rhs_ptr, rhs_extra) = in_rhs.load_scalar_pair(fx);
let ptr_eq = fx.bcx.ins().icmp(IntCC::Equal, lhs_ptr, rhs_ptr);
let ptr_cmp =
- fx.bcx
- .ins()
- .icmp(bin_op_to_intcc(bin_op, false).unwrap(), lhs_ptr, rhs_ptr);
+ fx.bcx.ins().icmp(bin_op_to_intcc(bin_op, false).unwrap(), lhs_ptr, rhs_ptr);
let extra_cmp = fx.bcx.ins().icmp(
bin_op_to_intcc(bin_op, false).unwrap(),
lhs_extra,
_ => panic!("bin_op {:?} on ptr", bin_op),
};
- CValue::by_val(
- fx.bcx.ins().bint(types::I8, res),
- fx.layout_of(fx.tcx.types.bool),
- )
+ CValue::by_val(fx.bcx.ins().bint(types::I8, res), fx.layout_of(fx.tcx.types.bool))
}
}
+
+// In Rust floating point min and max don't propagate NaN. In Cranelift they do however.
+// For this reason it is necessary to use `a.is_nan() ? b : (a >= b ? b : a)` for `minnumf*`
+// and `a.is_nan() ? b : (a <= b ? b : a)` for `maxnumf*`. NaN checks are done by comparing
+// a float against itself. Only in case of NaN is it not equal to itself.
+pub(crate) fn codegen_float_min(fx: &mut FunctionCx<'_, '_, '_>, a: Value, b: Value) -> Value {
+ let a_is_nan = fx.bcx.ins().fcmp(FloatCC::NotEqual, a, a);
+ let a_ge_b = fx.bcx.ins().fcmp(FloatCC::GreaterThanOrEqual, a, b);
+ let temp = fx.bcx.ins().select(a_ge_b, b, a);
+ fx.bcx.ins().select(a_is_nan, b, temp)
+}
+
+pub(crate) fn codegen_float_max(fx: &mut FunctionCx<'_, '_, '_>, a: Value, b: Value) -> Value {
+ let a_is_nan = fx.bcx.ins().fcmp(FloatCC::NotEqual, a, a);
+ let a_le_b = fx.bcx.ins().fcmp(FloatCC::LessThanOrEqual, a, b);
+ let temp = fx.bcx.ins().select(a_le_b, b, a);
+ fx.bcx.ins().select(a_is_nan, b, temp)
+}