1 use std::convert::TryFrom;
3 use rustc_apfloat::Float;
5 use rustc_middle::mir::interpret::{InterpResult, Scalar};
6 use rustc_middle::ty::{self, layout::TyAndLayout, FloatTy, Ty};
7 use rustc_target::abi::LayoutOf;
9 use super::{ImmTy, Immediate, InterpCx, Machine, PlaceTy};
11 impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// NOTE(review): this file is a line-numbered extract with gaps — the embedded
// numbers jump (14 -> 17, 21 -> 23, ...), so the `&mut self`/`op: mir::BinOp`
// parameter lines and the assert_eq! wrapper around lines 23/25 are elided.
// Comments describe only what the visible lines show.
12 /// Applies the binary operation `op` to the two operands and writes a tuple of the result
13 /// and a boolean signifying the potential overflow to the destination.
14 pub fn binop_with_overflow(
17 left: &ImmTy<'tcx, M::PointerTag>,
18 right: &ImmTy<'tcx, M::PointerTag>,
19 dest: &PlaceTy<'tcx, M::PointerTag>,
20 ) -> InterpResult<'tcx> {
// One call computes the raw value, the overflow flag, and the result type.
21 let (val, overflowed, ty) = self.overflowing_binary_op(op, &left, &right)?;
// These two lines are arguments of an elided check (presumably an assert_eq!
// that `dest.layout.ty` is the tuple `(ty, bool)` — TODO confirm against the
// unelided source).
23 self.tcx.intern_tup(&[ty, self.tcx.types.bool]),
25 "type mismatch for result of {:?}",
// Pack (result, overflow-flag) as a scalar pair and write it to `dest`.
28 let val = Immediate::ScalarPair(val.into(), Scalar::from_bool(overflowed).into());
29 self.write_immediate(val, dest)
32 /// Applies the binary operation `op` to the arguments and writes the result to the
// (continuation of the doc comment — internal line 33 — is elided here;
// presumably "... destination" — TODO confirm.)
34 pub fn binop_ignore_overflow(
37 left: &ImmTy<'tcx, M::PointerTag>,
38 right: &ImmTy<'tcx, M::PointerTag>,
39 dest: &PlaceTy<'tcx, M::PointerTag>,
40 ) -> InterpResult<'tcx> {
// Same computation as `binop_with_overflow`, but the overflow flag is
// deliberately discarded (`_overflowed`) and only the scalar is written.
41 let (val, _overflowed, ty) = self.overflowing_binary_op(op, left, right)?;
// Sanity-check that MIR gave us a destination of the right type.
42 assert_eq!(ty, dest.layout.ty, "type mismatch for result of {:?}", op);
43 self.write_scalar(val, dest)
47 impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
// Tail of a `char` binary-op helper; the `fn` header and its `char` operands
// (internal lines 48-52) are elided, as are the comparison match arms
// (57-62, presumably Eq/Ne/Lt/Le/Gt/Ge on the two chars — TODO confirm).
53 ) -> (Scalar<M::PointerTag>, bool, Ty<'tcx>) {
54 use rustc_middle::mir::BinOp::*;
56 let res = match bin_op {
// Anything other than the (elided) comparison ops is invalid MIR on chars.
63 _ => span_bug!(self.cur_span(), "Invalid operation on char: {:?}", bin_op),
// Comparisons produce a bool and can never overflow (flag is `false`).
65 (Scalar::from_bool(res), false, self.tcx.types.bool)
// Tail of a `bool` binary-op helper; the `fn` header (internal lines 67-72)
// and the match arms (77-85, presumably comparisons plus BitAnd/BitOr/BitXor
// on the two bools — TODO confirm) are elided.
73 ) -> (Scalar<M::PointerTag>, bool, Ty<'tcx>) {
74 use rustc_middle::mir::BinOp::*;
76 let res = match bin_op {
// Any other operator on bools is invalid MIR.
86 _ => span_bug!(self.cur_span(), "Invalid operation on bool: {:?}", bin_op),
// Bool ops always yield a bool and never overflow.
88 (Scalar::from_bool(res), false, self.tcx.types.bool)
// Float binary ops, generic over the apfloat type `F` (used below for both
// f32 and f64). The parameter lines (internal 92-96: `bin_op`, a `ty`
// carrying the concrete float type, and the two operands `l`/`r`) and the
// final `(val, false, ty)`-style return (113+) are elided — TODO confirm.
91 fn binary_float_op<F: Float + Into<Scalar<M::PointerTag>>>(
97 ) -> (Scalar<M::PointerTag>, bool, Ty<'tcx>) {
98 use rustc_middle::mir::BinOp::*;
100 let (val, ty) = match bin_op {
// Comparisons produce a bool ...
101 Eq => (Scalar::from_bool(l == r), self.tcx.types.bool),
102 Ne => (Scalar::from_bool(l != r), self.tcx.types.bool),
103 Lt => (Scalar::from_bool(l < r), self.tcx.types.bool),
104 Le => (Scalar::from_bool(l <= r), self.tcx.types.bool),
105 Gt => (Scalar::from_bool(l > r), self.tcx.types.bool),
106 Ge => (Scalar::from_bool(l >= r), self.tcx.types.bool),
// ... arithmetic produces a value of the float type `ty`. The operators are
// rustc_apfloat's (soft-float): `.value` extracts the result and discards
// the apfloat status flags (inexact, etc.).
107 Add => ((l + r).value.into(), ty),
108 Sub => ((l - r).value.into(), ty),
109 Mul => ((l * r).value.into(), ty),
110 Div => ((l / r).value.into(), ty),
111 Rem => ((l % r).value.into(), ty),
112 _ => span_bug!(self.cur_span(), "invalid float op: `{:?}`", bin_op),
// Integer binary ops on raw bit representations (`l`/`r` as u128). The `fn`
// header (internal lines ~116-119), several assert/bug! wrappers, the
// `Ok((...))` wrappers around lines 211-212 and 248-249, and the closing
// braces are elided from this extract — comments below describe only the
// visible lines. Returns (raw result bits, overflowed, result type).
120 // passing in raw bits
122 left_layout: TyAndLayout<'tcx>,
124 right_layout: TyAndLayout<'tcx>,
125 ) -> InterpResult<'tcx, (Scalar<M::PointerTag>, bool, Ty<'tcx>)> {
126 use rustc_middle::mir::BinOp::*;
128 // Shift ops can have an RHS with a different numeric type.
129 if bin_op == Shl || bin_op == Shr {
130 let signed = left_layout.abi.is_signed();
131 let size = u128::from(left_layout.size.bits());
// A shift amount >= the bit width is reported as overflow, but the shift is
// still performed on the masked amount (wrapping semantics).
132 let overflow = r >= size;
133 let r = r % size; // mask to type size
134 let r = u32::try_from(r).unwrap(); // we masked so this will always fit
135 let result = if signed {
// Signed shift: sign-extend the LHS first so Shr is an arithmetic shift.
136 let l = self.sign_extend(l, left_layout) as i128;
137 let result = match bin_op {
// checked_shl/shr only fail for r >= 128; r < `size` <= 128 here, so unwrap
// is safe for any type of at most 128 bits.
138 Shl => l.checked_shl(r).unwrap(),
139 Shr => l.checked_shr(r).unwrap(),
140 _ => bug!("it has already been checked that this is a shift op"),
// (The `} else {` branch and its u128 match header are elided; these are the
// unsigned-shift arms.)
145 Shl => l.checked_shl(r).unwrap(),
146 Shr => l.checked_shr(r).unwrap(),
147 _ => bug!("it has already been checked that this is a shift op"),
// Shift result keeps the LHS type; truncate back to its width.
150 let truncated = self.truncate(result, left_layout);
151 return Ok((Scalar::from_uint(truncated, left_layout.size), overflow, left_layout.ty));
154 // For the remaining ops, the types must be the same on both sides
155 if left_layout.ty != right_layout.ty {
// (Elided: presumably a span_bug! using this format string — TODO confirm.)
158 "invalid asymmetric binary op {:?}: {:?} ({:?}), {:?} ({:?})",
167 let size = left_layout.size;
169 // Operations that need special treatment for signed integers
170 if left_layout.abi.is_signed() {
// First try signed comparisons, dispatched through fn pointers.
171 let op: Option<fn(&i128, &i128) -> bool> = match bin_op {
172 Lt => Some(i128::lt),
173 Le => Some(i128::le),
174 Gt => Some(i128::gt),
175 Ge => Some(i128::ge),
// (Catch-all `_ => None` arm elided.)
178 if let Some(op) = op {
// Sign-extend both sides so the i128 comparison matches the source type's.
179 let l = self.sign_extend(l, left_layout) as i128;
180 let r = self.sign_extend(r, right_layout) as i128;
181 return Ok((Scalar::from_bool(op(&l, &r)), false, self.tcx.types.bool));
// Then signed arithmetic, again via fn pointers; div/rem by zero is UB and
// is reported before picking an operation.
183 let op: Option<fn(i128, i128) -> (i128, bool)> = match bin_op {
184 Div if r == 0 => throw_ub!(DivisionByZero),
185 Rem if r == 0 => throw_ub!(RemainderByZero),
186 Div => Some(i128::overflowing_div),
187 Rem => Some(i128::overflowing_rem),
188 Add => Some(i128::overflowing_add),
189 Sub => Some(i128::overflowing_sub),
190 Mul => Some(i128::overflowing_mul),
193 if let Some(op) = op {
194 let r = self.sign_extend(r, right_layout) as i128;
195 // We need a special check for overflowing remainder:
196 // "int_min % -1" overflows and returns 0, but after casting things to a larger int
197 // type it does *not* overflow nor give an unrepresentable result!
// (An elided guard above line 199 presumably restricts this to Rem — TODO
// confirm.) `1 << (size.bits() - 1)` is the raw bit pattern of int_min.
199 if r == -1 && l == (1 << (size.bits() - 1)) {
200 return Ok((Scalar::from_int(0, size), true, left_layout.ty));
203 let l = self.sign_extend(l, left_layout) as i128;
205 let (result, oflo) = op(l, r);
206 // This may be out-of-bounds for the result type, so we have to truncate ourselves.
207 // If that truncation loses any information, we have an overflow.
208 let result = result as u128;
209 let truncated = self.truncate(result, left_layout);
// (Elided `Ok((` wrapper.) Overflow is the i128 op overflowing OR the
// truncation changing the value when sign-extended back.
211 Scalar::from_uint(truncated, size),
212 oflo || self.sign_extend(truncated, left_layout) != result,
// Unsigned (or sign-agnostic) path: comparisons, bitwise ops, arithmetic.
217 let (val, ty) = match bin_op {
218 Eq => (Scalar::from_bool(l == r), self.tcx.types.bool),
219 Ne => (Scalar::from_bool(l != r), self.tcx.types.bool),
// These u128 comparisons are only correct unsigned; signed comparisons
// returned early above.
222 Lt => (Scalar::from_bool(l < r), self.tcx.types.bool),
223 Le => (Scalar::from_bool(l <= r), self.tcx.types.bool),
224 Gt => (Scalar::from_bool(l > r), self.tcx.types.bool),
225 Ge => (Scalar::from_bool(l >= r), self.tcx.types.bool),
// Bitwise ops are sign-agnostic on the raw bits.
227 BitOr => (Scalar::from_uint(l | r, size), left_layout.ty),
228 BitAnd => (Scalar::from_uint(l & r, size), left_layout.ty),
229 BitXor => (Scalar::from_uint(l ^ r, size), left_layout.ty),
231 Add | Sub | Mul | Rem | Div => {
// Signed arithmetic was handled (and returned) above.
232 assert!(!left_layout.abi.is_signed());
233 let op: fn(u128, u128) -> (u128, bool) = match bin_op {
234 Add => u128::overflowing_add,
235 Sub => u128::overflowing_sub,
236 Mul => u128::overflowing_mul,
237 Div if r == 0 => throw_ub!(DivisionByZero),
238 Rem if r == 0 => throw_ub!(RemainderByZero),
239 Div => u128::overflowing_div,
240 Rem => u128::overflowing_rem,
243 let (result, oflo) = op(l, r);
244 // Truncate to target type.
245 // If that truncation loses any information, we have an overflow.
246 let truncated = self.truncate(result, left_layout);
// (Elided `return Ok((` wrapper around these two lines.)
248 Scalar::from_uint(truncated, size),
249 oflo || truncated != result,
// Fallback: any other operator on integers is invalid MIR (elided span_bug!
// presumably uses this format string).
256 "invalid binary op {:?}: {:?}, {:?} (both {:?})",
267 /// Returns the result of the specified operation, whether it overflowed, and
// (Doc continuation — internal line 268 — elided; presumably "... the type of
// the result" — TODO confirm.) Dispatches on the LHS type to the specialized
// helpers; the match-arm patterns themselves (ty::Char, ty::Bool,
// ty::Float(..), the trailing bug! arm, and closing braces) are elided.
269 pub fn overflowing_binary_op(
272 left: &ImmTy<'tcx, M::PointerTag>,
273 right: &ImmTy<'tcx, M::PointerTag>,
274 ) -> InterpResult<'tcx, (Scalar<M::PointerTag>, bool, Ty<'tcx>)> {
// (Part of an elided trace! call.)
276 "Running binary op {:?}: {:?} ({:?}), {:?} ({:?})",
284 match left.layout.ty.kind() {
// char arm: both sides must be the same type, then compare as chars.
286 assert_eq!(left.layout.ty, right.layout.ty);
287 let left = left.to_scalar()?;
288 let right = right.to_scalar()?;
289 Ok(self.binary_char_op(bin_op, left.to_char()?, right.to_char()?))
// bool arm.
292 assert_eq!(left.layout.ty, right.layout.ty);
293 let left = left.to_scalar()?;
294 let right = right.to_scalar()?;
295 Ok(self.binary_bool_op(bin_op, left.to_bool()?, right.to_bool()?))
// float arm: the f32/f64 split (elided match on the FloatTy) picks the
// concrete conversion; `ty` is forwarded so the helper can type its result.
298 assert_eq!(left.layout.ty, right.layout.ty);
299 let ty = left.layout.ty;
300 let left = left.to_scalar()?;
301 let right = right.to_scalar()?;
304 self.binary_float_op(bin_op, ty, left.to_f32()?, right.to_f32()?)
307 self.binary_float_op(bin_op, ty, left.to_f64()?, right.to_f64()?)
311 _ if left.layout.ty.is_integral() => {
312 // the RHS type can be different, e.g. for shifts -- but it has to be integral, too
// (These two lines belong to an elided assert!.)
314 right.layout.ty.is_integral(),
315 "Unexpected types for BinOp: {:?} {:?} {:?}",
// Integers are handled on their raw bits; pointers must be forced to bits
// here (no pointer arithmetic on this path).
321 let l = self.force_bits(left.to_scalar()?, left.layout.size)?;
322 let r = self.force_bits(right.to_scalar()?, right.layout.size)?;
323 self.binary_int_op(bin_op, l, left.layout, r, right.layout)
325 _ if left.layout.ty.is_any_ptr() => {
326 // The RHS type must be the same *or an integer type* (for `Offset`).
// (These two lines belong to an elided assert!.)
328 right.layout.ty == left.layout.ty || right.layout.ty.is_integral(),
329 "Unexpected types for BinOp: {:?} {:?} {:?}",
// Pointer ops are machine-specific (e.g. Miri vs. CTFE differ), so they are
// delegated to the Machine hook.
335 M::binary_ptr_op(self, bin_op, left, right)
// (Part of an elided span_bug! for any other LHS type.)
339 "Invalid MIR: bad LHS type for binop: {:?}",
345 /// Typed version of `overflowing_binary_op`, returning an `ImmTy`. Also ignores overflows.
// (The `pub fn binary_op(` header and its `&mut self`/`bin_op` parameters —
// internal lines 346-349 — are elided from this extract.)
350 left: &ImmTy<'tcx, M::PointerTag>,
351 right: &ImmTy<'tcx, M::PointerTag>,
352 ) -> InterpResult<'tcx, ImmTy<'tcx, M::PointerTag>> {
// Thin wrapper: drop the overflow flag and re-attach the layout for `ty`.
353 let (val, _overflow, ty) = self.overflowing_binary_op(bin_op, left, right)?;
354 Ok(ImmTy::from_scalar(val, self.layout_of(ty)?))
357 /// Returns the result of the specified operation, whether it overflowed, and
// (Doc continuation — internal line 358 — elided; presumably "... the type of
// the result" — TODO confirm.) Unary-op counterpart of
// `overflowing_binary_op`; the match-arm patterns (ty::Bool, ty::Float(fty),
// the integral fallthrough header, `Neg =>`, and closing braces) are elided.
359 pub fn overflowing_unary_op(
362 val: &ImmTy<'tcx, M::PointerTag>,
363 ) -> InterpResult<'tcx, (Scalar<M::PointerTag>, bool, Ty<'tcx>)> {
364 use rustc_middle::mir::UnOp::*;
// Keep the layout before consuming `val` as a scalar.
366 let layout = val.layout;
367 let val = val.to_scalar()?;
368 trace!("Running unary op {:?}: {:?} ({:?})", un_op, val, layout.ty);
370 match layout.ty.kind() {
// bool arm: only `Not` is valid (its arm is elided; internal line 374).
372 let val = val.to_bool()?;
373 let res = match un_op {
375 _ => span_bug!(self.cur_span(), "Invalid bool op {:?}", un_op),
377 Ok((Scalar::from_bool(res), false, self.tcx.types.bool))
// float arm: only `Neg`; float negation can never overflow.
380 let res = match (un_op, fty) {
381 (Neg, FloatTy::F32) => Scalar::from_f32(-val.to_f32()?),
382 (Neg, FloatTy::F64) => Scalar::from_f64(-val.to_f64()?),
383 _ => span_bug!(self.cur_span(), "Invalid float op {:?}", un_op),
385 Ok((res, false, layout.ty))
// fallthrough arm: must be an integer; work on the raw bits.
388 assert!(layout.ty.is_integral());
389 let val = self.force_bits(val, layout.size)?;
390 let (res, overflow) = match un_op {
391 Not => (self.truncate(!val, layout), false), // bitwise negation, then truncate
// (Elided `Neg => {` arm header.)
393 // arithmetic negation
394 assert!(layout.abi.is_signed());
395 let val = self.sign_extend(val, layout) as i128;
396 let (res, overflow) = val.overflowing_neg();
397 let res = res as u128;
398 // Truncate to target type.
399 // If that truncation loses any information, we have an overflow.
400 let truncated = self.truncate(res, layout);
// Overflow if i128 negation overflowed OR the truncated value no longer
// round-trips through sign extension (e.g. negating int_min).
401 (truncated, overflow || self.sign_extend(truncated, layout) != res)
404 Ok((Scalar::from_uint(res, layout.size), overflow, layout.ty))
// Tail of the typed unary-op wrapper (doc comment and `pub fn unary_op(`
// header — internal lines 405-411 — are elided; presumably the unary
// counterpart of `binary_op`, ignoring overflows — TODO confirm).
412 val: &ImmTy<'tcx, M::PointerTag>,
413 ) -> InterpResult<'tcx, ImmTy<'tcx, M::PointerTag>> {
// Drop the overflow flag and re-attach the layout for the result type.
414 let (val, _overflow, ty) = self.overflowing_unary_op(un_op, val)?;
415 Ok(ImmTy::from_scalar(val, self.layout_of(ty)?))