// compiler/rustc_const_eval/src/interpret/operator.rs
use std::convert::TryFrom;

use rustc_apfloat::Float;
use rustc_middle::mir;
use rustc_middle::mir::interpret::{InterpResult, Scalar};
use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
use rustc_middle::ty::{self, FloatTy, Ty};
use rustc_target::abi::Abi;

use super::{ImmTy, Immediate, InterpCx, Machine, PlaceTy};

impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    /// Applies the binary operation `op` to the two operands and writes a tuple of the result
    /// and a boolean signifying the potential overflow to the destination.
    ///
    /// `force_overflow_checks` indicates whether overflow checks should be done even when
    /// `tcx.sess.overflow_checks()` is `false`.
    pub fn binop_with_overflow(
        &mut self,
        op: mir::BinOp,
        force_overflow_checks: bool,
        left: &ImmTy<'tcx, M::Provenance>,
        right: &ImmTy<'tcx, M::Provenance>,
        dest: &PlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx> {
        let (val, overflowed, ty) = self.overflowing_binary_op(op, &left, &right)?;
        debug_assert_eq!(
            self.tcx.intern_tup(&[ty, self.tcx.types.bool]),
            dest.layout.ty,
            "type mismatch for result of {:?}",
            op,
        );
        // As per https://github.com/rust-lang/rust/pull/98738, we always return `false` in the 2nd
        // component when overflow checking is disabled.
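        // (Illustrative example: evaluating `i32::MAX + 1` writes `(i32::MIN, true)` when
        // overflow checks are enabled and `(i32::MIN, false)` when they are disabled; the
        // wrapped value is written either way.)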
        let overflowed =
            overflowed && (force_overflow_checks || M::checked_binop_checks_overflow(self));
        // Write the result to `dest`.
        if let Abi::ScalarPair(..) = dest.layout.abi {
            // We can use the optimized path and avoid `place_field` (which might do
            // `force_allocation`).
            let pair = Immediate::ScalarPair(val.into(), Scalar::from_bool(overflowed).into());
            self.write_immediate(pair, dest)?;
        } else {
            assert!(self.tcx.sess.opts.unstable_opts.randomize_layout);
            // With randomized layout, `(int, bool)` might cease to be a `ScalarPair`, so we have to
            // do a component-wise write here. This code path is slower than the above because
            // `place_field` will have to `force_allocate` locals here.
            let val_field = self.place_field(&dest, 0)?;
            self.write_scalar(val, &val_field)?;
            let overflowed_field = self.place_field(&dest, 1)?;
            self.write_scalar(Scalar::from_bool(overflowed), &overflowed_field)?;
        }
        Ok(())
    }

    /// Applies the binary operation `op` to the arguments and writes the result to the
    /// destination.
    pub fn binop_ignore_overflow(
        &mut self,
        op: mir::BinOp,
        left: &ImmTy<'tcx, M::Provenance>,
        right: &ImmTy<'tcx, M::Provenance>,
        dest: &PlaceTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx> {
        let (val, _overflowed, ty) = self.overflowing_binary_op(op, left, right)?;
        assert_eq!(ty, dest.layout.ty, "type mismatch for result of {:?}", op);
        self.write_scalar(val, dest)
    }
}

impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    fn binary_char_op(
        &self,
        bin_op: mir::BinOp,
        l: char,
        r: char,
    ) -> (Scalar<M::Provenance>, bool, Ty<'tcx>) {
        use rustc_middle::mir::BinOp::*;

        let res = match bin_op {
            Eq => l == r,
            Ne => l != r,
            Lt => l < r,
            Le => l <= r,
            Gt => l > r,
            Ge => l >= r,
            _ => span_bug!(self.cur_span(), "Invalid operation on char: {:?}", bin_op),
        };
        (Scalar::from_bool(res), false, self.tcx.types.bool)
    }

    fn binary_bool_op(
        &self,
        bin_op: mir::BinOp,
        l: bool,
        r: bool,
    ) -> (Scalar<M::Provenance>, bool, Ty<'tcx>) {
        use rustc_middle::mir::BinOp::*;

        let res = match bin_op {
            Eq => l == r,
            Ne => l != r,
            Lt => l < r,
            Le => l <= r,
            Gt => l > r,
            Ge => l >= r,
            BitAnd => l & r,
            BitOr => l | r,
            BitXor => l ^ r,
            _ => span_bug!(self.cur_span(), "Invalid operation on bool: {:?}", bin_op),
        };
        (Scalar::from_bool(res), false, self.tcx.types.bool)
    }

    fn binary_float_op<F: Float + Into<Scalar<M::Provenance>>>(
        &self,
        bin_op: mir::BinOp,
        ty: Ty<'tcx>,
        l: F,
        r: F,
    ) -> (Scalar<M::Provenance>, bool, Ty<'tcx>) {
        use rustc_middle::mir::BinOp::*;
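        // Note: the comparisons below should follow the IEEE 754 semantics implemented by
        // `rustc_apfloat`, e.g. `NaN == NaN` and `NaN < x` both evaluate to `false`.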

        let (val, ty) = match bin_op {
            Eq => (Scalar::from_bool(l == r), self.tcx.types.bool),
            Ne => (Scalar::from_bool(l != r), self.tcx.types.bool),
            Lt => (Scalar::from_bool(l < r), self.tcx.types.bool),
            Le => (Scalar::from_bool(l <= r), self.tcx.types.bool),
            Gt => (Scalar::from_bool(l > r), self.tcx.types.bool),
            Ge => (Scalar::from_bool(l >= r), self.tcx.types.bool),
            Add => ((l + r).value.into(), ty),
            Sub => ((l - r).value.into(), ty),
            Mul => ((l * r).value.into(), ty),
            Div => ((l / r).value.into(), ty),
            Rem => ((l % r).value.into(), ty),
            _ => span_bug!(self.cur_span(), "invalid float op: `{:?}`", bin_op),
        };
        (val, false, ty)
    }

    fn binary_int_op(
        &self,
        bin_op: mir::BinOp,
        // passing in raw bits
        l: u128,
        left_layout: TyAndLayout<'tcx>,
        r: u128,
        right_layout: TyAndLayout<'tcx>,
    ) -> InterpResult<'tcx, (Scalar<M::Provenance>, bool, Ty<'tcx>)> {
        use rustc_middle::mir::BinOp::*;

        // Shift ops can have an RHS with a different numeric type.
        if bin_op == Shl || bin_op == Shr {
            let size = u128::from(left_layout.size.bits());
            // Even if `r` is signed, we treat it as if it was unsigned (i.e., we use its
            // zero-extended form). This matches the codegen backend:
            // <https://github.com/rust-lang/rust/blob/c274e4969f058b1c644243181ece9f829efa7594/compiler/rustc_codegen_ssa/src/base.rs#L315-L317>.
            // The overflow check is also ignorant to the sign:
            // <https://github.com/rust-lang/rust/blob/c274e4969f058b1c644243181ece9f829efa7594/compiler/rustc_codegen_ssa/src/mir/rvalue.rs#L728>.
            // This would behave rather strangely if we had integer types of size 256: a shift by
            // -1i8 would actually shift by 255, but that would *not* be considered overflowing. A
            // shift by -1i16 though would be considered overflowing. If we had integers of size
            // 512, then a shift by -1i8 would even produce a different result than one by -1i16:
            // the first shifts by 255, the latter by u16::MAX % 512 = 511. Lucky enough, our
            // integers are maximally 128bits wide, so negative shifts *always* overflow and we have
            // consistent results for the same value represented at different bit widths.
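            // Worked example: with a 32-bit LHS, a shift amount of -1i8 arrives here
            // zero-extended to 255; 255 >= 32, so the operation counts as overflowing, and the
            // shift actually performed uses 255 % 32 = 31.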
            assert!(size <= 128);
            let overflow = r >= size;
            // The shift offset is implicitly masked to the type size, to make sure this operation
            // is always defined. This is the one MIR operator that does *not* directly map to a
            // single LLVM operation. See
            // <https://github.com/rust-lang/rust/blob/c274e4969f058b1c644243181ece9f829efa7594/compiler/rustc_codegen_ssa/src/common.rs#L131-L158>
            // for the corresponding truncation in our codegen backends.
            let r = r % size;
            let r = u32::try_from(r).unwrap(); // we masked so this will always fit
            let result = if left_layout.abi.is_signed() {
                let l = self.sign_extend(l, left_layout) as i128;
                let result = match bin_op {
                    Shl => l.checked_shl(r).unwrap(),
                    Shr => l.checked_shr(r).unwrap(),
                    _ => bug!(),
                };
                result as u128
            } else {
                match bin_op {
                    Shl => l.checked_shl(r).unwrap(),
                    Shr => l.checked_shr(r).unwrap(),
                    _ => bug!(),
                }
            };
            let truncated = self.truncate(result, left_layout);
            return Ok((Scalar::from_uint(truncated, left_layout.size), overflow, left_layout.ty));
        }

        // For the remaining ops, the types must be the same on both sides
        if left_layout.ty != right_layout.ty {
            span_bug!(
                self.cur_span(),
                "invalid asymmetric binary op {:?}: {:?} ({:?}), {:?} ({:?})",
                bin_op,
                l,
                left_layout.ty,
                r,
                right_layout.ty,
            )
        }

        let size = left_layout.size;

        // Operations that need special treatment for signed integers
        if left_layout.abi.is_signed() {
            let op: Option<fn(&i128, &i128) -> bool> = match bin_op {
                Lt => Some(i128::lt),
                Le => Some(i128::le),
                Gt => Some(i128::gt),
                Ge => Some(i128::ge),
                _ => None,
            };
            if let Some(op) = op {
                let l = self.sign_extend(l, left_layout) as i128;
                let r = self.sign_extend(r, right_layout) as i128;
                return Ok((Scalar::from_bool(op(&l, &r)), false, self.tcx.types.bool));
            }
            let op: Option<fn(i128, i128) -> (i128, bool)> = match bin_op {
                Div if r == 0 => throw_ub!(DivisionByZero),
                Rem if r == 0 => throw_ub!(RemainderByZero),
                Div => Some(i128::overflowing_div),
                Rem => Some(i128::overflowing_rem),
                Add => Some(i128::overflowing_add),
                Sub => Some(i128::overflowing_sub),
                Mul => Some(i128::overflowing_mul),
                _ => None,
            };
            if let Some(op) = op {
                let l = self.sign_extend(l, left_layout) as i128;
                let r = self.sign_extend(r, right_layout) as i128;

                // We need a special check for overflowing Rem and Div since they are *UB*
                // on overflow, which can happen with "int_min $OP -1".
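                // For example, `i8::MIN / -1` raises `DivisionOverflow` here, while `i8::MIN * -1`
                // below merely wraps back to `i8::MIN` with the overflow flag set.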
                if matches!(bin_op, Rem | Div) {
                    if l == size.signed_int_min() && r == -1 {
                        if bin_op == Rem {
                            throw_ub!(RemainderOverflow)
                        } else {
                            throw_ub!(DivisionOverflow)
                        }
                    }
                }

                let (result, oflo) = op(l, r);
                // This may be out-of-bounds for the result type, so we have to truncate ourselves.
                // If that truncation loses any information, we have an overflow.
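                // E.g. `i8::MAX + 1` computes 128 in `i128`; truncating to 8 bits gives 0x80, and
                // sign-extending 0x80 yields -128 != 128, so this reports overflow and the value
                // written is `i8::MIN`.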
                let result = result as u128;
                let truncated = self.truncate(result, left_layout);
                return Ok((
                    Scalar::from_uint(truncated, size),
                    oflo || self.sign_extend(truncated, left_layout) != result,
                    left_layout.ty,
                ));
            }
        }

        let (val, ty) = match bin_op {
            Eq => (Scalar::from_bool(l == r), self.tcx.types.bool),
            Ne => (Scalar::from_bool(l != r), self.tcx.types.bool),

            Lt => (Scalar::from_bool(l < r), self.tcx.types.bool),
            Le => (Scalar::from_bool(l <= r), self.tcx.types.bool),
            Gt => (Scalar::from_bool(l > r), self.tcx.types.bool),
            Ge => (Scalar::from_bool(l >= r), self.tcx.types.bool),

            BitOr => (Scalar::from_uint(l | r, size), left_layout.ty),
            BitAnd => (Scalar::from_uint(l & r, size), left_layout.ty),
            BitXor => (Scalar::from_uint(l ^ r, size), left_layout.ty),

            Add | Sub | Mul | Rem | Div => {
                assert!(!left_layout.abi.is_signed());
                let op: fn(u128, u128) -> (u128, bool) = match bin_op {
                    Add => u128::overflowing_add,
                    Sub => u128::overflowing_sub,
                    Mul => u128::overflowing_mul,
                    Div if r == 0 => throw_ub!(DivisionByZero),
                    Rem if r == 0 => throw_ub!(RemainderByZero),
                    Div => u128::overflowing_div,
                    Rem => u128::overflowing_rem,
                    _ => bug!(),
                };
                let (result, oflo) = op(l, r);
                // Truncate to target type.
                // If that truncation loses any information, we have an overflow.
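                // E.g. `200u8 + 100` computes 300, which truncates to 44; 44 != 300, so the
                // overflow flag is set.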
                let truncated = self.truncate(result, left_layout);
                return Ok((
                    Scalar::from_uint(truncated, size),
                    oflo || truncated != result,
                    left_layout.ty,
                ));
            }

            _ => span_bug!(
                self.cur_span(),
                "invalid binary op {:?}: {:?}, {:?} (both {:?})",
                bin_op,
                l,
                r,
                right_layout.ty,
            ),
        };

        Ok((val, false, ty))
    }

    /// Returns the result of the specified operation, whether it overflowed, and
    /// the result type.
    pub fn overflowing_binary_op(
        &self,
        bin_op: mir::BinOp,
        left: &ImmTy<'tcx, M::Provenance>,
        right: &ImmTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, (Scalar<M::Provenance>, bool, Ty<'tcx>)> {
        trace!(
            "Running binary op {:?}: {:?} ({:?}), {:?} ({:?})",
            bin_op,
            *left,
            left.layout.ty,
            *right,
            right.layout.ty
        );

        match left.layout.ty.kind() {
            ty::Char => {
                assert_eq!(left.layout.ty, right.layout.ty);
                let left = left.to_scalar()?;
                let right = right.to_scalar()?;
                Ok(self.binary_char_op(bin_op, left.to_char()?, right.to_char()?))
            }
            ty::Bool => {
                assert_eq!(left.layout.ty, right.layout.ty);
                let left = left.to_scalar()?;
                let right = right.to_scalar()?;
                Ok(self.binary_bool_op(bin_op, left.to_bool()?, right.to_bool()?))
            }
            ty::Float(fty) => {
                assert_eq!(left.layout.ty, right.layout.ty);
                let ty = left.layout.ty;
                let left = left.to_scalar()?;
                let right = right.to_scalar()?;
                Ok(match fty {
                    FloatTy::F32 => {
                        self.binary_float_op(bin_op, ty, left.to_f32()?, right.to_f32()?)
                    }
                    FloatTy::F64 => {
                        self.binary_float_op(bin_op, ty, left.to_f64()?, right.to_f64()?)
                    }
                })
            }
            _ if left.layout.ty.is_integral() => {
                // the RHS type can be different, e.g. for shifts -- but it has to be integral, too
                assert!(
                    right.layout.ty.is_integral(),
                    "Unexpected types for BinOp: {:?} {:?} {:?}",
                    left.layout.ty,
                    bin_op,
                    right.layout.ty
                );

                let l = left.to_scalar()?.to_bits(left.layout.size)?;
                let r = right.to_scalar()?.to_bits(right.layout.size)?;
                self.binary_int_op(bin_op, l, left.layout, r, right.layout)
            }
            _ if left.layout.ty.is_any_ptr() => {
                // The RHS type must be a `pointer` *or an integer type* (for `Offset`).
                // (Even when both sides are pointers, their type might differ, see issue #91636)
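                // Both pointer comparisons and `Offset` (pointer LHS, integer RHS) are delegated
                // to the machine hook below, so e.g. Miri can apply its own rules here.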
                assert!(
                    right.layout.ty.is_any_ptr() || right.layout.ty.is_integral(),
                    "Unexpected types for BinOp: {:?} {:?} {:?}",
                    left.layout.ty,
                    bin_op,
                    right.layout.ty
                );

                M::binary_ptr_op(self, bin_op, left, right)
            }
            _ => span_bug!(
                self.cur_span(),
                "Invalid MIR: bad LHS type for binop: {:?}",
                left.layout.ty
            ),
        }
    }

    /// Typed version of `overflowing_binary_op`, returning an `ImmTy`. Also ignores overflows.
    #[inline]
    pub fn binary_op(
        &self,
        bin_op: mir::BinOp,
        left: &ImmTy<'tcx, M::Provenance>,
        right: &ImmTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
        let (val, _overflow, ty) = self.overflowing_binary_op(bin_op, left, right)?;
        Ok(ImmTy::from_scalar(val, self.layout_of(ty)?))
    }

    /// Returns the result of the specified operation, whether it overflowed, and
    /// the result type.
    pub fn overflowing_unary_op(
        &self,
        un_op: mir::UnOp,
        val: &ImmTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, (Scalar<M::Provenance>, bool, Ty<'tcx>)> {
        use rustc_middle::mir::UnOp::*;

        let layout = val.layout;
        let val = val.to_scalar()?;
        trace!("Running unary op {:?}: {:?} ({:?})", un_op, val, layout.ty);

        match layout.ty.kind() {
            ty::Bool => {
                let val = val.to_bool()?;
                let res = match un_op {
                    Not => !val,
                    _ => span_bug!(self.cur_span(), "Invalid bool op {:?}", un_op),
                };
                Ok((Scalar::from_bool(res), false, self.tcx.types.bool))
            }
            ty::Float(fty) => {
                let res = match (un_op, fty) {
                    (Neg, FloatTy::F32) => Scalar::from_f32(-val.to_f32()?),
                    (Neg, FloatTy::F64) => Scalar::from_f64(-val.to_f64()?),
                    _ => span_bug!(self.cur_span(), "Invalid float op {:?}", un_op),
                };
                Ok((res, false, layout.ty))
            }
            _ => {
                assert!(layout.ty.is_integral());
                let val = val.to_bits(layout.size)?;
                let (res, overflow) = match un_op {
                    Not => (self.truncate(!val, layout), false), // bitwise negation, then truncate
                    Neg => {
                        // arithmetic negation
                        assert!(layout.abi.is_signed());
                        let val = self.sign_extend(val, layout) as i128;
                        let (res, overflow) = val.overflowing_neg();
                        let res = res as u128;
                        // Truncate to target type.
                        // If that truncation loses any information, we have an overflow.
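                        // E.g. negating `i8::MIN`: -(-128) is 128, which truncates back to 0x80
                        // (-128); sign-extending gives -128 != 128, so the overflow flag is set
                        // and the value stays `i8::MIN`.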
                        let truncated = self.truncate(res, layout);
                        (truncated, overflow || self.sign_extend(truncated, layout) != res)
                    }
                };
                Ok((Scalar::from_uint(res, layout.size), overflow, layout.ty))
            }
        }
    }

    pub fn unary_op(
        &self,
        un_op: mir::UnOp,
        val: &ImmTy<'tcx, M::Provenance>,
    ) -> InterpResult<'tcx, ImmTy<'tcx, M::Provenance>> {
        let (val, _overflow, ty) = self.overflowing_unary_op(un_op, val)?;
        Ok(ImmTy::from_scalar(val, self.layout_of(ty)?))
    }
}