// compiler/rustc_const_eval/src/interpret/operator.rs
1 use std::convert::TryFrom;
2
3 use rustc_apfloat::Float;
4 use rustc_middle::mir;
5 use rustc_middle::mir::interpret::{InterpResult, Scalar};
6 use rustc_middle::ty::layout::{LayoutOf, TyAndLayout};
7 use rustc_middle::ty::{self, FloatTy, Ty};
8
9 use super::{ImmTy, Immediate, InterpCx, Machine, PlaceTy};
10
11 impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
12     /// Applies the binary operation `op` to the two operands and writes a tuple of the result
13     /// and a boolean signifying the potential overflow to the destination.
14     pub fn binop_with_overflow(
15         &mut self,
16         op: mir::BinOp,
17         left: &ImmTy<'tcx, M::PointerTag>,
18         right: &ImmTy<'tcx, M::PointerTag>,
19         dest: &PlaceTy<'tcx, M::PointerTag>,
20     ) -> InterpResult<'tcx> {
21         let (val, overflowed, ty) = self.overflowing_binary_op(op, &left, &right)?;
22         debug_assert_eq!(
23             self.tcx.intern_tup(&[ty, self.tcx.types.bool]),
24             dest.layout.ty,
25             "type mismatch for result of {:?}",
26             op,
27         );
28         let val = Immediate::ScalarPair(val.into(), Scalar::from_bool(overflowed).into());
29         self.write_immediate(val, dest)
30     }
31
32     /// Applies the binary operation `op` to the arguments and writes the result to the
33     /// destination.
34     pub fn binop_ignore_overflow(
35         &mut self,
36         op: mir::BinOp,
37         left: &ImmTy<'tcx, M::PointerTag>,
38         right: &ImmTy<'tcx, M::PointerTag>,
39         dest: &PlaceTy<'tcx, M::PointerTag>,
40     ) -> InterpResult<'tcx> {
41         let (val, _overflowed, ty) = self.overflowing_binary_op(op, left, right)?;
42         assert_eq!(ty, dest.layout.ty, "type mismatch for result of {:?}", op);
43         self.write_scalar(val, dest)
44     }
45 }
46
47 impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
48     fn binary_char_op(
49         &self,
50         bin_op: mir::BinOp,
51         l: char,
52         r: char,
53     ) -> (Scalar<M::PointerTag>, bool, Ty<'tcx>) {
54         use rustc_middle::mir::BinOp::*;
55
56         let res = match bin_op {
57             Eq => l == r,
58             Ne => l != r,
59             Lt => l < r,
60             Le => l <= r,
61             Gt => l > r,
62             Ge => l >= r,
63             _ => span_bug!(self.cur_span(), "Invalid operation on char: {:?}", bin_op),
64         };
65         (Scalar::from_bool(res), false, self.tcx.types.bool)
66     }
67
68     fn binary_bool_op(
69         &self,
70         bin_op: mir::BinOp,
71         l: bool,
72         r: bool,
73     ) -> (Scalar<M::PointerTag>, bool, Ty<'tcx>) {
74         use rustc_middle::mir::BinOp::*;
75
76         let res = match bin_op {
77             Eq => l == r,
78             Ne => l != r,
79             Lt => l < r,
80             Le => l <= r,
81             Gt => l > r,
82             Ge => l >= r,
83             BitAnd => l & r,
84             BitOr => l | r,
85             BitXor => l ^ r,
86             _ => span_bug!(self.cur_span(), "Invalid operation on bool: {:?}", bin_op),
87         };
88         (Scalar::from_bool(res), false, self.tcx.types.bool)
89     }
90
91     fn binary_float_op<F: Float + Into<Scalar<M::PointerTag>>>(
92         &self,
93         bin_op: mir::BinOp,
94         ty: Ty<'tcx>,
95         l: F,
96         r: F,
97     ) -> (Scalar<M::PointerTag>, bool, Ty<'tcx>) {
98         use rustc_middle::mir::BinOp::*;
99
100         let (val, ty) = match bin_op {
101             Eq => (Scalar::from_bool(l == r), self.tcx.types.bool),
102             Ne => (Scalar::from_bool(l != r), self.tcx.types.bool),
103             Lt => (Scalar::from_bool(l < r), self.tcx.types.bool),
104             Le => (Scalar::from_bool(l <= r), self.tcx.types.bool),
105             Gt => (Scalar::from_bool(l > r), self.tcx.types.bool),
106             Ge => (Scalar::from_bool(l >= r), self.tcx.types.bool),
107             Add => ((l + r).value.into(), ty),
108             Sub => ((l - r).value.into(), ty),
109             Mul => ((l * r).value.into(), ty),
110             Div => ((l / r).value.into(), ty),
111             Rem => ((l % r).value.into(), ty),
112             _ => span_bug!(self.cur_span(), "invalid float op: `{:?}`", bin_op),
113         };
114         (val, false, ty)
115     }
116
    /// Evaluates a binary operation on two integer operands given as raw bits plus
    /// their layouts. Returns the result scalar, whether it overflowed, and the
    /// result type.
    fn binary_int_op(
        &self,
        bin_op: mir::BinOp,
        // passing in raw bits
        l: u128,
        left_layout: TyAndLayout<'tcx>,
        r: u128,
        right_layout: TyAndLayout<'tcx>,
    ) -> InterpResult<'tcx, (Scalar<M::PointerTag>, bool, Ty<'tcx>)> {
        use rustc_middle::mir::BinOp::*;

        // Shift ops can have an RHS with a different numeric type.
        if bin_op == Shl || bin_op == Shr {
            let signed = left_layout.abi.is_signed();
            let size = u128::from(left_layout.size.bits());
            // Overflow is reported (the operation still completes) when the shift
            // amount is at least the bit width of the LHS type.
            let overflow = r >= size;
            // The shift offset is implicitly masked to the type size, to make sure this operation
            // is always defined. This is the one MIR operator that does *not* directly map to a
            // single LLVM operation. See
            // <https://github.com/rust-lang/rust/blob/a3b9405ae7bb6ab4e8103b414e75c44598a10fd2/compiler/rustc_codegen_ssa/src/common.rs#L131-L158>
            // for the corresponding truncation in our codegen backends.
            let r = r % size;
            let r = u32::try_from(r).unwrap(); // we masked so this will always fit
            let result = if signed {
                // Sign-extend the raw bits so `>>` is an arithmetic shift.
                let l = self.sign_extend(l, left_layout) as i128;
                let result = match bin_op {
                    // `checked_*` cannot fail here: `r < 128` after the masking above.
                    Shl => l.checked_shl(r).unwrap(),
                    Shr => l.checked_shr(r).unwrap(),
                    _ => bug!("it has already been checked that this is a shift op"),
                };
                result as u128
            } else {
                match bin_op {
                    Shl => l.checked_shl(r).unwrap(),
                    Shr => l.checked_shr(r).unwrap(),
                    _ => bug!("it has already been checked that this is a shift op"),
                }
            };
            // The u128/i128 result may not fit the actual type; cut it down to size.
            let truncated = self.truncate(result, left_layout);
            return Ok((Scalar::from_uint(truncated, left_layout.size), overflow, left_layout.ty));
        }

        // For the remaining ops, the types must be the same on both sides
        if left_layout.ty != right_layout.ty {
            span_bug!(
                self.cur_span(),
                "invalid asymmetric binary op {:?}: {:?} ({:?}), {:?} ({:?})",
                bin_op,
                l,
                left_layout.ty,
                r,
                right_layout.ty,
            )
        }

        let size = left_layout.size;

        // Operations that need special treatment for signed integers
        if left_layout.abi.is_signed() {
            // Signed comparisons: sign-extend both sides and compare as i128.
            let op: Option<fn(&i128, &i128) -> bool> = match bin_op {
                Lt => Some(i128::lt),
                Le => Some(i128::le),
                Gt => Some(i128::gt),
                Ge => Some(i128::ge),
                _ => None,
            };
            if let Some(op) = op {
                let l = self.sign_extend(l, left_layout) as i128;
                let r = self.sign_extend(r, right_layout) as i128;
                return Ok((Scalar::from_bool(op(&l, &r)), false, self.tcx.types.bool));
            }
            // Signed arithmetic: the zero-divisor checks must come before selecting
            // the `overflowing_*` function, since `x / 0` would panic in Rust.
            let op: Option<fn(i128, i128) -> (i128, bool)> = match bin_op {
                Div if r == 0 => throw_ub!(DivisionByZero),
                Rem if r == 0 => throw_ub!(RemainderByZero),
                Div => Some(i128::overflowing_div),
                Rem => Some(i128::overflowing_rem),
                Add => Some(i128::overflowing_add),
                Sub => Some(i128::overflowing_sub),
                Mul => Some(i128::overflowing_mul),
                _ => None,
            };
            if let Some(op) = op {
                let l = self.sign_extend(l, left_layout) as i128;
                let r = self.sign_extend(r, right_layout) as i128;

                // We need a special check for overflowing Rem and Div since they are *UB*
                // on overflow, which can happen with "int_min $OP -1".
                if matches!(bin_op, Rem | Div) {
                    if l == size.signed_int_min() && r == -1 {
                        if bin_op == Rem {
                            throw_ub!(RemainderOverflow)
                        } else {
                            throw_ub!(DivisionOverflow)
                        }
                    }
                }

                let (result, oflo) = op(l, r);
                // This may be out-of-bounds for the result type, so we have to truncate ourselves.
                // If that truncation loses any information, we have an overflow.
                let result = result as u128;
                let truncated = self.truncate(result, left_layout);
                return Ok((
                    Scalar::from_uint(truncated, size),
                    // Overflowed if i128 arithmetic overflowed OR if the result does not
                    // round-trip through truncation at the actual type's width.
                    oflo || self.sign_extend(truncated, left_layout) != result,
                    left_layout.ty,
                ));
            }
        }

        // Unsigned (and sign-agnostic) operations.
        let (val, ty) = match bin_op {
            Eq => (Scalar::from_bool(l == r), self.tcx.types.bool),
            Ne => (Scalar::from_bool(l != r), self.tcx.types.bool),

            Lt => (Scalar::from_bool(l < r), self.tcx.types.bool),
            Le => (Scalar::from_bool(l <= r), self.tcx.types.bool),
            Gt => (Scalar::from_bool(l > r), self.tcx.types.bool),
            Ge => (Scalar::from_bool(l >= r), self.tcx.types.bool),

            BitOr => (Scalar::from_uint(l | r, size), left_layout.ty),
            BitAnd => (Scalar::from_uint(l & r, size), left_layout.ty),
            BitXor => (Scalar::from_uint(l ^ r, size), left_layout.ty),

            Add | Sub | Mul | Rem | Div => {
                // Signed arithmetic was fully handled above; only unsigned reaches here.
                assert!(!left_layout.abi.is_signed());
                let op: fn(u128, u128) -> (u128, bool) = match bin_op {
                    Add => u128::overflowing_add,
                    Sub => u128::overflowing_sub,
                    Mul => u128::overflowing_mul,
                    // Zero divisors are UB and must be caught before calling `overflowing_*`.
                    Div if r == 0 => throw_ub!(DivisionByZero),
                    Rem if r == 0 => throw_ub!(RemainderByZero),
                    Div => u128::overflowing_div,
                    Rem => u128::overflowing_rem,
                    _ => bug!(),
                };
                let (result, oflo) = op(l, r);
                // Truncate to target type.
                // If that truncation loses any information, we have an overflow.
                let truncated = self.truncate(result, left_layout);
                return Ok((
                    Scalar::from_uint(truncated, size),
                    oflo || truncated != result,
                    left_layout.ty,
                ));
            }

            _ => span_bug!(
                self.cur_span(),
                "invalid binary op {:?}: {:?}, {:?} (both {:?})",
                bin_op,
                l,
                r,
                right_layout.ty,
            ),
        };

        Ok((val, false, ty))
    }
275
    /// Returns the result of the specified operation, whether it overflowed, and
    /// the result type.
    ///
    /// Dispatches on the *LHS* type: char, bool, float, integer, or pointer.
    pub fn overflowing_binary_op(
        &self,
        bin_op: mir::BinOp,
        left: &ImmTy<'tcx, M::PointerTag>,
        right: &ImmTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx, (Scalar<M::PointerTag>, bool, Ty<'tcx>)> {
        trace!(
            "Running binary op {:?}: {:?} ({:?}), {:?} ({:?})",
            bin_op,
            *left,
            left.layout.ty,
            *right,
            right.layout.ty
        );

        match left.layout.ty.kind() {
            // char and bool require both sides to have exactly the same type.
            ty::Char => {
                assert_eq!(left.layout.ty, right.layout.ty);
                let left = left.to_scalar()?;
                let right = right.to_scalar()?;
                Ok(self.binary_char_op(bin_op, left.to_char()?, right.to_char()?))
            }
            ty::Bool => {
                assert_eq!(left.layout.ty, right.layout.ty);
                let left = left.to_scalar()?;
                let right = right.to_scalar()?;
                Ok(self.binary_bool_op(bin_op, left.to_bool()?, right.to_bool()?))
            }
            ty::Float(fty) => {
                assert_eq!(left.layout.ty, right.layout.ty);
                let ty = left.layout.ty;
                let left = left.to_scalar()?;
                let right = right.to_scalar()?;
                Ok(match fty {
                    FloatTy::F32 => {
                        self.binary_float_op(bin_op, ty, left.to_f32()?, right.to_f32()?)
                    }
                    FloatTy::F64 => {
                        self.binary_float_op(bin_op, ty, left.to_f64()?, right.to_f64()?)
                    }
                })
            }
            _ if left.layout.ty.is_integral() => {
                // the RHS type can be different, e.g. for shifts -- but it has to be integral, too
                assert!(
                    right.layout.ty.is_integral(),
                    "Unexpected types for BinOp: {:?} {:?} {:?}",
                    left.layout.ty,
                    bin_op,
                    right.layout.ty
                );

                // Extract the raw bits and let `binary_int_op` do the rest.
                let l = left.to_scalar()?.to_bits(left.layout.size)?;
                let r = right.to_scalar()?.to_bits(right.layout.size)?;
                self.binary_int_op(bin_op, l, left.layout, r, right.layout)
            }
            _ if left.layout.ty.is_any_ptr() => {
                // The RHS type must be a `pointer` *or an integer type* (for `Offset`).
                // (Even when both sides are pointers, their type might differ, see issue #91636)
                assert!(
                    right.layout.ty.is_any_ptr() || right.layout.ty.is_integral(),
                    "Unexpected types for BinOp: {:?} {:?} {:?}",
                    left.layout.ty,
                    bin_op,
                    right.layout.ty
                );

                // Pointer ops are machine-dependent (e.g. Miri vs. CTFE differ).
                M::binary_ptr_op(self, bin_op, left, right)
            }
            _ => span_bug!(
                self.cur_span(),
                "Invalid MIR: bad LHS type for binop: {:?}",
                left.layout.ty
            ),
        }
    }
354
355     /// Typed version of `overflowing_binary_op`, returning an `ImmTy`. Also ignores overflows.
356     #[inline]
357     pub fn binary_op(
358         &self,
359         bin_op: mir::BinOp,
360         left: &ImmTy<'tcx, M::PointerTag>,
361         right: &ImmTy<'tcx, M::PointerTag>,
362     ) -> InterpResult<'tcx, ImmTy<'tcx, M::PointerTag>> {
363         let (val, _overflow, ty) = self.overflowing_binary_op(bin_op, left, right)?;
364         Ok(ImmTy::from_scalar(val, self.layout_of(ty)?))
365     }
366
    /// Returns the result of the specified operation, whether it overflowed, and
    /// the result type.
    ///
    /// Handles `Not` on bool, `Neg` on floats, and `Not`/`Neg` on integers.
    pub fn overflowing_unary_op(
        &self,
        un_op: mir::UnOp,
        val: &ImmTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx, (Scalar<M::PointerTag>, bool, Ty<'tcx>)> {
        use rustc_middle::mir::UnOp::*;

        let layout = val.layout;
        let val = val.to_scalar()?;
        trace!("Running unary op {:?}: {:?} ({:?})", un_op, val, layout.ty);

        match layout.ty.kind() {
            ty::Bool => {
                // Only logical negation is valid on `bool`.
                let val = val.to_bool()?;
                let res = match un_op {
                    Not => !val,
                    _ => span_bug!(self.cur_span(), "Invalid bool op {:?}", un_op),
                };
                Ok((Scalar::from_bool(res), false, self.tcx.types.bool))
            }
            ty::Float(fty) => {
                // Float negation never overflows.
                let res = match (un_op, fty) {
                    (Neg, FloatTy::F32) => Scalar::from_f32(-val.to_f32()?),
                    (Neg, FloatTy::F64) => Scalar::from_f64(-val.to_f64()?),
                    _ => span_bug!(self.cur_span(), "Invalid float op {:?}", un_op),
                };
                Ok((res, false, layout.ty))
            }
            _ => {
                assert!(layout.ty.is_integral());
                let val = val.to_bits(layout.size)?;
                let (res, overflow) = match un_op {
                    Not => (self.truncate(!val, layout), false), // bitwise negation, then truncate
                    Neg => {
                        // arithmetic negation
                        assert!(layout.abi.is_signed());
                        // Sign-extend to i128 so negation is done at full precision.
                        let val = self.sign_extend(val, layout) as i128;
                        let (res, overflow) = val.overflowing_neg();
                        let res = res as u128;
                        // Truncate to target type.
                        // If that truncation loses any information, we have an overflow.
                        let truncated = self.truncate(res, layout);
                        (truncated, overflow || self.sign_extend(truncated, layout) != res)
                    }
                };
                Ok((Scalar::from_uint(res, layout.size), overflow, layout.ty))
            }
        }
    }
418
419     pub fn unary_op(
420         &self,
421         un_op: mir::UnOp,
422         val: &ImmTy<'tcx, M::PointerTag>,
423     ) -> InterpResult<'tcx, ImmTy<'tcx, M::PointerTag>> {
424         let (val, _overflow, ty) = self.overflowing_unary_op(un_op, val)?;
425         Ok(ImmTy::from_scalar(val, self.layout_of(ty)?))
426     }
427 }