//! Intrinsics and other functions that the miri engine executes without
//! looking at their MIR. Intrinsics/functions supported here are shared by CTFE
//! and miri.

use std::convert::TryFrom;

use rustc_hir::def_id::DefId;
use rustc_middle::mir::{
    self,
    interpret::{uabs, ConstValue, GlobalId, InterpResult, Scalar},
    BinOp,
};
use rustc_middle::ty;
use rustc_middle::ty::subst::SubstsRef;
use rustc_middle::ty::{Ty, TyCtxt};
use rustc_span::symbol::{sym, Symbol};
use rustc_target::abi::{Abi, LayoutOf as _, Primitive, Size};

use super::{
    util::ensure_monomorphic_enough, CheckInAllocMsg, ImmTy, InterpCx, Machine, OpTy, PlaceTy,
};

mod caller_location;
mod type_name;

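/// Directly computes the bit-twiddling intrinsics (`ctpop`, `ctlz`, `cttz`,
/// `bswap`, `bitreverse`) on the raw bits of an integer value.
///
/// `bits` arrives zero-extended to `u128`, so width-sensitive operations either
/// shift the value into the top of the `u128` first (`cttz`, `bswap`,
/// `bitreverse`) or correct the count afterwards (`ctlz`): e.g. `ctlz` on the
/// `u8` value `0b0000_0100` must yield 5, not the 125 that
/// `u128::leading_zeros` reports.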
fn numeric_intrinsic<'tcx, Tag>(
    name: Symbol,
    bits: u128,
    kind: Primitive,
) -> InterpResult<'tcx, Scalar<Tag>> {
    let size = match kind {
        Primitive::Int(integer, _) => integer.size(),
        _ => bug!("invalid `{}` argument: {:?}", name, kind),
    };
    let extra = 128 - u128::from(size.bits());
    let bits_out = match name {
        sym::ctpop => u128::from(bits.count_ones()),
        sym::ctlz => u128::from(bits.leading_zeros()) - extra,
        sym::cttz => u128::from((bits << extra).trailing_zeros()) - extra,
        sym::bswap => (bits << extra).swap_bytes(),
        sym::bitreverse => (bits << extra).reverse_bits(),
        _ => bug!("not a numeric intrinsic: {}", name),
    };
    Ok(Scalar::from_uint(bits_out, size))
}

/// The logic for all nullary intrinsics is implemented here. These intrinsics don't get evaluated
/// inside an `InterpCx` and instead have their value computed directly from rustc internal info.
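/// For example, a call to `intrinsics::needs_drop::<String>()` in a const
/// context is answered here straight from `Ty::needs_drop`, without ever
/// building an interpreter instance.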
crate fn eval_nullary_intrinsic<'tcx>(
    tcx: TyCtxt<'tcx>,
    param_env: ty::ParamEnv<'tcx>,
    def_id: DefId,
    substs: SubstsRef<'tcx>,
) -> InterpResult<'tcx, ConstValue<'tcx>> {
    let tp_ty = substs.type_at(0);
    let name = tcx.item_name(def_id);
    Ok(match name {
        sym::type_name => {
            ensure_monomorphic_enough(tcx, tp_ty)?;
            let alloc = type_name::alloc_type_name(tcx, tp_ty);
            ConstValue::Slice { data: alloc, start: 0, end: alloc.len() }
        }
        sym::needs_drop => ConstValue::from_bool(tp_ty.needs_drop(tcx, param_env)),
        sym::min_align_of | sym::pref_align_of => {
            let layout = tcx.layout_of(param_env.and(tp_ty)).map_err(|e| err_inval!(Layout(e)))?;
            let n = match name {
                sym::pref_align_of => layout.align.pref.bytes(),
                sym::min_align_of => layout.align.abi.bytes(),
                _ => bug!(),
            };
            ConstValue::from_machine_usize(n, &tcx)
        }
        sym::type_id => {
            ensure_monomorphic_enough(tcx, tp_ty)?;
            ConstValue::from_u64(tcx.type_id_hash(tp_ty))
        }
        sym::variant_count => match tp_ty.kind() {
            ty::Adt(ref adt, _) => ConstValue::from_machine_usize(adt.variants.len() as u64, &tcx),
            ty::Projection(_)
            | ty::Opaque(_, _)
            | ty::Param(_)
            | ty::Bound(_, _)
            | ty::Placeholder(_)
            | ty::Infer(_) => throw_inval!(TooGeneric),
            ty::Bool
            | ty::Char
            | ty::Int(_)
            | ty::Uint(_)
            | ty::Float(_)
            | ty::Foreign(_)
            | ty::Str
            | ty::Array(_, _)
            | ty::Slice(_)
            | ty::RawPtr(_)
            | ty::Ref(_, _, _)
            | ty::FnDef(_, _)
            | ty::FnPtr(_)
            | ty::Dynamic(_, _)
            | ty::Closure(_, _)
            | ty::Generator(_, _, _)
            | ty::GeneratorWitness(_)
            | ty::Never
            | ty::Tuple(_)
            | ty::Error(_) => ConstValue::from_machine_usize(0u64, &tcx),
        },
        other => bug!("`{}` is not a zero arg intrinsic", other),
    })
}

impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
    /// Returns `true` if emulation happened.
    /// Here we implement the intrinsics that are common to all Miri instances; individual machines
    /// can add their own intrinsic handling.
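    /// If this returns `false`, the intrinsic was not handled here; the caller
    /// (typically the machine) is expected to emulate it instead.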
    pub fn emulate_intrinsic(
        &mut self,
        instance: ty::Instance<'tcx>,
        args: &[OpTy<'tcx, M::PointerTag>],
        ret: Option<(PlaceTy<'tcx, M::PointerTag>, mir::BasicBlock)>,
    ) -> InterpResult<'tcx, bool> {
        let substs = instance.substs;
        let intrinsic_name = self.tcx.item_name(instance.def_id());

        // First handle intrinsics without return place.
        let (dest, ret) = match ret {
            None => match intrinsic_name {
                sym::transmute => throw_ub_format!("transmuting to uninhabited type"),
                sym::abort => M::abort(self, "the program aborted execution".to_owned())?,
                // Unsupported diverging intrinsic.
                _ => return Ok(false),
            },
            Some(p) => p,
        };

        // Keep the patterns in this match ordered the same as the list in
        // `compiler/rustc_middle/src/ty/constness.rs`.
        match intrinsic_name {
            sym::caller_location => {
                let span = self.find_closest_untracked_caller_location();
                let location = self.alloc_caller_location_for_span(span);
                self.write_scalar(location.ptr, dest)?;
            }

            sym::min_align_of_val | sym::size_of_val => {
                let place = self.deref_operand(args[0])?;
                let (size, align) = self
                    .size_and_align_of(place.meta, place.layout)?
                    .ok_or_else(|| err_unsup_format!("`extern type` does not have known layout"))?;

                let result = match intrinsic_name {
                    sym::min_align_of_val => align.bytes(),
                    sym::size_of_val => size.bytes(),
                    _ => bug!(),
                };

                self.write_scalar(Scalar::from_machine_usize(result, self), dest)?;
            }

            sym::min_align_of
            | sym::pref_align_of
            | sym::needs_drop
            | sym::type_id
            | sym::type_name
            | sym::variant_count => {
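                // These were already evaluated as nullary intrinsics (see
                // `eval_nullary_intrinsic` above, reached through
                // `const_eval_global_id`); all that is left to do here is copy
                // the resulting constant into `dest`.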
                let gid = GlobalId { instance, promoted: None };
                let ty = match intrinsic_name {
                    sym::min_align_of | sym::pref_align_of | sym::variant_count => {
                        self.tcx.types.usize
                    }
                    sym::needs_drop => self.tcx.types.bool,
                    sym::type_id => self.tcx.types.u64,
                    sym::type_name => self.tcx.mk_static_str(),
                    _ => bug!("already checked for nullary intrinsics"),
                };
                let val =
                    self.tcx.const_eval_global_id(self.param_env, gid, Some(self.tcx.span))?;
                let const_ = ty::Const { val: ty::ConstKind::Value(val), ty };
                let val = self.const_to_op(&const_, None)?;
                self.copy_op(val, dest)?;
            }

            sym::ctpop
            | sym::cttz
            | sym::cttz_nonzero
            | sym::ctlz
            | sym::ctlz_nonzero
            | sym::bswap
            | sym::bitreverse => {
                let ty = substs.type_at(0);
                let layout_of = self.layout_of(ty)?;
                let val = self.read_scalar(args[0])?.check_init()?;
                let bits = self.force_bits(val, layout_of.size)?;
                let kind = match layout_of.abi {
                    Abi::Scalar(ref scalar) => scalar.value,
                    _ => span_bug!(
                        self.cur_span(),
                        "{} called on invalid type {:?}",
                        intrinsic_name,
                        ty
                    ),
                };
                let (nonzero, intrinsic_name) = match intrinsic_name {
                    sym::cttz_nonzero => (true, sym::cttz),
                    sym::ctlz_nonzero => (true, sym::ctlz),
                    other => (false, other),
                };
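                // The `_nonzero` variants promise a non-zero argument, so calling
                // them on 0 is UB; check that before deferring to the shared
                // implementation in `numeric_intrinsic`.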
                if nonzero && bits == 0 {
                    throw_ub_format!("`{}_nonzero` called on 0", intrinsic_name);
                }
                let out_val = numeric_intrinsic(intrinsic_name, bits, kind)?;
                self.write_scalar(out_val, dest)?;
            }
            sym::add_with_overflow | sym::sub_with_overflow | sym::mul_with_overflow => {
                let lhs = self.read_immediate(args[0])?;
                let rhs = self.read_immediate(args[1])?;
                let bin_op = match intrinsic_name {
                    sym::add_with_overflow => BinOp::Add,
                    sym::sub_with_overflow => BinOp::Sub,
                    sym::mul_with_overflow => BinOp::Mul,
                    _ => bug!("Already checked for int ops"),
                };
                self.binop_with_overflow(bin_op, lhs, rhs, dest)?;
            }
            sym::saturating_add | sym::saturating_sub => {
                let l = self.read_immediate(args[0])?;
                let r = self.read_immediate(args[1])?;
                let is_add = intrinsic_name == sym::saturating_add;
                let (val, overflowed, _ty) =
                    self.overflowing_binary_op(if is_add { BinOp::Add } else { BinOp::Sub }, l, r)?;
                let val = if overflowed {
                    let num_bits = l.layout.size.bits();
                    if l.layout.abi.is_signed() {
                        // For signed ints the saturated value depends on the sign of the first
                        // term since the sign of the second term can be inferred from this and
                        // the fact that the operation has overflowed (if either is 0 no
                        // overflow can occur)
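                        // E.g. for `i8`: `100 + 100` overflows with a positive first
                        // term and saturates to `i8::MAX` (0x7f), while `-100 - 100`
                        // has a negative first term and saturates to `i8::MIN` (0x80).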
                        let first_term: u128 = self.force_bits(l.to_scalar()?, l.layout.size)?;
                        let first_term_positive = first_term & (1 << (num_bits - 1)) == 0;
                        if first_term_positive {
                            // Negative overflow not possible since the positive first term
                            // can only increase an (in range) negative term for addition
                            // or corresponding negated positive term for subtraction
                            Scalar::from_uint(
                                (1u128 << (num_bits - 1)) - 1, // max positive
                                Size::from_bits(num_bits),
                            )
                        } else {
                            // Positive overflow not possible for similar reason
                            // max negative
                            Scalar::from_uint(1u128 << (num_bits - 1), Size::from_bits(num_bits))
                        }
                    } else {
                        // unsigned
                        if is_add {
                            // max unsigned
                            Scalar::from_uint(
                                u128::MAX >> (128 - num_bits),
                                Size::from_bits(num_bits),
                            )
                        } else {
                            // underflow to 0
                            Scalar::from_uint(0u128, Size::from_bits(num_bits))
                        }
                    }
                } else {
                    val
                };
                self.write_scalar(val, dest)?;
            }
            sym::discriminant_value => {
                let place = self.deref_operand(args[0])?;
                let discr_val = self.read_discriminant(place.into())?.0;
                self.write_scalar(discr_val, dest)?;
            }
            sym::unchecked_shl
            | sym::unchecked_shr
            | sym::unchecked_add
            | sym::unchecked_sub
            | sym::unchecked_mul
            | sym::unchecked_div
            | sym::unchecked_rem => {
                let l = self.read_immediate(args[0])?;
                let r = self.read_immediate(args[1])?;
                let bin_op = match intrinsic_name {
                    sym::unchecked_shl => BinOp::Shl,
                    sym::unchecked_shr => BinOp::Shr,
                    sym::unchecked_add => BinOp::Add,
                    sym::unchecked_sub => BinOp::Sub,
                    sym::unchecked_mul => BinOp::Mul,
                    sym::unchecked_div => BinOp::Div,
                    sym::unchecked_rem => BinOp::Rem,
                    _ => bug!("Already checked for int ops"),
                };
                let (val, overflowed, _ty) = self.overflowing_binary_op(bin_op, l, r)?;
                if overflowed {
                    let layout = self.layout_of(substs.type_at(0))?;
                    let r_val = self.force_bits(r.to_scalar()?, layout.size)?;
                    if let sym::unchecked_shl | sym::unchecked_shr = intrinsic_name {
                        throw_ub_format!("overflowing shift by {} in `{}`", r_val, intrinsic_name);
                    } else {
                        throw_ub_format!("overflow executing `{}`", intrinsic_name);
                    }
                }
                self.write_scalar(val, dest)?;
            }
            sym::rotate_left | sym::rotate_right => {
                // rotate_left: (X << (S % BW)) | (X >> ((BW - S) % BW))
                // rotate_right: (X << ((BW - S) % BW)) | (X >> (S % BW))
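                // E.g. `rotate_left(0b1000_0001u8, 1)` gives `0b0000_0011`: with
                // shift_bits = 1 and inv_shift_bits = 7, the left shift drops the
                // top bit and the right shift re-inserts it at the bottom.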
                let layout = self.layout_of(substs.type_at(0))?;
                let val = self.read_scalar(args[0])?.check_init()?;
                let val_bits = self.force_bits(val, layout.size)?;
                let raw_shift = self.read_scalar(args[1])?.check_init()?;
                let raw_shift_bits = self.force_bits(raw_shift, layout.size)?;
                let width_bits = u128::from(layout.size.bits());
                let shift_bits = raw_shift_bits % width_bits;
                let inv_shift_bits = (width_bits - shift_bits) % width_bits;
                let result_bits = if intrinsic_name == sym::rotate_left {
                    (val_bits << shift_bits) | (val_bits >> inv_shift_bits)
                } else {
                    (val_bits >> shift_bits) | (val_bits << inv_shift_bits)
                };
                let truncated_bits = self.truncate(result_bits, layout);
                let result = Scalar::from_uint(truncated_bits, layout.size);
                self.write_scalar(result, dest)?;
            }
            sym::offset => {
                let ptr = self.read_scalar(args[0])?.check_init()?;
                let offset_count = self.read_scalar(args[1])?.to_machine_isize(self)?;
                let pointee_ty = substs.type_at(0);

                let offset_ptr = self.ptr_offset_inbounds(ptr, pointee_ty, offset_count)?;
                self.write_scalar(offset_ptr, dest)?;
            }
            sym::arith_offset => {
                let ptr = self.read_scalar(args[0])?.check_init()?;
                let offset_count = self.read_scalar(args[1])?.to_machine_isize(self)?;
                let pointee_ty = substs.type_at(0);

                let pointee_size = i64::try_from(self.layout_of(pointee_ty)?.size.bytes()).unwrap();
                let offset_bytes = offset_count.wrapping_mul(pointee_size);
                let offset_ptr = ptr.ptr_wrapping_signed_offset(offset_bytes, self);
                self.write_scalar(offset_ptr, dest)?;
            }
            sym::ptr_offset_from => {
                let a = self.read_immediate(args[0])?.to_scalar()?;
                let b = self.read_immediate(args[1])?.to_scalar()?;

                // Special case: if both scalars are *equal integers*
                // and not NULL, we pretend there is an allocation of size 0 right there,
                // and their offset is 0. (There's never a valid object at NULL, making it an
                // exception from the exception.)
                // This is the dual to the special exception for offset-by-0
                // in the inbounds pointer offset operation (see the Miri code, `src/operator.rs`).
                //
                // Control flow is weird because we cannot early-return (to reach the
                // `go_to_block` at the end).
                let done = if a.is_bits() && b.is_bits() {
                    let a = a.to_machine_usize(self)?;
                    let b = b.to_machine_usize(self)?;
                    if a == b && a != 0 {
                        self.write_scalar(Scalar::from_machine_isize(0, self), dest)?;
                        true
                    } else {
                        false
                    }
                } else {
                    false
                };

                if !done {
                    // General case: we need two pointers.
                    let a = self.force_ptr(a)?;
                    let b = self.force_ptr(b)?;
                    if a.alloc_id != b.alloc_id {
                        throw_ub_format!(
                            "ptr_offset_from cannot compute offset of pointers into different \
                            allocations.",
                        );
                    }
                    let usize_layout = self.layout_of(self.tcx.types.usize)?;
                    let isize_layout = self.layout_of(self.tcx.types.isize)?;
                    let a_offset = ImmTy::from_uint(a.offset.bytes(), usize_layout);
                    let b_offset = ImmTy::from_uint(b.offset.bytes(), usize_layout);
                    let (val, _overflowed, _ty) =
                        self.overflowing_binary_op(BinOp::Sub, a_offset, b_offset)?;
                    let pointee_layout = self.layout_of(substs.type_at(0))?;
                    let val = ImmTy::from_scalar(val, isize_layout);
                    let size = ImmTy::from_int(pointee_layout.size.bytes(), isize_layout);
                    self.exact_div(val, size, dest)?;
                }
            }

            sym::transmute => {
                self.copy_op_transmute(args[0], dest)?;
            }
            sym::assert_inhabited => {
                let ty = instance.substs.type_at(0);
                let layout = self.layout_of(ty)?;

                if layout.abi.is_uninhabited() {
                    // The run-time intrinsic panics just to get a good backtrace; here we abort
                    // since there is no problem showing a backtrace even for aborts.
                    M::abort(
                        self,
                        format!(
                            "aborted execution: attempted to instantiate uninhabited type `{}`",
                            ty
                        ),
                    )?;
                }
            }
            sym::simd_insert => {
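                // `simd_insert(vector, index, elem)` returns `vector` with the lane
                // at `index` replaced by `elem`; all other lanes are copied through
                // unchanged by the loop below.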
                let index = u64::from(self.read_scalar(args[1])?.to_u32()?);
                let elem = args[2];
                let input = args[0];
                let (len, e_ty) = input.layout.ty.simd_size_and_type(*self.tcx);
                assert!(
                    index < len,
                    "index `{}` must be in bounds of vector type `{}`: `[0, {})`",
                    index,
                    input.layout.ty,
                    len
                );
                assert_eq!(
                    input.layout, dest.layout,
                    "Return type `{}` must match vector type `{}`",
                    dest.layout.ty, input.layout.ty
                );
                assert_eq!(
                    elem.layout.ty, e_ty,
                    "Scalar element type `{}` must match vector element type `{}`",
                    elem.layout.ty, e_ty
                );

                for i in 0..len {
                    let place = self.place_index(dest, i)?;
                    let value = if i == index { elem } else { self.operand_index(input, i)? };
                    self.copy_op(value, place)?;
                }
            }
            sym::simd_extract => {
                let index = u64::from(self.read_scalar(args[1])?.to_u32()?);
                let (len, e_ty) = args[0].layout.ty.simd_size_and_type(*self.tcx);
                assert!(
                    index < len,
                    "index `{}` is out-of-bounds of vector type `{}` with length `{}`",
                    index,
                    args[0].layout.ty,
                    len
                );
                assert_eq!(
                    e_ty, dest.layout.ty,
                    "Return type `{}` must match vector element type `{}`",
                    dest.layout.ty, e_ty
                );
                self.copy_op(self.operand_index(args[0], index)?, dest)?;
            }
            sym::likely | sym::unlikely => {
                // These just return their argument
                self.copy_op(args[0], dest)?;
            }
            sym::assume => {
                let cond = self.read_scalar(args[0])?.check_init()?.to_bool()?;
                if !cond {
                    throw_ub_format!("`assume` intrinsic called with `false`");
                }
            }
            _ => return Ok(false),
        }

        trace!("{:?}", self.dump_place(*dest));
        self.go_to_block(ret);
        Ok(true)
    }

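    /// Performs the division for the `exact_div` intrinsic, reporting UB rather
    /// than silently truncating. E.g. `exact_div(6, 3)` is `2`, while
    /// `exact_div(7, 3)` leaves a remainder and is therefore UB.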
    pub fn exact_div(
        &mut self,
        a: ImmTy<'tcx, M::PointerTag>,
        b: ImmTy<'tcx, M::PointerTag>,
        dest: PlaceTy<'tcx, M::PointerTag>,
    ) -> InterpResult<'tcx> {
        // Performs an exact division, resulting in undefined behavior when
        // `x % y != 0` or `y == 0` or `x == T::MIN && y == -1`.
        // First, check x % y != 0 (or if that computation overflows).
        let (res, overflow, _ty) = self.overflowing_binary_op(BinOp::Rem, a, b)?;
        if overflow || res.assert_bits(a.layout.size) != 0 {
            // Then, check if `b` is -1, which is the "MIN / -1" case.
            let minus1 = Scalar::from_int(-1, dest.layout.size);
            let b_scalar = b.to_scalar().unwrap();
            if b_scalar == minus1 {
                throw_ub_format!("exact_div: result of dividing MIN by -1 cannot be represented")
            } else {
                throw_ub_format!("exact_div: {} cannot be divided by {} without remainder", a, b)
            }
        }
        // `Rem` says this is all right, so we can let `Div` do its job.
        self.binop_ignore_overflow(BinOp::Div, a, b, dest)
    }

    /// Offsets a pointer by some multiple of its type, returning an error if the pointer leaves its
    /// allocation. For integer pointers, we consider each of them their own tiny allocation of size
    /// 0, so offset-by-0 (and only 0) is okay -- except that NULL cannot be offset by _any_ value.
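    /// For example, offsetting a `*const u32` by 3 is only in bounds if the 12
    /// bytes starting at the pointer lie inside (or extend exactly up to the
    /// end of) a single allocated object.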
    pub fn ptr_offset_inbounds(
        &self,
        ptr: Scalar<M::PointerTag>,
        pointee_ty: Ty<'tcx>,
        offset_count: i64,
    ) -> InterpResult<'tcx, Scalar<M::PointerTag>> {
        // We cannot overflow i64 as a type's size must be <= isize::MAX.
        let pointee_size = i64::try_from(self.layout_of(pointee_ty)?.size.bytes()).unwrap();
        // The computed offset, in bytes, cannot overflow an isize.
        let offset_bytes =
            offset_count.checked_mul(pointee_size).ok_or(err_ub!(PointerArithOverflow))?;
        // The offset being in bounds cannot rely on "wrapping around" the address space.
        // So, first rule out overflows in the pointer arithmetic.
        let offset_ptr = ptr.ptr_signed_offset(offset_bytes, self)?;
        // ptr and offset_ptr must be in bounds of the same allocated object. This means all of the
        // memory between these pointers must be accessible. Note that we do not require the
        // pointers to be properly aligned (unlike a read/write operation).
        let min_ptr = if offset_bytes >= 0 { ptr } else { offset_ptr };
        let size: u64 = uabs(offset_bytes);
        // This call handles checking for integer/NULL pointers.
        self.memory.check_ptr_access_align(
            min_ptr,
            Size::from_bytes(size),
            None,
            CheckInAllocMsg::InboundsTest,
        )?;
        Ok(offset_ptr)
    }
}