1 //! Validates the MIR to ensure that invariants are upheld.
3 use rustc_index::bit_set::BitSet;
4 use rustc_infer::infer::TyCtxtInferExt;
5 use rustc_middle::mir::interpret::Scalar;
6 use rustc_middle::mir::visit::{PlaceContext, Visitor};
7 use rustc_middle::mir::{
8 traversal, AggregateKind, BasicBlock, BinOp, Body, BorrowKind, Local, Location, MirPass,
9 MirPhase, Operand, Place, PlaceElem, PlaceRef, ProjectionElem, Rvalue, SourceScope, Statement,
10 StatementKind, Terminator, TerminatorKind, UnOp, START_BLOCK,
12 use rustc_middle::ty::fold::BottomUpFolder;
13 use rustc_middle::ty::{self, InstanceDef, ParamEnv, Ty, TyCtxt, TypeFoldable};
14 use rustc_mir_dataflow::impls::MaybeStorageLive;
15 use rustc_mir_dataflow::storage::AlwaysLiveLocals;
16 use rustc_mir_dataflow::{Analysis, ResultsCursor};
17 use rustc_target::abi::Size;
// NOTE(review): this file is a lossy extract — each line carries its original line
// number and many in-between lines are dropped. Comments describe only what is visible.
19 #[derive(Copy, Clone, Debug)]
// NOTE(review): internal lines 20-24 are missing here; the derive above most likely
// belongs to a dropped item (an `EdgeKind`-like enum is used by `check_edge` below),
// not to `Validator` — confirm against the full file.
// The MIR validation pass: checks that a `Body` upholds the invariants of its dialect.
25 pub struct Validator {
26 /// Describes at which point in the pipeline this validation is happening.
// NOTE(review): line 27 is missing — presumably the `when` field this doc comment
// describes (`self.when` is printed in diagnostics further down). TODO confirm.
28 /// The phase for which we are upholding the dialect. If the given phase forbids a specific
29 /// element, this validator will now emit errors if that specific element is encountered.
30 /// Note that phases that change the dialect cause all *following* phases to check the
31 /// invariants of the new dialect. A phase that changes dialects never checks the new invariants
33 pub mir_phase: MirPhase,
// NOTE(review): the struct's closing brace (line 34) is missing from this extract.
36 impl<'tcx> MirPass<'tcx> for Validator {
// Pass entry point: sets up the dataflow results and (presumably, in the dropped
// lines) runs a `TypeChecker` visitor over `body`.
37 fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
38 // FIXME(JakobDegen): These bodies never instantiated in codegend anyway, so it's not
39 // terribly important that they pass the validator. However, I think other passes might
40 // still see them, in which case they might be surprised. It would probably be better if we
41 // didn't put this through the MIR pipeline at all.
// Skip intrinsic/virtual shim bodies entirely (see FIXME above).
42 if matches!(body.source.instance, InstanceDef::Intrinsic(..) | InstanceDef::Virtual(..)) {
// NOTE(review): lines 43-44 are missing — presumably an early `return;` plus the
// closing brace of the `if`.
45 let def_id = body.source.def_id();
46 let param_env = tcx.param_env(def_id);
47 let mir_phase = self.mir_phase;
// Compute per-location storage liveness once up front; the checker consults this
// cursor for every use of a local (see `visit_local`).
49 let always_live_locals = AlwaysLiveLocals::new(body);
50 let storage_liveness = MaybeStorageLive::new(always_live_locals)
51 .into_engine(tcx, body)
52 .iterate_to_fixpoint()
53 .into_results_cursor(body);
// NOTE(review): lines 54-60 are missing — presumably the start of a
// `TypeChecker { .. }` struct literal whose trailing fields follow.
61 reachable_blocks: traversal::reachable_as_bitset(body),
63 place_cache: Vec::new(),
64 value_cache: Vec::new(),
// NOTE(review): lines 65-68 are missing — presumably the struct-literal close, a
// `.visit_body(body)` call, and the closing braces. TODO confirm.
70 /// Returns whether the two types are equal up to lifetimes.
71 /// All lifetimes, including higher-ranked ones, get ignored for this comparison.
72 /// (This is unlike the `erasing_regions` methods, which keep higher-ranked lifetimes for soundness reasons.)
73 ///
74 /// The point of this function is to approximate "equal up to subtyping". However,
75 /// the approximation is incorrect as variance is ignored.
76 pub fn equal_up_to_regions<'tcx>(
// NOTE(review): line 77 is missing — presumably `tcx: TyCtxt<'tcx>,` (the body
// below uses a free `tcx`).
78 param_env: ParamEnv<'tcx>,
// NOTE(review): lines 79-86 are missing — presumably the `src`/`dest: Ty<'tcx>`
// parameters (both are used below) plus a fast-path equality check.
87 // Normalize lifetimes away on both sides, then compare.
88 let normalize = |ty: Ty<'tcx>| {
89 tcx.normalize_erasing_regions(
// (line 90 missing — presumably `param_env,`)
91 ty.fold_with(&mut BottomUpFolder {
// (line 92 missing — presumably `tcx,`)
93 // FIXME: We erase all late-bound lifetimes, but this is not fully correct.
94 // If you have a type like `<for<'a> fn(&'a u32) as SomeTrait>::Assoc`,
95 // this is not necessarily equivalent to `<fn(&'static u32) as SomeTrait>::Assoc`,
96 // since one may have an `impl SomeTrait for fn(&32)` and
97 // `impl SomeTrait for fn(&'static u32)` at the same time which
98 // specify distinct values for Assoc. (See also #56105)
99 lt_op: |_| tcx.lifetimes.re_erased,
100 // Leave consts and types unchanged.
// NOTE(review): lines 101-105 are missing — presumably identity `ct_op`/`ty_op`
// closures and the closing delimiters of the folder/call/closure.
// Final answer: can the two lifetime-normalized types be equated by inference?
106 tcx.infer_ctxt().enter(|infcx| infcx.can_eq(param_env, normalize(src), normalize(dest)).is_ok())
// NOTE(review): the function's closing brace (line 107) is missing.
// Visitor state for one validation run over a single MIR body.
109 struct TypeChecker<'a, 'tcx> {
// NOTE(review): line 110 is missing — presumably a `when` field (printed as
// `self.when` in `visit_source_scope` below). TODO confirm.
111 body: &'a Body<'tcx>,
// NOTE(review): line 112 is missing — a `tcx` field is read throughout as `self.tcx`.
113 param_env: ParamEnv<'tcx>,
// NOTE(review): line 114 is missing — a `mir_phase` field is read as `self.mir_phase`.
115 reachable_blocks: BitSet<BasicBlock>,
116 storage_liveness: ResultsCursor<'a, 'tcx, MaybeStorageLive>,
// Scratch buffers reused across terminators to avoid reallocating (see the
// `SwitchInt` and `Call` duplicate checks in `visit_terminator`).
117 place_cache: Vec<PlaceRef<'tcx>>,
118 value_cache: Vec<u128>,
// NOTE(review): the struct's closing brace (line 119) is missing.
121 impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
// Report a validation failure at `location` with message `msg`, as a delayed
// span bug rather than an immediate ICE.
122 fn fail(&self, location: Location, msg: impl AsRef<str>) {
123 let span = self.body.source_info(location).span;
124 // We use `delay_span_bug` as we might see broken MIR when other errors have already
// (line 125 missing — the continuation of this comment)
126 self.tcx.sess.diagnostic().delay_span_bug(
// NOTE(review): lines 127-128 are missing — presumably `span,` and the opening of
// a `format!(` whose template and args follow.
129 "broken MIR in {:?} ({}) at {:?}:\n{}",
130 self.body.source.instance,
// NOTE(review): lines 131-137 are missing — presumably the remaining format
// arguments (the `{}` slot is likely `self.when`) and the closing delimiters.
// Validate one CFG edge from `location.block` to `bb`: the start block must have
// no predecessors, the target must exist, and the cleanup/unwind invariants
// spelled out in the match below must hold.
138 fn check_edge(&self, location: Location, bb: BasicBlock, edge_kind: EdgeKind) {
139 if bb == START_BLOCK {
140 self.fail(location, "start block must not have predecessors")
// (line 141 missing — closing brace of the `if`)
142 if let Some(bb) = self.body.basic_blocks().get(bb) {
143 let src = self.body.basic_blocks().get(location.block).unwrap();
144 match (src.is_cleanup, bb.is_cleanup, edge_kind) {
145 // Non-cleanup blocks can jump to non-cleanup blocks along non-unwind edges
146 (false, false, EdgeKind::Normal)
147 // Non-cleanup blocks can jump to cleanup blocks along unwind edges
148 | (false, true, EdgeKind::Unwind)
149 // Cleanup blocks can jump to cleanup blocks along non-unwind edges
150 | (true, true, EdgeKind::Normal) => {}
151 // All other jumps are invalid
// NOTE(review): lines 152-155 are missing — presumably the catch-all arm calling
// `self.fail(location, format!(` with the message template below.
156 "{:?} edge to {:?} violates unwind invariants (cleanup {:?} -> {:?})",
// NOTE(review): lines 157-165 are missing — presumably the format arguments,
// closing delimiters, and the `else` branch whose failure call follows.
166 self.fail(location, format!("encountered jump to invalid basic block {:?}", bb))
// NOTE(review): closing braces (lines 167-168, presumably) are missing.
170 /// Check if src can be assigned into dest.
171 /// This is not precise, it will accept some incorrect assignments.
172 fn mir_assign_valid_types(&self, src: Ty<'tcx>, dest: Ty<'tcx>) -> bool {
173 // Fast path before we normalize.
// NOTE(review): line 174 is missing — presumably `if src == dest {`.
175 // Equal types, all is good.
// (lines 176-177 missing — presumably `return true;` and the closing brace)
178 // Normalization reveals opaque types, but we may be validating MIR while computing
179 // said opaque types, causing cycles.
180 if (src, dest).has_opaque_types() {
// (lines 181-182 missing — presumably `return true;` and the closing brace)
183 // Normalize projections and things like that.
184 let param_env = self.param_env.with_reveal_all_normalized(self.tcx);
185 let src = self.tcx.normalize_erasing_regions(param_env, src);
186 let dest = self.tcx.normalize_erasing_regions(param_env, dest);
188 // Type-changing assignments can happen when subtyping is used. While
189 // all normal lifetimes are erased, higher-ranked types with their
190 // late-bound lifetimes are still around and can lead to type
191 // differences. So we compare ignoring lifetimes.
192 equal_up_to_regions(self.tcx, param_env, src, dest)
// NOTE(review): the method's closing brace (line 193) and the `impl` close are
// missing from this extract.
196 impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
// Every visited local must have a declaration in `body.local_decls`, and any use
// in reachable code must occur while the local's storage is live.
197 fn visit_local(&mut self, local: &Local, context: PlaceContext, location: Location) {
198 if self.body.local_decls.get(*local).is_none() {
// NOTE(review): lines 199-200 are missing — presumably a `self.fail(location,`
// call wrapping the message below.
201 format!("local {:?} has no corresponding declaration in `body.local_decls`", local),
// (lines 202-204 missing — presumably closing delimiters)
// Only check storage liveness in reachable blocks: dataflow results are not
// meaningful for unreachable code.
205 if self.reachable_blocks.contains(location.block) && context.is_use() {
206 // Uses of locals must occur while the local's storage is allocated.
207 self.storage_liveness.seek_after_primary_effect(location);
208 let locals_with_storage = self.storage_liveness.get();
209 if !locals_with_storage.contains(*local) {
210 self.fail(location, format!("use of local {:?}, which has no storage here", local));
// NOTE(review): closing braces (lines 211-213, presumably) are missing.
// Opt-in (-Zvalidate-mir) check that `Operand::Copy` is only used on `Copy` types.
215 fn visit_operand(&mut self, operand: &Operand<'tcx>, location: Location) {
216 // This check is somewhat expensive, so only run it when -Zvalidate-mir is passed.
217 if self.tcx.sess.opts.debugging_opts.validate_mir {
218 // `Operand::Copy` is only supposed to be used with `Copy` types.
219 if let Operand::Copy(place) = operand {
220 let ty = place.ty(&self.body.local_decls, self.tcx).ty;
221 let span = self.body.source_info(location).span;
// (line 222 missing — blank line, presumably)
223 if !ty.is_copy_modulo_regions(self.tcx.at(span), self.param_env) {
224 self.fail(location, format!("`Operand::Copy` with non-`Copy` type {}", ty));
// NOTE(review): lines 225-228 are missing — presumably the closing braces.
229 self.super_operand(operand, location);
// (lines 230-231 missing — the method's closing brace, presumably)
// An `Index` projection's index local must have type `usize`.
232 fn visit_projection_elem(
// NOTE(review): lines 233-234 are missing — presumably `&mut self,` and a
// `local` parameter (forwarded to `super_projection_elem` below).
235 proj_base: &[PlaceElem<'tcx>],
236 elem: PlaceElem<'tcx>,
237 context: PlaceContext,
// (lines 238-239 missing — presumably `location: Location,` and `) {`)
240 if let ProjectionElem::Index(index) = elem {
241 let index_ty = self.body.local_decls[index].ty;
242 if index_ty != self.tcx.types.usize {
243 self.fail(location, format!("bad index ({:?} != usize)", index_ty))
// (lines 244-245 missing — closing braces, presumably)
246 self.super_projection_elem(local, proj_base, elem, context, location);
// (lines 247-248 missing — the method's closing brace, presumably)
// Computing the place's type triggers any `bug!`s lurking in the type machinery;
// the result itself is deliberately discarded.
249 fn visit_place(&mut self, place: &Place<'tcx>, _: PlaceContext, _: Location) {
250 // Set off any `bug!`s in the type computation code
251 let _ = place.ty(&self.body.local_decls, self.tcx);
// NOTE(review): the method's closing brace (line 252) is missing from this extract.
// Per-rvalue type checks. `check_kinds!` fails unless the given type's kind
// matches the given pattern(s).
// NOTE(review): this method is heavily gapped in the extract — most match-arm
// closes, `self.fail(...)` wrappers, and several whole arms are missing. Each
// visible fragment is annotated only with what it itself shows.
254 fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, location: Location) {
255 macro_rules! check_kinds {
256 ($t:expr, $text:literal, $($patterns:tt)*) => {
257 if !matches!(($t).kind(), $($patterns)*) {
258 self.fail(location, format!($text, $t));
// NOTE(review): lines 259-263 missing — macro close and the opening
// `match rvalue {`, presumably.
// Aggregates: arrays are always fine; generator aggregates are disallowed once
// generators are lowered; everything else is disallowed after deaggregation.
264 Rvalue::Aggregate(agg_kind, _) => {
265 let disallowed = match **agg_kind {
266 AggregateKind::Array(..) => false,
267 AggregateKind::Generator(..) => self.mir_phase >= MirPhase::GeneratorsLowered,
268 _ => self.mir_phase >= MirPhase::Deaggregated,
// NOTE(review): lines 269-272 missing — presumably `};` and an
// `if disallowed { self.fail(location,` wrapper for the message below.
273 format!("{:?} have been lowered to field assignments", rvalue),
// (lines 274-276 missing — closing delimiters)
// Shallow borrows must be gone after drop lowering.
277 Rvalue::Ref(_, BorrowKind::Shallow, _) => {
278 if self.mir_phase >= MirPhase::DropsLowered {
// (lines 279-280 missing — `self.fail(location,` wrapper, presumably)
281 "`Assign` statement with a `Shallow` borrow should have been removed after drop lowering phase",
// NOTE(review): lines 282-285 missing — presumably closes plus the start of a
// `Rvalue::Len(p)` arm that binds the `p` used below.
286 let pty = p.ty(&self.body.local_decls, self.tcx).ty;
// (lines 287-288 missing — a `check_kinds!(pty,` opening, presumably)
289 "Cannot compute length of non-array type {:?}",
290 ty::Array(..) | ty::Slice(..)
// (lines 291-292 missing — closes)
293 Rvalue::BinaryOp(op, vals) | Rvalue::CheckedBinaryOp(op, vals) => {
// (line 294 missing — presumably `use BinOp::*;`)
295 let a = vals.0.ty(&self.body.local_decls, self.tcx);
296 let b = vals.1.ty(&self.body.local_decls, self.tcx);
// NOTE(review): lines 297-298 missing — presumably `match op {` and an
// `Offset => {` arm head for the pointer-offset checks below.
299 check_kinds!(a, "Cannot offset non-pointer type {:?}", ty::RawPtr(..));
300 if b != self.tcx.types.isize && b != self.tcx.types.usize {
301 self.fail(location, format!("Cannot offset by non-isize type {:?}", b));
// (lines 302-303 missing — closes)
// Comparisons: operand kinds restricted (pattern dropped from extract), and the
// two operand types must be equal.
304 Eq | Lt | Le | Ne | Ge | Gt => {
// (lines 305-307 missing — `for x in [a, b] { check_kinds!(x,`-style opening, presumably)
308 "Cannot compare type {:?}",
// NOTE(review): lines 309-317 missing — the allowed-kind pattern list for
// comparisons, presumably.
318 // None of the possible types have lifetimes, so we can just compare
// (lines 319-322 missing — the comment tail and an `if a != b { self.fail(` wrapper)
323 format!("Cannot compare unequal types {:?} and {:?}", a, b),
// NOTE(review): lines 324-330 missing — closes plus the head of a shift-ops arm
// (`Shl | Shr`, presumably) whose message follows.
331 "Cannot shift non-integer type {:?}",
332 ty::Uint(..) | ty::Int(..)
// (lines 333-335 missing — closes)
336 BitAnd | BitOr | BitXor => {
// (lines 337-339 missing — a per-operand `check_kinds!` opening, presumably)
340 "Cannot perform bitwise op on type {:?}",
341 ty::Uint(..) | ty::Int(..) | ty::Bool
// (lines 342-347 missing — closes and an `if a != b { self.fail(` wrapper)
348 "Cannot perform bitwise op on unequal types {:?} and {:?}",
// (lines 349-353 missing — closes)
354 Add | Sub | Mul | Div | Rem => {
// (lines 355-357 missing — a per-operand `check_kinds!` opening, presumably)
358 "Cannot perform op on type {:?}",
359 ty::Uint(..) | ty::Int(..) | ty::Float(..)
// (lines 360-364 missing — closes and an `if a != b { self.fail(` wrapper)
365 format!("Cannot perform op on unequal types {:?} and {:?}", a, b),
// (lines 366-370 missing — closes for this arm and the `match op`)
// Unary ops: Neg needs int/float, Not (message below) needs int/uint/bool.
371 Rvalue::UnaryOp(op, operand) => {
372 let a = operand.ty(&self.body.local_decls, self.tcx);
// (lines 373-374 missing — presumably `match op {` and `UnOp::Neg => {`)
375 check_kinds!(a, "Cannot negate type {:?}", ty::Int(..) | ty::Float(..))
// (lines 376-379 missing — presumably the `UnOp::Not` arm head and `check_kinds!` opening)
380 "Cannot binary not type {:?}",
381 ty::Int(..) | ty::Uint(..) | ty::Bool
// (lines 382-385 missing — closes)
// ShallowInitBox's operand must be a raw pointer.
386 Rvalue::ShallowInitBox(operand, _) => {
387 let a = operand.ty(&self.body.local_decls, self.tcx);
388 check_kinds!(a, "Cannot shallow init type {:?}", ty::RawPtr(..));
// NOTE(review): lines 389-391 missing — closes plus, presumably, a catch-all
// `_ => {}` arm and the match close.
392 self.super_rvalue(rvalue, location);
// (lines 393-394 missing — the method's closing brace, presumably)
// Per-statement checks: assignment type compatibility and aliasing, plus
// phase-dependent legality of statement kinds.
// NOTE(review): heavily gapped extract — many `self.fail(...)` wrappers and
// closing braces are missing; annotations describe only the visible fragments.
395 fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
396 match &statement.kind {
397 StatementKind::Assign(box (dest, rvalue)) => {
398 // LHS and RHS of the assignment must have the same type.
399 let left_ty = dest.ty(&self.body.local_decls, self.tcx).ty;
400 let right_ty = rvalue.ty(&self.body.local_decls, self.tcx);
401 if !self.mir_assign_valid_types(right_ty, left_ty) {
// NOTE(review): lines 402-404 missing — a `self.fail(location, format!(` wrapper
// for the template below, presumably.
405 "encountered `{:?}` with incompatible types:\n\
406 left-hand side has type: {}\n\
407 right-hand side has type: {}",
408 statement.kind, left_ty, right_ty,
// (lines 409-411 missing — closes)
412 // FIXME(JakobDegen): Check this for all rvalues, not just this one.
413 if let Rvalue::Use(Operand::Copy(src) | Operand::Move(src)) = rvalue {
414 // The sides of an assignment must not alias. Currently this just checks whether
415 // the places are identical.
// NOTE(review): lines 416-418 missing — presumably `if dest == src {` and a
// `self.fail(location,` wrapper for the message below.
419 "encountered `Assign` statement with overlapping memory",
// (lines 420-423 missing — closes)
// `AscribeUserType` and `FakeRead` only exist pre-drop-lowering.
424 StatementKind::AscribeUserType(..) => {
425 if self.mir_phase >= MirPhase::DropsLowered {
// (lines 426-427 missing — `self.fail(location,` wrapper, presumably)
428 "`AscribeUserType` should have been removed after drop lowering phase",
// (lines 429-431 missing — closes)
432 StatementKind::FakeRead(..) => {
433 if self.mir_phase >= MirPhase::DropsLowered {
// (lines 434-435 missing — `self.fail(location,` wrapper, presumably)
436 "`FakeRead` should have been removed after drop lowering phase",
// (lines 437-439 missing — closes)
// CopyNonOverlapping: src and dst must be pointers to the same pointee type,
// and count must be usize.
440 StatementKind::CopyNonOverlapping(box rustc_middle::mir::CopyNonOverlapping {
// NOTE(review): lines 441-444 missing — presumably the `src`/`dst`/`count`
// bindings and `}) => {`.
445 let src_ty = src.ty(&self.body.local_decls, self.tcx);
446 let op_src_ty = if let Some(src_deref) = src_ty.builtin_deref(true) {
// (lines 447-450 missing — presumably `src_deref.ty` then an `else` arm failing
// with the message below)
451 format!("Expected src to be ptr in copy_nonoverlapping, got: {}", src_ty),
// (lines 452-454 missing — closes)
455 let dst_ty = dst.ty(&self.body.local_decls, self.tcx);
456 let op_dst_ty = if let Some(dst_deref) = dst_ty.builtin_deref(true) {
// (lines 457-460 missing — mirror of the src case, presumably)
461 format!("Expected dst to be ptr in copy_nonoverlapping, got: {}", dst_ty),
// (lines 462-464 missing — closes)
465 // since CopyNonOverlapping is parametrized by 1 type,
466 // we only need to check that they are equal and not keep an extra parameter.
467 if op_src_ty != op_dst_ty {
468 self.fail(location, format!("bad arg ({:?} != {:?})", op_src_ty, op_dst_ty));
// (lines 469-470 missing — closes)
471 let op_cnt_ty = count.ty(&self.body.local_decls, self.tcx);
472 if op_cnt_ty != self.tcx.types.usize {
473 self.fail(location, format!("bad arg ({:?} != usize)", op_cnt_ty))
// (lines 474-475 missing — closes)
// SetDiscriminant/Deinit only exist once deaggregation has happened.
476 StatementKind::SetDiscriminant { place, .. } => {
477 if self.mir_phase < MirPhase::Deaggregated {
// NOTE(review): the message below lacks a space after the backtick-quoted name
// ("`SetDiscriminant`is") — cannot fix in a doc-only edit; flag for follow-up.
478 self.fail(location, "`SetDiscriminant`is not allowed until deaggregation");
// (line 479 missing — close)
480 let pty = place.ty(&self.body.local_decls, self.tcx).ty.kind();
481 if !matches!(pty, ty::Adt(..) | ty::Generator(..) | ty::Opaque(..)) {
// (lines 482-484 missing — `self.fail(location, format!(` wrapper, presumably)
485 "`SetDiscriminant` is only allowed on ADTs and generators, not {:?}",
// (lines 486-490 missing — format args and closes)
491 StatementKind::Deinit(..) => {
492 if self.mir_phase < MirPhase::Deaggregated {
// NOTE(review): same missing-space typo here ("`Deinit`is") — flag for follow-up.
493 self.fail(location, "`Deinit`is not allowed until deaggregation");
// (lines 494-495 missing — closes)
496 StatementKind::Retag(_, _) => {
497 // FIXME(JakobDegen) The validator should check that `self.mir_phase <
498 // DropsLowered`. However, this causes ICEs with generation of drop shims, which
499 // seem to fail to set their `MirPhase` correctly.
// (line 500 missing — close of the empty arm, presumably)
// Always-legal statement kinds.
501 StatementKind::StorageLive(..)
502 | StatementKind::StorageDead(..)
503 | StatementKind::Coverage(_)
504 | StatementKind::Nop => {}
// (lines 505-506 missing — the match close, presumably)
507 self.super_statement(statement, location);
// (lines 508-509 missing — the method's closing brace, presumably)
// Per-terminator checks: edge validity (via `check_edge`), operand typing, and
// phase-dependent legality of terminator kinds.
// NOTE(review): heavily gapped extract — `self.fail(...)` wrappers, closing
// braces, and some loop/conditional heads are missing; annotations describe only
// the visible fragments.
510 fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
511 match &terminator.kind {
512 TerminatorKind::Goto { target } => {
513 self.check_edge(location, *target, EdgeKind::Normal);
// (line 514 missing — arm close)
// SwitchInt: discriminant type must match `switch_ty`, every branch value must
// fit the switch type, all edges must be valid, and values must be distinct.
515 TerminatorKind::SwitchInt { targets, switch_ty, discr } => {
516 let ty = discr.ty(&self.body.local_decls, self.tcx);
517 if ty != *switch_ty {
// (lines 518-520 missing — `self.fail(location, format!(` wrapper, presumably)
521 "encountered `SwitchInt` terminator with type mismatch: {:?} != {:?}",
// (lines 522-526 missing — format args and closes)
527 let target_width = self.tcx.sess.target.pointer_width;
// (line 528 missing — blank line, presumably)
529 let size = Size::from_bits(match switch_ty.kind() {
530 ty::Uint(uint) => uint.normalize(target_width).bit_width().unwrap(),
531 ty::Int(int) => int.normalize(target_width).bit_width().unwrap(),
// NOTE(review): lines 532-533 missing — presumably `ty::Char`/`ty::Bool` arms
// ahead of the catch-all below. TODO confirm.
534 other => bug!("unhandled type: {:?}", other),
// (lines 535-536 missing — match/call close)
537 for (value, target) in targets.iter() {
538 if Scalar::<()>::try_from_uint(value, size).is_none() {
// (lines 539-540 missing — `self.fail(location,` wrapper, presumably)
541 format!("the value {:#x} is not a proper {:?}", value, switch_ty),
// (lines 542-544 missing — closes)
545 self.check_edge(location, target, EdgeKind::Normal);
// (line 546 missing — loop close)
547 self.check_edge(location, targets.otherwise(), EdgeKind::Normal);
// (line 548 missing — blank line, presumably)
// Duplicate-value detection via the reusable `value_cache` scratch buffer:
// sort + dedup, then compare lengths.
549 self.value_cache.clear();
550 self.value_cache.extend(targets.iter().map(|(value, _)| value));
551 let all_len = self.value_cache.len();
552 self.value_cache.sort_unstable();
553 self.value_cache.dedup();
554 let has_duplicates = all_len != self.value_cache.len();
// (lines 555-558 missing — `if has_duplicates { self.fail(location, format!(`
// wrapper, presumably)
559 "duplicated values in `SwitchInt` terminator: {:?}",
// (lines 560-564 missing — format args and closes)
565 TerminatorKind::Drop { target, unwind, .. } => {
566 self.check_edge(location, *target, EdgeKind::Normal);
567 if let Some(unwind) = unwind {
568 self.check_edge(location, *unwind, EdgeKind::Unwind);
// (lines 569-570 missing — closes)
// DropAndReplace is desugared away by drop elaboration.
571 TerminatorKind::DropAndReplace { target, unwind, .. } => {
572 if self.mir_phase >= MirPhase::DropsLowered {
// (lines 573-574 missing — `self.fail(location,` wrapper, presumably)
575 "`DropAndReplace` should have been removed during drop elaboration",
// (lines 576-577 missing — closes)
578 self.check_edge(location, *target, EdgeKind::Normal);
579 if let Some(unwind) = unwind {
580 self.check_edge(location, *unwind, EdgeKind::Unwind);
// (lines 581-582 missing — closes)
// Call: callee must be a fn pointer or fn item; success and cleanup edges are
// checked; destination and moved args must not overlap.
583 TerminatorKind::Call { func, args, destination, cleanup, .. } => {
584 let func_ty = func.ty(&self.body.local_decls, self.tcx);
585 match func_ty.kind() {
586 ty::FnPtr(..) | ty::FnDef(..) => {}
// (lines 587-588 missing — a catch-all arm with `self.fail(location,`, presumably)
589 format!("encountered non-callable type {} in `Call` terminator", func_ty),
// (lines 590-591 missing — closes)
592 if let Some((_, target)) = destination {
593 self.check_edge(location, *target, EdgeKind::Normal);
// (line 594 missing — close)
595 if let Some(cleanup) = cleanup {
596 self.check_edge(location, *cleanup, EdgeKind::Unwind);
// (lines 597-598 missing — close and blank line, presumably)
599 // The call destination place and Operand::Move place used as an argument might be
600 // passed by a reference to the callee. Consequently they must be non-overlapping.
601 // Currently this simply checks for duplicate places.
602 self.place_cache.clear();
603 if let Some((destination, _)) = destination {
604 self.place_cache.push(destination.as_ref());
// NOTE(review): lines 605-606 missing — the close plus a `for arg in args {`
// head binding the `arg` used below, presumably.
607 if let Operand::Move(place) = arg {
608 self.place_cache.push(place.as_ref());
// (lines 609-610 missing — closes)
611 let all_len = self.place_cache.len();
612 self.place_cache.sort_unstable();
613 self.place_cache.dedup();
614 let has_duplicates = all_len != self.place_cache.len();
// (lines 615-618 missing — `if has_duplicates { self.fail(location, format!(`
// wrapper, presumably)
619 "encountered overlapping memory in `Call` terminator: {:?}",
// (lines 620-624 missing — format args and closes)
// Assert: condition must be `bool`; success and cleanup edges are checked.
625 TerminatorKind::Assert { cond, target, cleanup, .. } => {
626 let cond_ty = cond.ty(&self.body.local_decls, self.tcx);
627 if cond_ty != self.tcx.types.bool {
// (lines 628-630 missing — `self.fail(location, format!(` wrapper, presumably)
631 "encountered non-boolean condition of type {} in `Assert` terminator",
// (lines 632-635 missing — format args and closes)
636 self.check_edge(location, *target, EdgeKind::Normal);
637 if let Some(cleanup) = cleanup {
638 self.check_edge(location, *cleanup, EdgeKind::Unwind);
// (lines 639-640 missing — closes)
// Yield: only inside generator bodies, and gone after generator lowering.
641 TerminatorKind::Yield { resume, drop, .. } => {
642 if self.body.generator.is_none() {
643 self.fail(location, "`Yield` cannot appear outside generator bodies");
// (line 644 missing — close)
645 if self.mir_phase >= MirPhase::GeneratorsLowered {
646 self.fail(location, "`Yield` should have been replaced by generator lowering");
// (line 647 missing — close)
648 self.check_edge(location, *resume, EdgeKind::Normal);
649 if let Some(drop) = drop {
650 self.check_edge(location, *drop, EdgeKind::Normal);
// (lines 651-652 missing — closes)
// FalseEdge/FalseUnwind are borrowck-only artifacts, removed with drop elaboration.
653 TerminatorKind::FalseEdge { real_target, imaginary_target } => {
654 if self.mir_phase >= MirPhase::DropsLowered {
// (lines 655-656 missing — `self.fail(location,` wrapper, presumably)
657 "`FalseEdge` should have been removed after drop elaboration",
// (lines 658-659 missing — closes)
660 self.check_edge(location, *real_target, EdgeKind::Normal);
661 self.check_edge(location, *imaginary_target, EdgeKind::Normal);
// (line 662 missing — arm close)
663 TerminatorKind::FalseUnwind { real_target, unwind } => {
664 if self.mir_phase >= MirPhase::DropsLowered {
// (lines 665-666 missing — `self.fail(location,` wrapper, presumably)
667 "`FalseUnwind` should have been removed after drop elaboration",
// (lines 668-669 missing — closes)
670 self.check_edge(location, *real_target, EdgeKind::Normal);
671 if let Some(unwind) = unwind {
672 self.check_edge(location, *unwind, EdgeKind::Unwind);
// (lines 673-674 missing — closes)
675 TerminatorKind::InlineAsm { destination, cleanup, .. } => {
676 if let Some(destination) = destination {
677 self.check_edge(location, *destination, EdgeKind::Normal);
// (line 678 missing — close)
679 if let Some(cleanup) = cleanup {
680 self.check_edge(location, *cleanup, EdgeKind::Unwind);
// (lines 681-682 missing — closes)
// GeneratorDrop: only inside generator bodies, and gone after generator lowering.
683 TerminatorKind::GeneratorDrop => {
684 if self.body.generator.is_none() {
685 self.fail(location, "`GeneratorDrop` cannot appear outside generator bodies");
// (line 686 missing — close)
687 if self.mir_phase >= MirPhase::GeneratorsLowered {
// (lines 688-689 missing — `self.fail(location,` wrapper, presumably)
690 "`GeneratorDrop` should have been replaced by generator lowering",
// (lines 691-693 missing — closes)
// Resume/Abort only from cleanup blocks; Return never from cleanup blocks.
694 TerminatorKind::Resume | TerminatorKind::Abort => {
695 let bb = location.block;
696 if !self.body.basic_blocks()[bb].is_cleanup {
697 self.fail(location, "Cannot `Resume` or `Abort` from non-cleanup basic block")
// (lines 698-699 missing — closes)
700 TerminatorKind::Return => {
701 let bb = location.block;
702 if self.body.basic_blocks()[bb].is_cleanup {
703 self.fail(location, "Cannot `Return` from cleanup basic block")
// (lines 704-705 missing — closes)
706 TerminatorKind::Unreachable => {}
// (lines 707-708 missing — the match close, presumably)
709 self.super_terminator(terminator, location);
// (lines 710-711 missing — the method's closing brace, presumably)
712 fn visit_source_scope(&mut self, scope: &SourceScope) {
713 if self.body.source_scopes.get(*scope).is_none() {
714 self.tcx.sess.diagnostic().delay_span_bug(
717 "broken MIR in {:?} ({}):\ninvalid source scope {:?}",
718 self.body.source.instance, self.when, scope,