1 //! Validates the MIR to ensure that invariants are upheld.
3 use rustc_data_structures::fx::FxHashSet;
4 use rustc_index::bit_set::BitSet;
5 use rustc_infer::infer::{DefiningAnchor, TyCtxtInferExt};
6 use rustc_infer::traits::ObligationCause;
7 use rustc_middle::mir::interpret::Scalar;
8 use rustc_middle::mir::visit::NonUseContext::VarDebugInfo;
9 use rustc_middle::mir::visit::{PlaceContext, Visitor};
10 use rustc_middle::mir::{
11 traversal, AggregateKind, BasicBlock, BinOp, Body, BorrowKind, CastKind, CopyNonOverlapping,
12 Local, Location, MirPass, MirPhase, NonDivergingIntrinsic, Operand, Place, PlaceElem, PlaceRef,
13 ProjectionElem, RuntimePhase, Rvalue, SourceScope, Statement, StatementKind, Terminator,
14 TerminatorKind, UnOp, START_BLOCK,
16 use rustc_middle::ty::{self, InstanceDef, ParamEnv, Ty, TyCtxt, TypeVisitable};
17 use rustc_mir_dataflow::impls::MaybeStorageLive;
18 use rustc_mir_dataflow::storage::always_storage_live_locals;
19 use rustc_mir_dataflow::{Analysis, ResultsCursor};
20 use rustc_target::abi::{Size, VariantIdx};
21 use rustc_trait_selection::traits::ObligationCtxt;
// NOTE(review): this excerpt's embedded line numbers jump (30 -> 32, 35 -> 37),
// so at least one struct field is missing from view — likely the `when: String`
// field that `self.when` references later in this file. Confirm against the
// full source before editing.
23 #[derive(Copy, Clone, Debug)]
29 pub struct Validator {
30 /// Describes at which point in the pipeline this validation is happening.
32 /// The phase for which we are upholding the dialect. If the given phase forbids a specific
33 /// element, this validator will now emit errors if that specific element is encountered.
34 /// Note that phases that change the dialect cause all *following* phases to check the
35 /// invariants of the new dialect. A phase that changes dialects never checks the new invariants
37 pub mir_phase: MirPhase,
// Entry point of the pass: sets up dataflow state and (in lines missing from
// this excerpt, orig. 58-64) constructs the `TypeChecker` visitor that walks
// the body. NOTE(review): the early-`return` for intrinsic/virtual shims
// (after orig. line 46) and the visitor construction are not visible here.
40 impl<'tcx> MirPass<'tcx> for Validator {
41 fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
42 // FIXME(JakobDegen): These bodies never instantiated in codegend anyway, so it's not
43 // terribly important that they pass the validator. However, I think other passes might
44 // still see them, in which case they might be surprised. It would probably be better if we
45 // didn't put this through the MIR pipeline at all.
46 if matches!(body.source.instance, InstanceDef::Intrinsic(..) | InstanceDef::Virtual(..)) {
49 let def_id = body.source.def_id();
50 let param_env = tcx.param_env(def_id);
51 let mir_phase = self.mir_phase;
// Compute, once per body, which locals have storage at each program point;
// `visit_local` later seeks this cursor to flag uses of dead storage.
53 let always_live_locals = always_storage_live_locals(body);
54 let storage_liveness = MaybeStorageLive::new(always_live_locals)
55 .into_engine(tcx, body)
56 .iterate_to_fixpoint()
57 .into_results_cursor(body);
// These lines are the tail of the `TypeChecker { .. }` literal whose head
// (orig. 58-64) is missing from this excerpt.
65 reachable_blocks: traversal::reachable_as_bitset(body),
67 place_cache: Vec::new(),
68 value_cache: Vec::new(),
// NOTE(review): the parameter lines for `tcx`, `src`, and `dest` (orig. 79,
// 81-88) are missing from this excerpt; only `param_env` and the final
// expression are visible.
74 /// Returns whether the two types are equal up to subtyping.
76 /// This is used in case we don't know the expected subtyping direction
77 /// and still want to check whether anything is broken.
78 pub fn is_equal_up_to_subtyping<'tcx>(
80 param_env: ParamEnv<'tcx>,
89 // Check for subtyping in either direction.
90 is_subtype(tcx, param_env, src, dest) || is_subtype(tcx, param_env, dest, src)
// Checks `src <: dest` using a fresh inference context that ignores regions
// and bubbles opaque-type definitions. NOTE(review): the signature tail, the
// `let builder =` line (orig. ~103), the `else` body of the `let Ok(..)`
// (orig. 111-112), and the final `errors.is_empty()`-style return (after
// orig. 120) are missing from this excerpt.
93 pub fn is_subtype<'tcx>(
95 param_env: ParamEnv<'tcx>,
104 tcx.infer_ctxt().ignoring_regions().with_opaque_type_inference(DefiningAnchor::Bubble);
105 let infcx = builder.build();
106 let ocx = ObligationCtxt::new(&infcx);
107 let cause = ObligationCause::dummy();
// Normalize both sides first so associated types don't cause spurious failures.
108 let src = ocx.normalize(cause.clone(), param_env, src);
109 let dest = ocx.normalize(cause.clone(), param_env, dest);
110 let Ok(infer_ok) = infcx.at(&cause, param_env).sub(src, dest) else {
113 let () = ocx.register_infer_ok_obligations(infer_ok);
114 let errors = ocx.select_all_or_error();
115 // With `Reveal::All`, opaque types get normalized away, with `Reveal::UserFacing`
116 // we would get unification errors because we're unable to look into opaque types,
117 // even if they're constrained in our current function.
119 // It seems very unlikely that this hides any bugs.
// Drain the opaque-type storage so the inference context can be dropped
// without tripping its "undropped opaque types" debug assertion.
120 let _ = infcx.inner.borrow_mut().opaque_type_storage.take_opaque_types();
// Visitor state for one body. NOTE(review): fields at orig. lines 124, 126,
// and 128 are missing from this excerpt — later code references `self.tcx`,
// `self.when`, and `self.mir_phase`, so those are the likely missing fields;
// confirm against the full source.
123 struct TypeChecker<'a, 'tcx> {
125 body: &'a Body<'tcx>,
127 param_env: ParamEnv<'tcx>,
// Blocks reachable from START_BLOCK; liveness checks only apply to these.
129 reachable_blocks: BitSet<BasicBlock>,
130 storage_liveness: ResultsCursor<'a, 'tcx, MaybeStorageLive>,
// Scratch buffers reused across terminators to avoid reallocating.
131 place_cache: Vec<PlaceRef<'tcx>>,
132 value_cache: Vec<u128>,
135 impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
// Report a validation failure at `location`. Uses a delayed bug so that
// broken MIR caused by *already-reported* errors doesn't double-panic.
136 fn fail(&self, location: Location, msg: impl AsRef<str>) {
137 let span = self.body.source_info(location).span;
138 // We use `delay_span_bug` as we might see broken MIR when other errors have already
140 self.tcx.sess.diagnostic().delay_span_bug(
143 "broken MIR in {:?} ({}) at {:?}:\n{}",
144 self.body.source.instance,
// NOTE(review): the remaining format arguments and closing parens (orig.
// 145-151) are missing from this excerpt.
// Validate a CFG edge from `location.block` to `bb`: the start block has no
// predecessors, targets must exist, and cleanup/unwind invariants must hold.
152 fn check_edge(&self, location: Location, bb: BasicBlock, edge_kind: EdgeKind) {
153 if bb == START_BLOCK {
154 self.fail(location, "start block must not have predecessors")
156 if let Some(bb) = self.body.basic_blocks.get(bb) {
157 let src = self.body.basic_blocks.get(location.block).unwrap();
158 match (src.is_cleanup, bb.is_cleanup, edge_kind) {
159 // Non-cleanup blocks can jump to non-cleanup blocks along non-unwind edges
160 (false, false, EdgeKind::Normal)
161 // Non-cleanup blocks can jump to cleanup blocks along unwind edges
162 | (false, true, EdgeKind::Unwind)
163 // Cleanup blocks can jump to cleanup blocks along non-unwind edges
164 | (true, true, EdgeKind::Normal) => {}
165 // All other jumps are invalid
170 "{:?} edge to {:?} violates unwind invariants (cleanup {:?} -> {:?})",
// NOTE(review): the `else` branch reporting a jump to a nonexistent block
// loses its surrounding lines (orig. 171-179) in this excerpt.
180 self.fail(location, format!("encountered jump to invalid basic block {:?}", bb))
184 /// Check if src can be assigned into dest.
185 /// This is not precise, it will accept some incorrect assignments.
186 fn mir_assign_valid_types(&self, src: Ty<'tcx>, dest: Ty<'tcx>) -> bool {
187 // Fast path before we normalize.
189 // Equal types, all is good.
192 // Normalization reveals opaque types, but we may be validating MIR while computing
193 // said opaque types, causing cycles.
// Opaque types are accepted unchecked here to avoid query cycles.
194 if (src, dest).has_opaque_types() {
198 is_subtype(self.tcx, self.param_env, src, dest)
202 impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
// Two checks per local use: (1) the local must be declared; (2) in reachable
// code, any *use* must occur while the local's storage is live.
203 fn visit_local(&mut self, local: Local, context: PlaceContext, location: Location) {
204 if self.body.local_decls.get(local).is_none() {
207 format!("local {:?} has no corresponding declaration in `body.local_decls`", local),
211 if self.reachable_blocks.contains(location.block) && context.is_use() {
212 // We check that the local is live whenever it is used. Technically, violating this
213 // restriction is only UB and not actually indicative of not well-formed MIR. This means
214 // that an optimization which turns MIR that already has UB into MIR that fails this
215 // check is not necessarily wrong. However, we have no such optimizations at the moment,
216 // and so we include this check anyway to help us catch bugs. If you happen to write an
217 // optimization that might cause this to incorrectly fire, feel free to remove this
219 self.storage_liveness.seek_after_primary_effect(location);
220 let locals_with_storage = self.storage_liveness.get();
221 if !locals_with_storage.contains(local) {
222 self.fail(location, format!("use of local {:?}, which has no storage here", local));
// Opt-in (-Zvalidate-mir) check: `Operand::Copy` may only copy `Copy` types.
// Skipped in runtime MIR, where the distinction no longer matters.
227 fn visit_operand(&mut self, operand: &Operand<'tcx>, location: Location) {
228 // This check is somewhat expensive, so only run it when -Zvalidate-mir is passed.
229 if self.tcx.sess.opts.unstable_opts.validate_mir
230 && self.mir_phase < MirPhase::Runtime(RuntimePhase::Initial)
232 // `Operand::Copy` is only supposed to be used with `Copy` types.
233 if let Operand::Copy(place) = operand {
234 let ty = place.ty(&self.body.local_decls, self.tcx).ty;
236 if !ty.is_copy_modulo_regions(self.tcx, self.param_env) {
237 self.fail(location, format!("`Operand::Copy` with non-`Copy` type {}", ty));
242 self.super_operand(operand, location);
// Validates a single projection element against the type of its base place:
// indices must be `usize`, box derefs must be gone after ElaborateBoxDerefs,
// and `Field` projections must agree with the actual field type of the
// parent ADT/tuple/closure/generator. NOTE(review): several lines are
// missing from this excerpt (the `&self`/`local`/`location` parameters, the
// `match elem` head, and various closing braces) — gaps in the embedded
// numbering mark them.
245 fn visit_projection_elem(
248 proj_base: &[PlaceElem<'tcx>],
249 elem: PlaceElem<'tcx>,
250 context: PlaceContext,
254 ProjectionElem::Index(index) => {
255 let index_ty = self.body.local_decls[index].ty;
256 if index_ty != self.tcx.types.usize {
257 self.fail(location, format!("bad index ({:?} != usize)", index_ty))
// After box-deref elaboration, `Deref` of a `Box` must no longer appear.
260 ProjectionElem::Deref
261 if self.mir_phase >= MirPhase::Runtime(RuntimePhase::PostCleanup) =>
263 let base_ty = Place::ty_from(local, proj_base, &self.body.local_decls, self.tcx).ty;
265 if base_ty.is_box() {
268 format!("{:?} dereferenced after ElaborateBoxDerefs", base_ty),
272 ProjectionElem::Field(f, ty) => {
273 let parent = Place { local, projection: self.tcx.intern_place_elems(proj_base) };
274 let parent_ty = parent.ty(&self.body.local_decls, self.tcx);
275 let fail_out_of_bounds = |this: &Self, location| {
276 this.fail(location, format!("Out of bounds field {:?} for {:?}", f, parent_ty));
// The projection's recorded type must be assignment-compatible with the
// field's real type (up to subtyping, per mir_assign_valid_types).
278 let check_equal = |this: &Self, location, f_ty| {
279 if !this.mir_assign_valid_types(ty, f_ty) {
283 "Field projection `{:?}.{:?}` specified type `{:?}`, but actual type is `{:?}`",
// Peel off an opaque type to its underlying type before matching on kind.
290 let kind = match parent_ty.ty.kind() {
291 &ty::Opaque(def_id, substs) => {
292 self.tcx.bound_type_of(def_id).subst(self.tcx, substs).kind()
298 ty::Tuple(fields) => {
299 let Some(f_ty) = fields.get(f.as_usize()) else {
300 fail_out_of_bounds(self, location);
303 check_equal(self, location, *f_ty);
305 ty::Adt(adt_def, substs) => {
// Non-enum ADTs have a single variant; default to variant 0.
306 let var = parent_ty.variant_index.unwrap_or(VariantIdx::from_u32(0));
307 let Some(field) = adt_def.variant(var).fields.get(f.as_usize()) else {
308 fail_out_of_bounds(self, location);
311 check_equal(self, location, field.ty(self.tcx, substs));
313 ty::Closure(_, substs) => {
314 let substs = substs.as_closure();
315 let Some(f_ty) = substs.upvar_tys().nth(f.as_usize()) else {
316 fail_out_of_bounds(self, location);
319 check_equal(self, location, f_ty);
321 &ty::Generator(def_id, substs, _) => {
// With a variant index, look the field up in the generator layout —
// using this body's layout if we're validating the generator itself,
// otherwise the optimized MIR of the generator's def.
322 let f_ty = if let Some(var) = parent_ty.variant_index {
323 let gen_body = if def_id == self.body.source.def_id() {
326 self.tcx.optimized_mir(def_id)
329 let Some(layout) = gen_body.generator_layout() else {
330 self.fail(location, format!("No generator layout for {:?}", parent_ty));
334 let Some(&local) = layout.variant_fields[var].get(f) else {
335 fail_out_of_bounds(self, location);
339 let Some(&f_ty) = layout.field_tys.get(local) else {
340 self.fail(location, format!("Out of bounds local {:?} for {:?}", local, parent_ty));
// Without a variant index, the field is one of the generator's prefix
// (upvar/witness) types.
346 let Some(f_ty) = substs.as_generator().prefix_tys().nth(f.index()) else {
347 fail_out_of_bounds(self, location);
354 check_equal(self, location, f_ty);
357 self.fail(location, format!("{:?} does not have fields", parent_ty.ty));
363 self.super_projection_elem(local, proj_base, elem, context, location);
// In runtime MIR (outside debug-info), `Deref` may only appear as the first
// projection element — later derefs should have been flattened by then.
366 fn visit_place(&mut self, place: &Place<'tcx>, cntxt: PlaceContext, location: Location) {
367 // Set off any `bug!`s in the type computation code
368 let _ = place.ty(&self.body.local_decls, self.tcx);
370 if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial)
371 && place.projection.len() > 1
372 && cntxt != PlaceContext::NonUse(VarDebugInfo)
373 && place.projection[1..].contains(&ProjectionElem::Deref)
375 self.fail(location, format!("{:?}, has deref at the wrong place", place));
378 self.super_place(place, cntxt, location);
// Per-rvalue type checks: operand types of binary/unary ops, casts, borrows,
// and aggregates must match what the rvalue kind permits at the current MIR
// phase. NOTE(review): the embedded numbering shows many gaps (match heads,
// closing braces, some `use BinOp::*;`-style lines) missing from this
// excerpt.
381 fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, location: Location) {
// Local helper: fail unless `$t`'s kind matches one of `$patterns`.
382 macro_rules! check_kinds {
383 ($t:expr, $text:literal, $($patterns:tt)*) => {
384 if !matches!(($t).kind(), $($patterns)*) {
385 self.fail(location, format!($text, $t));
390 Rvalue::Use(_) | Rvalue::CopyForDeref(_) => {}
// Non-array aggregates must be deaggregated before post-cleanup runtime MIR.
391 Rvalue::Aggregate(agg_kind, _) => {
392 let disallowed = match **agg_kind {
393 AggregateKind::Array(..) => false,
394 _ => self.mir_phase >= MirPhase::Runtime(RuntimePhase::PostCleanup),
399 format!("{:?} have been lowered to field assignments", rvalue),
403 Rvalue::Ref(_, BorrowKind::Shallow, _) => {
404 if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
407 "`Assign` statement with a `Shallow` borrow should have been removed in runtime MIR",
411 Rvalue::Ref(..) => {}
// (This arm is Rvalue::Len; its head — orig. line 412 — is missing here.)
413 let pty = p.ty(&self.body.local_decls, self.tcx).ty;
416 "Cannot compute length of non-array type {:?}",
417 ty::Array(..) | ty::Slice(..)
420 Rvalue::BinaryOp(op, vals) => {
422 let a = vals.0.ty(&self.body.local_decls, self.tcx);
423 let b = vals.1.ty(&self.body.local_decls, self.tcx);
// `Offset` requires a raw-pointer LHS and a usize/isize RHS.
426 check_kinds!(a, "Cannot offset non-pointer type {:?}", ty::RawPtr(..));
427 if b != self.tcx.types.isize && b != self.tcx.types.usize {
428 self.fail(location, format!("Cannot offset by non-isize type {:?}", b));
431 Eq | Lt | Le | Ne | Ge | Gt => {
435 "Cannot compare type {:?}",
445 // The function pointer types can have lifetimes
446 if !self.mir_assign_valid_types(a, b) {
449 format!("Cannot compare unequal types {:?} and {:?}", a, b),
457 "Cannot shift non-integer type {:?}",
458 ty::Uint(..) | ty::Int(..)
462 BitAnd | BitOr | BitXor => {
466 "Cannot perform bitwise op on type {:?}",
467 ty::Uint(..) | ty::Int(..) | ty::Bool
474 "Cannot perform bitwise op on unequal types {:?} and {:?}",
480 Add | Sub | Mul | Div | Rem => {
484 "Cannot perform arithmetic on type {:?}",
485 ty::Uint(..) | ty::Int(..) | ty::Float(..)
492 "Cannot perform arithmetic on unequal types {:?} and {:?}",
// Checked ops only exist for integer arithmetic and shifts.
500 Rvalue::CheckedBinaryOp(op, vals) => {
502 let a = vals.0.ty(&self.body.local_decls, self.tcx);
503 let b = vals.1.ty(&self.body.local_decls, self.tcx);
509 "Cannot perform checked arithmetic on type {:?}",
510 ty::Uint(..) | ty::Int(..)
517 "Cannot perform checked arithmetic on unequal types {:?} and {:?}",
527 "Cannot perform checked shift on non-integer type {:?}",
528 ty::Uint(..) | ty::Int(..)
532 _ => self.fail(location, format!("There is no checked version of {:?}", op)),
535 Rvalue::UnaryOp(op, operand) => {
536 let a = operand.ty(&self.body.local_decls, self.tcx);
539 check_kinds!(a, "Cannot negate type {:?}", ty::Int(..) | ty::Float(..))
544 "Cannot binary not type {:?}",
545 ty::Int(..) | ty::Uint(..) | ty::Bool
550 Rvalue::ShallowInitBox(operand, _) => {
551 let a = operand.ty(&self.body.local_decls, self.tcx);
552 check_kinds!(a, "Cannot shallow init type {:?}", ty::RawPtr(..));
554 Rvalue::Cast(kind, operand, target_type) => {
555 let op_ty = operand.ty(self.body, self.tcx);
557 CastKind::DynStar => {
558 // FIXME(dyn-star): make sure nothing needs to be done here.
560 // FIXME: Add Checks for these
561 CastKind::PointerFromExposedAddress
562 | CastKind::PointerExposeAddress
563 | CastKind::Pointer(_) => {}
564 CastKind::IntToInt | CastKind::IntToFloat => {
565 let input_valid = op_ty.is_integral() || op_ty.is_char() || op_ty.is_bool();
566 let target_valid = target_type.is_numeric() || target_type.is_char();
567 if !input_valid || !target_valid {
570 format!("Wrong cast kind {kind:?} for the type {op_ty}",),
574 CastKind::FnPtrToPtr | CastKind::PtrToPtr => {
575 if !(op_ty.is_any_ptr() && target_type.is_unsafe_ptr()) {
576 self.fail(location, "Can't cast {op_ty} into 'Ptr'");
579 CastKind::FloatToFloat | CastKind::FloatToInt => {
580 if !op_ty.is_floating_point() || !target_type.is_numeric() {
584 "Trying to cast non 'Float' as {kind:?} into {target_type:?}"
592 | Rvalue::ThreadLocalRef(_)
593 | Rvalue::AddressOf(_, _)
594 | Rvalue::NullaryOp(_, _)
595 | Rvalue::Discriminant(_) => {}
597 self.super_rvalue(rvalue, location);
// Per-statement checks: assignment type compatibility and non-aliasing,
// phase-gated statement kinds (AscribeUserType/FakeRead must be gone in
// runtime MIR; SetDiscriminant/Deinit may only appear in runtime MIR), and
// intrinsic argument types. NOTE(review): gaps in the embedded numbering
// mark missing closing braces and a few condition lines in this excerpt.
600 fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
601 match &statement.kind {
602 StatementKind::Assign(box (dest, rvalue)) => {
603 // LHS and RHS of the assignment must have the same type.
604 let left_ty = dest.ty(&self.body.local_decls, self.tcx).ty;
605 let right_ty = rvalue.ty(&self.body.local_decls, self.tcx);
606 if !self.mir_assign_valid_types(right_ty, left_ty) {
610 "encountered `{:?}` with incompatible types:\n\
611 left-hand side has type: {}\n\
612 right-hand side has type: {}",
613 statement.kind, left_ty, right_ty,
617 if let Rvalue::CopyForDeref(place) = rvalue {
618 if !place.ty(&self.body.local_decls, self.tcx).ty.builtin_deref(true).is_some()
622 "`CopyForDeref` should only be used for dereferenceable types",
626 // FIXME(JakobDegen): Check this for all rvalues, not just this one.
627 if let Rvalue::Use(Operand::Copy(src) | Operand::Move(src)) = rvalue {
628 // The sides of an assignment must not alias. Currently this just checks whether
629 // the places are identical.
633 "encountered `Assign` statement with overlapping memory",
638 StatementKind::AscribeUserType(..) => {
639 if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
642 "`AscribeUserType` should have been removed after drop lowering phase",
646 StatementKind::FakeRead(..) => {
647 if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
650 "`FakeRead` should have been removed after drop lowering phase",
654 StatementKind::Intrinsic(box NonDivergingIntrinsic::Assume(op)) => {
655 let ty = op.ty(&self.body.local_decls, self.tcx);
659 format!("`assume` argument must be `bool`, but got: `{}`", ty),
663 StatementKind::Intrinsic(box NonDivergingIntrinsic::CopyNonOverlapping(
664 CopyNonOverlapping { src, dst, count },
// Both src and dst must be pointers, their pointees must be assignment-
// compatible, and count must be usize.
666 let src_ty = src.ty(&self.body.local_decls, self.tcx);
667 let op_src_ty = if let Some(src_deref) = src_ty.builtin_deref(true) {
672 format!("Expected src to be ptr in copy_nonoverlapping, got: {}", src_ty),
676 let dst_ty = dst.ty(&self.body.local_decls, self.tcx);
677 let op_dst_ty = if let Some(dst_deref) = dst_ty.builtin_deref(true) {
682 format!("Expected dst to be ptr in copy_nonoverlapping, got: {}", dst_ty),
686 // since CopyNonOverlapping is parametrized by 1 type,
687 // we only need to check that they are equal and not keep an extra parameter.
688 if !self.mir_assign_valid_types(op_src_ty, op_dst_ty) {
689 self.fail(location, format!("bad arg ({:?} != {:?})", op_src_ty, op_dst_ty));
692 let op_cnt_ty = count.ty(&self.body.local_decls, self.tcx);
693 if op_cnt_ty != self.tcx.types.usize {
694 self.fail(location, format!("bad arg ({:?} != usize)", op_cnt_ty))
697 StatementKind::SetDiscriminant { place, .. } => {
698 if self.mir_phase < MirPhase::Runtime(RuntimePhase::Initial) {
699 self.fail(location, "`SetDiscriminant`is not allowed until deaggregation");
701 let pty = place.ty(&self.body.local_decls, self.tcx).ty.kind();
702 if !matches!(pty, ty::Adt(..) | ty::Generator(..) | ty::Opaque(..)) {
706 "`SetDiscriminant` is only allowed on ADTs and generators, not {:?}",
712 StatementKind::Deinit(..) => {
713 if self.mir_phase < MirPhase::Runtime(RuntimePhase::Initial) {
714 self.fail(location, "`Deinit`is not allowed until deaggregation");
717 StatementKind::Retag(_, _) => {
718 // FIXME(JakobDegen) The validator should check that `self.mir_phase <
719 // DropsLowered`. However, this causes ICEs with generation of drop shims, which
720 // seem to fail to set their `MirPhase` correctly.
722 StatementKind::StorageLive(..)
723 | StatementKind::StorageDead(..)
724 | StatementKind::Coverage(_)
725 | StatementKind::Nop => {}
728 self.super_statement(statement, location);
// Per-terminator checks: every outgoing edge is validated with `check_edge`
// (normal vs. unwind), SwitchInt discriminant types and values are checked,
// drop-elaboration-era terminators are rejected in runtime MIR, and
// Call/SwitchInt operands are checked for duplicates via the reusable
// caches. NOTE(review): gaps in the embedded numbering mark missing closing
// braces and some format-call scaffolding in this excerpt.
731 fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
732 match &terminator.kind {
733 TerminatorKind::Goto { target } => {
734 self.check_edge(location, *target, EdgeKind::Normal);
736 TerminatorKind::SwitchInt { targets, switch_ty, discr } => {
737 let ty = discr.ty(&self.body.local_decls, self.tcx);
738 if ty != *switch_ty {
742 "encountered `SwitchInt` terminator with type mismatch: {:?} != {:?}",
// Each switch value must fit in the discriminant type's bit width.
748 let target_width = self.tcx.sess.target.pointer_width;
750 let size = Size::from_bits(match switch_ty.kind() {
751 ty::Uint(uint) => uint.normalize(target_width).bit_width().unwrap(),
752 ty::Int(int) => int.normalize(target_width).bit_width().unwrap(),
755 other => bug!("unhandled type: {:?}", other),
758 for (value, target) in targets.iter() {
759 if Scalar::<()>::try_from_uint(value, size).is_none() {
762 format!("the value {:#x} is not a proper {:?}", value, switch_ty),
766 self.check_edge(location, target, EdgeKind::Normal);
768 self.check_edge(location, targets.otherwise(), EdgeKind::Normal);
// Detect duplicate switch values via sort + dedup on the reused cache.
770 self.value_cache.clear();
771 self.value_cache.extend(targets.iter().map(|(value, _)| value));
772 let all_len = self.value_cache.len();
773 self.value_cache.sort_unstable();
774 self.value_cache.dedup();
775 let has_duplicates = all_len != self.value_cache.len();
780 "duplicated values in `SwitchInt` terminator: {:?}",
786 TerminatorKind::Drop { target, unwind, .. } => {
787 self.check_edge(location, *target, EdgeKind::Normal);
788 if let Some(unwind) = unwind {
789 self.check_edge(location, *unwind, EdgeKind::Unwind);
792 TerminatorKind::DropAndReplace { target, unwind, .. } => {
793 if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
796 "`DropAndReplace` should have been removed during drop elaboration",
799 self.check_edge(location, *target, EdgeKind::Normal);
800 if let Some(unwind) = unwind {
801 self.check_edge(location, *unwind, EdgeKind::Unwind);
804 TerminatorKind::Call { func, args, destination, target, cleanup, .. } => {
805 let func_ty = func.ty(&self.body.local_decls, self.tcx);
806 match func_ty.kind() {
807 ty::FnPtr(..) | ty::FnDef(..) => {}
810 format!("encountered non-callable type {} in `Call` terminator", func_ty),
813 if let Some(target) = target {
814 self.check_edge(location, *target, EdgeKind::Normal);
816 if let Some(cleanup) = cleanup {
817 self.check_edge(location, *cleanup, EdgeKind::Unwind);
820 // The call destination place and Operand::Move place used as an argument might be
821 // passed by a reference to the callee. Consequently they must be non-overlapping.
822 // Currently this simply checks for duplicate places.
823 self.place_cache.clear();
824 self.place_cache.push(destination.as_ref());
826 if let Operand::Move(place) = arg {
827 self.place_cache.push(place.as_ref());
830 let all_len = self.place_cache.len();
831 let mut dedup = FxHashSet::default();
832 self.place_cache.retain(|p| dedup.insert(*p));
833 let has_duplicates = all_len != self.place_cache.len();
838 "encountered overlapping memory in `Call` terminator: {:?}",
844 TerminatorKind::Assert { cond, target, cleanup, .. } => {
845 let cond_ty = cond.ty(&self.body.local_decls, self.tcx);
846 if cond_ty != self.tcx.types.bool {
850 "encountered non-boolean condition of type {} in `Assert` terminator",
855 self.check_edge(location, *target, EdgeKind::Normal);
856 if let Some(cleanup) = cleanup {
857 self.check_edge(location, *cleanup, EdgeKind::Unwind);
860 TerminatorKind::Yield { resume, drop, .. } => {
861 if self.body.generator.is_none() {
862 self.fail(location, "`Yield` cannot appear outside generator bodies");
864 if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
865 self.fail(location, "`Yield` should have been replaced by generator lowering");
867 self.check_edge(location, *resume, EdgeKind::Normal);
868 if let Some(drop) = drop {
869 self.check_edge(location, *drop, EdgeKind::Normal);
872 TerminatorKind::FalseEdge { real_target, imaginary_target } => {
873 if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
876 "`FalseEdge` should have been removed after drop elaboration",
879 self.check_edge(location, *real_target, EdgeKind::Normal);
880 self.check_edge(location, *imaginary_target, EdgeKind::Normal);
882 TerminatorKind::FalseUnwind { real_target, unwind } => {
883 if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
886 "`FalseUnwind` should have been removed after drop elaboration",
889 self.check_edge(location, *real_target, EdgeKind::Normal);
890 if let Some(unwind) = unwind {
891 self.check_edge(location, *unwind, EdgeKind::Unwind);
894 TerminatorKind::InlineAsm { destination, cleanup, .. } => {
895 if let Some(destination) = destination {
896 self.check_edge(location, *destination, EdgeKind::Normal);
898 if let Some(cleanup) = cleanup {
899 self.check_edge(location, *cleanup, EdgeKind::Unwind);
902 TerminatorKind::GeneratorDrop => {
903 if self.body.generator.is_none() {
904 self.fail(location, "`GeneratorDrop` cannot appear outside generator bodies");
906 if self.mir_phase >= MirPhase::Runtime(RuntimePhase::Initial) {
909 "`GeneratorDrop` should have been replaced by generator lowering",
// Resume/Abort are only valid in cleanup blocks; Return never is.
913 TerminatorKind::Resume | TerminatorKind::Abort => {
914 let bb = location.block;
915 if !self.body.basic_blocks[bb].is_cleanup {
916 self.fail(location, "Cannot `Resume` or `Abort` from non-cleanup basic block")
919 TerminatorKind::Return => {
920 let bb = location.block;
921 if self.body.basic_blocks[bb].is_cleanup {
922 self.fail(location, "Cannot `Return` from cleanup basic block")
925 TerminatorKind::Unreachable => {}
928 self.super_terminator(terminator, location);
931 fn visit_source_scope(&mut self, scope: SourceScope) {
932 if self.body.source_scopes.get(scope).is_none() {
933 self.tcx.sess.diagnostic().delay_span_bug(
936 "broken MIR in {:?} ({}):\ninvalid source scope {:?}",
937 self.body.source.instance, self.when, scope,