]> git.lizzy.rs Git - rust.git/blob - compiler/rustc_const_eval/src/transform/validate.rs
Rollup merge of #95405 - cjgillot:probe, r=petrochenkov
[rust.git] / compiler / rustc_const_eval / src / transform / validate.rs
1 //! Validates the MIR to ensure that invariants are upheld.
2
3 use rustc_index::bit_set::BitSet;
4 use rustc_infer::infer::TyCtxtInferExt;
5 use rustc_middle::mir::interpret::Scalar;
6 use rustc_middle::mir::visit::{PlaceContext, Visitor};
7 use rustc_middle::mir::{
8     traversal, AggregateKind, BasicBlock, BinOp, Body, BorrowKind, Local, Location, MirPass,
9     MirPhase, Operand, Place, PlaceElem, PlaceRef, ProjectionElem, Rvalue, SourceScope, Statement,
10     StatementKind, Terminator, TerminatorKind, UnOp, START_BLOCK,
11 };
12 use rustc_middle::ty::fold::BottomUpFolder;
13 use rustc_middle::ty::{self, InstanceDef, ParamEnv, Ty, TyCtxt, TypeFoldable};
14 use rustc_mir_dataflow::impls::MaybeStorageLive;
15 use rustc_mir_dataflow::storage::AlwaysLiveLocals;
16 use rustc_mir_dataflow::{Analysis, ResultsCursor};
17 use rustc_target::abi::Size;
18
/// Classifies an outgoing control-flow edge for the cleanup-invariant check
/// performed by `TypeChecker::check_edge`.
#[derive(Copy, Clone, Debug)]
enum EdgeKind {
    /// An edge taken only when unwinding (e.g. a `cleanup`/`unwind` target).
    Unwind,
    /// An ordinary, non-unwinding control-flow edge.
    Normal,
}
24
/// A MIR pass that checks a body against the invariants of its current dialect,
/// reporting any violation via `delay_span_bug` (see `TypeChecker::fail`).
pub struct Validator {
    /// Describes at which point in the pipeline this validation is happening.
    pub when: String,
    /// The phase for which we are upholding the dialect. If the given phase forbids a specific
    /// element, this validator will now emit errors if that specific element is encountered.
    /// Note that phases that change the dialect cause all *following* phases to check the
    /// invariants of the new dialect. A phase that changes dialects never checks the new invariants
    /// itself.
    pub mir_phase: MirPhase,
}
35
36 impl<'tcx> MirPass<'tcx> for Validator {
37     fn run_pass(&self, tcx: TyCtxt<'tcx>, body: &mut Body<'tcx>) {
38         // FIXME(JakobDegen): These bodies never instantiated in codegend anyway, so it's not
39         // terribly important that they pass the validator. However, I think other passes might
40         // still see them, in which case they might be surprised. It would probably be better if we
41         // didn't put this through the MIR pipeline at all.
42         if matches!(body.source.instance, InstanceDef::Intrinsic(..) | InstanceDef::Virtual(..)) {
43             return;
44         }
45         let def_id = body.source.def_id();
46         let param_env = tcx.param_env(def_id);
47         let mir_phase = self.mir_phase;
48
49         let always_live_locals = AlwaysLiveLocals::new(body);
50         let storage_liveness = MaybeStorageLive::new(always_live_locals)
51             .into_engine(tcx, body)
52             .iterate_to_fixpoint()
53             .into_results_cursor(body);
54
55         TypeChecker {
56             when: &self.when,
57             body,
58             tcx,
59             param_env,
60             mir_phase,
61             reachable_blocks: traversal::reachable_as_bitset(body),
62             storage_liveness,
63             place_cache: Vec::new(),
64             value_cache: Vec::new(),
65         }
66         .visit_body(body);
67     }
68 }
69
70 /// Returns whether the two types are equal up to lifetimes.
71 /// All lifetimes, including higher-ranked ones, get ignored for this comparison.
72 /// (This is unlike the `erasing_regions` methods, which keep higher-ranked lifetimes for soundness reasons.)
73 ///
74 /// The point of this function is to approximate "equal up to subtyping".  However,
75 /// the approximation is incorrect as variance is ignored.
76 pub fn equal_up_to_regions<'tcx>(
77     tcx: TyCtxt<'tcx>,
78     param_env: ParamEnv<'tcx>,
79     src: Ty<'tcx>,
80     dest: Ty<'tcx>,
81 ) -> bool {
82     // Fast path.
83     if src == dest {
84         return true;
85     }
86
87     // Normalize lifetimes away on both sides, then compare.
88     let normalize = |ty: Ty<'tcx>| {
89         tcx.normalize_erasing_regions(
90             param_env,
91             ty.fold_with(&mut BottomUpFolder {
92                 tcx,
93                 // FIXME: We erase all late-bound lifetimes, but this is not fully correct.
94                 // If you have a type like `<for<'a> fn(&'a u32) as SomeTrait>::Assoc`,
95                 // this is not necessarily equivalent to `<fn(&'static u32) as SomeTrait>::Assoc`,
96                 // since one may have an `impl SomeTrait for fn(&32)` and
97                 // `impl SomeTrait for fn(&'static u32)` at the same time which
98                 // specify distinct values for Assoc. (See also #56105)
99                 lt_op: |_| tcx.lifetimes.re_erased,
100                 // Leave consts and types unchanged.
101                 ct_op: |ct| ct,
102                 ty_op: |ty| ty,
103             }),
104         )
105     };
106     tcx.infer_ctxt().enter(|infcx| infcx.can_eq(param_env, normalize(src), normalize(dest)).is_ok())
107 }
108
/// Shared state for the per-body checks driven by `Validator::run_pass`.
struct TypeChecker<'a, 'tcx> {
    // Pipeline-point description, copied from `Validator::when`; used in messages.
    when: &'a str,
    // The body being validated (read-only).
    body: &'a Body<'tcx>,
    tcx: TyCtxt<'tcx>,
    param_env: ParamEnv<'tcx>,
    // Dialect whose invariants are enforced, copied from `Validator::mir_phase`.
    mir_phase: MirPhase,
    // Blocks reachable from START_BLOCK; storage-liveness checks only apply to these.
    reachable_blocks: BitSet<BasicBlock>,
    // Dataflow cursor used to check that locals are used only while their storage is live.
    storage_liveness: ResultsCursor<'a, 'tcx, MaybeStorageLive>,
    // Scratch buffer reused across `Call` terminators to detect overlapping places.
    place_cache: Vec<PlaceRef<'tcx>>,
    // Scratch buffer reused across `SwitchInt` terminators to detect duplicate values.
    value_cache: Vec<u128>,
}
120
impl<'a, 'tcx> TypeChecker<'a, 'tcx> {
    /// Reports a validation failure at `location` with the given message.
    fn fail(&self, location: Location, msg: impl AsRef<str>) {
        let span = self.body.source_info(location).span;
        // We use `delay_span_bug` as we might see broken MIR when other errors have already
        // occurred.
        self.tcx.sess.diagnostic().delay_span_bug(
            span,
            &format!(
                "broken MIR in {:?} ({}) at {:?}:\n{}",
                self.body.source.instance,
                self.when,
                location,
                msg.as_ref()
            ),
        );
    }

    /// Checks that the edge from `location`'s block to `bb` is valid: `bb` must
    /// exist, must not be the start block (which has no predecessors), and the
    /// jump must respect the cleanup invariants enumerated in the match below.
    fn check_edge(&self, location: Location, bb: BasicBlock, edge_kind: EdgeKind) {
        if bb == START_BLOCK {
            self.fail(location, "start block must not have predecessors")
        }
        if let Some(bb) = self.body.basic_blocks().get(bb) {
            let src = self.body.basic_blocks().get(location.block).unwrap();
            match (src.is_cleanup, bb.is_cleanup, edge_kind) {
                // Non-cleanup blocks can jump to non-cleanup blocks along non-unwind edges
                (false, false, EdgeKind::Normal)
                // Non-cleanup blocks can jump to cleanup blocks along unwind edges
                | (false, true, EdgeKind::Unwind)
                // Cleanup blocks can jump to cleanup blocks along non-unwind edges
                | (true, true, EdgeKind::Normal) => {}
                // All other jumps are invalid
                _ => {
                    self.fail(
                        location,
                        format!(
                            "{:?} edge to {:?} violates unwind invariants (cleanup {:?} -> {:?})",
                            edge_kind,
                            bb,
                            src.is_cleanup,
                            bb.is_cleanup,
                        )
                    )
                }
            }
        } else {
            self.fail(location, format!("encountered jump to invalid basic block {:?}", bb))
        }
    }

    /// Check if src can be assigned into dest.
    /// This is not precise, it will accept some incorrect assignments.
    fn mir_assign_valid_types(&self, src: Ty<'tcx>, dest: Ty<'tcx>) -> bool {
        // Fast path before we normalize.
        if src == dest {
            // Equal types, all is good.
            return true;
        }
        // Normalization reveals opaque types, but we may be validating MIR while computing
        // said opaque types, causing cycles.
        if (src, dest).has_opaque_types() {
            return true;
        }
        // Normalize projections and things like that.
        let param_env = self.param_env.with_reveal_all_normalized(self.tcx);
        let src = self.tcx.normalize_erasing_regions(param_env, src);
        let dest = self.tcx.normalize_erasing_regions(param_env, dest);

        // Type-changing assignments can happen when subtyping is used. While
        // all normal lifetimes are erased, higher-ranked types with their
        // late-bound lifetimes are still around and can lead to type
        // differences. So we compare ignoring lifetimes.
        equal_up_to_regions(self.tcx, param_env, src, dest)
    }
}
195
196 impl<'a, 'tcx> Visitor<'tcx> for TypeChecker<'a, 'tcx> {
197     fn visit_local(&mut self, local: &Local, context: PlaceContext, location: Location) {
198         if self.body.local_decls.get(*local).is_none() {
199             self.fail(
200                 location,
201                 format!("local {:?} has no corresponding declaration in `body.local_decls`", local),
202             );
203         }
204
205         if self.reachable_blocks.contains(location.block) && context.is_use() {
206             // Uses of locals must occur while the local's storage is allocated.
207             self.storage_liveness.seek_after_primary_effect(location);
208             let locals_with_storage = self.storage_liveness.get();
209             if !locals_with_storage.contains(*local) {
210                 self.fail(location, format!("use of local {:?}, which has no storage here", local));
211             }
212         }
213     }
214
215     fn visit_operand(&mut self, operand: &Operand<'tcx>, location: Location) {
216         // This check is somewhat expensive, so only run it when -Zvalidate-mir is passed.
217         if self.tcx.sess.opts.debugging_opts.validate_mir {
218             // `Operand::Copy` is only supposed to be used with `Copy` types.
219             if let Operand::Copy(place) = operand {
220                 let ty = place.ty(&self.body.local_decls, self.tcx).ty;
221                 let span = self.body.source_info(location).span;
222
223                 if !ty.is_copy_modulo_regions(self.tcx.at(span), self.param_env) {
224                     self.fail(location, format!("`Operand::Copy` with non-`Copy` type {}", ty));
225                 }
226             }
227         }
228
229         self.super_operand(operand, location);
230     }
231
232     fn visit_projection_elem(
233         &mut self,
234         local: Local,
235         proj_base: &[PlaceElem<'tcx>],
236         elem: PlaceElem<'tcx>,
237         context: PlaceContext,
238         location: Location,
239     ) {
240         if let ProjectionElem::Index(index) = elem {
241             let index_ty = self.body.local_decls[index].ty;
242             if index_ty != self.tcx.types.usize {
243                 self.fail(location, format!("bad index ({:?} != usize)", index_ty))
244             }
245         }
246         self.super_projection_elem(local, proj_base, elem, context, location);
247     }
248
    /// Computes the type of every visited place so malformed projections trip
    /// the `bug!`s inside the type-computation code.
    fn visit_place(&mut self, place: &Place<'tcx>, _: PlaceContext, _: Location) {
        // Set off any `bug!`s in the type computation code
        let _ = place.ty(&self.body.local_decls, self.tcx);
    }
253
254     fn visit_rvalue(&mut self, rvalue: &Rvalue<'tcx>, location: Location) {
255         macro_rules! check_kinds {
256             ($t:expr, $text:literal, $($patterns:tt)*) => {
257                 if !matches!(($t).kind(), $($patterns)*) {
258                     self.fail(location, format!($text, $t));
259                 }
260             };
261         }
262         match rvalue {
263             Rvalue::Use(_) => {}
264             Rvalue::Aggregate(agg_kind, _) => {
265                 let disallowed = match **agg_kind {
266                     AggregateKind::Array(..) => false,
267                     AggregateKind::Generator(..) => self.mir_phase >= MirPhase::GeneratorsLowered,
268                     _ => self.mir_phase >= MirPhase::Deaggregated,
269                 };
270                 if disallowed {
271                     self.fail(
272                         location,
273                         format!("{:?} have been lowered to field assignments", rvalue),
274                     )
275                 }
276             }
277             Rvalue::Ref(_, BorrowKind::Shallow, _) => {
278                 if self.mir_phase >= MirPhase::DropsLowered {
279                     self.fail(
280                         location,
281                         "`Assign` statement with a `Shallow` borrow should have been removed after drop lowering phase",
282                     );
283                 }
284             }
285             Rvalue::Len(p) => {
286                 let pty = p.ty(&self.body.local_decls, self.tcx).ty;
287                 check_kinds!(
288                     pty,
289                     "Cannot compute length of non-array type {:?}",
290                     ty::Array(..) | ty::Slice(..)
291                 );
292             }
293             Rvalue::BinaryOp(op, vals) | Rvalue::CheckedBinaryOp(op, vals) => {
294                 use BinOp::*;
295                 let a = vals.0.ty(&self.body.local_decls, self.tcx);
296                 let b = vals.1.ty(&self.body.local_decls, self.tcx);
297                 match op {
298                     Offset => {
299                         check_kinds!(a, "Cannot offset non-pointer type {:?}", ty::RawPtr(..));
300                         if b != self.tcx.types.isize && b != self.tcx.types.usize {
301                             self.fail(location, format!("Cannot offset by non-isize type {:?}", b));
302                         }
303                     }
304                     Eq | Lt | Le | Ne | Ge | Gt => {
305                         for x in [a, b] {
306                             check_kinds!(
307                                 x,
308                                 "Cannot compare type {:?}",
309                                 ty::Bool
310                                     | ty::Char
311                                     | ty::Int(..)
312                                     | ty::Uint(..)
313                                     | ty::Float(..)
314                                     | ty::RawPtr(..)
315                                     | ty::FnPtr(..)
316                             )
317                         }
318                         // None of the possible types have lifetimes, so we can just compare
319                         // directly
320                         if a != b {
321                             self.fail(
322                                 location,
323                                 format!("Cannot compare unequal types {:?} and {:?}", a, b),
324                             );
325                         }
326                     }
327                     Shl | Shr => {
328                         for x in [a, b] {
329                             check_kinds!(
330                                 x,
331                                 "Cannot shift non-integer type {:?}",
332                                 ty::Uint(..) | ty::Int(..)
333                             )
334                         }
335                     }
336                     BitAnd | BitOr | BitXor => {
337                         for x in [a, b] {
338                             check_kinds!(
339                                 x,
340                                 "Cannot perform bitwise op on type {:?}",
341                                 ty::Uint(..) | ty::Int(..) | ty::Bool
342                             )
343                         }
344                         if a != b {
345                             self.fail(
346                                 location,
347                                 format!(
348                                     "Cannot perform bitwise op on unequal types {:?} and {:?}",
349                                     a, b
350                                 ),
351                             );
352                         }
353                     }
354                     Add | Sub | Mul | Div | Rem => {
355                         for x in [a, b] {
356                             check_kinds!(
357                                 x,
358                                 "Cannot perform op on type {:?}",
359                                 ty::Uint(..) | ty::Int(..) | ty::Float(..)
360                             )
361                         }
362                         if a != b {
363                             self.fail(
364                                 location,
365                                 format!("Cannot perform op on unequal types {:?} and {:?}", a, b),
366                             );
367                         }
368                     }
369                 }
370             }
371             Rvalue::UnaryOp(op, operand) => {
372                 let a = operand.ty(&self.body.local_decls, self.tcx);
373                 match op {
374                     UnOp::Neg => {
375                         check_kinds!(a, "Cannot negate type {:?}", ty::Int(..) | ty::Float(..))
376                     }
377                     UnOp::Not => {
378                         check_kinds!(
379                             a,
380                             "Cannot binary not type {:?}",
381                             ty::Int(..) | ty::Uint(..) | ty::Bool
382                         );
383                     }
384                 }
385             }
386             Rvalue::ShallowInitBox(operand, _) => {
387                 let a = operand.ty(&self.body.local_decls, self.tcx);
388                 check_kinds!(a, "Cannot shallow init type {:?}", ty::RawPtr(..));
389             }
390             _ => {}
391         }
392         self.super_rvalue(rvalue, location);
393     }
394
395     fn visit_statement(&mut self, statement: &Statement<'tcx>, location: Location) {
396         match &statement.kind {
397             StatementKind::Assign(box (dest, rvalue)) => {
398                 // LHS and RHS of the assignment must have the same type.
399                 let left_ty = dest.ty(&self.body.local_decls, self.tcx).ty;
400                 let right_ty = rvalue.ty(&self.body.local_decls, self.tcx);
401                 if !self.mir_assign_valid_types(right_ty, left_ty) {
402                     self.fail(
403                         location,
404                         format!(
405                             "encountered `{:?}` with incompatible types:\n\
406                             left-hand side has type: {}\n\
407                             right-hand side has type: {}",
408                             statement.kind, left_ty, right_ty,
409                         ),
410                     );
411                 }
412                 // FIXME(JakobDegen): Check this for all rvalues, not just this one.
413                 if let Rvalue::Use(Operand::Copy(src) | Operand::Move(src)) = rvalue {
414                     // The sides of an assignment must not alias. Currently this just checks whether
415                     // the places are identical.
416                     if dest == src {
417                         self.fail(
418                             location,
419                             "encountered `Assign` statement with overlapping memory",
420                         );
421                     }
422                 }
423             }
424             StatementKind::AscribeUserType(..) => {
425                 if self.mir_phase >= MirPhase::DropsLowered {
426                     self.fail(
427                         location,
428                         "`AscribeUserType` should have been removed after drop lowering phase",
429                     );
430                 }
431             }
432             StatementKind::FakeRead(..) => {
433                 if self.mir_phase >= MirPhase::DropsLowered {
434                     self.fail(
435                         location,
436                         "`FakeRead` should have been removed after drop lowering phase",
437                     );
438                 }
439             }
440             StatementKind::CopyNonOverlapping(box rustc_middle::mir::CopyNonOverlapping {
441                 ref src,
442                 ref dst,
443                 ref count,
444             }) => {
445                 let src_ty = src.ty(&self.body.local_decls, self.tcx);
446                 let op_src_ty = if let Some(src_deref) = src_ty.builtin_deref(true) {
447                     src_deref.ty
448                 } else {
449                     self.fail(
450                         location,
451                         format!("Expected src to be ptr in copy_nonoverlapping, got: {}", src_ty),
452                     );
453                     return;
454                 };
455                 let dst_ty = dst.ty(&self.body.local_decls, self.tcx);
456                 let op_dst_ty = if let Some(dst_deref) = dst_ty.builtin_deref(true) {
457                     dst_deref.ty
458                 } else {
459                     self.fail(
460                         location,
461                         format!("Expected dst to be ptr in copy_nonoverlapping, got: {}", dst_ty),
462                     );
463                     return;
464                 };
465                 // since CopyNonOverlapping is parametrized by 1 type,
466                 // we only need to check that they are equal and not keep an extra parameter.
467                 if op_src_ty != op_dst_ty {
468                     self.fail(location, format!("bad arg ({:?} != {:?})", op_src_ty, op_dst_ty));
469                 }
470
471                 let op_cnt_ty = count.ty(&self.body.local_decls, self.tcx);
472                 if op_cnt_ty != self.tcx.types.usize {
473                     self.fail(location, format!("bad arg ({:?} != usize)", op_cnt_ty))
474                 }
475             }
476             StatementKind::SetDiscriminant { place, .. } => {
477                 if self.mir_phase < MirPhase::Deaggregated {
478                     self.fail(location, "`SetDiscriminant`is not allowed until deaggregation");
479                 }
480                 let pty = place.ty(&self.body.local_decls, self.tcx).ty.kind();
481                 if !matches!(pty, ty::Adt(..) | ty::Generator(..) | ty::Opaque(..)) {
482                     self.fail(
483                         location,
484                         format!(
485                             "`SetDiscriminant` is only allowed on ADTs and generators, not {:?}",
486                             pty
487                         ),
488                     );
489                 }
490             }
491             StatementKind::Deinit(..) => {
492                 if self.mir_phase < MirPhase::Deaggregated {
493                     self.fail(location, "`Deinit`is not allowed until deaggregation");
494                 }
495             }
496             StatementKind::Retag(_, _) => {
497                 // FIXME(JakobDegen) The validator should check that `self.mir_phase <
498                 // DropsLowered`. However, this causes ICEs with generation of drop shims, which
499                 // seem to fail to set their `MirPhase` correctly.
500             }
501             StatementKind::StorageLive(..)
502             | StatementKind::StorageDead(..)
503             | StatementKind::Coverage(_)
504             | StatementKind::Nop => {}
505         }
506
507         self.super_statement(statement, location);
508     }
509
    /// Checks each terminator kind: edge validity via `check_edge`, operand
    /// types, phase restrictions, and aliasing/duplication invariants.
    fn visit_terminator(&mut self, terminator: &Terminator<'tcx>, location: Location) {
        match &terminator.kind {
            TerminatorKind::Goto { target } => {
                self.check_edge(location, *target, EdgeKind::Normal);
            }
            TerminatorKind::SwitchInt { targets, switch_ty, discr } => {
                // The discriminant operand's type must agree with the stored switch type.
                let ty = discr.ty(&self.body.local_decls, self.tcx);
                if ty != *switch_ty {
                    self.fail(
                        location,
                        format!(
                            "encountered `SwitchInt` terminator with type mismatch: {:?} != {:?}",
                            ty, switch_ty,
                        ),
                    );
                }

                let target_width = self.tcx.sess.target.pointer_width;

                // Determine the bit-width of the switched-on type so that each branch
                // value can be checked to fit in it.
                let size = Size::from_bits(match switch_ty.kind() {
                    ty::Uint(uint) => uint.normalize(target_width).bit_width().unwrap(),
                    ty::Int(int) => int.normalize(target_width).bit_width().unwrap(),
                    ty::Char => 32,
                    ty::Bool => 1,
                    other => bug!("unhandled type: {:?}", other),
                });

                for (value, target) in targets.iter() {
                    // Each branch value must be representable in the switch type.
                    if Scalar::<()>::try_from_uint(value, size).is_none() {
                        self.fail(
                            location,
                            format!("the value {:#x} is not a proper {:?}", value, switch_ty),
                        )
                    }

                    self.check_edge(location, target, EdgeKind::Normal);
                }
                self.check_edge(location, targets.otherwise(), EdgeKind::Normal);

                // Detect duplicate branch values by sort + dedup on a reused buffer.
                self.value_cache.clear();
                self.value_cache.extend(targets.iter().map(|(value, _)| value));
                let all_len = self.value_cache.len();
                self.value_cache.sort_unstable();
                self.value_cache.dedup();
                let has_duplicates = all_len != self.value_cache.len();
                if has_duplicates {
                    self.fail(
                        location,
                        format!(
                            "duplicated values in `SwitchInt` terminator: {:?}",
                            terminator.kind,
                        ),
                    );
                }
            }
            TerminatorKind::Drop { target, unwind, .. } => {
                self.check_edge(location, *target, EdgeKind::Normal);
                if let Some(unwind) = unwind {
                    self.check_edge(location, *unwind, EdgeKind::Unwind);
                }
            }
            TerminatorKind::DropAndReplace { target, unwind, .. } => {
                if self.mir_phase >= MirPhase::DropsLowered {
                    self.fail(
                        location,
                        "`DropAndReplace` should have been removed during drop elaboration",
                    );
                }
                self.check_edge(location, *target, EdgeKind::Normal);
                if let Some(unwind) = unwind {
                    self.check_edge(location, *unwind, EdgeKind::Unwind);
                }
            }
            TerminatorKind::Call { func, args, destination, cleanup, .. } => {
                // The callee operand must have a callable type.
                let func_ty = func.ty(&self.body.local_decls, self.tcx);
                match func_ty.kind() {
                    ty::FnPtr(..) | ty::FnDef(..) => {}
                    _ => self.fail(
                        location,
                        format!("encountered non-callable type {} in `Call` terminator", func_ty),
                    ),
                }
                if let Some((_, target)) = destination {
                    self.check_edge(location, *target, EdgeKind::Normal);
                }
                if let Some(cleanup) = cleanup {
                    self.check_edge(location, *cleanup, EdgeKind::Unwind);
                }

                // The call destination place and Operand::Move place used as an argument might be
                // passed by a reference to the callee. Consequently they must be non-overlapping.
                // Currently this simply checks for duplicate places.
                self.place_cache.clear();
                if let Some((destination, _)) = destination {
                    self.place_cache.push(destination.as_ref());
                }
                for arg in args {
                    if let Operand::Move(place) = arg {
                        self.place_cache.push(place.as_ref());
                    }
                }
                // Duplicate detection via sort + dedup on the reused buffer.
                let all_len = self.place_cache.len();
                self.place_cache.sort_unstable();
                self.place_cache.dedup();
                let has_duplicates = all_len != self.place_cache.len();
                if has_duplicates {
                    self.fail(
                        location,
                        format!(
                            "encountered overlapping memory in `Call` terminator: {:?}",
                            terminator.kind,
                        ),
                    );
                }
            }
            TerminatorKind::Assert { cond, target, cleanup, .. } => {
                // The assertion condition must be a `bool`.
                let cond_ty = cond.ty(&self.body.local_decls, self.tcx);
                if cond_ty != self.tcx.types.bool {
                    self.fail(
                        location,
                        format!(
                            "encountered non-boolean condition of type {} in `Assert` terminator",
                            cond_ty
                        ),
                    );
                }
                self.check_edge(location, *target, EdgeKind::Normal);
                if let Some(cleanup) = cleanup {
                    self.check_edge(location, *cleanup, EdgeKind::Unwind);
                }
            }
            TerminatorKind::Yield { resume, drop, .. } => {
                // `Yield` is only valid inside a generator body, before generator lowering.
                if self.body.generator.is_none() {
                    self.fail(location, "`Yield` cannot appear outside generator bodies");
                }
                if self.mir_phase >= MirPhase::GeneratorsLowered {
                    self.fail(location, "`Yield` should have been replaced by generator lowering");
                }
                self.check_edge(location, *resume, EdgeKind::Normal);
                if let Some(drop) = drop {
                    self.check_edge(location, *drop, EdgeKind::Normal);
                }
            }
            TerminatorKind::FalseEdge { real_target, imaginary_target } => {
                if self.mir_phase >= MirPhase::DropsLowered {
                    self.fail(
                        location,
                        "`FalseEdge` should have been removed after drop elaboration",
                    );
                }
                self.check_edge(location, *real_target, EdgeKind::Normal);
                self.check_edge(location, *imaginary_target, EdgeKind::Normal);
            }
            TerminatorKind::FalseUnwind { real_target, unwind } => {
                if self.mir_phase >= MirPhase::DropsLowered {
                    self.fail(
                        location,
                        "`FalseUnwind` should have been removed after drop elaboration",
                    );
                }
                self.check_edge(location, *real_target, EdgeKind::Normal);
                if let Some(unwind) = unwind {
                    self.check_edge(location, *unwind, EdgeKind::Unwind);
                }
            }
            TerminatorKind::InlineAsm { destination, cleanup, .. } => {
                if let Some(destination) = destination {
                    self.check_edge(location, *destination, EdgeKind::Normal);
                }
                if let Some(cleanup) = cleanup {
                    self.check_edge(location, *cleanup, EdgeKind::Unwind);
                }
            }
            TerminatorKind::GeneratorDrop => {
                // Like `Yield`: only in generator bodies, gone after generator lowering.
                if self.body.generator.is_none() {
                    self.fail(location, "`GeneratorDrop` cannot appear outside generator bodies");
                }
                if self.mir_phase >= MirPhase::GeneratorsLowered {
                    self.fail(
                        location,
                        "`GeneratorDrop` should have been replaced by generator lowering",
                    );
                }
            }
            TerminatorKind::Resume | TerminatorKind::Abort => {
                // These may only terminate cleanup blocks.
                let bb = location.block;
                if !self.body.basic_blocks()[bb].is_cleanup {
                    self.fail(location, "Cannot `Resume` or `Abort` from non-cleanup basic block")
                }
            }
            TerminatorKind::Return => {
                // Conversely, `Return` may not terminate a cleanup block.
                let bb = location.block;
                if self.body.basic_blocks()[bb].is_cleanup {
                    self.fail(location, "Cannot `Return` from cleanup basic block")
                }
            }
            TerminatorKind::Unreachable => {}
        }

        self.super_terminator(terminator, location);
    }
711
712     fn visit_source_scope(&mut self, scope: &SourceScope) {
713         if self.body.source_scopes.get(*scope).is_none() {
714             self.tcx.sess.diagnostic().delay_span_bug(
715                 self.body.span,
716                 &format!(
717                     "broken MIR in {:?} ({}):\ninvalid source scope {:?}",
718                     self.body.source.instance, self.when, scope,
719                 ),
720             );
721         }
722     }
723 }