Various minor/cosmetic improvements to code

diff --git a/src/librustc_mir/build/scope.rs b/src/librustc_mir/build/scope.rs
index 99badd5a03fe6ee9840bf0f0e9d985b100ed1035..8c948766314e8fb988a6db2d5dfe0e9fd145d5da 100644
--- a/src/librustc_mir/build/scope.rs
+++ b/src/librustc_mir/build/scope.rs
 use build::{BlockAnd, BlockAndExtension, Builder, CFG};
 use hair::LintLevel;
 use rustc::middle::region;
-use rustc::ty::{Ty, TyCtxt};
+use rustc::ty::Ty;
 use rustc::hir;
 use rustc::hir::def_id::LOCAL_CRATE;
 use rustc::mir::*;
 use syntax_pos::{Span};
 use rustc_data_structures::fx::FxHashMap;
+use std::collections::hash_map::Entry;
 
 #[derive(Debug)]
 pub struct Scope<'tcx> {
@@ -151,7 +152,7 @@ struct DropData<'tcx> {
 pub(crate) struct CachedBlock {
     /// The cached block for the cleanups-on-diverge path. This block
     /// contains code to run the current drop and all the preceding
-    /// drops (i.e. those having lower index in Drop’s Scope drop
+    /// drops (i.e., those having lower index in Drop’s Scope drop
     /// array)
     unwind: Option<BasicBlock>,
 
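`CachedBlock` is only partially visible in this hunk, but the rest of the patch leans on its `get`, `ref_mut`, and `invalidate` methods. Below is a minimal stand-in sketch of that API, reconstructed from the call sites in the hunks that follow; the `generator_drop` field and the reduced `BasicBlock` newtype are assumptions made to keep the snippet self-contained and runnable.

```rust
// Minimal stand-ins: `BasicBlock` reduced to an index newtype.
#[derive(Copy, Clone, Debug, PartialEq)]
pub struct BasicBlock(pub usize);

// Sketch of the cache this patch reads and writes. The `generator_drop`
// field is an assumption, inferred from the `get(true)` / `ref_mut(true)`
// call sites below.
#[derive(Copy, Clone, Debug, Default)]
pub struct CachedBlock {
    unwind: Option<BasicBlock>,
    generator_drop: Option<BasicBlock>,
}

impl CachedBlock {
    /// Forget both cached blocks so they get rebuilt.
    pub fn invalidate(&mut self) {
        self.unwind = None;
        self.generator_drop = None;
    }

    /// The cached block for the requested cleanup path, if built.
    pub fn get(&self, generator_drop: bool) -> Option<BasicBlock> {
        if generator_drop { self.generator_drop } else { self.unwind }
    }

    /// Mutable slot, used by `build_diverge_scope` to store the cache.
    pub fn ref_mut(&mut self, generator_drop: bool) -> &mut Option<BasicBlock> {
        if generator_drop { &mut self.generator_drop } else { &mut self.unwind }
    }
}

fn main() {
    let mut cache = CachedBlock::default();
    *cache.ref_mut(false) = Some(BasicBlock(3));
    assert_eq!(cache.get(false), Some(BasicBlock(3)));
    assert_eq!(cache.get(true), None); // generator-drop path not built yet
    cache.invalidate();
    assert_eq!(cache.get(false), None);
}
```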
@@ -181,7 +182,7 @@ pub struct BreakableScope<'tcx> {
     /// Block to branch into when the loop or block terminates (either by being `break`-en
     /// out of, or by its condition becoming false)
     pub break_block: BasicBlock,
-    /// The destination of the loop/block expression itself (i.e. where to put the result of a
+    /// The destination of the loop/block expression itself (i.e., where to put the result of a
     /// `break` expression)
     pub break_destination: Place<'tcx>,
 }
@@ -224,7 +225,7 @@ impl<'tcx> Scope<'tcx> {
     /// Should always be run for all inner scopes when a drop is pushed into some scope enclosing a
     /// larger extent of code.
     ///
-    /// `storage_only` controls whether to invalidate only drop paths run `StorageDead`.
+    /// `storage_only` controls whether to invalidate only drop paths that run `StorageDead`.
     /// `this_scope_only` controls whether to invalidate only drop paths that refer to the current
     /// top-of-scope (as opposed to dependent scopes).
     fn invalidate_cache(&mut self, storage_only: bool, this_scope_only: bool) {
@@ -242,8 +243,8 @@ fn invalidate_cache(&mut self, storage_only: bool, this_scope_only: bool) {
         }
 
         if !storage_only && !this_scope_only {
-            for dropdata in &mut self.drops {
-                if let DropKind::Value { ref mut cached_block } = dropdata.kind {
+            for drop_data in &mut self.drops {
+                if let DropKind::Value { ref mut cached_block } = drop_data.kind {
                     cached_block.invalidate();
                 }
             }
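Only the `!storage_only && !this_scope_only` arm of `invalidate_cache` is visible above. The sketch below shows how the two flags plausibly compose, inferred from the doc comment and reusing the stand-in types from the `CachedBlock` sketch; the handling of the scope-level caches before the visible `if` is an assumption, and `cached_generator_drop` plus several other fields are elided.

```rust
use std::collections::HashMap;

// Reduced scope stand-in; reuses `BasicBlock` / `CachedBlock` from the
// sketch above.
#[derive(Default)]
struct Scope {
    cached_exits: HashMap<(u32, u32), BasicBlock>,
    cached_unwind: CachedBlock,
    drops: Vec<DropData>,
}
struct DropData { kind: DropKind }
#[allow(dead_code)]
enum DropKind { Value { cached_block: CachedBlock }, Storage }

impl Scope {
    // Assumed full body: only the final `if` appears in the hunk; the
    // scope-level cache handling before it is inferred from the doc comment.
    fn invalidate_cache(&mut self, storage_only: bool, this_scope_only: bool) {
        // Cached exits run StorageDead and refer to the top of scope,
        // so every invalidation clears them.
        self.cached_exits.clear();
        if !storage_only {
            // Unwind entry points also refer to the top of scope.
            self.cached_unwind.invalidate();
        }
        if !storage_only && !this_scope_only {
            // Shown in the hunk above: per-drop unwind caches only go
            // when drop paths beyond this scope are affected.
            for drop_data in &mut self.drops {
                if let DropKind::Value { ref mut cached_block } = drop_data.kind {
                    cached_block.invalidate();
                }
            }
        }
    }
}

fn main() {
    let mut scope = Scope::default();
    *scope.cached_unwind.ref_mut(false) = Some(BasicBlock(1));
    scope.invalidate_cache(true, false); // storage-only
    assert_eq!(scope.cached_unwind.get(false), Some(BasicBlock(1))); // survives
    scope.invalidate_cache(false, false);
    assert_eq!(scope.cached_unwind.get(false), None); // cleared
}
```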
@@ -321,11 +322,11 @@ pub fn in_scope<F, R>(&mut self,
             let same_lint_scopes = tcx.dep_graph.with_ignore(|| {
                 let sets = tcx.lint_levels(LOCAL_CRATE);
                 let parent_hir_id =
-                    tcx.hir.definitions().node_to_hir_id(
+                    tcx.hir().definitions().node_to_hir_id(
                         self.source_scope_local_data[source_scope].lint_root
-                            );
+                    );
                 let current_hir_id =
-                    tcx.hir.definitions().node_to_hir_id(node_id);
+                    tcx.hir().definitions().node_to_hir_id(node_id);
                 sets.lint_level_set(parent_hir_id) ==
                     sets.lint_level_set(current_hir_id)
             });
@@ -333,7 +334,7 @@ pub fn in_scope<F, R>(&mut self,
             if !same_lint_scopes {
                 self.source_scope =
                     self.new_source_scope(region_scope.1.span, lint_level,
-                                              None);
+                                          None);
             }
         }
         self.push_scope(region_scope);
@@ -381,15 +382,18 @@ pub fn pop_scope(&mut self,
         let scope = self.scopes.pop().unwrap();
         assert_eq!(scope.region_scope, region_scope.0);
 
-        self.cfg.push_end_region(self.hir.tcx(), block, region_scope.1, scope.region_scope);
-        let resume_block = self.resume_block();
-        unpack!(block = build_scope_drops(&mut self.cfg,
-                                          resume_block,
-                                          &scope,
-                                          &self.scopes,
-                                          block,
-                                          self.arg_count,
-                                          false));
+        let unwind_to = self.scopes.last().and_then(|next_scope| {
+            next_scope.cached_unwind.get(false)
+        }).unwrap_or_else(|| self.resume_block());
+
+        unpack!(block = build_scope_drops(
+            &mut self.cfg,
+            &scope,
+            block,
+            unwind_to,
+            self.arg_count,
+            false,
+        ));
 
         block.unit()
     }
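The substantive change to `pop_scope`: instead of threading `resume_block` and the remaining scopes into `build_scope_drops`, the unwind target is resolved up front, using the enclosing scope's cached unwind block and falling back to the resume block only for the outermost scope. A worked example of that fallback, reusing the stand-ins from the sketches above (`pop_unwind_to` is a hypothetical free-function restatement, not a method from this file):

```rust
// Resolve where drops in the popped scope should unwind to: the
// enclosing scope's cached unwind block, else the resume block.
fn pop_unwind_to(scopes: &[Scope], resume_block: BasicBlock) -> BasicBlock {
    scopes.last()
        .and_then(|next_scope| next_scope.cached_unwind.get(false))
        .unwrap_or(resume_block)
}

fn main() {
    let resume = BasicBlock(0);
    let mut outer = Scope::default();
    // Suppose diverge_cleanup already built and cached bb7 for `outer`:
    *outer.cached_unwind.ref_mut(false) = Some(BasicBlock(7));
    // After popping the innermost scope, `outer` is `scopes.last()`:
    assert_eq!(pop_unwind_to(&[outer], resume), BasicBlock(7));
    // Popping the outermost scope falls back to the resume block:
    assert_eq!(pop_unwind_to(&[], resume), resume);
}
```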
@@ -397,8 +401,7 @@ pub fn pop_scope(&mut self,
 
     /// Branch out of `block` to `target`, exiting all scopes up to
     /// and including `region_scope`.  This will insert whatever drops are
-    /// needed, as well as tracking this exit for the SEME region. See
-    /// module comment for details.
+    /// needed. See module comment for details.
     pub fn exit_scope(&mut self,
                       span: Span,
                       region_scope: (region::Scope, SourceInfo),
@@ -416,41 +419,51 @@ pub fn exit_scope(&mut self,
 
         // If we are emitting a `drop` statement, we need to have the cached
         // diverge cleanup pads ready in case that drop panics.
-        let may_panic = self.scopes[(len - scope_count)..].iter()
-            .any(|s| s.drops.iter().any(|s| s.kind.may_panic()));
+        let may_panic = self.scopes[(len - scope_count)..].iter().any(|s| s.needs_cleanup);
         if may_panic {
             self.diverge_cleanup();
         }
 
-        {
-        let resume_block = self.resume_block();
-        let mut rest = &mut self.scopes[(len - scope_count)..];
-        while let Some((scope, rest_)) = {rest}.split_last_mut() {
-            rest = rest_;
-            block = if let Some(&e) = scope.cached_exits.get(&(target, region_scope.0)) {
-                self.cfg.terminate(block, scope.source_info(span),
-                                   TerminatorKind::Goto { target: e });
-                return;
-            } else {
-                let b = self.cfg.start_new_block();
-                self.cfg.terminate(block, scope.source_info(span),
-                                   TerminatorKind::Goto { target: b });
-                scope.cached_exits.insert((target, region_scope.0), b);
-                b
+        let mut scopes = self.scopes[(len - scope_count - 1)..].iter_mut().rev();
+        let mut scope = scopes.next().unwrap();
+        for next_scope in scopes {
+            if scope.drops.is_empty() {
+                scope = next_scope;
+                continue;
+            }
+            let source_info = scope.source_info(span);
+            block = match scope.cached_exits.entry((target, region_scope.0)) {
+                Entry::Occupied(e) => {
+                    self.cfg.terminate(block, source_info,
+                                       TerminatorKind::Goto { target: *e.get() });
+                    return;
+                }
+                Entry::Vacant(v) => {
+                    let b = self.cfg.start_new_block();
+                    self.cfg.terminate(block, source_info,
+                                       TerminatorKind::Goto { target: b });
+                    v.insert(b);
+                    b
+                }
             };
 
-            // End all regions for scopes out of which we are breaking.
-            self.cfg.push_end_region(self.hir.tcx(), block, region_scope.1, scope.region_scope);
+            let unwind_to = next_scope.cached_unwind.get(false).unwrap_or_else(|| {
+                debug_assert!(!may_panic, "cached block not present?");
+                START_BLOCK
+            });
 
-            unpack!(block = build_scope_drops(&mut self.cfg,
-                                              resume_block,
-                                              scope,
-                                              rest,
-                                              block,
-                                              self.arg_count,
-                                              false));
-        }
+            unpack!(block = build_scope_drops(
+                &mut self.cfg,
+                scope,
+                block,
+                unwind_to,
+                self.arg_count,
+                false,
+            ));
+
+            scope = next_scope;
         }
+
         let scope = &self.scopes[len - scope_count];
         self.cfg.terminate(block, scope.source_info(span),
                            TerminatorKind::Goto { target });
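The rewritten exit loop also moves `cached_exits` from a separate lookup-then-insert to the `Entry` API, so each `(target, region_scope)` pair is hashed once. A self-contained illustration of the caching pattern, with plain `u32` block ids and a hypothetical fresh-block counter standing in for `start_new_block`:

```rust
use std::collections::hash_map::{Entry, HashMap};

fn main() {
    // Keys mirror the real cache: (branch target, region scope).
    let mut cached_exits: HashMap<(u32, u32), u32> = HashMap::new();
    let mut next_block = 10; // hypothetical fresh-block counter
    let key = (3, 1);

    // One lookup decides between reusing the cached exit block and
    // allocating (and caching) a fresh one, mirroring the match above.
    let block = match cached_exits.entry(key) {
        Entry::Occupied(e) => *e.get(),
        Entry::Vacant(v) => {
            let b = next_block;
            next_block += 1;
            *v.insert(b)
        }
    };
    assert_eq!(block, 10);
    // A second exit through the same (target, scope) pair hits the cache.
    assert_eq!(cached_exits[&key], 10);
}
```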
@@ -465,20 +478,20 @@ pub fn generator_drop_cleanup(&mut self) -> Option<BasicBlock> {
             return None;
         }
 
-        // Fill in the cache
+        // Fill in the cache for unwinds
         self.diverge_cleanup_gen(true);
 
         let src_info = self.scopes[0].source_info(self.fn_span);
+        let resume_block = self.resume_block();
+        let mut scopes = self.scopes.iter_mut().rev().peekable();
         let mut block = self.cfg.start_new_block();
         let result = block;
-        let resume_block = self.resume_block();
-        let mut rest = &mut self.scopes[..];
 
-        while let Some((scope, rest_)) = {rest}.split_last_mut() {
-            rest = rest_;
+        while let Some(scope) = scopes.next() {
             if !scope.needs_cleanup {
                 continue;
             }
+
             block = if let Some(b) = scope.cached_generator_drop {
                 self.cfg.terminate(block, src_info,
                                    TerminatorKind::Goto { target: b });
@@ -491,16 +504,20 @@ pub fn generator_drop_cleanup(&mut self) -> Option<BasicBlock> {
                 b
             };
 
-            // End all regions for scopes out of which we are breaking.
-            self.cfg.push_end_region(self.hir.tcx(), block, src_info, scope.region_scope);
-
-            unpack!(block = build_scope_drops(&mut self.cfg,
-                                              resume_block,
-                                              scope,
-                                              rest,
-                                              block,
-                                              self.arg_count,
-                                              true));
+            let unwind_to = scopes.peek().as_ref().map(|scope| {
+                scope.cached_unwind.get(true).unwrap_or_else(|| {
+                    span_bug!(src_info.span, "cached block not present?")
+                })
+            }).unwrap_or(resume_block);
+
+            unpack!(block = build_scope_drops(
+                &mut self.cfg,
+                scope,
+                block,
+                unwind_to,
+                self.arg_count,
+                true,
+            ));
         }
 
         self.cfg.terminate(block, src_info, TerminatorKind::GeneratorDrop);
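`generator_drop_cleanup` now walks the scopes innermost to outermost with a peekable iterator, peeking at the enclosing scope for the unwind target rather than repeatedly splitting a slice. A minimal self-contained illustration of the pattern, with integers standing in for blocks and `None` marking a scope whose cache was never built:

```rust
fn main() {
    // Per-scope cached unwind blocks, outermost first; None = not built.
    let mut scopes = vec![Some(2u32), Some(5), None];
    let resume_block = 0u32;

    // Innermost-to-outermost walk, as in the loop above.
    let mut iter = scopes.iter_mut().rev().peekable();
    while let Some(_scope) = iter.next() {
        // The unwind target comes from the *next* (enclosing) scope,
        // falling back to the resume block at the outermost scope.
        let unwind_to = iter.peek().and_then(|next| **next).unwrap_or(resume_block);
        println!("unwind to bb{}", unwind_to);
    }
    // prints: bb5, bb2, bb0
}
```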
@@ -510,9 +527,9 @@ pub fn generator_drop_cleanup(&mut self) -> Option<BasicBlock> {
 
     /// Creates a new source scope, nested in the current one.
     pub fn new_source_scope(&mut self,
-                                span: Span,
-                                lint_level: LintLevel,
-                                safety: Option<Safety>) -> SourceScope {
+                            span: Span,
+                            lint_level: LintLevel,
+                            safety: Option<Safety>) -> SourceScope {
         let parent = self.source_scope;
         debug!("new_source_scope({:?}, {:?}, {:?}) - parent({:?})={:?}",
                span, lint_level, safety,
@@ -720,7 +737,7 @@ pub fn schedule_drop(
             //
             // Note that this code iterates scopes from the inner-most to the outer-most,
             // invalidating caches of each scope visited. This way the bare minimum of the
-            // caches gets invalidated. i.e. if a new drop is added into the middle scope, the
+            // caches gets invalidated. i.e., if a new drop is added into the middle scope, the
             // cache of outer scope stays intact.
             scope.invalidate_cache(!needs_drop, this_scope);
             if this_scope {
@@ -749,8 +766,7 @@ pub fn schedule_drop(
     /// Creates a path that performs all required cleanup for unwinding.
     ///
     /// This path terminates in Resume. Returns the start of the path.
-    /// See module comment for more details. None indicates there’s no
-    /// cleanup to do at this point.
+    /// See module comment for more details.
     pub fn diverge_cleanup(&mut self) -> BasicBlock {
         self.diverge_cleanup_gen(false)
     }
@@ -772,11 +788,6 @@ fn resume_block(&mut self) -> BasicBlock {
     }
 
     fn diverge_cleanup_gen(&mut self, generator_drop: bool) -> BasicBlock {
-        // To start, create the resume terminator.
-        let mut target = self.resume_block();
-
-        let Builder { ref mut cfg, ref mut scopes, .. } = *self;
-
         // Build up the drops in **reverse** order. The end result will
         // look like:
         //
@@ -788,11 +799,17 @@ fn diverge_cleanup_gen(&mut self, generator_drop: bool) -> BasicBlock {
         // store caches. If everything is cached, we'll just walk right
         // to left reading the cached results but never created anything.
 
-        if scopes.iter().any(|scope| scope.needs_cleanup) {
-            for scope in scopes.iter_mut() {
-                target = build_diverge_scope(self.hir.tcx(), cfg, scope.region_scope_span,
-                                             scope, target, generator_drop);
-            }
+        // Find the last cached block
+        let (mut target, first_uncached) = if let Some(cached_index) = self.scopes.iter()
+            .rposition(|scope| scope.cached_unwind.get(generator_drop).is_some()) {
+            (self.scopes[cached_index].cached_unwind.get(generator_drop).unwrap(), cached_index + 1)
+        } else {
+            (self.resume_block(), 0)
+        };
+
+        for scope in self.scopes[first_uncached..].iter_mut() {
+            target = build_diverge_scope(&mut self.cfg, scope.region_scope_span,
+                                         scope, target, generator_drop);
         }
 
         target
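The incremental cache walk above hinges on `Iterator::rposition`: find the innermost scope whose unwind block is already cached, reuse that block as the chain head, and rebuild only the scopes inside it. A small self-contained demonstration with plain integers:

```rust
fn main() {
    // Cached unwind blocks per scope, outermost first; None = not built.
    let cached: Vec<Option<u32>> = vec![Some(1), Some(4), None, None];
    let resume_block = 0;

    // The innermost scope that already has a cached block becomes the
    // chain head; only the scopes inside it need new cleanup blocks.
    let (target, first_uncached) =
        if let Some(i) = cached.iter().rposition(|c| c.is_some()) {
            (cached[i].unwrap(), i + 1)
        } else {
            (resume_block, 0)
        };

    assert_eq!((target, first_uncached), (4, 2));
    // `cached[2..]` would now be rebuilt, extending the chain from bb4.
}
```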
@@ -866,64 +883,62 @@ pub fn assert(&mut self, block: BasicBlock,
 }
 
 /// Builds drops for pop_scope and exit_scope.
-fn build_scope_drops<'tcx>(cfg: &mut CFG<'tcx>,
-                           resume_block: BasicBlock,
-                           scope: &Scope<'tcx>,
-                           earlier_scopes: &[Scope<'tcx>],
-                           mut block: BasicBlock,
-                           arg_count: usize,
-                           generator_drop: bool)
-                           -> BlockAnd<()> {
-    debug!("build_scope_drops({:?} -> {:?})", block, scope);
-    let mut iter = scope.drops.iter().rev();
-    while let Some(drop_data) = iter.next() {
+fn build_scope_drops<'tcx>(
+    cfg: &mut CFG<'tcx>,
+    scope: &Scope<'tcx>,
+    mut block: BasicBlock,
+    last_unwind_to: BasicBlock,
+    arg_count: usize,
+    generator_drop: bool,
+) -> BlockAnd<()> {
+    debug!("build_scope_drops({:?} -> {:?})", block, scope);
+
+    // Build up the drops in evaluation order. The end result will
+    // look like:
+    //
+    // [SDs, drops[n]] --..> [SDs, drop[1]] -> [SDs, drop[0]] -> [[SDs]]
+    //               |                    |                 |
+    //               :                    |                 |
+    //                                    V                 V
+    // [drop[n]] -...-> [drop[1]] ------> [drop[0]] ------> [last_unwind_to]
+    //
+    // The horizontal arrows represent the execution path when the drops return
+    // successfully. The downwards arrows represent the execution path when the
+    // drops panic (panicking while unwinding will abort, so there's no need for
+    // another set of arrows). The drops for the unwind path should have already
+    // been generated by `diverge_cleanup_gen`.
+    //
+    // The code in this function reads from right to left.
+    // Storage dead drops have to be done left to right (since we can only push
+    // to the end of a Vec). So, we find the next drop and then call
+    // push_storage_deads which will iterate backwards through them so that
+    // they are added in the correct order.
+
+    let mut unwind_blocks = scope.drops.iter().rev().filter_map(|drop_data| {
+        if let DropKind::Value { cached_block } = drop_data.kind {
+            Some(cached_block.get(generator_drop).unwrap_or_else(|| {
+                span_bug!(drop_data.span, "cached block not present?")
+            }))
+        } else {
+            None
+        }
+    });
+
+    // When we unwind from a drop, we start cleaning up from the next one, so
+    // we don't need this block.
+    unwind_blocks.next();
+
+    for drop_data in scope.drops.iter().rev() {
         let source_info = scope.source_info(drop_data.span);
         match drop_data.kind {
             DropKind::Value { .. } => {
-                // Try to find the next block with its cached block for us to
-                // diverge into, either a previous block in this current scope or
-                // the top of the previous scope.
-                //
-                // If it wasn't for EndRegion, we could just chain all the DropData
-                // together and pick the first DropKind::Value. Please do that
-                // when we replace EndRegion with NLL.
-                let on_diverge = iter.clone().filter_map(|dd| {
-                    match dd.kind {
-                        DropKind::Value { cached_block } => Some(cached_block),
-                        DropKind::Storage => None
-                    }
-                }).next().or_else(|| {
-                    if earlier_scopes.iter().any(|scope| scope.needs_cleanup) {
-                        // If *any* scope requires cleanup code to be run,
-                        // we must use the cached unwind from the *topmost*
-                        // scope, to ensure all EndRegions from surrounding
-                        // scopes are executed before the drop code runs.
-                        Some(earlier_scopes.last().unwrap().cached_unwind)
-                    } else {
-                        // We don't need any further cleanup, so return None
-                        // to avoid creating a landing pad. We can skip
-                        // EndRegions because all local regions end anyway
-                        // when the function unwinds.
-                        //
-                        // This is an important optimization because LLVM is
-                        // terrible at optimizing landing pads. FIXME: I think
-                        // it would be cleaner and better to do this optimization
-                        // in SimplifyCfg instead of here.
-                        None
-                    }
-                });
-
-                let on_diverge = on_diverge.map(|cached_block| {
-                    cached_block.get(generator_drop).unwrap_or_else(|| {
-                        span_bug!(drop_data.span, "cached block not present?")
-                    })
-                });
+                let unwind_to = unwind_blocks.next().unwrap_or(last_unwind_to);
 
                 let next = cfg.start_new_block();
                 cfg.terminate(block, source_info, TerminatorKind::Drop {
                     location: drop_data.location.clone(),
                     target: next,
-                    unwind: Some(on_diverge.unwrap_or(resume_block))
+                    unwind: Some(unwind_to)
                 });
                 block = next;
             }
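The subtle point in the new `build_scope_drops` is the off-by-one pairing: a drop's unwind edge must target the cleanup block of the *next* drop to run during unwinding, which is why the first element of `unwind_blocks` is discarded. A self-contained demonstration of the pairing with plain integers, `None` marking storage-only entries:

```rust
fn main() {
    // Cached cleanup blocks per drop, in declaration order; None marks
    // a StorageDead-only entry, which has no unwind block.
    let cached: Vec<Option<u32>> = vec![Some(10), None, Some(11), Some(12)];
    let last_unwind_to = 99;

    let mut unwind_blocks = cached.iter().rev().filter_map(|c| *c);
    // If a drop itself panics, unwinding resumes at the *next* drop, so
    // a drop's own cleanup block is never its unwind target: skip one.
    unwind_blocks.next();

    for (i, c) in cached.iter().enumerate().rev() {
        if c.is_some() {
            let unwind_to = unwind_blocks.next().unwrap_or(last_unwind_to);
            println!("drop[{}] unwinds to bb{}", i, unwind_to);
        }
    }
    // prints: drop[3] -> bb11, drop[2] -> bb10, drop[0] -> bb99
}
```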
@@ -950,21 +965,17 @@ fn build_scope_drops<'tcx>(cfg: &mut CFG<'tcx>,
     block.unit()
 }
 
-fn build_diverge_scope<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
-                                       cfg: &mut CFG<'tcx>,
-                                       span: Span,
-                                       scope: &mut Scope<'tcx>,
-                                       mut target: BasicBlock,
-                                       generator_drop: bool)
-                                       -> BasicBlock
+fn build_diverge_scope<'tcx>(cfg: &mut CFG<'tcx>,
+                             span: Span,
+                             scope: &mut Scope<'tcx>,
+                             mut target: BasicBlock,
+                             generator_drop: bool)
+                             -> BasicBlock
 {
     // Build up the drops in **reverse** order. The end result will
     // look like:
     //
-    //    [EndRegion Block] -> [drops[n]] -...-> [drops[0]] -> [Free] -> [target]
-    //    |                                                         |
-    //    +---------------------------------------------------------+
-    //     code for scope
+    //    [drops[n]] -...-> [drops[0]] -> [target]
     //
     // The code in this function reads from right to left. At each
     // point, we check for cached blocks representing the
@@ -1009,21 +1020,7 @@ fn build_diverge_scope<'a, 'gcx, 'tcx>(tcx: TyCtxt<'a, 'gcx, 'tcx>,
         };
     }
 
-    // Finally, push the EndRegion block, used by mir-borrowck, and set
-    // `cached_unwind` to point to it (Block becomes trivial goto after
-    // pass that removes all EndRegions).
-    target = {
-        let cached_block = scope.cached_unwind.ref_mut(generator_drop);
-        if let Some(cached_block) = *cached_block {
-            cached_block
-        } else {
-            let block = cfg.start_new_cleanup_block();
-            cfg.push_end_region(tcx, block, source_info(span), scope.region_scope);
-            cfg.terminate(block, source_info(span), TerminatorKind::Goto { target });
-            *cached_block = Some(block);
-            block
-        }
-    };
+    *scope.cached_unwind.ref_mut(generator_drop) = Some(target);
 
     debug!("build_diverge_scope({:?}, {:?}) = {:?}", scope, span, target);
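With the `EndRegion` block gone, `build_diverge_scope` now caches the head of the diverge chain directly, which is exactly what `diverge_cleanup_gen` resumes from on the next call. A self-contained demo of how the cached chain heads accumulate across scopes (block ids are plain integers and the drop counts are made up):

```rust
fn main() {
    let resume = 0u32;
    let mut next_block = 1u32; // hypothetical fresh-block counter
    // Number of value drops per scope, outermost first (made up).
    let scopes = [2usize, 1, 3];
    let mut cached_unwind: Vec<Option<u32>> = vec![None; scopes.len()];

    let mut target = resume;
    for (i, &ndrops) in scopes.iter().enumerate() {
        for _ in 0..ndrops {
            // A fresh cleanup block whose unwind edge continues at `target`.
            let b = next_block;
            next_block += 1;
            target = b;
        }
        // Previously an extra Goto/EndRegion block was appended and
        // cached here; now the chain head itself is cached.
        cached_unwind[i] = Some(target);
    }
    assert_eq!(cached_unwind, vec![Some(2), Some(3), Some(6)]);
}
```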