// Copyright 2015 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

/*!
Managing the scope stack. The scopes are tied to lexical scopes, so as
we descend the HAIR, we push a scope on the stack, translate its
contents, and then pop it off. Every scope is named by a
`CodeExtent`.

### SEME Regions

When pushing a new scope, we record the current point in the graph (a
basic block); this marks the entry to the scope. We then generate more
stuff in the control-flow graph. Whenever the scope is exited, either
via a `break` or `return` or just by fallthrough, that marks an exit
from the scope. Each lexical scope thus corresponds to a single-entry,
multiple-exit (SEME) region in the control-flow graph.
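
For example (an illustrative sketch), a scope that can be exited both by
an early `return` and by falling through has one entry and two exits:

```
          entry
            |
      [scope body] --(return)--> exit 1
            |
      (fallthrough) -----------> exit 2
```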

For now, we keep a mapping from each `CodeExtent` to its
corresponding SEME region for later reference (see caveat in next
paragraph). This is because region scopes are tied to
them. Eventually, when we shift to non-lexical lifetimes, there should
be no need to remember this mapping.

There is one additional wrinkle, actually, that I wanted to hide from
you but duty compels me to mention. In the course of translating
matches, it sometimes happens that certain code (namely guards) gets
executed multiple times. This means that the lexical scope may
in fact correspond to multiple, disjoint SEME regions. So in fact our
mapping is from one scope to a vector of SEME regions.

### Drops

The primary purpose for scopes is to insert drops: while translating
the contents, we also accumulate lvalues that need to be dropped upon
exit from each scope. This is done by calling `schedule_drop`. Once a
drop is scheduled, whenever we branch out we will insert drops of all
those lvalues onto the outgoing edge. Note that we don't know the full
set of scheduled drops up front, and so whenever we exit from the
scope we only drop the values scheduled thus far. For example, consider
the scope S corresponding to this loop:
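
```
loop {
    let x = ...;
    if cond { break; }
    let y = ...;
}
```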

When processing the `let x`, we will add one drop to the scope for
`x`. The break will then insert a drop for `x`. When we process `let
y`, we will add another drop (in fact, to a subscope, but let's ignore
that for now); any later drops would also drop `y`.

### Early exit

There are numerous "normal" ways to early exit a scope: `break`,
`continue`, `return` (panics are handled separately). Whenever an
early exit occurs, the method `exit_scope` is called. It is given the
current point in execution where the early exit occurs, as well as the
scope you want to branch to (note that all early exits are to some
other enclosing scope). `exit_scope` will record this exit point and
insert all the drops that are needed along the outgoing edge.
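
For example (a sketch, not code from this module), a `break` out of two
nested scopes branches via `exit_scope`, which inserts the drops for every
scope it exits:

```
loop {               // scope S1
    let a = ...;
    {                // scope S2
        let b = ...;
        if cond { break; } // exit_scope: drop(b), drop(a), goto break_block
    }
}
```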

Panics are handled in a similar fashion, except that a panic always
returns out to the `DIVERGE_BLOCK`. To trigger a panic, simply call
`panic(p)` with the current point `p`. Or else you can call
`diverge_cleanup`, which will produce a block that you can branch to
which does the appropriate cleanup and then diverges. `panic(p)`
simply calls `diverge_cleanup()` and adds an edge from `p` to the
result.
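
Schematically (an illustration of the relationship, not code from this
module):

```
panic(p):  p --goto--> [cleanup blocks from diverge_cleanup()] --> diverge
```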

### Loop scopes

In addition to the normal scope stack, we track a loop scope stack
that contains only loops. It tracks where a `break` and `continue`
should go to.
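
For example (a sketch, not code from this module), a labeled `break` is
resolved against the entry for the named loop in this stack:

```
'outer: loop {        // pushes a LoopScope for 'outer
    loop {            // pushes an inner LoopScope
        break 'outer; // `find_loop_scope` returns the 'outer entry
    }
}
```

*/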

use build::{BlockAnd, BlockAndExtension, Builder};
use rustc::middle::region::CodeExtent;
use rustc::middle::lang_items;
use rustc::middle::subst::{Substs, VecPerParamSpace};
use rustc::middle::ty::{self, Ty};
use rustc::mir::repr::*;
use syntax::codemap::{Span, DUMMY_SP};
use syntax::parse::token::intern_and_get_ident;
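
/// A single scope on the scope stack. Tracks the drops (and box frees)
/// scheduled for exit from, and unwinding out of, the scope, plus a
/// cached cleanup block for the unwind path (see `diverge_cleanup`).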
pub struct Scope<'tcx> {
    extent: CodeExtent,
    drops: Vec<(Span, Lvalue<'tcx>)>,
    frees: Vec<(Span, Lvalue<'tcx>, Ty<'tcx>)>,
    cached_block: Option<BasicBlock>,
}

#[derive(Clone, Debug)]
pub struct LoopScope {
    /// Extent of the loop
    pub extent: CodeExtent,
    /// Where the body of the loop begins
    pub continue_block: BasicBlock,
    /// Block to branch into when the loop terminates (either by being `break`-en out from, or by
    /// having its condition become false)
    pub break_block: BasicBlock, // where to go on a `break`
    /// Indicates the reachability of the break_block for this loop
    pub might_break: bool
}

impl<'a,'tcx> Builder<'a,'tcx> {
    /// Start a loop scope, which tracks where `continue` and `break`
    /// should branch to. See module comment for more details.
    ///
    /// Returns the might_break attribute of the LoopScope used.
    pub fn in_loop_scope<F>(&mut self,
                            loop_block: BasicBlock,
                            break_block: BasicBlock,
                            f: F)
                            -> bool
        where F: FnOnce(&mut Builder<'a, 'tcx>)
    {
        let extent = self.extent_of_innermost_scope();
        let loop_scope = LoopScope {
            extent: extent.clone(),
            continue_block: loop_block,
            break_block: break_block,
            might_break: false
        };
        self.loop_scopes.push(loop_scope);
        f(self);
        let loop_scope = self.loop_scopes.pop().unwrap();
        assert!(loop_scope.extent == extent);
        loop_scope.might_break
    }

    /// Convenience wrapper that pushes a scope and then executes `f`
    /// to build its contents, popping the scope afterwards.
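    ///
    /// A sketch of the intended usage (hypothetical caller code; `extent`,
    /// `block`, `destination` and `value` are stand-ins), e.g. when lowering
    /// a scope expression:
    ///
    /// ```ignore
    /// this.in_scope(extent, block, |this| {
    ///     this.into(destination, block, value)
    /// })
    /// ```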
    pub fn in_scope<F, R>(&mut self, extent: CodeExtent, mut block: BasicBlock, f: F) -> BlockAnd<R>
        where F: FnOnce(&mut Builder<'a, 'tcx>) -> BlockAnd<R>
    {
        debug!("in_scope(extent={:?}, block={:?})", extent, block);
        self.push_scope(extent, block);
        let rv = unpack!(block = f(self));
        self.pop_scope(extent, block);
        debug!("in_scope: exiting extent={:?} block={:?}", extent, block);
        block.and(rv)
    }

    /// Push a scope onto the stack. You can then build code in this
    /// scope and call `pop_scope` afterwards. Note that these two
    /// calls must be paired; using `in_scope` as a convenience
    /// wrapper may be preferable.
    pub fn push_scope(&mut self, extent: CodeExtent, block: BasicBlock) {
        debug!("push_scope({:?}, {:?})", extent, block);

        // push a new, empty scope; drops and frees are scheduled into it later
        self.scopes.push(Scope {
            extent: extent.clone(),
            drops: vec![],
            frees: vec![],
            cached_block: None,
        });
    }

    /// Pops a scope, which should have extent `extent`, adding any
    /// drops onto the end of `block` that are needed. This must
    /// match 1-to-1 with `push_scope`.
    pub fn pop_scope(&mut self, extent: CodeExtent, block: BasicBlock) {
        debug!("pop_scope({:?}, {:?})", extent, block);
        let scope = self.scopes.pop().unwrap();

        assert_eq!(scope.extent, extent);

        // add in any drops needed on the fallthrough path (any other
        // exiting paths, such as those that arise from `break`, will
        // have drops already)
        for (span, lvalue) in scope.drops {
            self.cfg.push_drop(block, span, &lvalue);
        }
    }

    /// Finds the loop scope for a given label. This is used for
    /// resolving `break` and `continue`.
    pub fn find_loop_scope(&mut self,
                           span: Span,
                           label: Option<CodeExtent>)
                           -> &mut LoopScope {
        let Builder { ref mut loop_scopes, ref mut hir, .. } = *self;
        match label {
            None => {
                // no label? return the innermost loop scope
                loop_scopes.iter_mut().rev().next()
            }
            Some(label) => {
                // otherwise, find the loop-scope with the correct id
                loop_scopes.iter_mut()
                           .rev()
                           .filter(|loop_scope| loop_scope.extent == label)
                           .next()
            }
        }.unwrap_or_else(|| hir.span_bug(span, "no enclosing loop scope found?"))
    }

    /// Branch out of `block` to `target`, exiting all scopes up to
    /// and including `extent`. This will insert whatever drops are
    /// needed, as well as tracking this exit for the SEME region. See
    /// module comment for details.
    pub fn exit_scope(&mut self,
                      span: Span,
                      extent: CodeExtent,
                      block: BasicBlock,
                      target: BasicBlock) {
        let Builder { ref mut scopes, ref mut cfg, ref mut hir, .. } = *self;

        let scope_count = 1 + scopes.iter().rev().position(|scope| scope.extent == extent)
                                    .unwrap_or_else(|| {
                                        hir.span_bug(span, &format!("extent {:?} does not enclose",
                                                                    extent))
                                    });

        for scope in scopes.iter_mut().rev().take(scope_count) {
            for &(drop_span, ref lvalue) in &scope.drops {
                cfg.push_drop(block, drop_span, lvalue);
            }
        }
        cfg.terminate(block, Terminator::Goto { target: target });
    }

    /// Creates a path that performs all required cleanup for unwinding.
    ///
    /// This path terminates in Resume. Returns the start of the path.
    /// See module comment for more details. None indicates there’s no
    /// cleanup to do at this point.
    pub fn diverge_cleanup(&mut self) -> Option<BasicBlock> {
        if self.scopes.is_empty() {
            return None;
        }

        let tcx = self.hir.tcx();
        let unit_tmp = self.get_unit_temp();
        let mut terminator = Terminator::Resume;
        // Given an array of scopes, we generate these from the outermost scope to the innermost
        // one. Thus for array [S0, S1, S2] with corresponding cleanup blocks [B0, B1, B2], we will
        // generate B0 <- B1 <- B2 in left-to-right order. The outermost scope (B0) will always
        // terminate with a Resume terminator.
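        //
        // For example, with `scopes = [S0, S1]`, the unwind path looks like
        //
        //     B1 (S1's drops) -> B0 (S0's drops) -> Resume
        //
        // and this function returns B1, the innermost cached block.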
        for scope in self.scopes.iter_mut().filter(|s| !s.drops.is_empty() || !s.frees.is_empty()) {
            if let Some(b) = scope.cached_block {
                terminator = Terminator::Goto { target: b };
            } else {
                let mut new_block = self.cfg.start_new_cleanup_block();
                self.cfg.terminate(new_block, terminator);
                terminator = Terminator::Goto { target: new_block };

                for &(span, ref lvalue) in scope.drops.iter().rev() {
                    self.cfg.push_drop(new_block, span, lvalue);
                }

                for &(_, ref lvalue, item_ty) in scope.frees.iter().rev() {
                    let item = lang_items::SpannedLangItems::box_free_fn(&tcx.lang_items)
                                         .expect("box_free language item required");
                    let substs = tcx.mk_substs(Substs::new(
                        VecPerParamSpace::new(vec![], vec![], vec![item_ty]),
                        VecPerParamSpace::new(vec![], vec![], vec![])
                    ));
                    let func = Constant {
                        span: item.1,
                        ty: tcx.lookup_item_type(item.0).ty,
                        literal: Literal::Item {
                            def_id: item.0,
                            kind: ItemKind::Function,
                            substs: substs
                        }
                    };
                    let old_block = new_block;
                    new_block = self.cfg.start_new_cleanup_block();
                    self.cfg.terminate(new_block, Terminator::Call {
                        func: Operand::Constant(func),
                        args: vec![Operand::Consume(lvalue.clone())],
                        destination: Some((unit_tmp.clone(), old_block)),
                        cleanup: None // we’re already doing divergence cleanups
                    });
                    terminator = Terminator::Goto { target: new_block };
                }

                scope.cached_block = Some(new_block);
            }
        }

        // Return the innermost cached block, most likely the one we just generated.
        // Note that if there are no cleanups in scope we return None.
        self.scopes.iter().rev().flat_map(|b| b.cached_block).next()
    }

    /// Indicates that `lvalue` should be dropped on exit from
    /// `extent`.
    pub fn schedule_drop(&mut self,
                         span: Span,
                         extent: CodeExtent,
                         lvalue: &Lvalue<'tcx>,
                         lvalue_ty: Ty<'tcx>) {
        if self.hir.needs_drop(lvalue_ty) {
            for scope in self.scopes.iter_mut().rev() {
                // We must invalidate all the cached_blocks leading up to the scope we’re looking
                // for, because otherwise some/most of the blocks in the chain might become
                // incorrect (i.e. they still are pointing at old cached_block).
                scope.cached_block = None;
                if scope.extent == extent {
                    scope.drops.push((span, lvalue.clone()));
                    return;
                }
            }
            self.hir.span_bug(span,
                              &format!("extent {:?} not in scope to drop {:?}", extent, lvalue));
        }
    }

    /// Schedule dropping of a not yet fully initialised box. This cleanup will (and should) only
    /// be translated into an unwind branch. The extent should be for the `EXPR` inside `box EXPR`.
    pub fn schedule_box_free(&mut self,
                             span: Span,
                             extent: CodeExtent,
                             lvalue: &Lvalue<'tcx>,
                             item_ty: Ty<'tcx>) {
        for scope in self.scopes.iter_mut().rev() {
            // We must invalidate all the cached_blocks leading up to the scope we’re looking
            // for, because otherwise some/most of the blocks in the chain might become
            // incorrect (i.e. they still are pointing at old cached_block).
            scope.cached_block = None;
            if scope.extent == extent {
                scope.frees.push((span, lvalue.clone(), item_ty));
                return;
            }
        }
        self.hir.span_bug(span,
                          &format!("extent {:?} not in scope to drop {:?}", extent, lvalue));
    }

    pub fn extent_of_innermost_scope(&self) -> CodeExtent {
        self.scopes.last().map(|scope| scope.extent).unwrap()
    }

    pub fn extent_of_outermost_scope(&self) -> CodeExtent {
        self.scopes.first().map(|scope| scope.extent).unwrap()
    }

    pub fn panic_bounds_check(&mut self,
                              block: BasicBlock,
                              index: Operand<'tcx>,
                              len: Operand<'tcx>,
                              span: Span) {
        // fn(&(filename: &'static str, line: u32), index: usize, length: usize) -> !
        let func = self.lang_function(lang_items::PanicBoundsCheckFnLangItem);
        let args = func.ty.fn_args();
        let ref_ty = args.skip_binder()[0];
        let (region, tup_ty) = if let ty::TyRef(region, tyandmut) = ref_ty.sty {
            (region, tyandmut.ty)
        } else {
            self.hir.span_bug(span, &format!("unexpected panic_bound_check type: {:?}", func.ty));
        };
        let (tuple, tuple_ref) = (self.temp(tup_ty), self.temp(ref_ty));
        let (file, line) = self.span_to_fileline_args(span);
        let elems = vec![Operand::Constant(file), Operand::Constant(line)];
        // FIXME: We should have this as a constant, rather than a stack variable (to not pollute
        // icache with cold branch code), however to achieve that we either have to rely on rvalue
        // promotion or have some way, in MIR, to create constants.
        self.cfg.push_assign(block, DUMMY_SP, &tuple, // tuple = (file_arg, line_arg);
                             Rvalue::Aggregate(AggregateKind::Tuple, elems));
        // FIXME: is this region really correct here?
        self.cfg.push_assign(block, DUMMY_SP, &tuple_ref, // tuple_ref = &tuple;
                             Rvalue::Ref(*region, BorrowKind::Unique, tuple));
        let cleanup = self.diverge_cleanup();
        self.cfg.terminate(block, Terminator::Call {
            func: Operand::Constant(func),
            args: vec![Operand::Consume(tuple_ref), index, len],
            destination: None,
            cleanup: cleanup
        });
    }

    /// Create diverge cleanup and branch to it from `block`.
    pub fn panic(&mut self, block: BasicBlock, msg: &'static str, span: Span) {
        // fn(&(msg: &'static str, filename: &'static str, line: u32)) -> !
        let func = self.lang_function(lang_items::PanicFnLangItem);
        let args = func.ty.fn_args();
        let ref_ty = args.skip_binder()[0];
        let (region, tup_ty) = if let ty::TyRef(region, tyandmut) = ref_ty.sty {
            (region, tyandmut.ty)
        } else {
            self.hir.span_bug(span, &format!("unexpected panic type: {:?}", func.ty));
        };
        let (tuple, tuple_ref) = (self.temp(tup_ty), self.temp(ref_ty));
        let (file, line) = self.span_to_fileline_args(span);
        let message = Constant {
            span: DUMMY_SP,
            ty: self.hir.tcx().mk_static_str(),
            literal: self.hir.str_literal(intern_and_get_ident(msg))
        };
        let elems = vec![Operand::Constant(message),
                         Operand::Constant(file),
                         Operand::Constant(line)];
        // FIXME: We should have this as a constant, rather than a stack variable (to not pollute
        // icache with cold branch code), however to achieve that we either have to rely on rvalue
        // promotion or have some way, in MIR, to create constants.
        self.cfg.push_assign(block, DUMMY_SP, &tuple, // tuple = (message_arg, file_arg, line_arg);
                             Rvalue::Aggregate(AggregateKind::Tuple, elems));
        // FIXME: is this region really correct here?
        self.cfg.push_assign(block, DUMMY_SP, &tuple_ref, // tuple_ref = &tuple;
                             Rvalue::Ref(*region, BorrowKind::Unique, tuple));
        let cleanup = self.diverge_cleanup();
        self.cfg.terminate(block, Terminator::Call {
            func: Operand::Constant(func),
            args: vec![Operand::Consume(tuple_ref)],
            destination: None,
            cleanup: cleanup
        });
    }
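
    /// Looks up the given lang item and builds a MIR constant referencing
    /// that function, with empty substitutions.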
    fn lang_function(&mut self, lang_item: lang_items::LangItem) -> Constant<'tcx> {
        let funcdid = match self.hir.tcx().lang_items.require(lang_item) {
            Ok(d) => d,
            Err(m) => {
                self.hir.tcx().sess.fatal(&*m)
            }
        };
        Constant {
            span: DUMMY_SP,
            ty: self.hir.tcx().lookup_item_type(funcdid).ty,
            literal: Literal::Item {
                def_id: funcdid,
                kind: ItemKind::Function,
                substs: self.hir.tcx().mk_substs(Substs::empty())
            }
        }
    }
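
    /// Builds the file-name and line-number constants for `span`, as passed
    /// to the panic lang items.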
    fn span_to_fileline_args(&mut self, span: Span) -> (Constant<'tcx>, Constant<'tcx>) {
        let span_lines = self.hir.tcx().sess.codemap().lookup_char_pos(span.lo);
        (Constant {
            span: DUMMY_SP,
            ty: self.hir.tcx().mk_static_str(),
            literal: self.hir.str_literal(intern_and_get_ident(&span_lines.file.name))
        }, Constant {
            span: DUMMY_SP,
            ty: self.hir.tcx().types.u32,
            literal: self.hir.usize_literal(span_lines.line)
        })
    }
}