1 // Copyright 2012-2016 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 use syntax::ast::{self, MetaItem};
13 use rustc_data_structures::indexed_set::{IdxSet, IdxSetBuf};
14 use rustc_data_structures::indexed_vec::Idx;
15 use rustc_data_structures::bitslice::{bitwise, BitwiseOperator};
17 use rustc::ty::{self, TyCtxt};
18 use rustc::mir::{self, Mir, BasicBlock, BasicBlockData, Location, Statement, Terminator};
19 use rustc::session::Session;
21 use std::fmt::{self, Debug};
24 use std::path::PathBuf;
27 pub use self::impls::{MaybeInitializedLvals, MaybeUninitializedLvals};
28 pub use self::impls::{DefinitelyInitializedLvals};
29 pub use self::impls::borrows::{Borrows, BorrowData, BorrowIndex};
30 pub(crate) use self::drop_flag_effects::*;
32 use self::move_paths::MoveData;
34 mod drop_flag_effects;
39 pub(crate) use self::move_paths::indexes;
/// Drives a single dataflow analysis and optionally renders graphviz
/// dumps of the flow state before and after propagation.
pub(crate) struct DataflowBuilder<'a, 'tcx: 'a, BD> where BD: BitDenotation
    // The analysis itself: per-block entry/gen/kill sets plus the operator.
    flow_state: DataflowAnalysis<'a, 'tcx, BD>,
    // If set, path to write graphviz of the state *before* propagation
    // (from the `borrowck_graphviz_preflow` attribute; see `do_dataflow`).
    print_preflow_to: Option<String>,
    // If set, path to write graphviz of the state *after* propagation.
    print_postflow_to: Option<String>,
/// Orchestrates a dataflow problem: building the transfer-function
/// sets, (optionally) instrumenting, and solving to a fixed point.
pub trait Dataflow<BD: BitDenotation> {
    /// Sets up and runs the dataflow problem, using `p` to render results if
    /// implementation so chooses.
    fn dataflow<P>(&mut self, p: P) where P: Fn(&BD, BD::Idx) -> &Debug {
        let _ = p; // default implementation does not instrument process.

    /// Sets up the entry, gen, and kill sets for this instance of a dataflow problem.
    fn build_sets(&mut self);

    /// Finds a fixed-point solution to this instance of a dataflow problem.
    fn propagate(&mut self);
impl<'a, 'tcx: 'a, BD> Dataflow<BD> for DataflowBuilder<'a, 'tcx, BD> where BD: BitDenotation
    // Full pipeline: build sets, dump "preflow" graphviz if requested,
    // iterate to a fixed point, then dump "postflow" graphviz.
    fn dataflow<P>(&mut self, p: P) where P: Fn(&BD, BD::Idx) -> &Debug {
        self.flow_state.build_sets();
        // `p` renders a single bit index for the graphviz output.
        self.pre_dataflow_instrumentation(|c,i| p(c,i)).unwrap();
        self.flow_state.propagate();
        self.post_dataflow_instrumentation(|c,i| p(c,i)).unwrap();

    // Direct delegation to the underlying analysis.
    fn build_sets(&mut self) { self.flow_state.build_sets(); }
    fn propagate(&mut self) { self.flow_state.propagate(); }
/// Scans `attrs` for a `#[rustc_mir(...)]` attribute and returns the
/// meta-item in its argument list whose name is `name`, if any.
pub(crate) fn has_rustc_mir_with(attrs: &[ast::Attribute], name: &str) -> Option<MetaItem> {
    if attr.check_name("rustc_mir") {
        // `meta_item_list` yields Option<Vec<_>>; `flat_map` over `iter()`
        // collapses the `None` case into an empty iteration.
        let items = attr.meta_item_list();
        for item in items.iter().flat_map(|l| l.iter()) {
            match item.meta_item() {
                Some(mi) if mi.check_name(name) => return Some(mi.clone()),
/// Bundles the move data computed for a MIR body together with the
/// parameter environment used while analyzing it.
pub struct MoveDataParamEnv<'tcx> {
    pub(crate) move_data: MoveData<'tcx>,
    pub(crate) param_env: ty::ParamEnv<'tcx>,
/// Runs the dataflow analysis `bd` over `mir` to a fixed point and
/// returns the resulting per-block sets.
///
/// `attributes` may carry `#[rustc_mir(borrowck_graphviz_preflow = "…")]`
/// and/or `borrowck_graphviz_postflow` to request graphviz dumps of the
/// flow state before/after propagation.
pub(crate) fn do_dataflow<'a, 'tcx, BD, P>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
node_id: ast::NodeId,
attributes: &[ast::Attribute],
dead_unwinds: &IdxSet<BasicBlock>,
-> DataflowResults<BD>
where BD: BitDenotation,
P: Fn(&BD, BD::Idx) -> &fmt::Debug
    // Looks up the string payload of `#[rustc_mir(<name> = "<path>")]`,
    // reporting an error when the meta-item carries no string value.
    let name_found = |sess: &Session, attrs: &[ast::Attribute], name| -> Option<String> {
        if let Some(item) = has_rustc_mir_with(attrs, name) {
            if let Some(s) = item.value_str() {
                return Some(s.to_string())
            &format!("{} attribute requires a path", item.name()));

    let print_preflow_to =
        name_found(tcx.sess, attributes, "borrowck_graphviz_preflow");
    let print_postflow_to =
        name_found(tcx.sess, attributes, "borrowck_graphviz_postflow");

    let mut mbcx = DataflowBuilder {
        flow_state: DataflowAnalysis::new(tcx, mir, dead_unwinds, bd),

    // `dataflow` (called on `mbcx` in elided lines) runs the analysis;
    // then consume the builder to extract the results.
    mbcx.flow_state.results()
/// Context threaded through the fixed-point loop; mutably borrows the
/// analysis while the CFG is walked. (A `changed` flag — used in
/// `propagate`/`walk_cfg` below — is declared in lines elided here.)
struct PropagationContext<'b, 'a: 'b, 'tcx: 'a, O> where O: 'b + BitDenotation
    builder: &'b mut DataflowAnalysis<'a, 'tcx, O>,
145 impl<'a, 'tcx: 'a, BD> DataflowAnalysis<'a, 'tcx, BD> where BD: BitDenotation
147 fn propagate(&mut self) {
148 let mut temp = IdxSetBuf::new_empty(self.flow_state.sets.bits_per_block);
149 let mut propcx = PropagationContext {
153 while propcx.changed {
154 propcx.changed = false;
155 propcx.reset(&mut temp);
156 propcx.walk_cfg(&mut temp);
160 fn build_sets(&mut self) {
161 // First we need to build the entry-, gen- and kill-sets. The
162 // gather_moves information provides a high-level mapping from
163 // mir-locations to the MoveOuts (and those correspond
164 // directly to gen-sets here). But we still need to figure out
168 let sets = &mut self.flow_state.sets.for_block(mir::START_BLOCK.index());
169 self.flow_state.operator.start_block_effect(sets);
172 for (bb, data) in self.mir.basic_blocks().iter_enumerated() {
173 let &mir::BasicBlockData { ref statements, ref terminator, is_cleanup: _ } = data;
175 let sets = &mut self.flow_state.sets.for_block(bb.index());
176 for j_stmt in 0..statements.len() {
177 let location = Location { block: bb, statement_index: j_stmt };
178 self.flow_state.operator.statement_effect(sets, location);
181 if terminator.is_some() {
182 let location = Location { block: bb, statement_index: statements.len() };
183 self.flow_state.operator.terminator_effect(sets, location);
impl<'b, 'a: 'b, 'tcx: 'a, BD> PropagationContext<'b, 'a, 'tcx, BD> where BD: BitDenotation
    /// Resets every word of `bits` to the analysis' bottom value.
    fn reset(&mut self, bits: &mut IdxSet<BD::Idx>) {
        // All-ones when bottom is `true`, all-zeroes otherwise.
        let e = if BD::bottom_value() {!0} else {0};
        for b in bits.words_mut() {

    /// Visits every block once: computes its exit state
    /// `(on_entry | gen) - kill` into `in_out`, then pushes that state
    /// into the entry sets of the block's successors.
    fn walk_cfg(&mut self, in_out: &mut IdxSet<BD::Idx>) {
        let mir = self.builder.mir;
        for (bb_idx, bb_data) in mir.basic_blocks().iter().enumerate() {
            let builder = &mut self.builder;
            let sets = builder.flow_state.sets.for_block(bb_idx);
            debug_assert!(in_out.words().len() == sets.on_entry.words().len());
            in_out.clone_from(sets.on_entry);
            in_out.union(sets.gen_set);
            in_out.subtract(sets.kill_set);

            // Successor propagation sets `self.changed` when any entry
            // set actually grows, driving another fixed-point pass.
            builder.propagate_bits_into_graph_successors_of(
                in_out, &mut self.changed, (mir::BasicBlock::new(bb_idx), bb_data));
/// Derives the output path for a graphviz dump: the file name component
/// of `path` is prefixed with `context` (e.g. `ctx` + `/tmp/foo.dot`
/// yields `/tmp/ctx_foo.dot`).
///
/// # Panics
///
/// Panics if `path` has no final file-name component or it is not
/// valid UTF-8.
fn dataflow_path(context: &str, prepost: &str, path: &str) -> PathBuf {
    // `prepost` ("preflow"/"postflow") is not currently folded into the
    // file name; a previous `format!("{}_{}", context, prepost);`
    // statement built that string only to discard it, so it was removed.
    let _ = prepost;
    let mut path = PathBuf::from(path);
    let new_file_name = {
        let orig_file_name = path.file_name().unwrap().to_str().unwrap();
        format!("{}_{}", context, orig_file_name)
    };
    path.set_file_name(new_file_name);
    path
}
impl<'a, 'tcx: 'a, BD> DataflowBuilder<'a, 'tcx, BD> where BD: BitDenotation
    /// Writes a graphviz rendering of the flow state *before*
    /// propagation, if a `borrowck_graphviz_preflow` path was supplied.
    fn pre_dataflow_instrumentation<P>(&self, p: P) -> io::Result<()>
        where P: Fn(&BD, BD::Idx) -> &Debug
        if let Some(ref path_str) = self.print_preflow_to {
            // Prefix the file name with the analysis name so multiple
            // analyses can dump side by side.
            let path = dataflow_path(BD::name(), "preflow", path_str);
            graphviz::print_borrowck_graph_to(self, &path, p)

    /// Writes a graphviz rendering of the flow state *after*
    /// propagation, if a `borrowck_graphviz_postflow` path was supplied.
    fn post_dataflow_instrumentation<P>(&self, p: P) -> io::Result<()>
        where P: Fn(&BD, BD::Idx) -> &Debug
        if let Some(ref path_str) = self.print_postflow_to {
            let path = dataflow_path(BD::name(), "postflow", path_str);
            graphviz::print_borrowck_graph_to(self, &path, p)
/// Maps each block to a set of bits
impl<E:Idx> Clone for Bits<E> {
    // Deep-copies the underlying bit buffer.
    fn clone(&self) -> Self { Bits { bits: self.bits.clone() } }
impl<E:Idx> Bits<E> {
    /// Wraps an already-allocated bit buffer.
    fn new(bits: IdxSetBuf<E>) -> Self {
/// DataflowResultsConsumer abstracts over walking the MIR with some
/// already constructed dataflow results.
///
/// It abstracts over the FlowState and also completely hides the
/// underlying flow analysis results, because it needs to handle cases
/// where we are combining the results of *multiple* flow analyses
/// (e.g. borrows + inits + uninits).
pub trait DataflowResultsConsumer<'a, 'tcx: 'a> {

    // Observation Hooks: override (at least one of) these to get analysis feedback.
    fn visit_block_entry(&mut self,
                         _flow_state: &Self::FlowState) {}

    fn visit_statement_entry(&mut self,
                             _stmt: &Statement<'tcx>,
                             _flow_state: &Self::FlowState) {}

    fn visit_terminator_entry(&mut self,
                              _term: &Terminator<'tcx>,
                              _flow_state: &Self::FlowState) {}

    // Main entry point: this drives the processing of results.
    //
    // Walks every basic block, resetting the reusable flow state to the
    // block's entry state before replaying the block's effects.
    fn analyze_results(&mut self, flow_uninit: &mut Self::FlowState) {
        let flow = flow_uninit;
        for bb in self.mir().basic_blocks().indices() {
            self.reset_to_entry_of(bb, flow);
            self.process_basic_block(bb, flow);

    // Replays the block statement by statement, invoking the visit
    // hooks with the flow state as it is *on entry* to each statement
    // and to the terminator.
    fn process_basic_block(&mut self, bb: BasicBlock, flow_state: &mut Self::FlowState) {
        let BasicBlockData { ref statements, ref terminator, is_cleanup: _ } =
        let mut location = Location { block: bb, statement_index: 0 };
        for stmt in statements.iter() {
            self.reconstruct_statement_effect(location, flow_state);
            self.visit_statement_entry(location, stmt, flow_state);
            self.apply_local_effect(location, flow_state);
            location.statement_index += 1;

        if let Some(ref term) = *terminator {
            self.reconstruct_terminator_effect(location, flow_state);
            self.visit_terminator_entry(location, term, flow_state);

            // We don't need to apply the effect of the terminator,
            // since we are only visiting dataflow state on control
            // flow entry to the various nodes. (But we still need to
            // reconstruct the effect, because the visit method might

    // Delegated Hooks: Provide access to the MIR and process the flow state.

    fn mir(&self) -> &'a Mir<'tcx>;

    // reset the state bitvector to represent the entry to block `bb`.
    fn reset_to_entry_of(&mut self,
                         flow_state: &mut Self::FlowState);

    // build gen + kill sets for statement at `loc`.
    fn reconstruct_statement_effect(&mut self,
                                    flow_state: &mut Self::FlowState);

    // build gen + kill sets for terminator for `loc`.
    fn reconstruct_terminator_effect(&mut self,
                                     flow_state: &mut Self::FlowState);

    // apply current gen + kill sets to `flow_state`.
    //
    // (`bb` and `stmt_idx` parameters can be ignored if desired by
    // client. For the terminator, the `stmt_idx` will be the number
    // of statements in the block.)
    fn apply_local_effect(&mut self,
                          flow_state: &mut Self::FlowState);
/// A dataflow problem instance: the per-block bit sets plus the operator
/// `O` that defines initialization and transfer functions.
pub struct DataflowAnalysis<'a, 'tcx: 'a, O> where O: BitDenotation
    flow_state: DataflowState<O>,
    // Blocks whose unwind edges are known never to be taken; propagation
    // skips their unwind successors (see
    // `propagate_bits_into_graph_successors_of`).
    dead_unwinds: &'a IdxSet<mir::BasicBlock>,
impl<'a, 'tcx: 'a, O> DataflowAnalysis<'a, 'tcx, O> where O: BitDenotation
    /// Consumes the analysis, yielding its final state as results.
    pub fn results(self) -> DataflowResults<O> {
        DataflowResults(self.flow_state)

    /// The MIR body this analysis runs over.
    pub fn mir(&self) -> &'a Mir<'tcx> { self.mir }
/// The final state of a completed dataflow analysis (see `DataflowState`).
pub struct DataflowResults<O>(pub(crate) DataflowState<O>) where O: BitDenotation;
impl<O: BitDenotation> DataflowResults<O> {
    /// All per-block entry/gen/kill sets of the solved analysis.
    pub fn sets(&self) -> &AllSets<O::Idx> {

    /// The operator that defined this analysis.
    pub fn operator(&self) -> &O {
/// State of a dataflow analysis; couples a collection of bit sets
/// with operator used to initialize and merge bits during analysis.
pub struct DataflowState<O: BitDenotation>
    /// All the sets for the analysis. (Factored into its
    /// own structure so that we can borrow it mutably
    /// on its own separate from other fields.)
    pub sets: AllSets<O::Idx>,

    /// operator used to initialize, combine, and interpret bits.
    pub(crate) operator: O,
impl<O: BitDenotation> DataflowState<O> {
    /// Invokes `f` with the index of every bit that is set in `words`.
    pub fn each_bit<F>(&self, words: &IdxSet<O::Idx>, f: F) where F: FnMut(O::Idx)
        // Only the first `bits_per_block` bits are meaningful.
        let bits_per_block = self.operator.bits_per_block();
        words.each_bit(bits_per_block, f)

    /// Renders every set bit of `words` via `render_idx` and collects
    /// the renderings into a vector (used for debug/graphviz output).
    pub fn interpret_set<'c, P>(&self,
                                words: &IdxSet<O::Idx>,
        where P: Fn(&O, O::Idx) -> &Debug
        let mut v = Vec::new();
        self.each_bit(words, |i| {
            v.push(render_idx(o, i));
/// All the per-block bit sets for one analysis, stored as flat buffers
/// indexed by block number times `words_per_block`.
pub struct AllSets<E: Idx> {
    /// Analysis bitwidth for each block.
    bits_per_block: usize,

    /// Number of words associated with each block entry
    /// equal to bits_per_block / usize::BITS, rounded up.
    words_per_block: usize,

    /// For each block, bits generated by executing the statements in
    /// the block. (For comparison, the Terminator for each block is
    /// handled in a flow-specific manner during propagation.)

    /// For each block, bits killed by executing the statements in the
    /// block. (For comparison, the Terminator for each block is
    /// handled in a flow-specific manner during propagation.)

    /// For each block, bits valid on entry to the block.
    on_entry_sets: Bits<E>,
/// Triple of sets associated with a given block.
///
/// Generally, one sets up `on_entry`, `gen_set`, and `kill_set` for
/// each block individually, and then runs the dataflow analysis which
/// iteratively modifies the various `on_entry` sets (but leaves the
/// other two sets unchanged, since they represent the effect of the
/// block, which should be invariant over the course of the analysis).
///
/// It is best to ensure that the intersection of `gen_set` and
/// `kill_set` is empty; otherwise the results of the dataflow will
/// have a hidden dependency on what order the bits are generated and
/// killed during the iteration. (This is such a good idea that the
/// `fn gen` and `fn kill` methods that set their state enforce this
pub struct BlockSets<'a, E: Idx> {
    /// Dataflow state immediately before control flow enters the given block.
    pub(crate) on_entry: &'a mut IdxSet<E>,

    /// Bits that are set to 1 by the time we exit the given block.
    pub(crate) gen_set: &'a mut IdxSet<E>,

    /// Bits that are set to 0 by the time we exit the given block.
    pub(crate) kill_set: &'a mut IdxSet<E>,
465 impl<'a, E:Idx> BlockSets<'a, E> {
466 fn gen(&mut self, e: &E) {
468 self.kill_set.remove(e);
470 fn kill(&mut self, e: &E) {
471 self.gen_set.remove(e);
472 self.kill_set.add(e);
impl<E:Idx> AllSets<E> {
    /// Bitwidth of each block's sets (word-aligned; see `DataflowAnalysis::new`).
    pub fn bits_per_block(&self) -> usize { self.bits_per_block }

    /// Mutably borrows the three sets for `block_idx` as disjoint
    /// sub-ranges of the flat backing buffers.
    pub fn for_block(&mut self, block_idx: usize) -> BlockSets<E> {
        let offset = self.words_per_block * block_idx;
        let range = E::new(offset)..E::new(offset + self.words_per_block);
        on_entry: self.on_entry_sets.bits.range_mut(&range),
        gen_set: self.gen_sets.bits.range_mut(&range),
        kill_set: self.kill_sets.bits.range_mut(&range),

    // Shared read-only lookup used by the three accessors below.
    fn lookup_set_for<'a>(&self, sets: &'a Bits<E>, block_idx: usize) -> &'a IdxSet<E> {
        let offset = self.words_per_block * block_idx;
        let range = E::new(offset)..E::new(offset + self.words_per_block);
        sets.bits.range(&range)

    /// Bits generated by `block_idx`'s statements.
    pub fn gen_set_for(&self, block_idx: usize) -> &IdxSet<E> {
        self.lookup_set_for(&self.gen_sets, block_idx)

    /// Bits killed by `block_idx`'s statements.
    pub fn kill_set_for(&self, block_idx: usize) -> &IdxSet<E> {
        self.lookup_set_for(&self.kill_sets, block_idx)

    /// Bits valid on entry to `block_idx`.
    pub fn on_entry_set_for(&self, block_idx: usize) -> &IdxSet<E> {
        self.lookup_set_for(&self.on_entry_sets, block_idx)
/// Parameterization for the precise form of data flow that is used.
pub trait DataflowOperator: BitwiseOperator {
    /// Specifies the initial value for each bit in the `on_entry` set
    /// (`true` means entry sets start all-ones; see `DataflowAnalysis::new`
    /// and `PropagationContext::reset`).
    fn bottom_value() -> bool;
/// A specific dataflow analysis: names the bit-index domain and supplies
/// the transfer functions applied per statement/terminator.
pub trait BitDenotation: DataflowOperator {
    /// Specifies what index type is used to access the bitvector.

    /// A name describing the dataflow analysis that this
    /// BitDenotation is supporting. The name should be something
    /// suitable for plugging in as part of a filename e.g. avoid
    /// space-characters or other things that tend to look bad on a
    /// file system, like slashes or periods. It is also better for
    /// the name to be reasonably short, again because it will be
    /// plugged into a filename.
    fn name() -> &'static str;

    /// Size of each bitvector allocated for each block in the analysis.
    fn bits_per_block(&self) -> usize;

    /// Mutates the block-sets (the flow sets for the given
    /// basic block) according to the effects that have been
    /// established *prior* to entering the start block.
    ///
    /// (For example, establishing the call arguments.)
    ///
    /// (Typically this should only modify `sets.on_entry`, since the
    /// gen and kill sets should reflect the effects of *executing*
    /// the start block itself.)
    fn start_block_effect(&self, sets: &mut BlockSets<Self::Idx>);

    /// Mutates the block-sets (the flow sets for the given
    /// basic block) according to the effects of evaluating statement.
    ///
    /// This is used, in particular, for building up the
    /// "transfer-function" representing the overall-effect of the
    /// block, represented via GEN and KILL sets.
    ///
    /// The statement is identified as `bb_data[idx_stmt]`, where
    /// `bb_data` is the sequence of statements identified by `bb` in
    fn statement_effect(&self,
                        sets: &mut BlockSets<Self::Idx>,

    /// Mutates the block-sets (the flow sets for the given
    /// basic block) according to the effects of evaluating
    ///
    /// This is used, in particular, for building up the
    /// "transfer-function" representing the overall-effect of the
    /// block, represented via GEN and KILL sets.
    ///
    /// The effects applied here cannot depend on which branch the
    fn terminator_effect(&self,
                         sets: &mut BlockSets<Self::Idx>,

    /// Mutates the block-sets according to the (flow-dependent)
    /// effect of a successful return from a Call terminator.
    ///
    /// If basic-block BB_x ends with a call-instruction that, upon
    /// successful return, flows to BB_y, then this method will be
    /// called on the exit flow-state of BB_x in order to set up the
    /// entry flow-state of BB_y.
    ///
    /// This is used, in particular, as a special case during the
    /// "propagate" loop where all of the basic blocks are repeatedly
    /// visited. Since the effects of a Call terminator are
    /// flow-dependent, the current MIR cannot encode them via just
    /// GEN and KILL sets attached to the block, and so instead we add
    /// this extra machinery to represent the flow-dependent effect.
    ///
    /// FIXME: Right now this is a bit of a wart in the API. It might
    /// be better to represent this as an additional gen- and
    /// kill-sets associated with each edge coming out of the basic
    fn propagate_call_return(&self,
                             in_out: &mut IdxSet<Self::Idx>,
                             call_bb: mir::BasicBlock,
                             dest_bb: mir::BasicBlock,
                             dest_lval: &mir::Lvalue);
impl<'a, 'tcx: 'a, D> DataflowAnalysis<'a, 'tcx, D> where D: BitDenotation
    /// Allocates the entry/gen/kill bitsets for every block of `mir`,
    /// with entry sets initialized to the analysis' bottom value.
    pub fn new(_tcx: TyCtxt<'a, 'tcx, 'tcx>,
               dead_unwinds: &'a IdxSet<mir::BasicBlock>,
               denotation: D) -> Self {
        let bits_per_block = denotation.bits_per_block();
        let usize_bits = mem::size_of::<usize>() * 8;
        // Round up so each block's sets occupy whole machine words.
        let words_per_block = (bits_per_block + usize_bits - 1) / usize_bits;

        // (now rounded up to multiple of word size)
        let bits_per_block = words_per_block * usize_bits;

        // One flat buffer spans all blocks of the body.
        let num_blocks = mir.basic_blocks().len();
        let num_overall = num_blocks * bits_per_block;

        // gen/kill sets start empty; entry sets start at the bottom
        // value (all-ones when `bottom_value()` is true, else empty).
        let zeroes = Bits::new(IdxSetBuf::new_empty(num_overall));
        let on_entry = Bits::new(if D::bottom_value() {
            IdxSetBuf::new_filled(num_overall)
            IdxSetBuf::new_empty(num_overall)

            flow_state: DataflowState {
                gen_sets: zeroes.clone(),
                on_entry_sets: on_entry,
                operator: denotation,
impl<'a, 'tcx: 'a, D> DataflowAnalysis<'a, 'tcx, D> where D: BitDenotation
    /// Propagates the bits of `in_out` into all the successors of `bb`,
    /// using bitwise operator denoted by `self.operator`.
    ///
    /// For most blocks, this is entirely uniform. However, for blocks
    /// that end with a call terminator, the effect of the call on the
    /// dataflow state may depend on whether the call returned
    /// successfully or unwound.
    ///
    /// To reflect this, the `propagate_call_return` method of the
    /// `BitDenotation` mutates `in_out` when propagating `in_out` via
    /// a call terminator; such mutation is performed *last*, to
    /// ensure its side-effects do not leak elsewhere (e.g. into
    fn propagate_bits_into_graph_successors_of(
        in_out: &mut IdxSet<D::Idx>,
        (bb, bb_data): (mir::BasicBlock, &mir::BasicBlockData))
        match bb_data.terminator().kind {
            // Terminators with no successors: nothing to propagate.
            mir::TerminatorKind::Return |
            mir::TerminatorKind::Resume |
            mir::TerminatorKind::GeneratorDrop |
            mir::TerminatorKind::Unreachable => {}
            // Single successor (no unwind/drop edge present).
            mir::TerminatorKind::Goto { ref target } |
            mir::TerminatorKind::Assert { ref target, cleanup: None, .. } |
            mir::TerminatorKind::Yield { resume: ref target, drop: None, .. } |
            mir::TerminatorKind::Drop { ref target, location: _, unwind: None } |
            mir::TerminatorKind::DropAndReplace {
                ref target, value: _, location: _, unwind: None
                self.propagate_bits_into_entry_set_for(in_out, changed, target);
            // Yield with a drop edge: both successors receive the state.
            mir::TerminatorKind::Yield { resume: ref target, drop: Some(ref drop), .. } => {
                self.propagate_bits_into_entry_set_for(in_out, changed, target);
                self.propagate_bits_into_entry_set_for(in_out, changed, drop);
            // Successor plus an unwind edge; the unwind edge is skipped
            // when `bb` is known to have a dead unwind path.
            mir::TerminatorKind::Assert { ref target, cleanup: Some(ref unwind), .. } |
            mir::TerminatorKind::Drop { ref target, location: _, unwind: Some(ref unwind) } |
            mir::TerminatorKind::DropAndReplace {
                ref target, value: _, location: _, unwind: Some(ref unwind)
                self.propagate_bits_into_entry_set_for(in_out, changed, target);
                if !self.dead_unwinds.contains(&bb) {
                    self.propagate_bits_into_entry_set_for(in_out, changed, unwind);
            // Every switch target receives the same exit state.
            mir::TerminatorKind::SwitchInt { ref targets, .. } => {
                for target in targets {
                    self.propagate_bits_into_entry_set_for(in_out, changed, target);
            mir::TerminatorKind::Call { ref cleanup, ref destination, func: _, args: _ } => {
                if let Some(ref unwind) = *cleanup {
                    if !self.dead_unwinds.contains(&bb) {
                        self.propagate_bits_into_entry_set_for(in_out, changed, unwind);
                if let Some((ref dest_lval, ref dest_bb)) = *destination {
                    // N.B.: This must be done *last*, after all other
                    // propagation, as documented in comment above.
                    self.flow_state.operator.propagate_call_return(
                        in_out, bb, *dest_bb, dest_lval);
                    self.propagate_bits_into_entry_set_for(in_out, changed, dest_bb);
703 fn propagate_bits_into_entry_set_for(&mut self,
704 in_out: &IdxSet<D::Idx>,
706 bb: &mir::BasicBlock) {
707 let entry_set = self.flow_state.sets.for_block(bb.index()).on_entry;
708 let set_changed = bitwise(entry_set.words_mut(),
710 &self.flow_state.operator);