1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
12 //! A module for propagating forward dataflow information. The analysis
13 //! assumes that the items to be propagated can be represented as bits
14 //! and thus uses bitvectors. Your job is simply to specify the so-called
15 //! GEN and KILL bits for each expression.
24 use syntax::print::pprust::PrintState;
25 use util::nodemap::NodeMap;
27 use hir::intravisit::{self, IdRange};
28 use hir::print as pprust;
/// Selects which bit set `each_bit_for_node` reports: the bits valid on
/// entry to a CFG node, or the bits valid on exit (entry after applying
/// the node's gen/kill sets).
#[derive(Copy, Clone, Debug)]
pub enum EntryOrExit {
    Entry,
    Exit,
}
38 pub struct DataFlowContext<'a, 'tcx: 'a, O> {
39 tcx: TyCtxt<'a, 'tcx, 'tcx>,
41 /// a name for the analysis using this dataflow instance
42 analysis_name: &'static str,
44 /// the data flow operator
47 /// number of bits to propagate per id
50 /// number of words we will use to store bits_per_id.
51 /// equal to bits_per_id/usize::BITS rounded up.
54 // mapping from node to cfg node index
55 // FIXME (#6298): Shouldn't this go with CFG?
56 nodeid_to_index: NodeMap<Vec<CFGIndex>>,
58 // Bit sets per cfg node. The following three fields (`gens`, `kills`,
59 // and `on_entry`) all have the same structure. For each id in
60 // `id_range`, there is a range of words equal to `words_per_id`.
61 // So, to access the bits for any given id, you take a slice of
62 // the full vector (see the method `compute_id_range()`).
64 /// bits generated as we exit the cfg node. Updated by `add_gen()`.
67 /// bits killed as we exit the cfg node, or non-locally jump over
68 /// it. Updated by `add_kill(KillFrom::ScopeEnd)`.
69 scope_kills: Vec<usize>,
71 /// bits killed as we exit the cfg node directly; if it is jumped
72 /// over, e.g. via `break`, the kills are not reflected in the
73 /// jump's effects. Updated by `add_kill(KillFrom::Execution)`.
74 action_kills: Vec<usize>,
76 /// bits that are valid on entry to the cfg node. Updated by
/// A word-level combining operator used by `bitwise` to merge bit sets.
pub trait BitwiseOperator {
    /// Joins two predecessor bits together, typically either `|` or `&`
    fn join(&self, succ: usize, pred: usize) -> usize;
}
86 /// Parameterization for the precise form of data flow that is used.
87 pub trait DataFlowOperator : BitwiseOperator {
88 /// Specifies the initial value for each bit in the `on_entry` set
89 fn initial_value(&self) -> bool;
92 struct PropagationContext<'a, 'b: 'a, 'tcx: 'b, O: 'a> {
93 dfcx: &'a mut DataFlowContext<'b, 'tcx, O>,
97 fn get_cfg_indices<'a>(id: ast::NodeId, index: &'a NodeMap<Vec<CFGIndex>>) -> &'a [CFGIndex] {
98 let opt_indices = index.get(&id);
99 opt_indices.map(|v| &v[..]).unwrap_or(&[])
102 impl<'a, 'tcx, O:DataFlowOperator> DataFlowContext<'a, 'tcx, O> {
103 fn has_bitset_for_nodeid(&self, n: ast::NodeId) -> bool {
104 assert!(n != ast::DUMMY_NODE_ID);
105 self.nodeid_to_index.contains_key(&n)
// Pretty-printer annotation hook: lets the HIR pretty-printer interleave
// this analysis's bit sets with the printed source. Used by `propagate`
// (below) for its debug output.
impl<'a, 'tcx, O:DataFlowOperator> pprust::PpAnn for DataFlowContext<'a, 'tcx, O> {
// Delegate nested-item printing to the HIR map's own `PpAnn` impl.
fn nested(&self, state: &mut pprust::State, nested: pprust::Nested) -> io::Result<()> {
pprust::PpAnn::nested(&self.tcx.hir, state, nested)
// NOTE(review): the header line of the method these two parameters belong
// to is elided in this extract — confirm against the full file.
ps: &mut pprust::State,
node: pprust::AnnNode) -> io::Result<()> {
// Map the pretty-printer node to the HIR node id whose bits we report;
// names, items and sub-items are reported against the crate root id.
let id = match node {
pprust::NodeName(_) => ast::CRATE_NODE_ID,
pprust::NodeExpr(expr) => expr.id,
pprust::NodeBlock(blk) => blk.id,
pprust::NodeItem(_) | pprust::NodeSubItem(_) => ast::CRATE_NODE_ID,
pprust::NodePat(pat) => pat.id
// Ids with no recorded bit set are skipped (see `has_bitset_for_nodeid`).
if !self.has_bitset_for_nodeid(id) {
assert!(self.bits_per_id > 0);
// One annotation per CFG node mapped to this id.
let indices = get_cfg_indices(id, &self.nodeid_to_index);
for &cfgidx in indices {
let (start, end) = self.compute_id_range(cfgidx);
let on_entry = &self.on_entry[start.. end];
let entry_str = bits_to_string(on_entry);
// Each of the three gen/kill sets is only printed when non-empty,
// keeping the annotation compact.
let gens = &self.gens[start.. end];
let gens_str = if gens.iter().any(|&u| u != 0) {
format!(" gen: {}", bits_to_string(gens))
let action_kills = &self.action_kills[start .. end];
let action_kills_str = if action_kills.iter().any(|&u| u != 0) {
format!(" action_kill: {}", bits_to_string(action_kills))
let scope_kills = &self.scope_kills[start .. end];
let scope_kills_str = if scope_kills.iter().any(|&u| u != 0) {
format!(" scope_kill: {}", bits_to_string(scope_kills))
// Emit the combined summary as a synthesized comment in the output.
format!("id {}: {}{}{}{}", id, entry_str,
gens_str, action_kills_str, scope_kills_str))?;
165 fn build_nodeid_to_index(body: Option<&hir::Body>,
166 cfg: &cfg::CFG) -> NodeMap<Vec<CFGIndex>> {
167 let mut index = NodeMap();
169 // FIXME (#6298): Would it be better to fold formals from decl
170 // into cfg itself? i.e. introduce a fn-based flow-graph in
171 // addition to the current block-based flow-graph, rather than
172 // have to put traversals like this here?
173 if let Some(body) = body {
174 add_entries_from_fn_body(&mut index, body, cfg.entry);
177 cfg.graph.each_node(|node_idx, node| {
178 if let cfg::CFGNodeData::AST(id) = node.data {
179 index.entry(id).or_insert(vec![]).push(node_idx);
186 /// Add mappings from the ast nodes for the formal bindings to
187 /// the entry-node in the graph.
188 fn add_entries_from_fn_body(index: &mut NodeMap<Vec<CFGIndex>>,
191 use hir::intravisit::Visitor;
195 index: &'a mut NodeMap<Vec<CFGIndex>>,
197 let mut formals = Formals { entry: entry, index: index };
198 for arg in &body.arguments {
199 formals.visit_pat(&arg.pat);
201 impl<'a, 'v> Visitor<'v> for Formals<'a> {
202 fn nested_visit_map<'this>(&'this mut self) -> intravisit::NestedVisitorMap<'this, 'v> {
203 intravisit::NestedVisitorMap::None
206 fn visit_pat(&mut self, p: &hir::Pat) {
207 self.index.entry(p.id).or_insert(vec![]).push(self.entry);
208 intravisit::walk_pat(self, p)
/// Flag used by `add_kill` to indicate whether the provided kill
/// takes effect only when control flows directly through the node in
/// question, or if the kill's effect is associated with any
/// control-flow directly through or indirectly over the node.
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum KillFrom {
    /// A `ScopeEnd` kill is one that takes effect when any control
    /// flow goes over the node. A kill associated with the end of the
    /// scope of a variable declaration `let x;` is an example of a
    /// `ScopeEnd` kill.
    ScopeEnd,

    /// An `Execution` kill is one that takes effect only when control
    /// flow goes through the node to completion. A kill associated
    /// with an assignment statement `x = expr;` is an example of an
    /// `Execution` kill.
    Execution,
}
impl<'a, 'tcx, O:DataFlowOperator> DataFlowContext<'a, 'tcx, O> {
// Constructs a new dataflow context for `analysis_name`, sizing the
// gen/kill/on-entry bit vectors from the CFG's node count.
// NOTE(review): several parameters referenced below (`cfg`, `oper`,
// `id_range`) are elided from this extract — confirm against the full file.
pub fn new(tcx: TyCtxt<'a, 'tcx, 'tcx>,
analysis_name: &'static str,
body: Option<&hir::Body>,
bits_per_id: usize) -> DataFlowContext<'a, 'tcx, O> {
// Round the per-id bit count up to a whole number of machine words.
let usize_bits = mem::size_of::<usize>() * 8;
let words_per_id = (bits_per_id + usize_bits - 1) / usize_bits;
let num_nodes = cfg.graph.all_nodes().len();
debug!("DataFlowContext::new(analysis_name: {}, id_range={:?}, \
bits_per_id={}, words_per_id={}) \
analysis_name, id_range, bits_per_id, words_per_id,
// The on-entry set starts all-ones or all-zeroes per the operator's
// initial value; gen and kill sets always start empty (all zero).
let entry = if oper.initial_value() { usize::MAX } else {0};
let zeroes = vec![0; num_nodes * words_per_id];
let gens = zeroes.clone();
let kills1 = zeroes.clone();
let on_entry = vec![entry; num_nodes * words_per_id];
let nodeid_to_index = build_nodeid_to_index(body, cfg);
// (struct literal elided in this extract; `kills1` backs `action_kills`.)
action_kills: kills1,
275 pub fn add_gen(&mut self, id: ast::NodeId, bit: usize) {
276 //! Indicates that `id` generates `bit`
277 debug!("{} add_gen(id={}, bit={})",
278 self.analysis_name, id, bit);
279 assert!(self.nodeid_to_index.contains_key(&id));
280 assert!(self.bits_per_id > 0);
282 let indices = get_cfg_indices(id, &self.nodeid_to_index);
283 for &cfgidx in indices {
284 let (start, end) = self.compute_id_range(cfgidx);
285 let gens = &mut self.gens[start.. end];
290 pub fn add_kill(&mut self, kind: KillFrom, id: ast::NodeId, bit: usize) {
291 //! Indicates that `id` kills `bit`
292 debug!("{} add_kill(id={}, bit={})",
293 self.analysis_name, id, bit);
294 assert!(self.nodeid_to_index.contains_key(&id));
295 assert!(self.bits_per_id > 0);
297 let indices = get_cfg_indices(id, &self.nodeid_to_index);
298 for &cfgidx in indices {
299 let (start, end) = self.compute_id_range(cfgidx);
300 let kills = match kind {
301 KillFrom::Execution => &mut self.action_kills[start.. end],
302 KillFrom::ScopeEnd => &mut self.scope_kills[start.. end],
308 fn apply_gen_kill(&self, cfgidx: CFGIndex, bits: &mut [usize]) {
309 //! Applies the gen and kill sets for `cfgidx` to `bits`
310 debug!("{} apply_gen_kill(cfgidx={:?}, bits={}) [before]",
311 self.analysis_name, cfgidx, mut_bits_to_string(bits));
312 assert!(self.bits_per_id > 0);
314 let (start, end) = self.compute_id_range(cfgidx);
315 let gens = &self.gens[start.. end];
316 bitwise(bits, gens, &Union);
317 let kills = &self.action_kills[start.. end];
318 bitwise(bits, kills, &Subtract);
319 let kills = &self.scope_kills[start.. end];
320 bitwise(bits, kills, &Subtract);
322 debug!("{} apply_gen_kill(cfgidx={:?}, bits={}) [after]",
323 self.analysis_name, cfgidx, mut_bits_to_string(bits));
326 fn compute_id_range(&self, cfgidx: CFGIndex) -> (usize, usize) {
327 let n = cfgidx.node_id();
328 let start = n * self.words_per_id;
329 let end = start + self.words_per_id;
331 assert!(start < self.gens.len());
332 assert!(end <= self.gens.len());
333 assert!(self.gens.len() == self.action_kills.len());
334 assert!(self.gens.len() == self.scope_kills.len());
335 assert!(self.gens.len() == self.on_entry.len());
341 pub fn each_bit_on_entry<F>(&self, id: ast::NodeId, mut f: F) -> bool where
342 F: FnMut(usize) -> bool,
344 //! Iterates through each bit that is set on entry to `id`.
345 //! Only useful after `propagate()` has been called.
346 if !self.has_bitset_for_nodeid(id) {
349 let indices = get_cfg_indices(id, &self.nodeid_to_index);
350 for &cfgidx in indices {
351 if !self.each_bit_for_node(EntryOrExit::Entry, cfgidx, |i| f(i)) {
358 pub fn each_bit_for_node<F>(&self, e: EntryOrExit, cfgidx: CFGIndex, f: F) -> bool where
359 F: FnMut(usize) -> bool,
361 //! Iterates through each bit that is set on entry/exit to `cfgidx`.
362 //! Only useful after `propagate()` has been called.
364 if self.bits_per_id == 0 {
365 // Skip the surprisingly common degenerate case. (Note
366 // compute_id_range requires self.words_per_id > 0.)
370 let (start, end) = self.compute_id_range(cfgidx);
371 let on_entry = &self.on_entry[start.. end];
373 let slice = match e {
374 EntryOrExit::Entry => on_entry,
375 EntryOrExit::Exit => {
376 let mut t = on_entry.to_vec();
377 self.apply_gen_kill(cfgidx, &mut t);
382 debug!("{} each_bit_for_node({:?}, cfgidx={:?}) bits={}",
383 self.analysis_name, e, cfgidx, bits_to_string(slice));
384 self.each_bit(slice, f)
387 pub fn each_gen_bit<F>(&self, id: ast::NodeId, mut f: F) -> bool where
388 F: FnMut(usize) -> bool,
390 //! Iterates through each bit in the gen set for `id`.
391 if !self.has_bitset_for_nodeid(id) {
395 if self.bits_per_id == 0 {
396 // Skip the surprisingly common degenerate case. (Note
397 // compute_id_range requires self.words_per_id > 0.)
401 let indices = get_cfg_indices(id, &self.nodeid_to_index);
402 for &cfgidx in indices {
403 let (start, end) = self.compute_id_range(cfgidx);
404 let gens = &self.gens[start.. end];
405 debug!("{} each_gen_bit(id={}, gens={})",
406 self.analysis_name, id, bits_to_string(gens));
407 if !self.each_bit(gens, |i| f(i)) {
414 fn each_bit<F>(&self, words: &[usize], mut f: F) -> bool where
415 F: FnMut(usize) -> bool,
417 //! Helper for iterating over the bits in a bit set.
418 //! Returns false on the first call to `f` that returns false;
419 //! if all calls to `f` return true, then returns true.
421 let usize_bits = mem::size_of::<usize>() * 8;
422 for (word_index, &word) in words.iter().enumerate() {
424 let base_index = word_index * usize_bits;
425 for offset in 0..usize_bits {
426 let bit = 1 << offset;
427 if (word & bit) != 0 {
428 // NB: we round up the total number of bits
429 // that we store in any given bit set so that
430 // it is an even multiple of usize::BITS. This
431 // means that there may be some stray bits at
432 // the end that do not correspond to any
433 // actual value. So before we callback, check
434 // whether the bit_index is greater than the
435 // actual value the user specified and stop
437 let bit_index = base_index + offset as usize;
438 if bit_index >= self.bits_per_id {
440 } else if !f(bit_index) {
450 pub fn add_kills_from_flow_exits(&mut self, cfg: &cfg::CFG) {
451 //! Whenever you have a `break` or `continue` statement, flow
452 //! exits through any number of enclosing scopes on its way to
453 //! the new destination. This function infers the kill bits of
454 //! those control operators based on the kill bits associated
455 //! with those scopes.
457 //! This is usually called (if it is called at all), after
458 //! all add_gen and add_kill calls, but before propagate.
460 debug!("{} add_kills_from_flow_exits", self.analysis_name);
461 if self.bits_per_id == 0 {
462 // Skip the surprisingly common degenerate case. (Note
463 // compute_id_range requires self.words_per_id > 0.)
466 cfg.graph.each_edge(|_edge_index, edge| {
467 let flow_exit = edge.source();
468 let (start, end) = self.compute_id_range(flow_exit);
469 let mut orig_kills = self.scope_kills[start.. end].to_vec();
471 let mut changed = false;
472 for &node_id in &edge.data.exiting_scopes {
473 let opt_cfg_idx = self.nodeid_to_index.get(&node_id);
476 for &cfg_idx in indices {
477 let (start, end) = self.compute_id_range(cfg_idx);
478 let kills = &self.scope_kills[start.. end];
479 if bitwise(&mut orig_kills, kills, &Union) {
480 debug!("scope exits: scope id={} \
481 (node={:?} of {:?}) added killset: {}",
482 node_id, cfg_idx, indices,
483 bits_to_string(kills));
489 debug!("{} add_kills_from_flow_exits flow_exit={:?} \
490 no cfg_idx for exiting_scope={}",
491 self.analysis_name, flow_exit, node_id);
497 let bits = &mut self.scope_kills[start.. end];
498 debug!("{} add_kills_from_flow_exits flow_exit={:?} bits={} [before]",
499 self.analysis_name, flow_exit, mut_bits_to_string(bits));
500 bits.copy_from_slice(&orig_kills[..]);
501 debug!("{} add_kills_from_flow_exits flow_exit={:?} bits={} [after]",
502 self.analysis_name, flow_exit, mut_bits_to_string(bits));
509 impl<'a, 'tcx, O:DataFlowOperator+Clone+'static> DataFlowContext<'a, 'tcx, O> {
510 // ^^^^^^^^^^^^^ only needed for pretty printing
511 pub fn propagate(&mut self, cfg: &cfg::CFG, body: &hir::Body) {
512 //! Performs the data flow analysis.
514 if self.bits_per_id == 0 {
515 // Optimize the surprisingly common degenerate case.
520 let words_per_id = self.words_per_id;
521 let mut propcx = PropagationContext {
526 let mut temp = vec![0; words_per_id];
527 while propcx.changed {
528 propcx.changed = false;
529 propcx.reset(&mut temp);
530 propcx.walk_cfg(cfg, &mut temp);
534 debug!("Dataflow result for {}:", self.analysis_name);
535 debug!("{}", pprust::to_string(self, |s| {
536 s.cbox(pprust::indent_unit)?;
538 s.print_expr(&body.value)
543 impl<'a, 'b, 'tcx, O:DataFlowOperator> PropagationContext<'a, 'b, 'tcx, O> {
544 fn walk_cfg(&mut self,
546 in_out: &mut [usize]) {
547 debug!("DataFlowContext::walk_cfg(in_out={}) {}",
548 bits_to_string(in_out), self.dfcx.analysis_name);
549 assert!(self.dfcx.bits_per_id > 0);
551 cfg.graph.each_node(|node_index, node| {
552 debug!("DataFlowContext::walk_cfg idx={:?} id={} begin in_out={}",
553 node_index, node.data.id(), bits_to_string(in_out));
555 let (start, end) = self.dfcx.compute_id_range(node_index);
557 // Initialize local bitvector with state on-entry.
558 in_out.copy_from_slice(&self.dfcx.on_entry[start.. end]);
560 // Compute state on-exit by applying transfer function to
562 self.dfcx.apply_gen_kill(node_index, in_out);
564 // Propagate state on-exit from node into its successors.
565 self.propagate_bits_into_graph_successors_of(in_out, cfg, node_index);
566 true // continue to next node
570 fn reset(&mut self, bits: &mut [usize]) {
571 let e = if self.dfcx.oper.initial_value() {usize::MAX} else {0};
577 fn propagate_bits_into_graph_successors_of(&mut self,
581 for (_, edge) in cfg.graph.outgoing_edges(cfgidx) {
582 self.propagate_bits_into_entry_set_for(pred_bits, edge);
586 fn propagate_bits_into_entry_set_for(&mut self,
588 edge: &cfg::CFGEdge) {
589 let source = edge.source();
590 let cfgidx = edge.target();
591 debug!("{} propagate_bits_into_entry_set_for(pred_bits={}, {:?} to {:?})",
592 self.dfcx.analysis_name, bits_to_string(pred_bits), source, cfgidx);
593 assert!(self.dfcx.bits_per_id > 0);
595 let (start, end) = self.dfcx.compute_id_range(cfgidx);
597 // (scoping mutable borrow of self.dfcx.on_entry)
598 let on_entry = &mut self.dfcx.on_entry[start.. end];
599 bitwise(on_entry, pred_bits, &self.dfcx.oper)
602 debug!("{} changed entry set for {:?} to {}",
603 self.dfcx.analysis_name, cfgidx,
604 bits_to_string(&self.dfcx.on_entry[start.. end]));
610 fn mut_bits_to_string(words: &mut [usize]) -> String {
611 bits_to_string(words)
/// Renders a word-packed bit set for debug logging, e.g. `[01-00-…]`:
/// one two-digit hex group per byte, separated by `-`, wrapped in `[…]`.
fn bits_to_string(words: &[usize]) -> String {
    let mut result = String::new();
    let mut sep = '[';

    // Note: this is a little endian printout of bytes.

    for &word in words {
        let mut v = word;
        for _ in 0..mem::size_of::<usize>() {
            result.push(sep);
            result.push_str(&format!("{:02x}", v & 0xFF));
            v >>= 8;
            sep = '-'; // '[' only before the very first byte
        }
    }
    result.push(']');
    result
}
634 fn bitwise<Op:BitwiseOperator>(out_vec: &mut [usize],
637 assert_eq!(out_vec.len(), in_vec.len());
638 let mut changed = false;
639 for (out_elt, in_elt) in out_vec.iter_mut().zip(in_vec) {
640 let old_val = *out_elt;
641 let new_val = op.join(old_val, *in_elt);
643 changed |= old_val != new_val;
648 fn set_bit(words: &mut [usize], bit: usize) -> bool {
649 debug!("set_bit: words={} bit={}",
650 mut_bits_to_string(words), bit_str(bit));
651 let usize_bits = mem::size_of::<usize>() * 8;
652 let word = bit / usize_bits;
653 let bit_in_word = bit % usize_bits;
654 let bit_mask = 1 << bit_in_word;
655 debug!("word={} bit_in_word={} bit_mask={}", word, bit_in_word, bit_mask);
656 let oldv = words[word];
657 let newv = oldv | bit_mask;
/// Renders a bit index for debug logging as `[bit:byte-mask]`, where
/// `byte` is the index of the 8-bit byte containing the bit and `mask`
/// has only that bit's position within the byte set.
fn bit_str(bit: usize) -> String {
    // `byte` reconstructed to be consistent with the visible `bit & 0b111`
    // below: the low 3 bits select the position within an 8-bit byte.
    let byte = bit >> 3;
    let lobits = 1 << (bit & 0b111);
    format!("[{}:{}-{:02x}]", bit, byte, lobits)
}
669 impl BitwiseOperator for Union {
670 fn join(&self, a: usize, b: usize) -> usize { a | b }
673 impl BitwiseOperator for Subtract {
674 fn join(&self, a: usize, b: usize) -> usize { a & !b }