src/librustc_incremental/persist/save.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use rustc::dep_graph::{DepGraph, DepKind};
use rustc::session::Session;
use rustc::ty::TyCtxt;
use rustc::util::common::time;
use rustc_data_structures::fx::FxHashMap;
use rustc_serialize::Encodable as RustcEncodable;
use rustc_serialize::opaque::Encoder;
use std::io::{self, Cursor, Write};
use std::fs::{self, File};
use std::path::PathBuf;

use super::data::*;
use super::fs::*;
use super::dirty_clean;
use super::file_format;
use super::work_product;

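/// Persists the incremental compilation artifacts for the current session:
/// the query result cache is always written, while the dep-graph itself is
/// only written when the `incremental_queries` debugging option
/// (`-Z incremental-queries`) is enabled. Afterwards the dirty/clean
/// annotation checks are run. Does nothing if incremental compilation is
/// disabled.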
pub fn save_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>) {
    debug!("save_dep_graph()");
    let _ignore = tcx.dep_graph.in_ignore();
    let sess = tcx.sess;
    if sess.opts.incremental.is_none() {
        return;
    }

    time(sess.time_passes(), "persist query result cache", || {
        save_in(sess,
                query_cache_path(sess),
                |e| encode_query_cache(tcx, e));
    });

    if tcx.sess.opts.debugging_opts.incremental_queries {
        time(sess.time_passes(), "persist dep-graph", || {
            save_in(sess,
                    dep_graph_path(sess),
                    |e| encode_dep_graph(tcx, e));
        });
    }

    dirty_clean::check_dirty_clean_annotations(tcx);
}

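/// Persists the dep-graph's current work products and deletes the on-disk
/// files of work products that existed in the previous session but are no
/// longer part of the current one. Does nothing if incremental compilation
/// is disabled.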
pub fn save_work_products(sess: &Session, dep_graph: &DepGraph) {
    if sess.opts.incremental.is_none() {
        return;
    }

    debug!("save_work_products()");
    let _ignore = dep_graph.in_ignore();
    let path = work_products_path(sess);
    save_in(sess, path, |e| encode_work_products(dep_graph, e));

    // We also need to clean out old work-products, as not all of them are
    // deleted during invalidation. Some object files don't change their
    // content; they are just not needed anymore.
    let new_work_products = dep_graph.work_products();
    let previous_work_products = dep_graph.previous_work_products();

    for (id, wp) in previous_work_products.iter() {
        if !new_work_products.contains_key(id) {
            work_product::delete_workproduct_files(sess, wp);
            debug_assert!(wp.saved_files.iter().all(|&(_, ref file_name)| {
                !in_incr_comp_dir_sess(sess, file_name).exists()
            }));
        }
    }

    // Check that we did not delete one of the current work-products:
    debug_assert!({
        new_work_products.iter()
                         .flat_map(|(_, wp)| wp.saved_files
                                               .iter()
                                               .map(|&(_, ref name)| name))
                         .map(|name| in_incr_comp_dir_sess(sess, name))
                         .all(|path| path.exists())
    });
}

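/// Writes the data produced by `encode`, preceded by the incremental
/// file-format header, to `path_buf`. A pre-existing file is deleted first
/// (rather than truncated in place) so that shared hard-links are left
/// untouched. Errors are reported through `sess` instead of being returned.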
fn save_in<F>(sess: &Session, path_buf: PathBuf, encode: F)
    where F: FnOnce(&mut Encoder) -> io::Result<()>
{
    debug!("save: storing data in {}", path_buf.display());

    // Delete the old file, if any.
    // Note: It's important that we actually delete the old file and not just
    // truncate and overwrite it, since it might be a shared hard-link, the
    // underlying data of which we don't want to modify.
    if path_buf.exists() {
        match fs::remove_file(&path_buf) {
            Ok(()) => {
                debug!("save: remove old file");
            }
            Err(err) => {
                sess.err(&format!("unable to delete old dep-graph at `{}`: {}",
                                  path_buf.display(),
                                  err));
                return;
            }
        }
    }

    // Generate the data in a memory buffer.
    let mut wr = Cursor::new(Vec::new());
    file_format::write_file_header(&mut wr).unwrap();
    match encode(&mut Encoder::new(&mut wr)) {
        Ok(()) => {}
        Err(err) => {
            sess.err(&format!("could not encode dep-graph to `{}`: {}",
                              path_buf.display(),
                              err));
            return;
        }
    }

    // Write the data out to disk.
    let data = wr.into_inner();
    match File::create(&path_buf).and_then(|mut file| file.write_all(&data)) {
        Ok(_) => {
            debug!("save: data written to disk successfully");
        }
        Err(err) => {
            sess.err(&format!("failed to write dep-graph to `{}`: {}",
                              path_buf.display(),
                              err));
            return;
        }
    }
}

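/// Encodes the hash of the dependency-tracked commandline arguments,
/// followed by the serialized dep-graph itself. With `-Z incremental-info`,
/// per-`DepKind` node and edge statistics are printed along the way.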
fn encode_dep_graph(tcx: TyCtxt,
                    encoder: &mut Encoder)
                    -> io::Result<()> {
    // First encode the commandline arguments hash
    tcx.sess.opts.dep_tracking_hash().encode(encoder)?;

    // Encode the graph data.
    let serialized_graph = tcx.dep_graph.serialize();

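    // When `-Z incremental-info` is given, print per-`DepKind` statistics
    // about the nodes and edges of the graph we are about to encode.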
    if tcx.sess.opts.debugging_opts.incremental_info {
        #[derive(Clone)]
        struct Stat {
            kind: DepKind,
            node_counter: u64,
            edge_counter: u64,
        }

        let total_node_count = serialized_graph.nodes.len();
        let total_edge_count = serialized_graph.edge_list_data.len();
        let (total_edge_reads, total_duplicate_edge_reads) =
            tcx.dep_graph.edge_deduplication_data();

        let mut counts: FxHashMap<_, Stat> = FxHashMap();

        for (i, &(node, _)) in serialized_graph.nodes.iter_enumerated() {
            let stat = counts.entry(node.kind).or_insert(Stat {
                kind: node.kind,
                node_counter: 0,
                edge_counter: 0,
            });

            stat.node_counter += 1;
            let (edge_start, edge_end) = serialized_graph.edge_list_indices[i];
            stat.edge_counter += (edge_end - edge_start) as u64;
        }

        let mut counts: Vec<_> = counts.values().cloned().collect();
        counts.sort_by_key(|s| -(s.node_counter as i64));

        let percentage_of_all_nodes: Vec<f64> = counts.iter().map(|s| {
            (100.0 * (s.node_counter as f64)) / (total_node_count as f64)
        }).collect();

        let average_edges_per_kind: Vec<f64> = counts.iter().map(|s| {
            (s.edge_counter as f64) / (s.node_counter as f64)
        }).collect();

        println!("[incremental]");
        println!("[incremental] DepGraph Statistics");

        const SEPARATOR: &str = "[incremental] --------------------------------\
                                 ----------------------------------------------\
                                 ------------";

        println!("{}", SEPARATOR);
        println!("[incremental]");
        println!("[incremental] Total Node Count: {}", total_node_count);
        println!("[incremental] Total Edge Count: {}", total_edge_count);
        println!("[incremental] Total Edge Reads: {}", total_edge_reads);
        println!("[incremental] Total Duplicate Edge Reads: {}", total_duplicate_edge_reads);
        println!("[incremental]");
        println!("[incremental]  {:<36}| {:<17}| {:<12}| {:<17}|",
                 "Node Kind",
                 "Node Frequency",
                 "Node Count",
                 "Avg. Edge Count");
        println!("[incremental] -------------------------------------\
                  |------------------\
                  |-------------\
                  |------------------|");

        for (i, stat) in counts.iter().enumerate() {
            println!("[incremental]  {:<36}|{:>16.1}% |{:>12} |{:>17.1} |",
                format!("{:?}", stat.kind),
                percentage_of_all_nodes[i],
                stat.node_counter,
                average_edges_per_kind[i]);
        }

        println!("{}", SEPARATOR);
        println!("[incremental]");
    }

    serialized_graph.encode(encoder)?;

    Ok(())
}

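/// Encodes the dep-graph's current work products as a vector of
/// `SerializedWorkProduct`s.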
fn encode_work_products(dep_graph: &DepGraph,
                        encoder: &mut Encoder) -> io::Result<()> {
    let work_products: Vec<_> = dep_graph
        .work_products()
        .iter()
        .map(|(id, work_product)| {
            SerializedWorkProduct {
                id: id.clone(),
                work_product: work_product.clone(),
            }
        })
        .collect();

    work_products.encode(encoder)
}

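/// Encodes the query result cache via `TyCtxt::serialize_query_result_cache`.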
fn encode_query_cache(tcx: TyCtxt,
                      encoder: &mut Encoder)
                      -> io::Result<()> {
    tcx.serialize_query_result_cache(encoder)
}