1 // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 use rustc::dep_graph::DepNode;
12 use rustc::hir::def_id::DefId;
13 use rustc::hir::svh::Svh;
14 use rustc::ich::Fingerprint;
15 use rustc::middle::cstore::EncodedMetadataHashes;
16 use rustc::session::Session;
17 use rustc::ty::TyCtxt;
18 use rustc_data_structures::fx::FxHashMap;
19 use rustc_data_structures::graph;
20 use rustc_data_structures::indexed_vec::IndexVec;
21 use rustc_serialize::Encodable as RustcEncodable;
22 use rustc_serialize::opaque::Encoder;
23 use std::io::{self, Cursor, Write};
24 use std::fs::{self, File};
25 use std::path::PathBuf;
27 use IncrementalHashesMap;
32 use super::dirty_clean;
33 use super::file_format;
34 use super::work_product;
// Entry point for persisting incremental-compilation state: optionally
// writes the metadata hashes (under -Z incremental-cc / -Z query-dep-graph),
// then the dep-graph itself, then runs the dirty/clean metadata checks.
// NOTE(review): this excerpt has gaps in its original line numbering
// (39, 42, 44-46, 48, 52-53, 57, 63, 66-73, 75, 80-81), so the early
// return for the non-incremental case, several closing braces, and some
// call arguments are not visible here.
36 pub fn save_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
37 incremental_hashes_map: IncrementalHashesMap,
38 metadata_hashes: &EncodedMetadataHashes,
40 debug!("save_dep_graph()");
// Dep-graph reads/writes performed while saving must not themselves be
// recorded as dependencies.
41 let _ignore = tcx.dep_graph.in_ignore();
// Nothing to save unless incremental compilation was requested.
// NOTE(review): `sess` is not bound in the visible lines -- presumably a
// `let sess = ...;` binding (likely `tcx.sess`) sits in an elided line.
43 if sess.opts.incremental.is_none() {
47 let query = tcx.dep_graph.query();
// Optional statistics about the un-reduced dep-graph.
49 if tcx.sess.opts.debugging_opts.incremental_info {
50 eprintln!("incremental: {} nodes in dep-graph", query.graph.len_nodes());
51 eprintln!("incremental: {} edges in dep-graph", query.graph.len_edges());
// Compute the reduced predecessor graph that actually gets serialized.
54 let mut hcx = HashContext::new(tcx, &incremental_hashes_map);
55 let preds = Predecessors::new(&query, &mut hcx);
56 let mut current_metadata_hashes = FxHashMap();
58 // IMPORTANT: We are saving the metadata hashes *before* the dep-graph,
59 // since metadata-encoding might add new entries to the
60 // DefIdDirectory (which is saved in the dep-graph file).
61 if sess.opts.debugging_opts.incremental_cc ||
62 sess.opts.debugging_opts.query_dep_graph {
64 metadata_hash_export_path(sess),
65 |e| encode_metadata_hashes(tcx,
68 &mut current_metadata_hashes,
// Save the dep-graph itself. NOTE(review): the enclosing `save_in(...)`
// call around this closure is presumably on an elided line.
74 |e| encode_dep_graph(tcx, &preds, e));
76 let prev_metadata_hashes = incremental_hashes_map.prev_metadata_hashes.borrow();
77 dirty_clean::check_dirty_clean_metadata(tcx,
78 &*prev_metadata_hashes,
// NOTE(review): `¤t_metadata_hashes` below is mojibake -- the byte
// sequence `&curren` of `&current_metadata_hashes` was mangled into the
// HTML entity `¤`; the intended argument is `&current_metadata_hashes`.
79 ¤t_metadata_hashes);
// Persists the current session's work-products to disk and deletes the
// on-disk files of work-products that did not survive into this session.
// NOTE(review): gaps in the original numbering (84-86, 91, 97, 103-106,
// 108, 111, 115-117) elide the early return, several closing braces, and
// the wrapper around the trailing all-paths-exist check.
82 pub fn save_work_products(sess: &Session) {
// Nothing to do when incremental compilation is disabled.
83 if sess.opts.incremental.is_none() {
87 debug!("save_work_products()");
88 let _ignore = sess.dep_graph.in_ignore();
89 let path = work_products_path(sess);
90 save_in(sess, path, |e| encode_work_products(sess, e));
92 // We also need to clean out old work-products, as not all of them are
93 // deleted during invalidation. Some object files don't change their
94 // content, they are just not needed anymore.
95 let new_work_products = sess.dep_graph.work_products();
96 let previous_work_products = sess.dep_graph.previous_work_products();
// Delete files of every previous work-product that is absent from the
// current session's product set.
98 for (id, wp) in previous_work_products.iter() {
99 if !new_work_products.contains_key(id) {
100 work_product::delete_workproduct_files(sess, wp);
// After deletion, none of that product's saved files should remain.
101 debug_assert!(wp.saved_files.iter().all(|&(_, ref file_name)| {
102 !in_incr_comp_dir_sess(sess, file_name).exists()
107 // Check that we did not delete one of the current work-products:
// NOTE(review): this iterator chain is presumably wrapped in a
// `debug_assert!(...)` on an elided line -- it produces a bool and is
// not bound to anything visible here.
109 new_work_products.iter()
110 .flat_map(|(_, wp)| wp.saved_files
112 .map(|&(_, ref name)| name))
113 .map(|name| in_incr_comp_dir_sess(sess, name))
114 .all(|path| path.exists())
// Writes one incremental artifact to `path_buf`: removes any existing
// file, builds the payload in a memory buffer (file-format header
// followed by the `encode` callback's output), then writes the buffer
// out in one `write_all`. Failures are reported through `sess.err`
// rather than returned. NOTE(review): gaps in the original numbering
// (120, 122, 129, 131-132, 134-140, 145-146, 148-153, 157, 159-160,
// 162-168) elide the match-arm patterns (Ok/Err), their returns, and the
// closing braces.
118 fn save_in<F>(sess: &Session, path_buf: PathBuf, encode: F)
119 where F: FnOnce(&mut Encoder) -> io::Result<()>
121 debug!("save: storing data in {}", path_buf.display());
123 // delete the old dep-graph, if any
124 // Note: It's important that we actually delete the old file and not just
125 // truncate and overwrite it, since it might be a shared hard-link, the
126 // underlying data of which we don't want to modify
127 if path_buf.exists() {
128 match fs::remove_file(&path_buf) {
130 debug!("save: remove old file");
133 sess.err(&format!("unable to delete old dep-graph at `{}`: {}",
141 // generate the data in a memory buffer
142 let mut wr = Cursor::new(Vec::new());
// Header write goes straight to an in-memory Vec, so it cannot fail;
// hence the `unwrap()`.
143 file_format::write_file_header(&mut wr).unwrap();
144 match encode(&mut Encoder::new(&mut wr)) {
147 sess.err(&format!("could not encode dep-graph to `{}`: {}",
154 // write the data out
155 let data = wr.into_inner();
156 match File::create(&path_buf).and_then(|mut file| file.write_all(&data)) {
158 debug!("save: data written to disk successfully");
161 sess.err(&format!("failed to write dep-graph to `{}`: {}",
// Serializes the reduced dep-graph: first the command-line-arguments
// hash, then a SerializedDepGraph holding the node list, the edges in a
// CSR-like layout (per-node (start, end) ranges into one flat edge
// vector), the bootstrap outputs, and the content hashes.
// NOTE(review): gaps in the original numbering (172, 175, 178-180,
// 182-183, 186, 189, 192-193, 197-198, 203, 205, 207-208, 218, 222, 224,
// 227-228, 235, 239, 241-248, 250-253, 255-256, 259, 264-265, 269-274)
// elide iterator sources, `collect()` calls, closure openings, several
// SerializedDepGraph fields, and closing braces.
169 pub fn encode_dep_graph(tcx: TyCtxt,
170 preds: &Predecessors,
171 encoder: &mut Encoder)
173 // First encode the commandline arguments hash
174 tcx.sess.opts.dep_tracking_hash().encode(encoder)?;
176 // NB: We rely on this Vec being indexable by reduced_graph's NodeIndex.
177 let mut nodes: IndexVec<DepNodeIndex, DepNode> = preds
181 .map(|node| node.data.clone())
// Build the compressed edge lists: `edge_list_indices[i]` is the
// (start, end) range of node i's successors inside `edge_list_data`.
184 let mut edge_list_indices = IndexVec::with_capacity(nodes.len());
185 let mut edge_list_data = Vec::with_capacity(preds.reduced_graph.len_edges());
187 for node_index in 0 .. nodes.len() {
188 let start = edge_list_data.len() as u32;
190 for target in preds.reduced_graph.successor_nodes(graph::NodeIndex(node_index)) {
191 edge_list_data.push(DepNodeIndex::new(target.node_id()));
194 let end = edge_list_data.len() as u32;
195 debug_assert_eq!(node_index, edge_list_indices.len());
196 edge_list_indices.push((start, end));
// The (start, end) ranges are stored as u32, so the total edge count
// must fit in u32.
199 // Let's make sure we had no overflow there.
200 assert!(edge_list_data.len() <= ::std::u32::MAX as usize);
201 // Check that we have a consistent number of edges.
202 assert_eq!(edge_list_data.len(), preds.reduced_graph.len_edges());
204 let bootstrap_outputs = preds.bootstrap_outputs
206 .map(|dep_node| (**dep_node).clone())
209 // Next, build the map of content hashes. To this end, we need to transform
210 // the (DepNode -> Fingerprint) map that we have into a
211 // (DepNodeIndex -> Fingerprint) map. This may necessitate adding nodes back
212 // to the dep-graph that have been filtered out during reduction.
213 let content_hashes = {
214 // We have to build a (DepNode -> DepNodeIndex) map. We over-allocate a
215 // little because we expect some more nodes to be added.
216 let capacity = (nodes.len() * 120) / 100;
217 let mut node_to_index = FxHashMap::with_capacity_and_hasher(capacity,
219 // Add the nodes we already have in the graph.
220 node_to_index.extend(nodes.iter_enumerated()
221 .map(|(index, &node)| (node, index)));
223 let mut content_hashes = Vec::with_capacity(preds.hashes.len());
225 for (&&dep_node, &hash) in preds.hashes.iter() {
// Look up the node's index; the fallback below (lines 229-240) re-adds
// nodes that were filtered out during graph reduction, giving them an
// empty edge list.
226 let dep_node_index = *node_to_index
229 // There is no DepNodeIndex for this DepNode yet. This
230 // happens when the DepNode got filtered out during graph
231 // reduction. Since we have a content hash for the DepNode,
232 // we add it back to the graph.
233 let next_index = nodes.len();
234 nodes.push(dep_node);
236 debug_assert_eq!(next_index, edge_list_indices.len());
237 // Push an empty list of edges
238 edge_list_indices.push((0,0));
240 DepNodeIndex::new(next_index)
243 content_hashes.push((dep_node_index, hash));
// Assemble the serialized form. NOTE(review): only the `hashes` field is
// visible; `nodes`, `edge_list_indices`, `edge_list_data`, and the
// bootstrap outputs are presumably assigned on elided lines.
249 let graph = SerializedDepGraph {
254 hashes: content_hashes,
257 // Encode the graph data.
258 graph.encode(encoder)?;
// Optional statistics about the reduced/serialized graph.
260 if tcx.sess.opts.debugging_opts.incremental_info {
261 eprintln!("incremental: {} nodes in reduced dep-graph", graph.nodes.len());
262 eprintln!("incremental: {} edges in serialized dep-graph", graph.edge_list_data.len());
263 eprintln!("incremental: {} hashes in serialized dep-graph", graph.hashes.len());
// Optionally dump every incremental content hash for debugging.
266 if tcx.sess.opts.debugging_opts.incremental_dump_hash {
267 for (dep_node, hash) in &preds.hashes {
268 println!("ICH for {:?} is {}", dep_node, hash);
// Serializes the per-item metadata hashes, preceded by the crate SVH,
// and (under -Z query-dep-graph) a DefIndex -> DefPathHash index map.
// Also copies each hash into `current_metadata_hashes` so the caller can
// run the dirty/clean metadata checks. NOTE(review): gaps in the
// original numbering (276, 280, 283, 287-288, 292, 296, 299-300,
// 303-304, 308-311) elide at least one parameter (see `svh` below) and
// several closing braces.
275 pub fn encode_metadata_hashes(tcx: TyCtxt,
277 metadata_hashes: &EncodedMetadataHashes,
278 current_metadata_hashes: &mut FxHashMap<DefId, Fingerprint>,
279 encoder: &mut Encoder)
// Sanity check: no DefIndex may occur twice in the hash list (collecting
// into a map by def_index must not lose any entries).
281 assert_eq!(metadata_hashes.hashes.len(),
282 metadata_hashes.hashes.iter().map(|x| (x.def_index, ())).collect::<FxHashMap<_,_>>().len());
284 let mut serialized_hashes = SerializedMetadataHashes {
285 entry_hashes: metadata_hashes.hashes.to_vec(),
286 index_map: FxHashMap()
// The index map is only populated for the -Z query-dep-graph debugging
// machinery; otherwise it stays empty.
289 if tcx.sess.opts.debugging_opts.query_dep_graph {
290 for serialized_hash in &serialized_hashes.entry_hashes {
291 let def_id = DefId::local(serialized_hash.def_index);
293 // Store entry in the index_map
294 let def_path_hash = tcx.def_path_hash(def_id);
295 serialized_hashes.index_map.insert(def_id.index, def_path_hash);
297 // Record hash in current_metadata_hashes
298 current_metadata_hashes.insert(def_id, serialized_hash.hash);
301 debug!("save: stored index_map (len={}) for serialized hashes",
302 serialized_hashes.index_map.len());
305 // Encode everything.
// NOTE(review): `svh` is not declared in the visible lines -- it is
// presumably an elided parameter of this function (of type `Svh`, which
// the file imports).
306 svh.encode(encoder)?;
307 serialized_hashes.encode(encoder)?;
// Serializes the session's current work-products as a
// Vec<SerializedWorkProduct>, forwarding the encoder's io::Result.
// NOTE(review): gaps in the original numbering (314-315, 318, 320-323)
// elide the iterator source on `sess.dep_graph`, the `id` field of the
// struct literal, and the close of the `map`/`collect` chain.
312 pub fn encode_work_products(sess: &Session, encoder: &mut Encoder) -> io::Result<()> {
313 let work_products: Vec<_> = sess.dep_graph
316 .map(|(id, work_product)| {
317 SerializedWorkProduct {
// Each work-product is cloned into its serialized wrapper.
319 work_product: work_product.clone(),
324 work_products.encode(encoder)