1 // Copyright 2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 use rustc::dep_graph::DepNode;
12 use rustc::hir::def_id::DefId;
13 use rustc::hir::svh::Svh;
14 use rustc::ich::Fingerprint;
15 use rustc::session::Session;
16 use rustc::ty::TyCtxt;
17 use rustc_data_structures::fx::FxHashMap;
18 use rustc_data_structures::graph::{NodeIndex, INCOMING};
19 use rustc_serialize::Encodable as RustcEncodable;
20 use rustc_serialize::opaque::Encoder;
22 use std::io::{self, Cursor, Write};
23 use std::fs::{self, File};
24 use std::path::PathBuf;
26 use IncrementalHashesMap;
28 use super::directory::*;
32 use super::dirty_clean;
33 use super::file_format;
34 use super::work_product;
35 use calculate_svh::IchHasher;
37 pub fn save_dep_graph<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
38 incremental_hashes_map: &IncrementalHashesMap,
40 debug!("save_dep_graph()");
41 let _ignore = tcx.dep_graph.in_ignore();
43 if sess.opts.incremental.is_none() {
47 let mut builder = DefIdDirectoryBuilder::new(tcx);
48 let query = tcx.dep_graph.query();
50 if tcx.sess.opts.debugging_opts.incremental_info {
51 println!("incremental: {} nodes in dep-graph", query.graph.len_nodes());
52 println!("incremental: {} edges in dep-graph", query.graph.len_edges());
55 let mut hcx = HashContext::new(tcx, incremental_hashes_map);
56 let preds = Predecessors::new(&query, &mut hcx);
57 let mut current_metadata_hashes = FxHashMap();
59 if sess.opts.debugging_opts.incremental_cc ||
60 sess.opts.debugging_opts.query_dep_graph {
61 // IMPORTANT: We are saving the metadata hashes *before* the dep-graph,
62 // since metadata-encoding might add new entries to the
63 // DefIdDirectory (which is saved in the dep-graph file).
65 metadata_hash_export_path(sess),
66 |e| encode_metadata_hashes(tcx,
70 &mut current_metadata_hashes,
76 |e| encode_dep_graph(&preds, &mut builder, e));
78 let prev_metadata_hashes = incremental_hashes_map.prev_metadata_hashes.borrow();
79 dirty_clean::check_dirty_clean_metadata(tcx,
80 &*prev_metadata_hashes,
81 ¤t_metadata_hashes);
// Persist the current session's work products (e.g. saved object files)
// and delete stale work-product files left over from the previous session.
//
// NOTE(review): embedded line numbers skip (85 -> 89, 104 -> 109, ...),
// so early-return bodies, closing braces, and parts of the iterator
// chain are elided from this view.
84 pub fn save_work_products(sess: &Session) {
// Only relevant when incremental compilation is enabled.
85 if sess.opts.incremental.is_none() {
89 debug!("save_work_products()");
// Suppress dep-graph tracking while saving.
90 let _ignore = sess.dep_graph.in_ignore();
91 let path = work_products_path(sess);
92 save_in(sess, path, |e| encode_work_products(sess, e));
94 // We also need to clean out old work-products, as not all of them are
95 // deleted during invalidation. Some object files don't change their
96 // content, they are just not needed anymore.
97 let new_work_products = sess.dep_graph.work_products();
98 let previous_work_products = sess.dep_graph.previous_work_products();
// Delete files for any previous work product that was not re-registered
// in this session.
100 for (id, wp) in previous_work_products.iter() {
101 if !new_work_products.contains_key(id) {
102 work_product::delete_workproduct_files(sess, wp);
// Debug builds only: verify the deletion actually removed the files.
103 debug_assert!(wp.saved_files.iter().all(|&(_, ref file_name)| {
104 !in_incr_comp_dir_sess(sess, file_name).exists()
109 // Check that we did not delete one of the current work-products:
// (presumably wrapped in a debug_assert! whose head is elided here)
111 new_work_products.iter()
112 .flat_map(|(_, wp)| wp.saved_files
114 .map(|&(_, ref name)| name))
115 .map(|name| in_incr_comp_dir_sess(sess, name))
116 .all(|path| path.exists())
// Serialize data (produced by the `encode` closure) into the file at
// `path_buf`, reporting any I/O or encoding failure via `sess.err`.
//
// NOTE(review): embedded line numbers skip (130 -> 132 -> 135, ...), so
// the match arms (`Ok`/`Err` heads), early returns, and closing braces
// are elided from this view.
120 fn save_in<F>(sess: &Session, path_buf: PathBuf, encode: F)
121 where F: FnOnce(&mut Encoder) -> io::Result<()>
123 debug!("save: storing data in {}", path_buf.display());
125 // delete the old dep-graph, if any
126 // Note: It's important that we actually delete the old file and not just
127 // truncate and overwrite it, since it might be a shared hard-link, the
128 // underlying data of which we don't want to modify
129 if path_buf.exists() {
130 match fs::remove_file(&path_buf) {
132 debug!("save: remove old file");
// On failure, report the error and (presumably) return without saving.
135 sess.err(&format!("unable to delete old dep-graph at `{}`: {}",
143 // generate the data in a memory buffer
144 let mut wr = Cursor::new(Vec::new());
// Header identifies the file format/version; write errors on an
// in-memory Cursor cannot occur, hence the unwrap.
145 file_format::write_file_header(&mut wr).unwrap();
146 match encode(&mut Encoder::new(&mut wr)) {
149 sess.err(&format!("could not encode dep-graph to `{}`: {}",
156 // write the data out
157 let data = wr.into_inner();
158 match File::create(&path_buf).and_then(|mut file| file.write_all(&data)) {
160 debug!("save: data written to disk successfully");
163 sess.err(&format!("failed to write dep-graph to `{}`: {}",
// Serialize the reduced dep-graph: first the dep-tracking hash of the
// commandline arguments, then the DefIdDirectory, then the graph itself.
//
// NOTE(review): embedded line numbers skip (184 -> 186, 213 -> 218, ...),
// so the `match` on the edge target, parts of the `SerializedDepGraph`
// struct literal, and several closing braces are elided from this view.
// The return type (presumably io::Result<()>, given the `?` uses) is
// also not visible.
171 pub fn encode_dep_graph(preds: &Predecessors,
172 builder: &mut DefIdDirectoryBuilder,
173 encoder: &mut Encoder)
175 // First encode the commandline arguments hash
176 let tcx = builder.tcx();
177 tcx.sess.opts.dep_tracking_hash().encode(encoder)?;
179 // Create a flat list of (Input, WorkProduct) edges for
// (rest of this comment elided in this view)
181 let mut edges = FxHashMap();
182 for edge in preds.reduced_graph.all_edges() {
183 let source = *preds.reduced_graph.node_data(edge.source());
184 let target = *preds.reduced_graph.node_data(edge.target());
// Match head on `target` is elided; this arm skips local metadata nodes,
// which encode_metadata_hashes already handled.
186 DepNode::MetaData(ref def_id) => {
187 // Metadata *targets* are always local metadata nodes. We have
188 // already handled those in `encode_metadata_hashes`.
189 assert!(def_id.is_local());
194 debug!("serialize edge: {:?} -> {:?}", source, target);
// Map DefIds to directory indices so the edge list is position-independent.
195 let source = builder.map(source);
196 let target = builder.map(target);
197 edges.entry(source).or_insert(vec![]).push(target);
// With -Z incremental-dump-hash, print every input hash.
200 if tcx.sess.opts.debugging_opts.incremental_dump_hash {
201 for (dep_node, hash) in &preds.hashes {
202 println!("HIR hash for {:?} is {}", dep_node, hash);
206 // Create the serialized dep-graph.
207 let bootstrap_outputs = preds.bootstrap_outputs.iter()
208 .map(|n| builder.map(n))
210 let edges = edges.into_iter()
211 .map(|(k, v)| SerializedEdgeSet { source: k, targets: v })
213 let graph = SerializedDepGraph {
218 .map(|(&dep_node, &hash)| {
220 dep_node: builder.map(dep_node),
// With -Z incremental-info, print serialized-graph size statistics.
227 if tcx.sess.opts.debugging_opts.incremental_info {
228 println!("incremental: {} nodes in reduced dep-graph", preds.reduced_graph.len_nodes());
229 println!("incremental: {} edges in serialized dep-graph", graph.edges.len());
230 println!("incremental: {} hashes in serialized dep-graph", graph.hashes.len());
233 debug!("graph = {:#?}", graph);
235 // Encode the directory and then the graph data.
236 builder.directory().encode(encoder)?;
237 graph.encode(encoder)?;
// Compute and serialize a hash for every exported (local) metadata item,
// so that downstream crates can tell whether an item's metadata changed
// between compilations.
//
// NOTE(review): embedded line numbers skip (242 -> 244, 290 -> 295, ...),
// so parameters are elided from the visible signature — `svh` is encoded
// at the end but never declared here — and several closure/loop closers
// are missing from this view.
242 pub fn encode_metadata_hashes(tcx: TyCtxt,
244 preds: &Predecessors,
245 builder: &mut DefIdDirectoryBuilder,
246 current_metadata_hashes: &mut FxHashMap<DefId, Fingerprint>,
247 encoder: &mut Encoder)
249 // For each `MetaData(X)` node where `X` is local, accumulate a
250 // hash. These are the metadata items we export. Downstream
251 // crates will want to see a hash that tells them whether we might
252 // have changed the metadata for a given item since they last
// (rest of this comment elided in this view)
255 // (I initially wrote this with an iterator, but it seemed harder to read.)
256 let mut serialized_hashes = SerializedMetadataHashes {
258 index_map: FxHashMap()
// Walk every node of the reduced graph, keeping only local MetaData nodes.
261 for (index, target) in preds.reduced_graph.all_nodes().iter().enumerate() {
262 let index = NodeIndex(index);
263 let def_id = match *target.data {
264 DepNode::MetaData(def_id) if def_id.is_local() => def_id,
268 // To create the hash for each item `X`, we don't hash the raw
269 // bytes of the metadata (though in principle we
270 // could). Instead, we walk the predecessors of `MetaData(X)`
271 // from the dep-graph. This corresponds to all the inputs that
272 // were read to construct the metadata. To create the hash for
273 // the metadata, we hash (the hash of) all of those inputs.
274 debug!("save: computing metadata hash for {:?}", def_id);
276 // Create a vector containing a pair of (source-id, hash).
277 // The source-id is stored as a `DepNode<u64>`, where the u64
278 // is the det. hash of the def-path. This is convenient
279 // because we can sort this to get a stable ordering across
280 // compilations, even if the def-ids themselves have changed.
281 let mut hashes: Vec<(DepNode<u64>, Fingerprint)> =
283 .depth_traverse(index, INCOMING)
284 .map(|index| preds.reduced_graph.node_data(index))
285 .filter(|dep_node| HashContext::is_hashable(dep_node))
287 let hash_dep_node = dep_node.map_def(|&def_id| Some(tcx.def_path_hash(def_id)))
289 let hash = preds.hashes[dep_node];
290 (hash_dep_node, hash)
// Fold the (sorted — sort call presumably elided) input hashes into one
// fingerprint for this item.
295 let mut state = IchHasher::new();
296 hashes.hash(&mut state);
297 let hash = state.finish();
299 debug!("save: metadata hash for {:?} is {}", def_id, hash);
// With -Z incremental-dump-hash, print this hash and all its inputs.
301 if tcx.sess.opts.debugging_opts.incremental_dump_hash {
302 println!("metadata hash for {:?} is {}", def_id, hash);
303 for pred_index in preds.reduced_graph.depth_traverse(index, INCOMING) {
304 let dep_node = preds.reduced_graph.node_data(pred_index);
305 if HashContext::is_hashable(&dep_node) {
306 println!("metadata hash for {:?} depends on {:?} with hash {}",
307 def_id, dep_node, preds.hashes[dep_node]);
312 serialized_hashes.hashes.push(SerializedMetadataHash {
313 def_index: def_id.index,
// With -Z query-dep-graph, additionally record an index map and expose
// the hashes to the caller for dirty/clean checking.
318 if tcx.sess.opts.debugging_opts.query_dep_graph {
319 for serialized_hash in &serialized_hashes.hashes {
320 let def_id = DefId::local(serialized_hash.def_index);
322 // Store entry in the index_map
323 let def_path_index = builder.add(def_id);
324 serialized_hashes.index_map.insert(def_id.index, def_path_index);
326 // Record hash in current_metadata_hashes
327 current_metadata_hashes.insert(def_id, serialized_hash.hash);
330 debug!("save: stored index_map (len={}) for serialized hashes",
331 serialized_hashes.index_map.len());
334 // Encode everything.
// NOTE(review): `svh` comes from an elided parameter — confirm against
// the original signature.
335 svh.encode(encoder)?;
336 serialized_hashes.encode(encoder)?;
// Serialize the session's work products as a Vec<SerializedWorkProduct>.
//
// NOTE(review): embedded line numbers skip (342 -> 345, 346 -> 348, ...),
// so the middle of the iterator chain (presumably `.work_products()` /
// `.iter()`) and the `id` field of the struct literal are elided here.
341 pub fn encode_work_products(sess: &Session, encoder: &mut Encoder) -> io::Result<()> {
342 let work_products: Vec<_> = sess.dep_graph
345 .map(|(id, work_product)| {
346 SerializedWorkProduct {
// Each entry owns a clone of the work product data.
348 work_product: work_product.clone(),
// Returns the io::Result produced by encoding the vector.
353 work_products.encode(encoder)