// except according to those terms.
use std::fmt::Debug;
+use std::sync::Arc;
macro_rules! try_opt {
($e:expr) => (
// in an extern crate.
MetaData(D),
+ // Represents some artifact that we save to disk. Note that these
+ // do not have a def-id as part of their identifier.
+ WorkProduct(Arc<WorkProductId>),
+
// Represents different phases in the compiler.
CrateReader,
CollectLanguageItems,
TransCrate => Some(TransCrate),
TransWriteMetadata => Some(TransWriteMetadata),
LinkBinary => Some(LinkBinary),
+
+ // work product names do not need to be mapped, because
+ // they are always absolute.
+ WorkProduct(ref id) => Some(WorkProduct(id.clone())),
+
Hir(ref d) => op(d).map(Hir),
MetaData(ref d) => op(d).map(MetaData),
CollectItem(ref d) => op(d).map(CollectItem),
}
}
}
+
+/// A "work product" corresponds to a `.o` (or other) file that we
+/// save in between runs. These ids do not have a DefId but rather
+/// some independent path or string that persists between runs without
+/// the need to be mapped or unmapped. (This ensures we can serialize
+/// them even in the absence of a tcx.)
+#[derive(Clone, Debug, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)]
+pub enum WorkProductId {
+ PartitionObjectFile(String), // see (*TransPartition) below
+}
+
// except according to those terms.

use hir::def_id::DefId;
use rustc_data_structures::fnv::FnvHashMap;
use std::cell::{Ref, RefCell};
use std::rc::Rc;
use std::sync::Arc;

use super::dep_node::{DepNode, WorkProductId};
use super::query::DepGraphQuery;
use super::raii;
use super::thread::{DepGraphThreadData, DepMessage};
#[derive(Clone)]
pub struct DepGraph {
- data: Rc<DepGraphThreadData>
+ data: Rc<DepGraphData>
+}
+
+struct DepGraphData {
+ /// we send messages to the thread to let it build up the dep-graph
+ /// from the current run
+ thread: DepGraphThreadData,
+
+ /// when we load, there may be `.o` files, cached mir, or other such
+ /// things available to us. If we find that they are not dirty, we
+ /// load the path to the file storing those work-products here into
+ /// this map. We can later look for and extract that data.
+ previous_work_products: RefCell<FnvHashMap<Arc<WorkProductId>, WorkProduct>>,
+
+ /// work-products that we generate in this run
+ work_products: RefCell<FnvHashMap<Arc<WorkProductId>, WorkProduct>>,
}
impl DepGraph {
pub fn new(enabled: bool) -> DepGraph {
DepGraph {
- data: Rc::new(DepGraphThreadData::new(enabled))
+ data: Rc::new(DepGraphData {
+ thread: DepGraphThreadData::new(enabled),
+ previous_work_products: RefCell::new(FnvHashMap()),
+ work_products: RefCell::new(FnvHashMap())
+ })
}
}
/// True if dep-graph construction is enabled for this session; if
/// not, then the other methods on this `DepGraph` will have no net
/// effect.
#[inline]
pub fn enabled(&self) -> bool {
    self.data.thread.enabled()
}
/// Returns a query object describing the dep-graph recorded so far,
/// as assembled by the background thread.
pub fn query(&self) -> DepGraphQuery<DefId> {
    self.data.thread.query()
}
/// Enters an "ignore" region: returns an RAII guard tied to the
/// dep-graph thread (see `raii::IgnoreTask` for the exact semantics
/// of what is suppressed while the guard lives).
pub fn in_ignore<'graph>(&'graph self) -> raii::IgnoreTask<'graph> {
    raii::IgnoreTask::new(&self.data.thread)
}
/// Enters a task region for the dep-node `key`: returns an RAII
/// guard tied to the dep-graph thread (see `raii::DepTask`).
pub fn in_task<'graph>(&'graph self, key: DepNode<DefId>) -> raii::DepTask<'graph> {
    raii::DepTask::new(&self.data.thread, key)
}
pub fn with_ignore<OP,R>(&self, op: OP) -> R
}
/// Records a read of dep-node `v` by enqueueing a message to the
/// dep-graph thread.
pub fn read(&self, v: DepNode<DefId>) {
    self.data.thread.enqueue(DepMessage::Read(v));
}
/// Records a write of dep-node `v` by enqueueing a message to the
/// dep-graph thread.
pub fn write(&self, v: DepNode<DefId>) {
    self.data.thread.enqueue(DepMessage::Write(v));
}
+
+ /// Indicates that a previous work product exists for `v`. This is
+ /// invoked during initial start-up based on what nodes are clean
+ /// (and what files exist in the incr. directory).
+ pub fn insert_previous_work_product(&self, v: &Arc<WorkProductId>, data: WorkProduct) {
+ debug!("insert_previous_work_product({:?}, {:?})", v, data);
+ self.data.previous_work_products.borrow_mut()
+ .insert(v.clone(), data);
+ }
+
+ /// Indicates that we created the given work-product in this run
+ /// for `v`. This record will be preserved and loaded in the next
+ /// run.
+ pub fn insert_work_product(&self, v: &Arc<WorkProductId>, data: WorkProduct) {
+ debug!("insert_work_product({:?}, {:?})", v, data);
+ self.data.work_products.borrow_mut()
+ .insert(v.clone(), data);
}
+
+ /// Check whether a previous work product exists for `v` and, if
+ /// so, return the path that leads to it. Used to skip doing work.
+ pub fn previous_work_product(&self, v: &Arc<WorkProductId>) -> Option<WorkProduct> {
+ self.data.previous_work_products.borrow()
+ .get(v)
+ .cloned()
+ }
+
+ /// Access the map of work-products created during this run. Only
+ /// used during saving of the dep-graph.
+ pub fn work_products(&self) -> Ref<FnvHashMap<Arc<WorkProductId>, WorkProduct>> {
+ self.data.work_products.borrow()
+ }
+}
+
+/// A "work product" is an intermediate result that we save into the
+/// incremental directory for later re-use. The primary example are
+/// the object files that we save for each partition at code
+/// generation time.
+///
+/// Each work product is associated with a dep-node, representing the
+/// process that produced the work-product. If that dep-node is found
+/// to be dirty when we load up, then we will delete the work-product
+/// at load time. If the work-product is found to be clean, the we
+/// will keep a record in the `previous_work_products` list.
+///
+/// In addition, work products have an associated hash. This hash is
+/// an extra hash that can be used to decide if the work-product from
+/// a previous compilation can be re-used (in addition to the dirty
+/// edges check).
+///
+/// As the primary example, consider the object files we generate for
+/// each partition. In the first run, we create partitions based on
+/// the symbols that need to be compiled. For each partition P, we
+/// hash the symbols in P and create a `WorkProduct` record associated
+/// with `DepNode::TransPartition(P)`; the hash is the set of symbols
+/// in P.
+///
+/// The next time we compile, if the `DepNode::TransPartition(P)` is
+/// judged to be clean (which means none of the things we read to
+/// generate the partition were found to be dirty), it will be loaded
+/// into previous work products. We will then regenerate the set of
+/// symbols in the partition P and hash them (note that new symbols
+/// may be added -- for example, new monomorphizations -- even if
+/// nothing in P changed!). We will compare that hash against the
+/// previous hash. If it matches up, we can reuse the object file.
+#[derive(Clone, Debug, RustcEncodable, RustcDecodable)]
+pub struct WorkProduct {
+ /// extra hash used to decide if work-product is still suitable;
+ /// note that this is *not* a hash of the work-product itself.
+ /// See documentation on `WorkProduct` type for an example.
+ pub input_hash: u64,
+
+ /// filename storing this work-product (found in the incr. comp. directory)
+ pub file_name: String,
}