"toml-query 0.7.0 (registry+https://github.com/rust-lang/crates.io-index)",
]
+[[package]]
+name = "measureme"
+version = "0.2.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+dependencies = [
+ "byteorder 1.2.7 (registry+https://github.com/rust-lang/crates.io-index)",
+ "memmap 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
+ "rustc-hash 1.0.1 (registry+https://github.com/rust-lang/crates.io-index)",
+]
+
[[package]]
name = "memchr"
version = "2.1.1"
"jobserver 0.1.13 (registry+https://github.com/rust-lang/crates.io-index)",
"lazy_static 1.2.0 (registry+https://github.com/rust-lang/crates.io-index)",
"log 0.4.6 (registry+https://github.com/rust-lang/crates.io-index)",
+ "measureme 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)",
"num_cpus 1.8.0 (registry+https://github.com/rust-lang/crates.io-index)",
"parking_lot 0.7.1 (registry+https://github.com/rust-lang/crates.io-index)",
"polonius-engine 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)",
"checksum matches 0.1.8 (registry+https://github.com/rust-lang/crates.io-index)" = "7ffc5c5338469d4d3ea17d269fa8ea3512ad247247c30bd2df69e68309ed0a08"
"checksum mdbook 0.1.7 (registry+https://github.com/rust-lang/crates.io-index)" = "90b5a8d7e341ceee5db3882a06078d42661ddcfa2b3687319cc5da76ec4e782f"
"checksum mdbook 0.2.3 (registry+https://github.com/rust-lang/crates.io-index)" = "0ba0d44cb4089c741b9a91f3e5218298a40699c2f3a070a85014eed290c60819"
+"checksum measureme 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "36bb2b263a6795d352035024d6b30ce465bb79a5e5280d74c3b5f8464c657bcc"
"checksum memchr 2.1.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0a3eb002f0535929f1199681417029ebea04aadc0c7a4224b46be99c7f5d6a16"
"checksum memmap 0.6.2 (registry+https://github.com/rust-lang/crates.io-index)" = "e2ffa2c986de11a9df78620c01eeaaf27d94d3ff02bf81bfcca953102dd0c6ff"
"checksum memoffset 0.2.1 (registry+https://github.com/rust-lang/crates.io-index)" = "0f9dc261e2b62d7a622bf416ea3c5245cdd5d9a7fcc428c0d06804dfce1775b3"
chalk-engine = { version = "0.9.0", default-features=false }
rustc_fs_util = { path = "../librustc_fs_util" }
smallvec = { version = "0.6.7", features = ["union", "may_dangle"] }
+measureme = "0.2.1"
# Note that these dependencies are a lie, they're just here to get linkage to
# work.
use std::time::Duration;
use std::sync::{Arc, mpsc};
-use parking_lot::Mutex as PlMutex;
-
mod code_stats;
pub mod config;
pub mod filesearch;
pub profile_channel: Lock<Option<mpsc::Sender<ProfileQueriesMsg>>>,
/// Used by -Z self-profile
- pub self_profiling: Option<Arc<PlMutex<SelfProfiler>>>,
+ pub self_profiling: Option<Arc<SelfProfiler>>,
/// Some measurements that are being gathered during compilation.
pub perf_stats: PerfStats,
#[inline(never)]
#[cold]
- fn profiler_active<F: FnOnce(&mut SelfProfiler) -> ()>(&self, f: F) {
+ fn profiler_active<F: FnOnce(&SelfProfiler) -> ()>(&self, f: F) {
match &self.self_profiling {
None => bug!("profiler_active() called but there was no profiler active"),
Some(profiler) => {
- let mut p = profiler.lock();
-
- f(&mut p);
+ f(&profiler);
}
}
}
#[inline(always)]
- pub fn profiler<F: FnOnce(&mut SelfProfiler) -> ()>(&self, f: F) {
+ pub fn profiler<F: FnOnce(&SelfProfiler) -> ()>(&self, f: F) {
if unlikely!(self.self_profiling.is_some()) {
self.profiler_active(f)
}
driver_lint_caps: FxHashMap<lint::LintId, lint::Level>,
) -> Session {
let self_profiler =
- if sopts.debugging_opts.self_profile { Some(Arc::new(PlMutex::new(SelfProfiler::new()))) }
+ if sopts.debugging_opts.self_profile {
+ let profiler = SelfProfiler::new();
+ match profiler {
+ Ok(profiler) => {
+ crate::ty::query::QueryName::register_with_profiler(&profiler);
+ Some(Arc::new(profiler))
+ },
+ Err(e) => {
+ early_warn(sopts.error_format, &format!("failed to create profiler: {}", e));
+ None
+ }
+ }
+ }
else { None };
let host_triple = TargetTriple::from_triple(config::host_triple());
use crate::hir::def_id::{CrateNum, DefId};
use crate::ty::TyCtxt;
use crate::ty::query::queries;
-use crate::ty::query::Query;
+use crate::ty::query::{Query, QueryName};
use crate::ty::query::QueryCache;
use crate::ty::query::plumbing::CycleError;
use crate::util::profiling::ProfileCategory;
// Query configuration and description traits.
pub trait QueryConfig<'tcx> {
- const NAME: &'static str;
+ const NAME: QueryName;
const CATEGORY: ProfileCategory;
type Key: Eq + Hash + Clone + Debug;
use crate::util::nodemap::{DefIdSet, DefIdMap, ItemLocalSet};
use crate::util::common::{ErrorReported};
use crate::util::profiling::ProfileCategory::*;
-use crate::session::Session;
use rustc_data_structures::svh::Svh;
use rustc_data_structures::bit_set::BitSet;
let mut lock = cache.borrow_mut();
if let Some(value) = lock.results.get(key) {
profq_msg!(tcx, ProfileQueriesMsg::CacheHit);
- tcx.sess.profiler(|p| p.record_query_hit(Q::NAME, Q::CATEGORY));
+ tcx.sess.profiler(|p| p.record_query_hit(Q::NAME));
let result = (value.value.clone(), value.index);
#[cfg(debug_assertions)]
{
//in another thread has completed. Record how long we wait in the
//self-profiler
#[cfg(parallel_compiler)]
- tcx.sess.profiler(|p| p.query_blocked_start(Q::NAME, Q::CATEGORY));
+ tcx.sess.profiler(|p| p.query_blocked_start(Q::NAME));
job.clone()
},
#[cfg(parallel_compiler)]
{
let result = job.r#await(tcx, span);
- tcx.sess.profiler(|p| p.query_blocked_end(Q::NAME, Q::CATEGORY));
+ tcx.sess.profiler(|p| p.query_blocked_end(Q::NAME));
if let Err(cycle) = result {
return TryGetJob::Cycle(Q::handle_cycle_error(tcx, cycle));
key: Q::Key)
-> Q::Value {
debug!("ty::query::get_query<{}>(key={:?}, span={:?})",
- Q::NAME,
+ Q::NAME.as_str(),
key,
span);
profq_msg!(self,
ProfileQueriesMsg::QueryBegin(
span.data(),
- profq_query_msg!(Q::NAME, self, key),
+ profq_query_msg!(Q::NAME.as_str(), self, key),
)
);
if dep_node.kind.is_anon() {
profq_msg!(self, ProfileQueriesMsg::ProviderBegin);
- self.sess.profiler(|p| p.start_query(Q::NAME, Q::CATEGORY));
+ self.sess.profiler(|p| p.start_query(Q::NAME));
let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
self.start_query(job.job.clone(), diagnostics, |tcx| {
})
});
- self.sess.profiler(|p| p.end_query(Q::NAME, Q::CATEGORY));
+ self.sess.profiler(|p| p.end_query(Q::NAME));
profq_msg!(self, ProfileQueriesMsg::ProviderEnd);
self.dep_graph.read_index(dep_node_index);
let result = if let Some(result) = result {
profq_msg!(self, ProfileQueriesMsg::CacheHit);
- self.sess.profiler(|p| p.record_query_hit(Q::NAME, Q::CATEGORY));
+ self.sess.profiler(|p| p.record_query_hit(Q::NAME));
result
} else {
// We could not load a result from the on-disk cache, so
// recompute.
- self.sess.profiler(|p| p.start_query(Q::NAME, Q::CATEGORY));
+ self.sess.profiler(|p| p.start_query(Q::NAME));
// The dep-graph for this computation is already in
// place
Q::compute(self, key)
});
- self.sess.profiler(|p| p.end_query(Q::NAME, Q::CATEGORY));
+ self.sess.profiler(|p| p.end_query(Q::NAME));
result
};
key, dep_node);
profq_msg!(self, ProfileQueriesMsg::ProviderBegin);
- self.sess.profiler(|p| p.start_query(Q::NAME, Q::CATEGORY));
+ self.sess.profiler(|p| p.start_query(Q::NAME));
let ((result, dep_node_index), diagnostics) = with_diagnostics(|diagnostics| {
self.start_query(job.job.clone(), diagnostics, |tcx| {
})
});
- self.sess.profiler(|p| p.end_query(Q::NAME, Q::CATEGORY));
+ self.sess.profiler(|p| p.end_query(Q::NAME));
profq_msg!(self, ProfileQueriesMsg::ProviderEnd);
if unlikely!(self.sess.opts.debugging_opts.query_dep_graph) {
let _ = self.get_query::<Q>(DUMMY_SP, key);
} else {
profq_msg!(self, ProfileQueriesMsg::CacheHit);
- self.sess.profiler(|p| p.record_query_hit(Q::NAME, Q::CATEGORY));
+ self.sess.profiler(|p| p.record_query_hit(Q::NAME));
}
}
) {
profq_msg!(
self,
- ProfileQueriesMsg::QueryBegin(span.data(), profq_query_msg!(Q::NAME, self, key))
+ ProfileQueriesMsg::QueryBegin(span.data(),
+ profq_query_msg!(Q::NAME.as_str(), self, key))
);
// We may be concurrently trying both execute and force a query
}
}
- pub fn record_computed_queries(&self, sess: &Session) {
- sess.profiler(|p| {
- $(
- p.record_computed_queries(
- <queries::$name<'_> as QueryConfig<'_>>::NAME,
- <queries::$name<'_> as QueryConfig<'_>>::CATEGORY,
- self.$name.lock().results.len()
- );
- )*
- });
- }
-
#[cfg(parallel_compiler)]
pub fn collect_active_jobs(&self) -> Vec<Lrc<QueryJob<$tcx>>> {
let mut jobs = Vec::new();
}
}
+ #[allow(nonstandard_style)]
+ #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
+ pub enum QueryName {
+ $($name),*
+ }
+
+ impl QueryName {
+ pub fn register_with_profiler(profiler: &crate::util::profiling::SelfProfiler) {
+ $(profiler.register_query_name(QueryName::$name);)*
+ }
+
+ pub fn as_str(&self) -> &'static str {
+ match self {
+ $(QueryName::$name => stringify!($name),)*
+ }
+ }
+ }
+
#[allow(nonstandard_style)]
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum Query<$tcx> {
$(Query::$name(key) => key.default_span(tcx),)*
}
}
+
+ pub fn query_name(&self) -> QueryName {
+ match self {
+ $(Query::$name(_) => QueryName::$name,)*
+ }
+ }
}
impl<'a, $tcx> HashStable<StableHashingContext<'a>> for Query<$tcx> {
type Key = $K;
type Value = $V;
- const NAME: &'static str = stringify!($name);
+ const NAME: QueryName = QueryName::$name;
const CATEGORY: ProfileCategory = $category;
}
use std::borrow::Cow;
-use std::fs;
-use std::io::{BufWriter, Write};
-use std::mem;
+use std::error::Error;
+use std::mem::{self, Discriminant};
use std::process;
use std::thread::ThreadId;
-use std::time::{Duration, Instant, SystemTime};
+use std::u32;
-use crate::session::config::Options;
+use crate::ty::query::QueryName;
-use rustc_data_structures::fx::FxHashMap;
+use measureme::{StringId, TimestampKind};
+
+/// MmapSerializationSink is faster on macOS and Linux,
+/// but FileSerializationSink is faster on Windows.
+#[cfg(not(windows))]
+type Profiler = measureme::Profiler<measureme::MmapSerializationSink>;
+#[cfg(windows)]
+type Profiler = measureme::Profiler<measureme::FileSerializationSink>;
#[derive(Clone, Copy, Debug, PartialEq, Eq, Ord, PartialOrd)]
pub enum ProfileCategory {
QueryBlockedEnd { query_name: &'static str, category: ProfileCategory, time: u64 },
}
-impl ProfilerEvent {
- fn timestamp(&self) -> u64 {
- use self::ProfilerEvent::*;
-
- match self {
- QueryStart { time, .. } |
- QueryEnd { time, .. } |
- GenericActivityStart { time, .. } |
- GenericActivityEnd { time, .. } |
- QueryCacheHit { time, .. } |
- QueryCount { time, .. } |
- IncrementalLoadResultStart { time, .. } |
- IncrementalLoadResultEnd { time, .. } |
- QueryBlockedStart { time, .. } |
- QueryBlockedEnd { time, .. } => *time
- }
- }
-}
-
fn thread_id_to_u64(tid: ThreadId) -> u64 {
unsafe { mem::transmute::<ThreadId, u64>(tid) }
}
pub struct SelfProfiler {
- events: FxHashMap<ThreadId, Vec<ProfilerEvent>>,
- start_time: SystemTime,
- start_instant: Instant,
+ profiler: Profiler,
+ query_event_kind: StringId,
+ generic_activity_event_kind: StringId,
+ incremental_load_result_event_kind: StringId,
+ query_blocked_event_kind: StringId,
+ query_cache_hit_event_kind: StringId,
}
impl SelfProfiler {
- pub fn new() -> SelfProfiler {
- let profiler = SelfProfiler {
- events: Default::default(),
- start_time: SystemTime::now(),
- start_instant: Instant::now(),
+ pub fn new() -> Result<SelfProfiler, Box<dyn Error>> {
+ let filename = format!("pid-{}.rustc_profile", process::id());
+ let path = std::path::Path::new(&filename);
+ let profiler = Profiler::new(path)?;
+
+ let query_event_kind = profiler.alloc_string("Query");
+ let generic_activity_event_kind = profiler.alloc_string("GenericActivity");
+ let incremental_load_result_event_kind = profiler.alloc_string("IncrementalLoadResult");
+ let query_blocked_event_kind = profiler.alloc_string("QueryBlocked");
+ let query_cache_hit_event_kind = profiler.alloc_string("QueryCacheHit");
+
+ Ok(SelfProfiler {
+ profiler,
+ query_event_kind,
+ generic_activity_event_kind,
+ incremental_load_result_event_kind,
+ query_blocked_event_kind,
+ query_cache_hit_event_kind,
+ })
+ }
+
+ fn get_query_name_string_id(query_name: QueryName) -> StringId {
+ let discriminant = unsafe {
+ mem::transmute::<Discriminant<QueryName>, u64>(mem::discriminant(&query_name))
};
- profiler
+ StringId::reserved(discriminant as u32)
+ }
+
+ pub fn register_query_name(&self, query_name: QueryName) {
+ let id = SelfProfiler::get_query_name_string_id(query_name);
+
+ self.profiler.alloc_string_with_reserved_id(id, query_name.as_str());
}
#[inline]
pub fn start_activity(
- &mut self,
- category: ProfileCategory,
+ &self,
label: impl Into<Cow<'static, str>>,
) {
- self.record(ProfilerEvent::GenericActivityStart {
- category,
- label: label.into(),
- time: self.get_time_from_start(),
- })
+ self.record(&label.into(), self.generic_activity_event_kind, TimestampKind::Start);
}
#[inline]
pub fn end_activity(
- &mut self,
- category: ProfileCategory,
+ &self,
label: impl Into<Cow<'static, str>>,
) {
- self.record(ProfilerEvent::GenericActivityEnd {
- category,
- label: label.into(),
- time: self.get_time_from_start(),
- })
+ self.record(&label.into(), self.generic_activity_event_kind, TimestampKind::End);
}
#[inline]
- pub fn record_computed_queries(
- &mut self,
- query_name: &'static str,
- category: ProfileCategory,
- count: usize)
- {
- self.record(ProfilerEvent::QueryCount {
- query_name,
- category,
- count,
- time: self.get_time_from_start(),
- })
+ pub fn record_query_hit(&self, query_name: QueryName) {
+ self.record_query(query_name, self.query_cache_hit_event_kind, TimestampKind::Instant);
}
#[inline]
- pub fn record_query_hit(&mut self, query_name: &'static str, category: ProfileCategory) {
- self.record(ProfilerEvent::QueryCacheHit {
- query_name,
- category,
- time: self.get_time_from_start(),
- })
+ pub fn start_query(&self, query_name: QueryName) {
+ self.record_query(query_name, self.query_event_kind, TimestampKind::Start);
}
#[inline]
- pub fn start_query(&mut self, query_name: &'static str, category: ProfileCategory) {
- self.record(ProfilerEvent::QueryStart {
- query_name,
- category,
- time: self.get_time_from_start(),
- });
+ pub fn end_query(&self, query_name: QueryName) {
+ self.record_query(query_name, self.query_event_kind, TimestampKind::End);
}
#[inline]
- pub fn end_query(&mut self, query_name: &'static str, category: ProfileCategory) {
- self.record(ProfilerEvent::QueryEnd {
+ pub fn incremental_load_result_start(&self, query_name: QueryName) {
+ self.record_query(
query_name,
- category,
- time: self.get_time_from_start(),
- })
+ self.incremental_load_result_event_kind,
+ TimestampKind::Start
+ );
}
#[inline]
- pub fn incremental_load_result_start(&mut self, query_name: &'static str) {
- self.record(ProfilerEvent::IncrementalLoadResultStart {
- query_name,
- time: self.get_time_from_start(),
- })
+ pub fn incremental_load_result_end(&self, query_name: QueryName) {
+ self.record_query(query_name, self.incremental_load_result_event_kind, TimestampKind::End);
}
#[inline]
- pub fn incremental_load_result_end(&mut self, query_name: &'static str) {
- self.record(ProfilerEvent::IncrementalLoadResultEnd {
- query_name,
- time: self.get_time_from_start(),
- })
- }
-
- #[inline]
- pub fn query_blocked_start(&mut self, query_name: &'static str, category: ProfileCategory) {
- self.record(ProfilerEvent::QueryBlockedStart {
- query_name,
- category,
- time: self.get_time_from_start(),
- })
+ pub fn query_blocked_start(&self, query_name: QueryName) {
+ self.record_query(query_name, self.query_blocked_event_kind, TimestampKind::Start);
}
#[inline]
- pub fn query_blocked_end(&mut self, query_name: &'static str, category: ProfileCategory) {
- self.record(ProfilerEvent::QueryBlockedEnd {
- query_name,
- category,
- time: self.get_time_from_start(),
- })
+ pub fn query_blocked_end(&self, query_name: QueryName) {
+ self.record_query(query_name, self.query_blocked_event_kind, TimestampKind::End);
}
#[inline]
- fn record(&mut self, event: ProfilerEvent) {
- let thread_id = std::thread::current().id();
- let events = self.events.entry(thread_id).or_default();
+ fn record(&self, event_id: &str, event_kind: StringId, timestamp_kind: TimestampKind) {
+ let thread_id = thread_id_to_u64(std::thread::current().id());
- events.push(event);
+ let event_id = self.profiler.alloc_string(event_id);
+ self.profiler.record_event(event_kind, event_id, thread_id, timestamp_kind);
}
#[inline]
- fn get_time_from_start(&self) -> u64 {
- let duration = Instant::now() - self.start_instant;
- duration.as_nanos() as u64
- }
-
- pub fn dump_raw_events(&self, opts: &Options) {
- use self::ProfilerEvent::*;
-
- let pid = process::id();
-
- let filename =
- format!("{}.profile_events.json", opts.crate_name.clone().unwrap_or_default());
-
- let mut file = BufWriter::new(fs::File::create(filename).unwrap());
-
- let threads: Vec<_> =
- self.events
- .keys()
- .into_iter()
- .map(|tid| format!("{}", thread_id_to_u64(*tid)))
- .collect();
-
- write!(file,
- "{{\
- \"processes\": {{\
- \"{}\": {{\
- \"threads\": [{}],\
- \"crate_name\": \"{}\",\
- \"opt_level\": \"{:?}\",\
- \"incremental\": {}\
- }}\
- }},\
- \"events\": [\
- ",
- pid,
- threads.join(","),
- opts.crate_name.clone().unwrap_or_default(),
- opts.optimize,
- if opts.incremental.is_some() { "true" } else { "false" },
- ).unwrap();
-
- let mut is_first = true;
- for (thread_id, events) in &self.events {
- let thread_id = thread_id_to_u64(*thread_id);
-
- for event in events {
- if is_first {
- is_first = false;
- } else {
- writeln!(file, ",").unwrap();
- }
-
- let (secs, nanos) = {
- let time = self.start_time + Duration::from_nanos(event.timestamp());
- let time_since_unix =
- time.duration_since(SystemTime::UNIX_EPOCH).unwrap_or_default();
- (time_since_unix.as_secs(), time_since_unix.subsec_nanos())
- };
+ fn record_query(
+ &self,
+ query_name: QueryName,
+ event_kind: StringId,
+ timestamp_kind: TimestampKind,
+ ) {
+ let dep_node_name = SelfProfiler::get_query_name_string_id(query_name);
- match event {
- QueryStart { query_name, category, time: _ } =>
- write!(file,
- "{{ \
- \"QueryStart\": {{ \
- \"query_name\": \"{}\",\
- \"category\": \"{:?}\",\
- \"time\": {{\
- \"secs\": {},\
- \"nanos\": {}\
- }},\
- \"thread_id\": {}\
- }}\
- }}",
- query_name,
- category,
- secs,
- nanos,
- thread_id,
- ).unwrap(),
- QueryEnd { query_name, category, time: _ } =>
- write!(file,
- "{{\
- \"QueryEnd\": {{\
- \"query_name\": \"{}\",\
- \"category\": \"{:?}\",\
- \"time\": {{\
- \"secs\": {},\
- \"nanos\": {}\
- }},\
- \"thread_id\": {}\
- }}\
- }}",
- query_name,
- category,
- secs,
- nanos,
- thread_id,
- ).unwrap(),
- GenericActivityStart { category, label, time: _ } =>
- write!(file,
- "{{
- \"GenericActivityStart\": {{\
- \"category\": \"{:?}\",\
- \"label\": \"{}\",\
- \"time\": {{\
- \"secs\": {},\
- \"nanos\": {}\
- }},\
- \"thread_id\": {}\
- }}\
- }}",
- category,
- label,
- secs,
- nanos,
- thread_id,
- ).unwrap(),
- GenericActivityEnd { category, label, time: _ } =>
- write!(file,
- "{{\
- \"GenericActivityEnd\": {{\
- \"category\": \"{:?}\",\
- \"label\": \"{}\",\
- \"time\": {{\
- \"secs\": {},\
- \"nanos\": {}\
- }},\
- \"thread_id\": {}\
- }}\
- }}",
- category,
- label,
- secs,
- nanos,
- thread_id,
- ).unwrap(),
- QueryCacheHit { query_name, category, time: _ } =>
- write!(file,
- "{{\
- \"QueryCacheHit\": {{\
- \"query_name\": \"{}\",\
- \"category\": \"{:?}\",\
- \"time\": {{\
- \"secs\": {},\
- \"nanos\": {}\
- }},\
- \"thread_id\": {}\
- }}\
- }}",
- query_name,
- category,
- secs,
- nanos,
- thread_id,
- ).unwrap(),
- QueryCount { query_name, category, count, time: _ } =>
- write!(file,
- "{{\
- \"QueryCount\": {{\
- \"query_name\": \"{}\",\
- \"category\": \"{:?}\",\
- \"count\": {},\
- \"time\": {{\
- \"secs\": {},\
- \"nanos\": {}\
- }},\
- \"thread_id\": {}\
- }}\
- }}",
- query_name,
- category,
- count,
- secs,
- nanos,
- thread_id,
- ).unwrap(),
- IncrementalLoadResultStart { query_name, time: _ } =>
- write!(file,
- "{{\
- \"IncrementalLoadResultStart\": {{\
- \"query_name\": \"{}\",\
- \"time\": {{\
- \"secs\": {},\
- \"nanos\": {}\
- }},\
- \"thread_id\": {}\
- }}\
- }}",
- query_name,
- secs,
- nanos,
- thread_id,
- ).unwrap(),
- IncrementalLoadResultEnd { query_name, time: _ } =>
- write!(file,
- "{{\
- \"IncrementalLoadResultEnd\": {{\
- \"query_name\": \"{}\",\
- \"time\": {{\
- \"secs\": {},\
- \"nanos\": {}\
- }},\
- \"thread_id\": {}\
- }}\
- }}",
- query_name,
- secs,
- nanos,
- thread_id,
- ).unwrap(),
- QueryBlockedStart { query_name, category, time: _ } =>
- write!(file,
- "{{\
- \"QueryBlockedStart\": {{\
- \"query_name\": \"{}\",\
- \"category\": \"{:?}\",\
- \"time\": {{\
- \"secs\": {},\
- \"nanos\": {}\
- }},\
- \"thread_id\": {}\
- }}\
- }}",
- query_name,
- category,
- secs,
- nanos,
- thread_id,
- ).unwrap(),
- QueryBlockedEnd { query_name, category, time: _ } =>
- write!(file,
- "{{\
- \"QueryBlockedEnd\": {{\
- \"query_name\": \"{}\",\
- \"category\": \"{:?}\",\
- \"time\": {{\
- \"secs\": {},\
- \"nanos\": {}\
- }},\
- \"thread_id\": {}\
- }}\
- }}",
- query_name,
- category,
- secs,
- nanos,
- thread_id,
- ).unwrap(),
- }
- }
- }
+ let thread_id = thread_id_to_u64(std::thread::current().id());
- write!(file, "] }}").unwrap();
+ self.profiler.record_event(event_kind, dep_node_name, thread_id, timestamp_kind);
}
}
use rustc::middle::exported_symbols::SymbolExportLevel;
use rustc::session::config::{self, Lto};
use rustc::util::common::time_ext;
-use rustc::util::profiling::ProfileCategory;
use rustc_data_structures::fx::FxHashMap;
use rustc_codegen_ssa::{ModuleCodegen, ModuleKind};
.iter()
.filter_map(symbol_filter)
.collect::<Vec<CString>>();
- let _timer = cgcx.profile_activity(ProfileCategory::Codegen,
- "generate_symbol_white_list_for_thinlto");
+ let _timer = cgcx.profile_activity("generate_symbol_white_list_for_thinlto");
info!("{} symbols to preserve in this crate", symbol_white_list.len());
// If we're performing LTO for the entire crate graph, then for each of our
}
for &(cnum, ref path) in cgcx.each_linked_rlib_for_lto.iter() {
- let _timer = cgcx.profile_activity(ProfileCategory::Codegen,
- format!("load: {}", path.display()));
+ let _timer = cgcx.profile_activity(format!("load: {}", path.display()));
let exported_symbols = cgcx.exported_symbols
.as_ref().expect("needs exported symbols for LTO");
symbol_white_list.extend(
// Like with "fat" LTO, get some better optimizations if landing pads
// are disabled by removing all landing pads.
if cgcx.no_landing_pads {
- let _timer = cgcx.profile_activity(ProfileCategory::Codegen,
- "LLVM_remove_landing_pads");
+ let _timer = cgcx.profile_activity("LLVM_remove_landing_pads");
llvm::LLVMRustMarkAllFunctionsNounwind(llmod);
save_temp_bitcode(&cgcx, &module, "thin-lto-after-nounwind");
}
use rustc::ty::TyCtxt;
use rustc_codegen_ssa::{ModuleCodegen, CompiledModule};
use rustc::util::common::time_ext;
-use rustc::util::profiling::ProfileCategory;
use rustc_fs_util::{path_to_c_string, link_or_copy};
use rustc_data_structures::small_c_str::SmallCStr;
use errors::{Handler, FatalError};
// Finally, run the actual optimization passes
{
- let _timer = cgcx.profile_activity(ProfileCategory::Codegen, "LLVM_function_passes");
+ let _timer = cgcx.profile_activity("LLVM_function_passes");
time_ext(config.time_passes,
None,
&format!("llvm function passes [{}]", module_name.unwrap()),
});
}
{
- let _timer = cgcx.profile_activity(ProfileCategory::Codegen, "LLVM_module_passes");
+ let _timer = cgcx.profile_activity("LLVM_module_passes");
time_ext(config.time_passes,
None,
&format!("llvm module passes [{}]", module_name.unwrap()),
config: &ModuleConfig)
-> Result<CompiledModule, FatalError>
{
- let _timer = cgcx.profile_activity(ProfileCategory::Codegen, "codegen");
+ let _timer = cgcx.profile_activity("codegen");
{
let llmod = module.module_llvm.llmod();
let llcx = &*module.module_llvm.llcx;
if write_bc || config.emit_bc_compressed || config.embed_bitcode {
- let _timer = cgcx.profile_activity(ProfileCategory::Codegen, "LLVM_make_bitcode");
+ let _timer = cgcx.profile_activity("LLVM_make_bitcode");
let thin = ThinBuffer::new(llmod);
let data = thin.data();
if write_bc {
- let _timer = cgcx.profile_activity(ProfileCategory::Codegen, "LLVM_emit_bitcode");
+ let _timer = cgcx.profile_activity("LLVM_emit_bitcode");
if let Err(e) = fs::write(&bc_out, data) {
let msg = format!("failed to write bytecode to {}: {}", bc_out.display(), e);
diag_handler.err(&msg);
}
if config.embed_bitcode {
- let _timer = cgcx.profile_activity(ProfileCategory::Codegen, "LLVM_embed_bitcode");
+ let _timer = cgcx.profile_activity("LLVM_embed_bitcode");
embed_bitcode(cgcx, llcx, llmod, Some(data));
}
if config.emit_bc_compressed {
- let _timer = cgcx.profile_activity(ProfileCategory::Codegen,
- "LLVM_compress_bitcode");
+ let _timer = cgcx.profile_activity("LLVM_compress_bitcode");
let dst = bc_out.with_extension(RLIB_BYTECODE_EXTENSION);
let data = bytecode::encode(&module.name, data);
if let Err(e) = fs::write(&dst, data) {
time_ext(config.time_passes, None, &format!("codegen passes [{}]", module_name.unwrap()),
|| -> Result<(), FatalError> {
if config.emit_ir {
- let _timer = cgcx.profile_activity(ProfileCategory::Codegen, "LLVM_emit_ir");
+ let _timer = cgcx.profile_activity("LLVM_emit_ir");
let out = cgcx.output_filenames.temp_path(OutputType::LlvmAssembly, module_name);
let out_c = path_to_c_string(&out);
}
if config.emit_asm || asm_to_obj {
- let _timer = cgcx.profile_activity(ProfileCategory::Codegen, "LLVM_emit_asm");
+ let _timer = cgcx.profile_activity("LLVM_emit_asm");
let path = cgcx.output_filenames.temp_path(OutputType::Assembly, module_name);
// We can't use the same module for asm and binary output, because that triggers
}
if write_obj {
- let _timer = cgcx.profile_activity(ProfileCategory::Codegen, "LLVM_emit_obj");
+ let _timer = cgcx.profile_activity("LLVM_emit_obj");
with_codegen(tm, llmod, config.no_builtins, |cpm| {
write_output_file(diag_handler, tm, cpm, llmod, &obj_out,
llvm::FileType::ObjectFile)
})?;
} else if asm_to_obj {
- let _timer = cgcx.profile_activity(ProfileCategory::Codegen, "LLVM_asm_to_obj");
+ let _timer = cgcx.profile_activity("LLVM_asm_to_obj");
let assembly = cgcx.output_filenames.temp_path(OutputType::Assembly, module_name);
run_assembler(cgcx, diag_handler, &assembly, &obj_out);
use rustc::session::Session;
use rustc::session::config::{OutputFilenames, OutputType, PrintRequest, OptLevel};
use rustc::ty::{self, TyCtxt};
-use rustc::util::profiling::ProfileCategory;
use rustc::util::common::ErrorReported;
use rustc_mir::monomorphize;
use rustc_codegen_ssa::ModuleCodegen;
// Run the linker on any artifacts that resulted from the LLVM run.
// This should produce either a finished executable or library.
- sess.profiler(|p| p.start_activity(ProfileCategory::Linking, "link_crate"));
+ sess.profiler(|p| p.start_activity("link_crate"));
time(sess, "linking", || {
back::link::link_binary(sess, &codegen_results,
outputs, &codegen_results.crate_name.as_str());
});
- sess.profiler(|p| p.end_activity(ProfileCategory::Linking, "link_crate"));
+ sess.profiler(|p| p.end_activity("link_crate"));
// Now that we won't touch anything in the incremental compilation directory
// any more, we can finalize it (which involves renaming it)
use rustc::hir::def_id::{CrateNum, LOCAL_CRATE};
use rustc::ty::TyCtxt;
use rustc::util::common::{time_depth, set_time_depth, print_time_passes_entry};
-use rustc::util::profiling::{ProfileCategory, SelfProfiler};
+use rustc::util::profiling::SelfProfiler;
use rustc_fs_util::link_or_copy;
use rustc_data_structures::svh::Svh;
use rustc_errors::{Handler, Level, DiagnosticBuilder, FatalError, DiagnosticId};
use syntax_pos::MultiSpan;
use syntax_pos::symbol::Symbol;
use jobserver::{Client, Acquired};
-use parking_lot::Mutex as PlMutex;
use std::any::Any;
use std::borrow::Cow;
}
pub struct ProfileGenericActivityTimer {
- profiler: Option<Arc<PlMutex<SelfProfiler>>>,
- category: ProfileCategory,
+ profiler: Option<Arc<SelfProfiler>>,
label: Cow<'static, str>,
}
impl ProfileGenericActivityTimer {
pub fn start(
- profiler: Option<Arc<PlMutex<SelfProfiler>>>,
- category: ProfileCategory,
+ profiler: Option<Arc<SelfProfiler>>,
label: Cow<'static, str>,
) -> ProfileGenericActivityTimer {
if let Some(profiler) = &profiler {
- let mut p = profiler.lock();
- p.start_activity(category, label.clone());
+ profiler.start_activity(label.clone());
}
ProfileGenericActivityTimer {
profiler,
- category,
label,
}
}
impl Drop for ProfileGenericActivityTimer {
fn drop(&mut self) {
if let Some(profiler) = &self.profiler {
- let mut p = profiler.lock();
- p.end_activity(self.category, self.label.clone());
+ profiler.end_activity(self.label.clone());
}
}
}
// Resources needed when running LTO
pub backend: B,
pub time_passes: bool,
- pub profiler: Option<Arc<PlMutex<SelfProfiler>>>,
+ pub profiler: Option<Arc<SelfProfiler>>,
pub lto: Lto,
pub no_landing_pads: bool,
pub save_temps: bool,
#[inline(never)]
#[cold]
- fn profiler_active<F: FnOnce(&mut SelfProfiler) -> ()>(&self, f: F) {
+ fn profiler_active<F: FnOnce(&SelfProfiler) -> ()>(&self, f: F) {
match &self.profiler {
None => bug!("profiler_active() called but there was no profiler active"),
Some(profiler) => {
- let mut p = profiler.lock();
-
- f(&mut p);
+ f(&*profiler);
}
}
}
#[inline(always)]
- pub fn profile<F: FnOnce(&mut SelfProfiler) -> ()>(&self, f: F) {
+ pub fn profile<F: FnOnce(&SelfProfiler) -> ()>(&self, f: F) {
if unlikely!(self.profiler.is_some()) {
self.profiler_active(f)
}
pub fn profile_activity(
&self,
- category: ProfileCategory,
label: impl Into<Cow<'static, str>>,
) -> ProfileGenericActivityTimer {
- ProfileGenericActivityTimer::start(self.profiler.clone(), category, label.into())
+ ProfileGenericActivityTimer::start(self.profiler.clone(), label.into())
}
}
needs_thin_lto: Vec<(String, B::ThinBuffer)>,
import_only_modules: Vec<(SerializedModule<B::ModuleBuffer>, WorkProduct)>
) -> Vec<(WorkItem<B>, u64)> {
- cgcx.profile(|p| p.start_activity(ProfileCategory::Linking, "codegen_run_lto"));
+ cgcx.profile(|p| p.start_activity("codegen_run_lto"));
let (lto_modules, copy_jobs) = if !needs_fat_lto.is_empty() {
assert!(needs_thin_lto.is_empty());
}), 0)
})).collect();
- cgcx.profile(|p| p.end_activity(ProfileCategory::Linking, "codegen_run_lto"));
+ cgcx.profile(|p| p.end_activity("codegen_run_lto"));
result
}
// surface that there was an error in this worker.
bomb.result = {
let label = work.name();
- cgcx.profile(|p| p.start_activity(ProfileCategory::Codegen, label.clone()));
+ cgcx.profile(|p| p.start_activity(label.clone()));
let result = execute_work_item(&cgcx, work).ok();
- cgcx.profile(|p| p.end_activity(ProfileCategory::Codegen, label));
+ cgcx.profile(|p| p.end_activity(label));
result
};
use rustc::ty::query::Providers;
use rustc::middle::cstore::{self, LinkagePreference};
use rustc::util::common::{time, print_time_passes_entry};
-use rustc::util::profiling::ProfileCategory;
use rustc::session::config::{self, EntryFnType, Lto};
use rustc::session::Session;
use rustc_mir::monomorphize::item::DefPathBasedNames;
let cgu_name_builder = &mut CodegenUnitNameBuilder::new(tcx);
// Codegen the metadata.
- tcx.sess.profiler(|p| p.start_activity(ProfileCategory::Codegen, "codegen crate metadata"));
+ tcx.sess.profiler(|p| p.start_activity("codegen crate metadata"));
let metadata_cgu_name = cgu_name_builder.build_cgu_name(LOCAL_CRATE,
&["crate"],
let metadata = time(tcx.sess, "write metadata", || {
backend.write_metadata(tcx, &mut metadata_llvm_module)
});
- tcx.sess.profiler(|p| p.end_activity(ProfileCategory::Codegen, "codegen crate metadata"));
+ tcx.sess.profiler(|p| p.end_activity("codegen crate metadata"));
let metadata_module = ModuleCodegen {
name: metadata_cgu_name,
match cgu_reuse {
CguReuse::No => {
- tcx.sess.profiler(|p| p.start_activity(ProfileCategory::Codegen,
- format!("codegen {}", cgu.name())));
+ tcx.sess.profiler(|p| p.start_activity(format!("codegen {}", cgu.name())));
let start_time = Instant::now();
let stats = backend.compile_codegen_unit(tcx, *cgu.name());
all_stats.extend(stats);
total_codegen_time += start_time.elapsed();
- tcx.sess.profiler(|p| p.end_activity(ProfileCategory::Codegen,
- format!("codegen {}", cgu.name())));
+ tcx.sess.profiler(|p| p.end_activity(format!("codegen {}", cgu.name())));
false
}
CguReuse::PreLto => {
profile::dump(&compiler.sess, "profile_queries".to_string())
}
- if compiler.sess.opts.debugging_opts.self_profile {
- compiler.sess.profiler(|p| p.dump_raw_events(&compiler.sess.opts));
- }
-
r
}
.set_continue_after_error(sess.opts.debugging_opts.continue_parse_after_error);
hygiene::set_default_edition(sess.edition());
- sess.profiler(|p| p.start_activity(ProfileCategory::Parsing, "parsing"));
+ sess.profiler(|p| p.start_activity("parsing"));
let krate = time(sess, "parsing", || match *input {
Input::File(ref file) => parse::parse_crate_from_file(file, &sess.parse_sess),
Input::Str {
ref name,
} => parse::parse_crate_from_source_str(name.clone(), input.clone(), &sess.parse_sess),
})?;
- sess.profiler(|p| p.end_activity(ProfileCategory::Parsing, "parsing"));
+ sess.profiler(|p| p.end_activity("parsing"));
sess.diagnostic().set_continue_after_error(true);
syntax_ext::register_builtins(&mut resolver, plugin_info.syntax_exts);
// Expand all macros
- sess.profiler(|p| p.start_activity(ProfileCategory::Expansion, "macro expansion"));
+ sess.profiler(|p| p.start_activity("macro expansion"));
krate = time(sess, "expansion", || {
// Windows dlls do not have rpaths, so they don't know how to find their
// dependencies. It's up to us to tell the system where to find all the
}
krate
});
- sess.profiler(|p| p.end_activity(ProfileCategory::Expansion, "macro expansion"));
+ sess.profiler(|p| p.end_activity("macro expansion"));
time(sess, "maybe building test harness", || {
syntax::test::modify_for_testing(
yield BoxedGlobalCtxt::initial_yield(());
box_region_allow_access!(for('gcx), (&'gcx GlobalCtxt<'gcx>), (gcx));
- gcx.queries.record_computed_queries(sess);
-
if sess.opts.debugging_opts.query_stats {
gcx.queries.print_stats();
}
::rustc::middle::dependency_format::calculate(tcx)
});
- tcx.sess.profiler(|p| p.start_activity(ProfileCategory::Codegen, "codegen crate"));
+ tcx.sess.profiler(|p| p.start_activity("codegen crate"));
let codegen = time(tcx.sess, "codegen", move || codegen_backend.codegen_crate(tcx, rx));
- tcx.sess.profiler(|p| p.end_activity(ProfileCategory::Codegen, "codegen crate"));
+ tcx.sess.profiler(|p| p.end_activity("codegen crate"));
if log_enabled!(::log::Level::Info) {
println!("Post-codegen");
use rustc::ty::{self, Ty, TyCtxt};
use rustc::ty::query::Providers;
use rustc::util;
-use rustc::util::profiling::ProfileCategory;
use syntax_pos::Span;
use util::common::time;
pub fn check_crate<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>)
-> Result<(), ErrorReported>
{
- tcx.sess.profiler(|p| p.start_activity(ProfileCategory::TypeChecking, "type-check crate"));
+ tcx.sess.profiler(|p| p.start_activity("type-check crate"));
// this ensures that later parts of type checking can assume that items
// have valid types and not error
check_unused::check_crate(tcx);
check_for_entry_fn(tcx);
- tcx.sess.profiler(|p| p.end_activity(ProfileCategory::TypeChecking, "type-check crate"));
+ tcx.sess.profiler(|p| p.end_activity("type-check crate"));
if tcx.sess.err_count() == 0 {
Ok(())
Crate("lock_api"),
Crate("log"),
Crate("log_settings"),
+ Crate("measureme"),
Crate("memchr"),
Crate("memmap"),
Crate("memoffset"),