1 //! # Rust Compiler Self-Profiling
3 //! This module implements the basic framework for the compiler's self-
4 //! profiling support. It provides the `SelfProfiler` type which enables
5 //! recording "events". An event is something that starts and ends at a given
6 //! point in time and has an ID and a kind attached to it. This allows for
7 //! tracing the compiler's activity.
9 //! Internally this module uses the custom tailored [measureme][mm] crate for
10 //! efficiently recording events to disk in a compact format that can be
11 //! post-processed and analyzed by the suite of tools in the `measureme`
12 //! project. The highest priority for the tracing framework is on incurring as
13 //! little overhead as possible.
18 //! Events have a few properties:
20 //! - The `event_kind` designates the broad category of an event (e.g. does it
21 //! correspond to the execution of a query provider or to loading something
22 //! from the incr. comp. on-disk cache, etc).
23 //! - The `event_id` designates the query invocation or function call it
24 //! corresponds to, possibly including the query key or function arguments.
25 //! - Each event stores the ID of the thread it was recorded on.
26 //! - The timestamp stores beginning and end of the event, or the single point
27 //! in time it occurred at for "instant" events.
30 //! ## Event Filtering
32 //! Event generation can be filtered by event kind. Recording all possible
33 //! events generates a lot of data, much of which is not needed for most kinds
34 //! of analysis. So, in order to keep overhead as low as possible for a given
35 //! use case, the `SelfProfiler` will only record the kinds of events that
36 //! pass the filter specified as a command line argument to the compiler.
39 //! ## `event_id` Assignment
41 //! As far as `measureme` is concerned, `event_id`s are just strings. However,
42 //! it would incur too much overhead to generate and persist each `event_id`
43 //! string at the point where the event is recorded. In order to make this more
44 //! efficient `measureme` has two features:
46 //! - Strings can share their content, so that re-occurring parts don't have to
47 //! be copied over and over again. One allocates a string in `measureme` and
48 //! gets back a `StringId`. This `StringId` is then used to refer to that
49 //! string. `measureme` strings are actually DAGs of string components so that
50 //! arbitrary sharing of substrings can be done efficiently. This is useful
51 //! because `event_id`s contain lots of redundant text like query names or
52 //! def-path components.
54 //! - `StringId`s can be "virtual" which means that the client picks a numeric
55 //! ID according to some application-specific scheme and can later make that
56 //! ID be mapped to an actual string. This is used to cheaply generate
57 //! `event_id`s while the events actually occur, causing little timing
58 //! distortion, and then later map those `StringId`s, in bulk, to actual
59 //! `event_id` strings. This way the largest part of the tracing overhead is
60 //! localized to one contiguous chunk of time.
62 //! How are these `event_id`s generated in the compiler? For things that occur
63 //! infrequently (e.g. "generic activities"), we just allocate the string the
64 //! first time it is used and then keep the `StringId` in a hash table. This
65 //! is implemented in `SelfProfiler::get_or_alloc_cached_string()`.
67 //! For queries it gets more interesting: First we need a unique numeric ID for
68 //! each query invocation (the `QueryInvocationId`). This ID is used as the
69 //! virtual `StringId` we use as `event_id` for a given event. This ID has to
70 //! be available both when the query is executed and later, together with the
71 //! query key, when we allocate the actual `event_id` strings in bulk.
73 //! We could make the compiler generate and keep track of such an ID for each
74 //! query invocation but luckily we already have something that fits all
75 //! the requirements: the query's `DepNodeIndex`. So we use the numeric value
76 //! of the `DepNodeIndex` as `event_id` when recording the event and then,
77 //! just before the query context is dropped, we walk the entire query cache
78 //! (which stores the `DepNodeIndex` along with the query key for each
79 //! invocation) and allocate the corresponding strings together with a mapping
80 //! for `DepNodeIndex as StringId`.
82 //! [mm]: https://github.com/rust-lang/measureme/
use crate::fx::FxHashMap;

use std::borrow::Borrow;
use std::collections::hash_map::Entry;
use std::convert::Into;
use std::error::Error;
use std::fs;
use std::path::Path;
use std::process;
use std::sync::Arc;
use std::time::{Duration, Instant};

pub use measureme::EventId;
use measureme::{EventIdBuilder, Profiler, SerializableString, StringId};
use parking_lot::RwLock;
101 bitflags::bitflags! {
102 struct EventFilter: u32 {
103 const GENERIC_ACTIVITIES = 1 << 0;
104 const QUERY_PROVIDERS = 1 << 1;
105 const QUERY_CACHE_HITS = 1 << 2;
106 const QUERY_BLOCKED = 1 << 3;
107 const INCR_CACHE_LOADS = 1 << 4;
109 const QUERY_KEYS = 1 << 5;
110 const FUNCTION_ARGS = 1 << 6;
112 const INCR_RESULT_HASHING = 1 << 8;
113 const ARTIFACT_SIZES = 1 << 9;
115 const DEFAULT = Self::GENERIC_ACTIVITIES.bits |
116 Self::QUERY_PROVIDERS.bits |
117 Self::QUERY_BLOCKED.bits |
118 Self::INCR_CACHE_LOADS.bits |
119 Self::INCR_RESULT_HASHING.bits |
120 Self::ARTIFACT_SIZES.bits;
122 const ARGS = Self::QUERY_KEYS.bits | Self::FUNCTION_ARGS.bits;
126 // keep this in sync with the `-Z self-profile-events` help message in rustc_session/options.rs
127 const EVENT_FILTERS_BY_NAME: &[(&str, EventFilter)] = &[
128 ("none", EventFilter::empty()),
129 ("all", EventFilter::all()),
130 ("default", EventFilter::DEFAULT),
131 ("generic-activity", EventFilter::GENERIC_ACTIVITIES),
132 ("query-provider", EventFilter::QUERY_PROVIDERS),
133 ("query-cache-hit", EventFilter::QUERY_CACHE_HITS),
134 ("query-blocked", EventFilter::QUERY_BLOCKED),
135 ("incr-cache-load", EventFilter::INCR_CACHE_LOADS),
136 ("query-keys", EventFilter::QUERY_KEYS),
137 ("function-args", EventFilter::FUNCTION_ARGS),
138 ("args", EventFilter::ARGS),
139 ("llvm", EventFilter::LLVM),
140 ("incr-result-hashing", EventFilter::INCR_RESULT_HASHING),
141 ("artifact-sizes", EventFilter::ARTIFACT_SIZES),
/// Something that uniquely identifies a query invocation.
///
/// In practice this is the numeric value of the query's `DepNodeIndex`, used
/// as a virtual `StringId` so that `event_id` strings can be allocated in
/// bulk after the fact (see the module docs on `event_id` assignment).
pub struct QueryInvocationId(pub u32);
147 /// A reference to the SelfProfiler. It can be cloned and sent across thread
148 /// boundaries at will.
150 pub struct SelfProfilerRef {
151 // This field is `None` if self-profiling is disabled for the current
152 // compilation session.
153 profiler: Option<Arc<SelfProfiler>>,
155 // We store the filter mask directly in the reference because that doesn't
156 // cost anything and allows for filtering with checking if the profiler is
158 event_filter_mask: EventFilter,
160 // Print verbose generic activities to stdout
161 print_verbose_generic_activities: bool,
163 // Print extra verbose generic activities to stdout
164 print_extra_verbose_generic_activities: bool,
167 impl SelfProfilerRef {
169 profiler: Option<Arc<SelfProfiler>>,
170 print_verbose_generic_activities: bool,
171 print_extra_verbose_generic_activities: bool,
172 ) -> SelfProfilerRef {
173 // If there is no SelfProfiler then the filter mask is set to NONE,
174 // ensuring that nothing ever tries to actually access it.
175 let event_filter_mask =
176 profiler.as_ref().map_or(EventFilter::empty(), |p| p.event_filter_mask);
181 print_verbose_generic_activities,
182 print_extra_verbose_generic_activities,
186 // This shim makes sure that calls only get executed if the filter mask
187 // lets them pass. It also contains some trickery to make sure that
188 // code is optimized for non-profiling compilation sessions, i.e. anything
189 // past the filter check is never inlined so it doesn't clutter the fast
192 fn exec<F>(&self, event_filter: EventFilter, f: F) -> TimingGuard<'_>
194 F: for<'a> FnOnce(&'a SelfProfiler) -> TimingGuard<'a>,
197 fn cold_call<F>(profiler_ref: &SelfProfilerRef, f: F) -> TimingGuard<'_>
199 F: for<'a> FnOnce(&'a SelfProfiler) -> TimingGuard<'a>,
201 let profiler = profiler_ref.profiler.as_ref().unwrap();
205 if unlikely!(self.event_filter_mask.contains(event_filter)) {
212 /// Start profiling a verbose generic activity. Profiling continues until the
213 /// VerboseTimingGuard returned from this call is dropped. In addition to recording
214 /// a measureme event, "verbose" generic activities also print a timing entry to
215 /// stdout if the compiler is invoked with -Ztime or -Ztime-passes.
216 pub fn verbose_generic_activity<'a>(
218 event_label: &'static str,
219 ) -> VerboseTimingGuard<'a> {
221 if self.print_verbose_generic_activities { Some(event_label.to_owned()) } else { None };
223 VerboseTimingGuard::start(message, self.generic_activity(event_label))
226 /// Start profiling an extra verbose generic activity. Profiling continues until the
227 /// VerboseTimingGuard returned from this call is dropped. In addition to recording
228 /// a measureme event, "extra verbose" generic activities also print a timing entry to
229 /// stdout if the compiler is invoked with -Ztime-passes.
230 pub fn extra_verbose_generic_activity<'a, A>(
232 event_label: &'static str,
234 ) -> VerboseTimingGuard<'a>
236 A: Borrow<str> + Into<String>,
238 let message = if self.print_extra_verbose_generic_activities {
239 Some(format!("{}({})", event_label, event_arg.borrow()))
244 VerboseTimingGuard::start(message, self.generic_activity_with_arg(event_label, event_arg))
247 /// Start profiling a generic activity. Profiling continues until the
248 /// TimingGuard returned from this call is dropped.
250 pub fn generic_activity(&self, event_label: &'static str) -> TimingGuard<'_> {
251 self.exec(EventFilter::GENERIC_ACTIVITIES, |profiler| {
252 let event_label = profiler.get_or_alloc_cached_string(event_label);
253 let event_id = EventId::from_label(event_label);
254 TimingGuard::start(profiler, profiler.generic_activity_event_kind, event_id)
258 /// Start profiling with some event filter for a given event. Profiling continues until the
259 /// TimingGuard returned from this call is dropped.
261 pub fn generic_activity_with_event_id(&self, event_id: EventId) -> TimingGuard<'_> {
262 self.exec(EventFilter::GENERIC_ACTIVITIES, |profiler| {
263 TimingGuard::start(profiler, profiler.generic_activity_event_kind, event_id)
267 /// Start profiling a generic activity. Profiling continues until the
268 /// TimingGuard returned from this call is dropped.
270 pub fn generic_activity_with_arg<A>(
272 event_label: &'static str,
276 A: Borrow<str> + Into<String>,
278 self.exec(EventFilter::GENERIC_ACTIVITIES, |profiler| {
279 let builder = EventIdBuilder::new(&profiler.profiler);
280 let event_label = profiler.get_or_alloc_cached_string(event_label);
281 let event_id = if profiler.event_filter_mask.contains(EventFilter::FUNCTION_ARGS) {
282 let event_arg = profiler.get_or_alloc_cached_string(event_arg);
283 builder.from_label_and_arg(event_label, event_arg)
285 builder.from_label(event_label)
287 TimingGuard::start(profiler, profiler.generic_activity_event_kind, event_id)
291 /// Record the size of an artifact that the compiler produces
293 /// `artifact_kind` is the class of artifact (e.g., query_cache, object_file, etc.)
294 /// `artifact_name` is an identifier to the specific artifact being stored (usually a filename)
296 pub fn artifact_size<A>(&self, artifact_kind: &str, artifact_name: A, size: u64)
298 A: Borrow<str> + Into<String>,
300 drop(self.exec(EventFilter::ARTIFACT_SIZES, |profiler| {
301 let builder = EventIdBuilder::new(&profiler.profiler);
302 let event_label = profiler.get_or_alloc_cached_string(artifact_kind);
303 let event_arg = profiler.get_or_alloc_cached_string(artifact_name);
304 let event_id = builder.from_label_and_arg(event_label, event_arg);
305 let thread_id = get_thread_id();
307 profiler.profiler.record_integer_event(
308 profiler.artifact_size_event_kind,
319 pub fn generic_activity_with_args(
321 event_label: &'static str,
322 event_args: &[String],
323 ) -> TimingGuard<'_> {
324 self.exec(EventFilter::GENERIC_ACTIVITIES, |profiler| {
325 let builder = EventIdBuilder::new(&profiler.profiler);
326 let event_label = profiler.get_or_alloc_cached_string(event_label);
327 let event_id = if profiler.event_filter_mask.contains(EventFilter::FUNCTION_ARGS) {
328 let event_args: Vec<_> = event_args
330 .map(|s| profiler.get_or_alloc_cached_string(&s[..]))
332 builder.from_label_and_args(event_label, &event_args)
334 builder.from_label(event_label)
336 TimingGuard::start(profiler, profiler.generic_activity_event_kind, event_id)
340 /// Start profiling a query provider. Profiling continues until the
341 /// TimingGuard returned from this call is dropped.
343 pub fn query_provider(&self) -> TimingGuard<'_> {
344 self.exec(EventFilter::QUERY_PROVIDERS, |profiler| {
345 TimingGuard::start(profiler, profiler.query_event_kind, EventId::INVALID)
349 /// Record a query in-memory cache hit.
351 pub fn query_cache_hit(&self, query_invocation_id: QueryInvocationId) {
352 self.instant_query_event(
353 |profiler| profiler.query_cache_hit_event_kind,
355 EventFilter::QUERY_CACHE_HITS,
359 /// Start profiling a query being blocked on a concurrent execution.
360 /// Profiling continues until the TimingGuard returned from this call is
363 pub fn query_blocked(&self) -> TimingGuard<'_> {
364 self.exec(EventFilter::QUERY_BLOCKED, |profiler| {
365 TimingGuard::start(profiler, profiler.query_blocked_event_kind, EventId::INVALID)
369 /// Start profiling how long it takes to load a query result from the
370 /// incremental compilation on-disk cache. Profiling continues until the
371 /// TimingGuard returned from this call is dropped.
373 pub fn incr_cache_loading(&self) -> TimingGuard<'_> {
374 self.exec(EventFilter::INCR_CACHE_LOADS, |profiler| {
377 profiler.incremental_load_result_event_kind,
383 /// Start profiling how long it takes to hash query results for incremental compilation.
384 /// Profiling continues until the TimingGuard returned from this call is dropped.
386 pub fn incr_result_hashing(&self) -> TimingGuard<'_> {
387 self.exec(EventFilter::INCR_RESULT_HASHING, |profiler| {
390 profiler.incremental_result_hashing_event_kind,
397 fn instant_query_event(
399 event_kind: fn(&SelfProfiler) -> StringId,
400 query_invocation_id: QueryInvocationId,
401 event_filter: EventFilter,
403 drop(self.exec(event_filter, |profiler| {
404 let event_id = StringId::new_virtual(query_invocation_id.0);
405 let thread_id = get_thread_id();
407 profiler.profiler.record_instant_event(
408 event_kind(profiler),
409 EventId::from_virtual(event_id),
417 pub fn with_profiler(&self, f: impl FnOnce(&SelfProfiler)) {
418 if let Some(profiler) = &self.profiler {
423 /// Gets a `StringId` for the given string. This method makes sure that
424 /// any strings going through it will only be allocated once in the
426 /// Returns `None` if the self-profiling is not enabled.
427 pub fn get_or_alloc_cached_string(&self, s: &str) -> Option<StringId> {
428 self.profiler.as_ref().map(|p| p.get_or_alloc_cached_string(s))
432 pub fn enabled(&self) -> bool {
433 self.profiler.is_some()
437 pub fn llvm_recording_enabled(&self) -> bool {
438 self.event_filter_mask.contains(EventFilter::LLVM)
441 pub fn get_self_profiler(&self) -> Option<Arc<SelfProfiler>> {
442 self.profiler.clone()
446 pub struct SelfProfiler {
448 event_filter_mask: EventFilter,
450 string_cache: RwLock<FxHashMap<String, StringId>>,
452 query_event_kind: StringId,
453 generic_activity_event_kind: StringId,
454 incremental_load_result_event_kind: StringId,
455 incremental_result_hashing_event_kind: StringId,
456 query_blocked_event_kind: StringId,
457 query_cache_hit_event_kind: StringId,
458 artifact_size_event_kind: StringId,
463 output_directory: &Path,
464 crate_name: Option<&str>,
465 event_filters: &Option<Vec<String>>,
466 ) -> Result<SelfProfiler, Box<dyn Error + Send + Sync>> {
467 fs::create_dir_all(output_directory)?;
469 let crate_name = crate_name.unwrap_or("unknown-crate");
470 let filename = format!("{}-{}.rustc_profile", crate_name, process::id());
471 let path = output_directory.join(&filename);
472 let profiler = Profiler::new(&path)?;
474 let query_event_kind = profiler.alloc_string("Query");
475 let generic_activity_event_kind = profiler.alloc_string("GenericActivity");
476 let incremental_load_result_event_kind = profiler.alloc_string("IncrementalLoadResult");
477 let incremental_result_hashing_event_kind =
478 profiler.alloc_string("IncrementalResultHashing");
479 let query_blocked_event_kind = profiler.alloc_string("QueryBlocked");
480 let query_cache_hit_event_kind = profiler.alloc_string("QueryCacheHit");
481 let artifact_size_event_kind = profiler.alloc_string("ArtifactSize");
483 let mut event_filter_mask = EventFilter::empty();
485 if let Some(ref event_filters) = *event_filters {
486 let mut unknown_events = vec![];
487 for item in event_filters {
488 if let Some(&(_, mask)) =
489 EVENT_FILTERS_BY_NAME.iter().find(|&(name, _)| name == item)
491 event_filter_mask |= mask;
493 unknown_events.push(item.clone());
497 // Warn about any unknown event names
498 if !unknown_events.is_empty() {
499 unknown_events.sort();
500 unknown_events.dedup();
503 "Unknown self-profiler events specified: {}. Available options are: {}.",
504 unknown_events.join(", "),
505 EVENT_FILTERS_BY_NAME
507 .map(|&(name, _)| name.to_string())
513 event_filter_mask = EventFilter::DEFAULT;
519 string_cache: RwLock::new(FxHashMap::default()),
521 generic_activity_event_kind,
522 incremental_load_result_event_kind,
523 incremental_result_hashing_event_kind,
524 query_blocked_event_kind,
525 query_cache_hit_event_kind,
526 artifact_size_event_kind,
530 /// Allocates a new string in the profiling data. Does not do any caching
531 /// or deduplication.
532 pub fn alloc_string<STR: SerializableString + ?Sized>(&self, s: &STR) -> StringId {
533 self.profiler.alloc_string(s)
536 /// Gets a `StringId` for the given string. This method makes sure that
537 /// any strings going through it will only be allocated once in the
539 pub fn get_or_alloc_cached_string<A>(&self, s: A) -> StringId
541 A: Borrow<str> + Into<String>,
543 // Only acquire a read-lock first since we assume that the string is
544 // already present in the common case.
546 let string_cache = self.string_cache.read();
548 if let Some(&id) = string_cache.get(s.borrow()) {
553 let mut string_cache = self.string_cache.write();
554 // Check if the string has already been added in the small time window
555 // between dropping the read lock and acquiring the write lock.
556 match string_cache.entry(s.into()) {
557 Entry::Occupied(e) => *e.get(),
558 Entry::Vacant(e) => {
559 let string_id = self.profiler.alloc_string(&e.key()[..]);
565 pub fn map_query_invocation_id_to_string(&self, from: QueryInvocationId, to: StringId) {
566 let from = StringId::new_virtual(from.0);
567 self.profiler.map_virtual_to_concrete_string(from, to);
570 pub fn bulk_map_query_invocation_id_to_single_string<I>(&self, from: I, to: StringId)
572 I: Iterator<Item = QueryInvocationId> + ExactSizeIterator,
574 let from = from.map(|qid| StringId::new_virtual(qid.0));
575 self.profiler.bulk_map_virtual_to_single_concrete_string(from, to);
578 pub fn query_key_recording_enabled(&self) -> bool {
579 self.event_filter_mask.contains(EventFilter::QUERY_KEYS)
582 pub fn event_id_builder(&self) -> EventIdBuilder<'_> {
583 EventIdBuilder::new(&self.profiler)
588 pub struct TimingGuard<'a>(Option<measureme::TimingGuard<'a>>);
590 impl<'a> TimingGuard<'a> {
593 profiler: &'a SelfProfiler,
594 event_kind: StringId,
596 ) -> TimingGuard<'a> {
597 let thread_id = get_thread_id();
598 let raw_profiler = &profiler.profiler;
600 raw_profiler.start_recording_interval_event(event_kind, event_id, thread_id);
601 TimingGuard(Some(timing_guard))
605 pub fn finish_with_query_invocation_id(self, query_invocation_id: QueryInvocationId) {
606 if let Some(guard) = self.0 {
608 let event_id = StringId::new_virtual(query_invocation_id.0);
609 let event_id = EventId::from_virtual(event_id);
610 guard.finish_with_override_event_id(event_id);
616 pub fn none() -> TimingGuard<'a> {
621 pub fn run<R>(self, f: impl FnOnce() -> R) -> R {
628 pub struct VerboseTimingGuard<'a> {
629 start_and_message: Option<(Instant, Option<usize>, String)>,
630 _guard: TimingGuard<'a>,
633 impl<'a> VerboseTimingGuard<'a> {
634 pub fn start(message: Option<String>, _guard: TimingGuard<'a>) -> Self {
637 start_and_message: message.map(|msg| (Instant::now(), get_resident_set_size(), msg)),
642 pub fn run<R>(self, f: impl FnOnce() -> R) -> R {
648 impl Drop for VerboseTimingGuard<'_> {
650 if let Some((start_time, start_rss, ref message)) = self.start_and_message {
651 let end_rss = get_resident_set_size();
652 print_time_passes_entry(&message, start_time.elapsed(), start_rss, end_rss);
657 pub fn print_time_passes_entry(
660 start_rss: Option<usize>,
661 end_rss: Option<usize>,
663 let rss_to_mb = |rss| (rss as f64 / 1_000_000.0).round() as usize;
664 let rss_change_to_mb = |rss| (rss as f64 / 1_000_000.0).round() as i128;
666 let mem_string = match (start_rss, end_rss) {
667 (Some(start_rss), Some(end_rss)) => {
668 let change_rss = end_rss as i128 - start_rss as i128;
671 "; rss: {:>4}MB -> {:>4}MB ({:>+5}MB)",
672 rss_to_mb(start_rss),
674 rss_change_to_mb(change_rss),
677 (Some(start_rss), None) => format!("; rss start: {:>4}MB", rss_to_mb(start_rss)),
678 (None, Some(end_rss)) => format!("; rss end: {:>4}MB", rss_to_mb(end_rss)),
679 (None, None) => String::new(),
682 eprintln!("time: {:>7}{}\t{}", duration_to_secs_str(dur), mem_string, what);
// Hack up our own formatting for the duration to make it easier for scripts
// to parse (always use the same number of decimal places and the same unit).
pub fn duration_to_secs_str(dur: std::time::Duration) -> String {
    format!("{:.3}", dur.as_secs_f64())
}
691 fn get_thread_id() -> u32 {
692 std::thread::current().id().as_u64().get() as u32
// Platform-specific implementations of `get_resident_set_size()` (current
// resident-set size of this process in bytes, or `None` if unavailable).
// NOTE(review): these arms belong to a `cfg_if!` block whose opening lines
// (`cfg_if! { if #[cfg(windows)] {`) appear to be missing from this excerpt.
698 pub fn get_resident_set_size() -> Option<usize> {
699 use std::mem::{self, MaybeUninit};
700 use winapi::shared::minwindef::DWORD;
701 use winapi::um::processthreadsapi::GetCurrentProcess;
702 use winapi::um::psapi::{GetProcessMemoryInfo, PROCESS_MEMORY_COUNTERS};
// Ask the OS to fill in this process's memory counters.
704 let mut pmc = MaybeUninit::<PROCESS_MEMORY_COUNTERS>::uninit();
// NOTE(review): the `match unsafe { ... }` wrapper and the zero-return
// (failure) arm around this call seem to have been dropped from the excerpt.
706 GetProcessMemoryInfo(GetCurrentProcess(), pmc.as_mut_ptr(), mem::size_of_val(&pmc) as DWORD)
// On success the struct has been initialized by the call above.
710 let pmc = unsafe { pmc.assume_init() };
711 Some(pmc.WorkingSetSize as usize)
715 } else if #[cfg(unix)] {
// Unix: read the RSS page count from /proc/self/statm.
716 pub fn get_resident_set_size() -> Option<usize> {
// NOTE(review): the definition of `field` is missing here — upstream selects
// the second whitespace-separated field (`let field = 1;`); confirm.
718 let contents = fs::read("/proc/self/statm").ok()?;
719 let contents = String::from_utf8(contents).ok()?;
720 let s = contents.split_whitespace().nth(field)?;
721 let npages = s.parse::<usize>().ok()?;
725 pub fn get_resident_set_size() -> Option<usize> {