Don't use ExpnKind::descr to get the name of a bang macro.
diff --git a/src/librustc_data_structures/profiling.rs b/src/librustc_data_structures/profiling.rs
index f9bfe5a1652ae82d09490f8830dd5721458641f8..44cef727f034be23d7884972cbda659bd5a8bcec 100644
--- a/src/librustc_data_structures/profiling.rs
+++ b/src/librustc_data_structures/profiling.rs
@@ -1,13 +1,99 @@
+//! # Rust Compiler Self-Profiling
+//!
+//! This module implements the basic framework for the compiler's self-
+//! profiling support. It provides the `SelfProfiler` type which enables
+//! recording "events". An event is something that starts and ends at a given
+//! point in time and has an ID and a kind attached to it. This allows for
+//! tracing the compiler's activity.
+//!
+//! Internally this module uses the custom tailored [measureme][mm] crate for
+//! efficiently recording events to disk in a compact format that can be
+//! post-processed and analyzed by the suite of tools in the `measureme`
+//! project. The highest priority for the tracing framework is to incur as
+//! little overhead as possible.
+//!
+//!
+//! ## Event Overview
+//!
+//! Events have a few properties:
+//!
+//! - The `event_kind` designates the broad category of an event (e.g. does it
+//!   correspond to the execution of a query provider or to loading something
+//!   from the incr. comp. on-disk cache, etc).
+//! - The `event_id` designates the query invocation or function call it
+//!   corresponds to, possibly including the query key or function arguments.
+//! - Each event stores the ID of the thread it was recorded on.
+//! - The timestamp stores beginning and end of the event, or the single point
+//!   in time it occurred at for "instant" events.
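+//!
+//! Conceptually (this is not the actual `measureme` wire format; `Timestamp`
+//! is just a stand-in), an event carries roughly this information:
+//!
+//! ```ignore
+//! struct Event {
+//!     event_kind: StringId, // broad category, e.g. "query provider executed"
+//!     event_id: EventId,    // specific invocation, e.g. query name plus key
+//!     thread_id: u32,       // thread the event was recorded on
+//!     start: Timestamp,     // beginning of the event
+//!     end: Timestamp,       // equal to `start` for "instant" events
+//! }
+//! ```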
+//!
+//!
+//! ## Event Filtering
+//!
+//! Event generation can be filtered by event kind. Recording all possible
+//! events generates a lot of data, much of which is not needed for most kinds
+//! of analysis. So, in order to keep overhead as low as possible for a given
+//! use case, the `SelfProfiler` will only record the kinds of events that
+//! pass the filter specified as a command line argument to the compiler.
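+//!
+//! For example, a profiling session that additionally records query keys
+//! might be started with something along the lines of:
+//!
+//! ```text
+//! rustc -Z self-profile -Z self-profile-events=default,query-keys foo.rs
+//! ```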
+//!
+//!
+//! ## `event_id` Assignment
+//!
+//! As far as `measureme` is concerned, `event_id`s are just strings. However,
+//! it would incur too much overhead to generate and persist each `event_id`
+//! string at the point where the event is recorded. In order to make this more
+//! efficient `measureme` has two features:
+//!
+//! - Strings can share their content, so that recurring parts don't have to
+//!   be copied over and over again. One allocates a string in `measureme` and
+//!   gets back a `StringId`. This `StringId` is then used to refer to that
+//!   string. `measureme` strings are actually DAGs of string components so that
+//!   arbitrary sharing of substrings can be done efficiently. This is useful
+//!   because `event_id`s contain lots of redundant text like query names or
+//!   def-path components.
+//!
+//! - `StringId`s can be "virtual" which means that the client picks a numeric
+//!   ID according to some application-specific scheme and can later make that
+//!   ID be mapped to an actual string. This is used to cheaply generate
+//!   `event_id`s while the events actually occur, causing little timing
+//!   distortion, and then later map those `StringId`s, in bulk, to actual
+//!   `event_id` strings. This way the largest part of the tracing overhead is
+//!   localized to one contiguous chunk of time.
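+//!
+//! A rough sketch of how the two features fit together (the `profiler`
+//! methods used here are defined on `SelfProfiler` further down in this
+//! file, and the `42` is an arbitrary example ID):
+//!
+//! ```ignore
+//! // While the event happens, it is tagged only with a cheap virtual ID
+//! // derived from the query's `DepNodeIndex`...
+//! let invocation = QueryInvocationId(42);
+//! let _event_id = EventId::from_virtual(StringId::new_virtual(invocation.0));
+//!
+//! // ...and only afterwards, in bulk, are the virtual IDs resolved to
+//! // concrete, deduplicated strings.
+//! let name: StringId = profiler.get_or_alloc_cached_string("typeck");
+//! profiler.map_query_invocation_id_to_string(invocation, name);
+//! ```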
+//!
+//! How are these `event_id`s generated in the compiler? For things that occur
+//! infrequently (e.g. "generic activities"), we just allocate the string the
+//! first time it is used and then keep the `StringId` in a hash table. This
+//! is implemented in `SelfProfiler::get_or_alloc_cached_string()`.
+//!
+//! For queries it gets more interesting: First we need a unique numeric ID for
+//! each query invocation (the `QueryInvocationId`). This ID is used as the
+//! virtual `StringId` we use as `event_id` for a given event. This ID has to
+//! be available both when the query is executed and later, together with the
+//! query key, when we allocate the actual `event_id` strings in bulk.
+//!
+//! We could make the compiler generate and keep track of such an ID for each
+//! query invocation, but luckily we already have something that fits all the
+//! requirements: the query's `DepNodeIndex`. So we use the numeric value
+//! of the `DepNodeIndex` as `event_id` when recording the event and then,
+//! just before the query context is dropped, we walk the entire query cache
+//! (which stores the `DepNodeIndex` along with the query key for each
+//! invocation) and allocate the corresponding strings together with a mapping
+//! from `DepNodeIndex` to `StringId`.
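+//!
+//! In pseudo-code, that later pass looks roughly like this (the real walk
+//! over the query caches lives in the query engine, not in this module, and
+//! `as_u32()` plus the `Debug`-formatted key are only stand-ins):
+//!
+//! ```ignore
+//! for (query_key, dep_node_index) in query_cache.iter() {
+//!     let key_string = profiler.alloc_string(&format!("{:?}", query_key)[..]);
+//!     profiler.map_query_invocation_id_to_string(
+//!         QueryInvocationId(dep_node_index.as_u32()),
+//!         key_string,
+//!     );
+//! }
+//! ```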
+//!
+//! [mm]: https://github.com/rust-lang/measureme/
+
+use crate::fx::FxHashMap;
+
 use std::error::Error;
 use std::fs;
-use std::mem::{self, Discriminant};
 use std::path::Path;
 use std::process;
 use std::sync::Arc;
 use std::thread::ThreadId;
+use std::time::{Duration, Instant};
 use std::u32;
 
-use measureme::{StringId};
+use measureme::{EventId, EventIdBuilder, SerializableString, StringId};
+use parking_lot::RwLock;
 
 /// MmapSerializationSink is faster on macOS and Linux
 /// but FileSerializationSink is faster on Windows
 
 type Profiler = measureme::Profiler<SerializationSink>;
 
-pub trait QueryName: Sized + Copy {
-    fn discriminant(self) -> Discriminant<Self>;
-    fn as_str(self) -> &'static str;
-}
-
 #[derive(Clone, Copy, Debug, PartialEq, Eq, Ord, PartialOrd)]
 pub enum ProfileCategory {
     Parsing,
@@ -42,6 +123,8 @@ struct EventFilter: u32 {
         const QUERY_BLOCKED      = 1 << 3;
         const INCR_CACHE_LOADS   = 1 << 4;
 
+        const QUERY_KEYS         = 1 << 5;
+
         const DEFAULT = Self::GENERIC_ACTIVITIES.bits |
                         Self::QUERY_PROVIDERS.bits |
                         Self::QUERY_BLOCKED.bits |
@@ -53,20 +136,25 @@ struct EventFilter: u32 {
     }
 }
 
+// keep this in sync with the `-Z self-profile-events` help message in librustc_session/options.rs
 const EVENT_FILTERS_BY_NAME: &[(&str, EventFilter)] = &[
     ("none", EventFilter::NONE),
     ("all", EventFilter::ALL),
+    ("default", EventFilter::DEFAULT),
     ("generic-activity", EventFilter::GENERIC_ACTIVITIES),
     ("query-provider", EventFilter::QUERY_PROVIDERS),
     ("query-cache-hit", EventFilter::QUERY_CACHE_HITS),
-    ("query-blocked" , EventFilter::QUERY_BLOCKED),
+    ("query-blocked", EventFilter::QUERY_BLOCKED),
     ("incr-cache-load", EventFilter::INCR_CACHE_LOADS),
+    ("query-keys", EventFilter::QUERY_KEYS),
 ];
 
 fn thread_id_to_u32(tid: ThreadId) -> u32 {
-    unsafe { mem::transmute::<ThreadId, u64>(tid) as u32 }
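+    // Note: this relies on `ThreadId` being layout-compatible with `u64`,
+    // which the standard library does not guarantee.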
+    unsafe { std::mem::transmute::<ThreadId, u64>(tid) as u32 }
 }
 
+/// Something that uniquely identifies a query invocation.
+pub struct QueryInvocationId(pub u32);
 
 /// A reference to the SelfProfiler. It can be cloned and sent across thread
 /// boundaries at will.
@@ -80,21 +168,30 @@ pub struct SelfProfilerRef {
     // cost anything and allows for filtering without checking if the profiler
     // is actually enabled.
     event_filter_mask: EventFilter,
+
+    // Print verbose generic activities to stdout
+    print_verbose_generic_activities: bool,
+
+    // Print extra verbose generic activities to stdout
+    print_extra_verbose_generic_activities: bool,
 }
 
 impl SelfProfilerRef {
-
-    pub fn new(profiler: Option<Arc<SelfProfiler>>) -> SelfProfilerRef {
+    pub fn new(
+        profiler: Option<Arc<SelfProfiler>>,
+        print_verbose_generic_activities: bool,
+        print_extra_verbose_generic_activities: bool,
+    ) -> SelfProfilerRef {
         // If there is no SelfProfiler then the filter mask is set to NONE,
         // ensuring that nothing ever tries to actually access it.
-        let event_filter_mask = profiler
-            .as_ref()
-            .map(|p| p.event_filter_mask)
-            .unwrap_or(EventFilter::NONE);
+        let event_filter_mask =
+            profiler.as_ref().map(|p| p.event_filter_mask).unwrap_or(EventFilter::NONE);
 
         SelfProfilerRef {
             profiler,
             event_filter_mask,
+            print_verbose_generic_activities,
+            print_extra_verbose_generic_activities,
         }
     }
 
@@ -105,11 +202,13 @@ pub fn new(profiler: Option<Arc<SelfProfiler>>) -> SelfProfilerRef {
     // path.
     #[inline(always)]
     fn exec<F>(&self, event_filter: EventFilter, f: F) -> TimingGuard<'_>
-        where F: for<'a> FnOnce(&'a SelfProfiler) -> TimingGuard<'a>
+    where
+        F: for<'a> FnOnce(&'a SelfProfiler) -> TimingGuard<'a>,
     {
         #[inline(never)]
         fn cold_call<F>(profiler_ref: &SelfProfilerRef, f: F) -> TimingGuard<'_>
-            where F: for<'a> FnOnce(&'a SelfProfiler) -> TimingGuard<'a>
+        where
+            F: for<'a> FnOnce(&'a SelfProfiler) -> TimingGuard<'a>,
         {
             let profiler = profiler_ref.profiler.as_ref().unwrap();
             f(&**profiler)
@@ -122,36 +221,66 @@ fn cold_call<F>(profiler_ref: &SelfProfilerRef, f: F) -> TimingGuard<'_>
         }
     }
 
+    /// Start profiling a verbose generic activity. Profiling continues until the
+    /// VerboseTimingGuard returned from this call is dropped. In addition to recording
+    /// a measureme event, "verbose" generic activities also print a timing entry to
+    /// stdout if the compiler is invoked with -Ztime or -Ztime-passes.
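+    ///
+    /// A call site might look like this (the binding and activity name are
+    /// illustrative):
+    ///
+    /// ```ignore
+    /// prof.verbose_generic_activity("link_binary").run(|| {
+    ///     // ...work that should be timed and, with -Ztime, printed...
+    /// });
+    /// ```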
+    #[inline(always)]
+    pub fn verbose_generic_activity<'a>(
+        &'a self,
+        event_id: &'static str,
+    ) -> VerboseTimingGuard<'a> {
+        VerboseTimingGuard::start(
+            event_id,
+            self.print_verbose_generic_activities,
+            self.generic_activity(event_id),
+        )
+    }
+
+    /// Start profiling an extra verbose generic activity. Profiling continues until the
+    /// VerboseTimingGuard returned from this call is dropped. In addition to recording
+    /// a measureme event, "extra verbose" generic activities also print a timing entry to
+    /// stdout if the compiler is invoked with -Ztime-passes.
+    #[inline(always)]
+    pub fn extra_verbose_generic_activity<'a>(
+        &'a self,
+        event_id: &'a str,
+    ) -> VerboseTimingGuard<'a> {
+        // FIXME: This does not yet emit a measureme event
+        // because callers encode arguments into `event_id`.
+        VerboseTimingGuard::start(
+            event_id,
+            self.print_extra_verbose_generic_activities,
+            TimingGuard::none(),
+        )
+    }
+
     /// Start profiling a generic activity. Profiling continues until the
     /// TimingGuard returned from this call is dropped.
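+    ///
+    /// For example (the binding and activity name are illustrative):
+    ///
+    /// ```ignore
+    /// let _timer = prof.generic_activity("encode_crate_metadata");
+    /// // ...the timed work; recording stops when `_timer` is dropped...
+    /// ```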
     #[inline(always)]
-    pub fn generic_activity(&self, event_id: &str) -> TimingGuard<'_> {
+    pub fn generic_activity(&self, event_id: &'static str) -> TimingGuard<'_> {
         self.exec(EventFilter::GENERIC_ACTIVITIES, |profiler| {
-            let event_id = profiler.profiler.alloc_string(event_id);
-            TimingGuard::start(
-                profiler,
-                profiler.generic_activity_event_kind,
-                event_id
-            )
+            let event_id = profiler.get_or_alloc_cached_string(event_id);
+            let event_id = EventId::from_label(event_id);
+            TimingGuard::start(profiler, profiler.generic_activity_event_kind, event_id)
         })
     }
 
     /// Start profiling a query provider. Profiling continues until the
     /// TimingGuard returned from this call is dropped.
     #[inline(always)]
-    pub fn query_provider(&self, query_name: impl QueryName) -> TimingGuard<'_> {
+    pub fn query_provider(&self) -> TimingGuard<'_> {
         self.exec(EventFilter::QUERY_PROVIDERS, |profiler| {
-            let event_id = SelfProfiler::get_query_name_string_id(query_name);
-            TimingGuard::start(profiler, profiler.query_event_kind, event_id)
+            TimingGuard::start(profiler, profiler.query_event_kind, EventId::INVALID)
         })
     }
 
     /// Record a query in-memory cache hit.
     #[inline(always)]
-    pub fn query_cache_hit(&self, query_name: impl QueryName) {
+    pub fn query_cache_hit(&self, query_invocation_id: QueryInvocationId) {
         self.instant_query_event(
             |profiler| profiler.query_cache_hit_event_kind,
-            query_name,
+            query_invocation_id,
             EventFilter::QUERY_CACHE_HITS,
         );
     }
@@ -160,10 +289,9 @@ pub fn query_cache_hit(&self, query_name: impl QueryName) {
     /// Profiling continues until the TimingGuard returned from this call is
     /// dropped.
     #[inline(always)]
-    pub fn query_blocked(&self, query_name: impl QueryName) -> TimingGuard<'_> {
+    pub fn query_blocked(&self) -> TimingGuard<'_> {
         self.exec(EventFilter::QUERY_BLOCKED, |profiler| {
-            let event_id = SelfProfiler::get_query_name_string_id(query_name);
-            TimingGuard::start(profiler, profiler.query_blocked_event_kind, event_id)
+            TimingGuard::start(profiler, profiler.query_blocked_event_kind, EventId::INVALID)
         })
     }
 
@@ -171,13 +299,12 @@ pub fn query_blocked(&self, query_name: impl QueryName) -> TimingGuard<'_> {
     /// incremental compilation on-disk cache. Profiling continues until the
     /// TimingGuard returned from this call is dropped.
     #[inline(always)]
-    pub fn incr_cache_loading(&self, query_name: impl QueryName) -> TimingGuard<'_> {
+    pub fn incr_cache_loading(&self) -> TimingGuard<'_> {
         self.exec(EventFilter::INCR_CACHE_LOADS, |profiler| {
-            let event_id = SelfProfiler::get_query_name_string_id(query_name);
             TimingGuard::start(
                 profiler,
                 profiler.incremental_load_result_event_kind,
-                event_id
+                EventId::INVALID,
             )
         })
     }
@@ -186,16 +313,16 @@ pub fn incr_cache_loading(&self, query_name: impl QueryName) -> TimingGuard<'_>
     fn instant_query_event(
         &self,
         event_kind: fn(&SelfProfiler) -> StringId,
-        query_name: impl QueryName,
+        query_invocation_id: QueryInvocationId,
         event_filter: EventFilter,
     ) {
         drop(self.exec(event_filter, |profiler| {
-            let event_id = SelfProfiler::get_query_name_string_id(query_name);
+            let event_id = StringId::new_virtual(query_invocation_id.0);
             let thread_id = thread_id_to_u32(std::thread::current().id());
 
             profiler.profiler.record_instant_event(
                 event_kind(profiler),
-                event_id,
+                EventId::from_virtual(event_id),
                 thread_id,
             );
 
@@ -203,16 +330,24 @@ fn instant_query_event(
         }));
     }
 
-    pub fn register_queries(&self, f: impl FnOnce(&SelfProfiler)) {
+    pub fn with_profiler(&self, f: impl FnOnce(&SelfProfiler)) {
         if let Some(profiler) = &self.profiler {
             f(&profiler)
         }
     }
+
+    #[inline]
+    pub fn enabled(&self) -> bool {
+        self.profiler.is_some()
+    }
 }
 
 pub struct SelfProfiler {
     profiler: Profiler,
     event_filter_mask: EventFilter,
+
+    string_cache: RwLock<FxHashMap<&'static str, StringId>>,
+
     query_event_kind: StringId,
     generic_activity_event_kind: StringId,
     incremental_load_result_event_kind: StringId,
@@ -224,7 +359,7 @@ impl SelfProfiler {
     pub fn new(
         output_directory: &Path,
         crate_name: Option<&str>,
-        event_filters: &Option<Vec<String>>
+        event_filters: &Option<Vec<String>>,
     ) -> Result<SelfProfiler, Box<dyn Error>> {
         fs::create_dir_all(output_directory)?;
 
@@ -244,8 +379,9 @@ pub fn new(
         if let Some(ref event_filters) = *event_filters {
             let mut unknown_events = vec![];
             for item in event_filters {
-                if let Some(&(_, mask)) = EVENT_FILTERS_BY_NAME.iter()
-                                                               .find(|&(name, _)| name == item) {
+                if let Some(&(_, mask)) =
+                    EVENT_FILTERS_BY_NAME.iter().find(|&(name, _)| name == item)
+                {
                     event_filter_mask |= mask;
                 } else {
                     unknown_events.push(item.clone());
@@ -257,12 +393,15 @@ pub fn new(
                 unknown_events.sort();
                 unknown_events.dedup();
 
-                warn!("Unknown self-profiler events specified: {}. Available options are: {}.",
+                warn!(
+                    "Unknown self-profiler events specified: {}. Available options are: {}.",
                     unknown_events.join(", "),
-                    EVENT_FILTERS_BY_NAME.iter()
-                                         .map(|&(name, _)| name.to_string())
-                                         .collect::<Vec<_>>()
-                                         .join(", "));
+                    EVENT_FILTERS_BY_NAME
+                        .iter()
+                        .map(|&(name, _)| name.to_string())
+                        .collect::<Vec<_>>()
+                        .join(", ")
+                );
             }
         } else {
             event_filter_mask = EventFilter::DEFAULT;
@@ -271,6 +410,7 @@ pub fn new(
         Ok(SelfProfiler {
             profiler,
             event_filter_mask,
+            string_cache: RwLock::new(FxHashMap::default()),
             query_event_kind,
             generic_activity_event_kind,
             incremental_load_result_event_kind,
@@ -279,17 +419,51 @@ pub fn new(
         })
     }
 
-    fn get_query_name_string_id(query_name: impl QueryName) -> StringId {
-        let discriminant = unsafe {
-            mem::transmute::<Discriminant<_>, u64>(query_name.discriminant())
-        };
+    /// Allocates a new string in the profiling data. Does not do any caching
+    /// or deduplication.
+    pub fn alloc_string<STR: SerializableString + ?Sized>(&self, s: &STR) -> StringId {
+        self.profiler.alloc_string(s)
+    }
+
+    /// Gets a `StringId` for the given string. This method makes sure that
+    /// any strings going through it will only be allocated once in the
+    /// profiling data.
+    pub fn get_or_alloc_cached_string(&self, s: &'static str) -> StringId {
+        // Only acquire a read-lock first since we assume that the string is
+        // already present in the common case.
+        {
+            let string_cache = self.string_cache.read();
+
+            if let Some(&id) = string_cache.get(s) {
+                return id;
+            }
+        }
+
+        let mut string_cache = self.string_cache.write();
+        // Check if the string has already been added in the small time window
+        // between dropping the read lock and acquiring the write lock.
+        *string_cache.entry(s).or_insert_with(|| self.profiler.alloc_string(s))
+    }
+
+    pub fn map_query_invocation_id_to_string(&self, from: QueryInvocationId, to: StringId) {
+        let from = StringId::new_virtual(from.0);
+        self.profiler.map_virtual_to_concrete_string(from, to);
+    }
+
+    pub fn bulk_map_query_invocation_id_to_single_string<I>(&self, from: I, to: StringId)
+    where
+        I: Iterator<Item = QueryInvocationId> + ExactSizeIterator,
+    {
+        let from = from.map(|qid| StringId::new_virtual(qid.0));
+        self.profiler.bulk_map_virtual_to_single_concrete_string(from, to);
+    }
 
-        StringId::reserved(discriminant as u32)
+    pub fn query_key_recording_enabled(&self) -> bool {
+        self.event_filter_mask.contains(EventFilter::QUERY_KEYS)
     }
 
-    pub fn register_query_name(&self, query_name: impl QueryName) {
-        let id = SelfProfiler::get_query_name_string_id(query_name);
-        self.profiler.alloc_string_with_reserved_id(id, query_name.as_str());
+    pub fn event_id_builder(&self) -> EventIdBuilder<'_, SerializationSink> {
+        EventIdBuilder::new(&self.profiler)
     }
 }
 
@@ -301,18 +475,115 @@ impl<'a> TimingGuard<'a> {
     pub fn start(
         profiler: &'a SelfProfiler,
         event_kind: StringId,
-        event_id: StringId,
+        event_id: EventId,
     ) -> TimingGuard<'a> {
         let thread_id = thread_id_to_u32(std::thread::current().id());
         let raw_profiler = &profiler.profiler;
-        let timing_guard = raw_profiler.start_recording_interval_event(event_kind,
-                                                                       event_id,
-                                                                       thread_id);
+        let timing_guard =
+            raw_profiler.start_recording_interval_event(event_kind, event_id, thread_id);
         TimingGuard(Some(timing_guard))
     }
 
+    #[inline]
+    pub fn finish_with_query_invocation_id(self, query_invocation_id: QueryInvocationId) {
+        if let Some(guard) = self.0 {
+            let event_id = StringId::new_virtual(query_invocation_id.0);
+            let event_id = EventId::from_virtual(event_id);
+            guard.finish_with_override_event_id(event_id);
+        }
+    }
+
     #[inline]
     pub fn none() -> TimingGuard<'a> {
         TimingGuard(None)
     }
+
+    #[inline(always)]
+    pub fn run<R>(self, f: impl FnOnce() -> R) -> R {
+        let _timer = self;
+        f()
+    }
+}
+
+#[must_use]
+pub struct VerboseTimingGuard<'a> {
+    event_id: &'a str,
+    start: Option<Instant>,
+    _guard: TimingGuard<'a>,
+}
+
+impl<'a> VerboseTimingGuard<'a> {
+    pub fn start(event_id: &'a str, verbose: bool, _guard: TimingGuard<'a>) -> Self {
+        VerboseTimingGuard {
+            event_id,
+            _guard,
+            start: if unlikely!(verbose) { Some(Instant::now()) } else { None },
+        }
+    }
+
+    #[inline(always)]
+    pub fn run<R>(self, f: impl FnOnce() -> R) -> R {
+        let _timer = self;
+        f()
+    }
+}
+
+impl Drop for VerboseTimingGuard<'_> {
+    fn drop(&mut self) {
+        if let Some(start) = self.start {
+            print_time_passes_entry(true, self.event_id, start.elapsed());
+        }
+    }
+}
+
+pub fn print_time_passes_entry(do_it: bool, what: &str, dur: Duration) {
+    if !do_it {
+        return;
+    }
+
+    let mem_string = match get_resident() {
+        Some(n) => {
+            let mb = n as f64 / 1_000_000.0;
+            format!("; rss: {}MB", mb.round() as usize)
+        }
+        None => String::new(),
+    };
+    println!("time: {}{}\t{}", duration_to_secs_str(dur), mem_string, what);
+}
+
+// Hack up our own formatting for the duration to make it easier for scripts
+// to parse (always use the same number of decimal places and the same unit).
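+// For example, a `Duration` of 1234 milliseconds is rendered as "1.234".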
+pub fn duration_to_secs_str(dur: std::time::Duration) -> String {
+    const NANOS_PER_SEC: f64 = 1_000_000_000.0;
+    let secs = dur.as_secs() as f64 + dur.subsec_nanos() as f64 / NANOS_PER_SEC;
+
+    format!("{:.3}", secs)
+}
+
+// Memory reporting
+#[cfg(unix)]
+fn get_resident() -> Option<usize> {
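+    // `/proc/self/statm` is Linux-specific; on other unix-like systems the
+    // read simply fails and `None` is returned. Field 1 is the resident set
+    // size, in pages; a 4096-byte page size is assumed below.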
+    let field = 1;
+    let contents = fs::read("/proc/self/statm").ok()?;
+    let contents = String::from_utf8(contents).ok()?;
+    let s = contents.split_whitespace().nth(field)?;
+    let npages = s.parse::<usize>().ok()?;
+    Some(npages * 4096)
+}
+
+#[cfg(windows)]
+fn get_resident() -> Option<usize> {
+    use std::mem::{self, MaybeUninit};
+    use winapi::shared::minwindef::DWORD;
+    use winapi::um::processthreadsapi::GetCurrentProcess;
+    use winapi::um::psapi::{GetProcessMemoryInfo, PROCESS_MEMORY_COUNTERS};
+
+    let mut pmc = MaybeUninit::<PROCESS_MEMORY_COUNTERS>::uninit();
+    match unsafe {
+        GetProcessMemoryInfo(GetCurrentProcess(), pmc.as_mut_ptr(), mem::size_of_val(&pmc) as DWORD)
+    } {
+        0 => None,
+        _ => {
+            let pmc = unsafe { pmc.assume_init() };
+            Some(pmc.WorkingSetSize as usize)
+        }
+    }
 }