1 use std::collections::{BTreeMap, HashMap};
3 use std::io::{self, Write};
4 use std::thread::ThreadId;
5 use std::time::Instant;
7 use crate::session::config::{Options, OptLevel};
// Categories used to bucket profiling data; Ord/PartialOrd are derived so a
// category can key the BTreeMaps used during aggregation.
// NOTE(review): ProfileCategory's variant list and closing brace are not
// visible in this view — the definition appears elided here.
9 #[derive(Clone, Copy, Debug, PartialEq, Eq, Ord, PartialOrd)]
10 pub enum ProfileCategory {
// The raw event stream recorded per thread. Start/End variants carry an
// `Instant` timestamp so durations can be computed after the fact; cache
// hits and computed-query counts are point events and carry no timestamp.
20 #[derive(Clone, Copy, Debug, Eq, PartialEq)]
21 pub enum ProfilerEvent {
22 QueryStart { query_name: &'static str, category: ProfileCategory, time: Instant },
23 QueryEnd { query_name: &'static str, category: ProfileCategory, time: Instant },
24 GenericActivityStart { category: ProfileCategory, time: Instant },
25 GenericActivityEnd { category: ProfileCategory, time: Instant },
26 QueryCacheHit { query_name: &'static str, category: ProfileCategory },
27 QueryCount { query_name: &'static str, category: ProfileCategory, count: usize },
28 IncrementalLoadResultStart { query_name: &'static str, time: Instant },
29 IncrementalLoadResultEnd { query_name: &'static str, time: Instant },
30 QueryBlockedStart { query_name: &'static str, category: ProfileCategory, time: Instant },
31 QueryBlockedEnd { query_name: &'static str, category: ProfileCategory, time: Instant },
// True for events that open a timing interval (and must later be matched by
// a corresponding *End event); false for *End events and point events
// (cache hits, counts).
// NOTE(review): the match scrutinee line and the QueryStart / QueryEnd /
// QueryCount arms are not visible here — this view appears elided.
35 fn is_start_event(&self) -> bool {
36 use self::ProfilerEvent::*;
40 GenericActivityStart { .. } |
41 IncrementalLoadResultStart { .. } |
42 QueryBlockedStart { .. } => true,
45 GenericActivityEnd { .. } |
46 QueryCacheHit { .. } |
48 IncrementalLoadResultEnd { .. } |
49 QueryBlockedEnd { .. } => false,
// The profiler itself: one event buffer per thread, keyed by ThreadId, so
// recording an event never touches another thread's buffer.
54 pub struct SelfProfiler {
55 events: HashMap<ThreadId, Vec<ProfilerEvent>>,
// Aggregated results for a single ProfileCategory.
58 struct CategoryResultData {
// per-query self time — recorded in nanoseconds, later normalized to ms
59 query_times: BTreeMap<&'static str, u64>,
60 query_cache_stats: BTreeMap<&'static str, (u64, u64)>, //(hits, total)
63 impl CategoryResultData {
// Empty result set: no recorded times, no cache statistics.
64 fn new() -> CategoryResultData {
66 query_times: BTreeMap::new(),
67 query_cache_stats: BTreeMap::new(),
// Sum of all per-query self-times in this category.
71 fn total_time(&self) -> u64 {
72 self.query_times.iter().map(|(_, time)| time).sum()
// Sums (hits, total) cache stats across all queries in this category.
// NOTE(review): the accumulation statements inside the loop and the return
// of (hits, total) are not visible in this view.
75 fn total_cache_data(&self) -> (u64, u64) {
76 let (mut hits, mut total) = (0, 0);
78 for (_, (h, t)) in &self.query_cache_stats {
// Default is the empty result set, which enables the
// `.entry(category).or_default()` calls used during aggregation.
87 impl Default for CategoryResultData {
88 fn default() -> CategoryResultData {
89 CategoryResultData::new()
// Fully-aggregated profiling results plus compilation metadata copied from
// `Options` for display/serialization.
// NOTE(review): the `incremental` and `verbose` fields assigned by
// `with_options` are not visible here — the struct appears elided.
93 struct CalculatedResults {
94 categories: BTreeMap<ProfileCategory, CategoryResultData>,
95 crate_name: Option<String>,
96 optimization_level: OptLevel,
101 impl CalculatedResults {
// Empty results with placeholder metadata (OptLevel::No until
// `with_options` overwrites it from the real compiler options).
102 fn new() -> CalculatedResults {
104 categories: BTreeMap::new(),
106 optimization_level: OptLevel::No,
// Merges cr2 into cr1, summing per-query times and cache stats; used as the
// fold function that combines per-thread results into one total.
// NOTE(review): the statements that add `hits`/`total` into `(h, t)` and the
// trailing return of `cr1` are not visible in this view.
112 fn consolidate(mut cr1: CalculatedResults, cr2: CalculatedResults) -> CalculatedResults {
113 for (category, data) in cr2.categories {
114 let cr1_data = cr1.categories.entry(category).or_default();
116 for (query, time) in data.query_times {
117 *cr1_data.query_times.entry(query).or_default() += time;
120 for (query, (hits, total)) in data.query_cache_stats {
121 let (h, t) = cr1_data.query_cache_stats.entry(query).or_insert((0, 0));
// Grand total across all categories (same unit as the per-category times).
130 fn total_time(&self) -> u64 {
131 self.categories.iter().map(|(_, data)| data.total_time()).sum()
// Copies display-relevant compiler options into the results. Builder-style:
// consumes and returns self — the `self` return expression is presumably in
// the elided tail of this function.
134 fn with_options(mut self, opts: &Options) -> CalculatedResults {
135 self.crate_name = opts.crate_name.clone();
136 self.optimization_level = opts.optimize;
137 self.incremental = opts.incremental.is_some();
138 self.verbose = opts.debugging_opts.verbose;
// Nanoseconds between two Instants. The elided control flow around these
// lines apparently guards the `end - start` subtraction (which panics if
// end < start) and logs instead — TODO confirm against the full source.
// NOTE(review): "ignorning" is a typo in the log message; it is a runtime
// string, so it is left untouched in this comments-only pass.
144 fn time_between_ns(start: Instant, end: Instant) -> u64 {
146 let time = end - start;
147 (time.as_secs() * 1_000_000_000) + (time.subsec_nanos() as u64)
149 debug!("time_between_ns: ignorning instance of end < start");
// numerator / denominator expressed as a percentage.
// NOTE(review): a divide-by-zero guard is presumably in the elided lines —
// confirm, since `total_cache_data` can legitimately return total == 0.
154 fn calculate_percent(numerator: u64, denominator: u64) -> f32 {
156 ((numerator as f32) / (denominator as f32)) * 100.0
// Creates the profiler and immediately opens the catch-all "Other" activity;
// `print_results` closes it, so time not attributed elsewhere lands there.
163 pub fn new() -> SelfProfiler {
164 let mut profiler = SelfProfiler {
165 events: HashMap::new(),
168 profiler.start_activity(ProfileCategory::Other);
// Opens a generic (non-query) timing interval for `category`.
174 pub fn start_activity(&mut self, category: ProfileCategory) {
175 self.record(ProfilerEvent::GenericActivityStart {
177 time: Instant::now(),
// Closes the interval opened by the matching `start_activity` call.
182 pub fn end_activity(&mut self, category: ProfileCategory) {
183 self.record(ProfilerEvent::GenericActivityEnd {
185 time: Instant::now(),
// Records that `count` results of `query_name` were freshly computed
// (i.e. cache misses), as a single point event.
190 pub fn record_computed_queries(
192 query_name: &'static str,
193 category: ProfileCategory,
196 self.record(ProfilerEvent::QueryCount {
// Records a single query-cache hit as a point event.
204 pub fn record_query_hit(&mut self, query_name: &'static str, category: ProfileCategory) {
205 self.record(ProfilerEvent::QueryCacheHit {
// Opens a timing interval for the execution of `query_name`.
212 pub fn start_query(&mut self, query_name: &'static str, category: ProfileCategory) {
213 self.record(ProfilerEvent::QueryStart {
216 time: Instant::now(),
// Closes the interval opened by the matching `start_query` call.
221 pub fn end_query(&mut self, query_name: &'static str, category: ProfileCategory) {
222 self.record(ProfilerEvent::QueryEnd {
225 time: Instant::now(),
// Interval around loading an incremental-compilation result from disk.
// (Recorded, but ignored by the simple summary in calculate_thread_results.)
230 pub fn incremental_load_result_start(&mut self, query_name: &'static str) {
231 self.record(ProfilerEvent::IncrementalLoadResultStart {
233 time: Instant::now(),
238 pub fn incremental_load_result_end(&mut self, query_name: &'static str) {
239 self.record(ProfilerEvent::IncrementalLoadResultEnd {
241 time: Instant::now(),
// Interval during which this thread was blocked on a query being executed
// by another thread (parallel queries); also ignored by the simple summary.
246 pub fn query_blocked_start(&mut self, query_name: &'static str, category: ProfileCategory) {
247 self.record(ProfilerEvent::QueryBlockedStart {
250 time: Instant::now(),
255 pub fn query_blocked_end(&mut self, query_name: &'static str, category: ProfileCategory) {
256 self.record(ProfilerEvent::QueryBlockedEnd {
259 time: Instant::now(),
// Appends `event` to the calling thread's buffer, creating the buffer on
// first use via the entry API (one lookup, no double hashing).
264 fn record(&mut self, event: ProfilerEvent) {
265 let thread_id = std::thread::current().id();
266 let events = self.events.entry(thread_id).or_default();
// Replays one thread's event stream and aggregates it into per-category,
// per-query self-times plus cache statistics.
//
// Algorithm: a stack of (start-event, child-time-to-subtract) pairs mirrors
// the nesting of Start/End events. When an End is matched, the interval's
// wall time minus its children's accumulated time is charged to the
// query/category (self time), and the full interval time is added to the
// parent's child-time accumulator — so no time is double-counted.
271 fn calculate_thread_results(events: &Vec<ProfilerEvent>) -> CalculatedResults {
272 use self::ProfilerEvent::*;
// Precondition: the stream is balanced — the last event must not leave an
// interval open. NOTE(review): "calculate_reslts" is a typo in this
// assertion message; runtime string, left untouched in this comments-only
// pass.
275 events.last().map(|e| !e.is_start_event()).unwrap_or(true),
276 "there was an event running when calculate_reslts() was called"
279 let mut results = CalculatedResults::new();
281 //(event, child time to subtract)
282 let mut query_stack = Vec::new();
284 for event in events {
286 QueryStart { .. } | GenericActivityStart { .. } => {
287 query_stack.push((event, 0));
289 QueryEnd { query_name, category, time: end_time } => {
290 let previous_query = query_stack.pop();
// The popped entry must be a QueryStart for the same query; anything else
// means the instrumentation is unbalanced and is reported as a bug below.
291 if let Some((QueryStart {
292 query_name: p_query_name,
294 category: _ }, child_time_to_subtract)) = previous_query {
298 "Saw a query end but the previous query wasn't the corresponding start"
301 let time_ns = time_between_ns(*start_time, *end_time);
// Self time only; inclusive time propagates to the parent just below.
302 let self_time_ns = time_ns - child_time_to_subtract;
303 let result_data = results.categories.entry(*category).or_default();
305 *result_data.query_times.entry(query_name).or_default() += self_time_ns;
307 if let Some((_, child_time_to_subtract)) = query_stack.last_mut() {
308 *child_time_to_subtract += time_ns;
311 bug!("Saw a query end but the previous event wasn't a query start");
// Generic activities are handled identically, but their self time is booked
// under a synthetic "{time spent not running queries}" entry.
314 GenericActivityEnd { category, time: end_time } => {
315 let previous_event = query_stack.pop();
316 if let Some((GenericActivityStart {
317 category: previous_category,
318 time: start_time }, child_time_to_subtract)) = previous_event {
322 "Saw an end but the previous event wasn't the corresponding start"
325 let time_ns = time_between_ns(*start_time, *end_time);
326 let self_time_ns = time_ns - child_time_to_subtract;
327 let result_data = results.categories.entry(*category).or_default();
329 *result_data.query_times
330 .entry("{time spent not running queries}")
331 .or_default() += self_time_ns;
333 if let Some((_, child_time_to_subtract)) = query_stack.last_mut() {
334 *child_time_to_subtract += time_ns;
337 bug!("Saw an activity end but the previous event wasn't an activity start");
// Point events: update the (hits, total) pair for the query. The elided
// lines presumably increment both counters for a cache hit, and only the
// total for QueryCount — TODO confirm against the full source.
340 QueryCacheHit { category, query_name } => {
341 let result_data = results.categories.entry(*category).or_default();
344 result_data.query_cache_stats.entry(query_name).or_insert((0, 0));
348 QueryCount { category, query_name, count } => {
349 let result_data = results.categories.entry(*category).or_default();
352 result_data.query_cache_stats.entry(query_name).or_insert((0, 0));
353 *totals += *count as u64;
355 //we don't summarize incremental load result events in the simple output mode
356 IncrementalLoadResultStart { .. } | IncrementalLoadResultEnd { .. } => { },
357 //we don't summarize parallel query blocking in the simple output mode
358 QueryBlockedStart { .. } | QueryBlockedEnd { .. } => { },
362 //normalize the times to ms
363 for (_, data) in &mut results.categories {
364 for (_, time) in &mut data.query_times {
365 *time = *time / 1_000_000;
// Computes each thread's results independently, then folds them into a
// single CalculatedResults via `consolidate`. The `opts` parameter is
// presumably applied via `with_options` in the elided tail of the chain —
// TODO confirm against the full source.
372 fn get_results(&self, opts: &Options) -> CalculatedResults {
375 .map(|(_, r)| SelfProfiler::calculate_thread_results(r))
376 .fold(CalculatedResults::new(), CalculatedResults::consolidate)
// Closes the implicit "Other" activity opened in `new`, then prints a
// markdown-style summary table to stderr.
380 pub fn print_results(&mut self, opts: &Options) {
381 self.end_activity(ProfileCategory::Other);
383 let results = self.get_results(opts);
385 let total_time = results.total_time() as f32;
387 let out = io::stderr();
// Lock stderr once for the whole report instead of once per writeln!.
388 let mut lock = out.lock();
390 let crate_name = results.crate_name.map(|n| format!(" for {}", n)).unwrap_or_default();
392 writeln!(lock, "Self profiling results{}:", crate_name).unwrap();
393 writeln!(lock).unwrap();
395 writeln!(lock, "| Phase | Time (ms) \
396 | Time (%) | Queries | Hits (%)")
398 writeln!(lock, "| ----------------------------------------- | -------------- \
399 | -------- | -------------- | --------")
// Sort categories ascending by total time, then iterate with .rev() below
// to print the most expensive first; sort_by_cached_key computes each
// category's total only once.
402 let mut categories: Vec<_> = results.categories.iter().collect();
403 categories.sort_by_cached_key(|(_, d)| d.total_time());
405 for (category, data) in categories.iter().rev() {
406 let (category_hits, category_total) = data.total_cache_data();
407 let category_hit_percent = calculate_percent(category_hits, category_total);
411 "| {0: <41} | {1: >14} | {2: >8.2} | {3: >14} | {4: >8}",
412 format!("{:?}", category),
414 ((data.total_time() as f32) / total_time) * 100.0,
416 format!("{:.2}", category_hit_percent),
419 //in verbose mode, show individual query data
421 //don't show queries that took less than 1ms
// Times were normalized to ms in calculate_thread_results, so `> 0` here
// means "at least 1ms".
422 let mut times: Vec<_> = data.query_times.iter().filter(|(_, t)| **t > 0).collect();
423 times.sort_by(|(_, time1), (_, time2)| time2.cmp(time1));
425 for (query, time) in times {
426 let (hits, total) = data.query_cache_stats.get(query).unwrap_or(&(0, 0));
427 let hit_percent = calculate_percent(*hits, *total);
431 "| - {0: <39} | {1: >14} | {2: >8.2} | {3: >14} | {4: >8}",
434 ((*time as f32) / total_time) * 100.0,
436 format!("{:.2}", hit_percent),
442 writeln!(lock).unwrap();
443 writeln!(lock, "Optimization level: {:?}", opts.optimize).unwrap();
444 writeln!(lock, "Incremental: {}", if results.incremental { "on" } else { "off" }).unwrap();
// Serializes the results as hand-built JSON and writes them to
// self_profiler_results.json in the current directory.
// NOTE(review): JSON is assembled by string formatting — fine while values
// are numeric/Debug-formatted, but fragile if a name ever contains quotes;
// and `fs::write(...).unwrap()` will panic on any I/O failure.
447 pub fn save_results(&self, opts: &Options) {
448 let results = self.get_results(opts);
450 let compilation_options =
451 format!("{{ \"optimization_level\": \"{:?}\", \"incremental\": {} }}",
452 results.optimization_level,
453 if results.incremental { "true" } else { "false" });
455 let mut category_data = String::new();
457 for (category, data) in &results.categories {
458 let (hits, total) = data.total_cache_data();
459 let hit_percent = calculate_percent(hits, total);
461 category_data.push_str(&format!("{{ \"category\": \"{:?}\", \"time_ms\": {}, \
462 \"query_count\": {}, \"query_hits\": {} }}",
466 format!("{:.2}", hit_percent)));
469 let json = format!("{{ \"category_data\": {}, \"compilation_options\": {} }}",
471 compilation_options);
// NOTE(review): `fs` is not among the imports visible in this view —
// confirm `use std::fs;` exists at the top of the full file.
473 fs::write("self_profiler_results.json", json).unwrap();