// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Support code for rustc's built-in unit-test and micro-benchmarking
//! framework.
//!
//! Almost all user code will only be interested in `Bencher` and
//! `black_box`. All other interactions (such as writing tests and
//! benchmarks themselves) should be done via the `#[test]` and
//! `#[bench]` attributes.
//!
//! See the [Testing Guide](../guide-testing.html) for more details.
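//!
//! A typical benchmark written against this interface looks like the
//! following sketch (the summed `Vec` is illustrative, not part of this
//! crate):
//!
//! ```ignore
//! extern crate test;
//!
//! use test::{Bencher, black_box};
//!
//! #[bench]
//! fn bench_sum(b: &mut Bencher) {
//!     let v = range(0u64, 1024).collect::<Vec<u64>>();
//!     b.iter(|| black_box(v.iter().fold(0, |a, &x| a + x)));
//! }
//! ```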
// Currently, not much of this is meant for users. It is intended to
// support the simplest interface possible for representing and
// running tests while providing a base that other test frameworks may
// build off of.
#![crate_name = "test"]
#![crate_type = "rlib"]
#![crate_type = "dylib"]
#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
       html_favicon_url = "http://www.rust-lang.org/favicon.ico",
       html_root_url = "http://doc.rust-lang.org/nightly/")]
#![allow(unknown_features)]
#![feature(asm, slicing_syntax)]
#![feature(box_syntax)]
#![feature(int_uint)]
extern crate getopts;
extern crate regex;
extern crate serialize;
extern crate "serialize" as rustc_serialize;
extern crate term;
pub use self::TestFn::*;
pub use self::MetricChange::*;
pub use self::ColorConfig::*;
pub use self::TestResult::*;
pub use self::TestName::*;
use self::TestEvent::*;
use self::NamePadding::*;
use self::OutputLocation::*;
use stats::Stats;
use getopts::{OptGroup, optflag, optopt};
use regex::Regex;
use serialize::{json, Decodable, Encodable};
use term::color::{Color, RED, YELLOW, GREEN, CYAN};

use std::any::Any;
use std::cmp;
use std::collections::BTreeMap;
use std::f64;
use std::fmt;
use std::io::fs::PathExtensions;
use std::io::stdio::StdWriter;
use std::io::{File, ChanReader, ChanWriter};
use std::io;
use std::iter::repeat;
use std::num::{Float, Int};
use std::os;
use std::str::FromStr;
use std::sync::mpsc::{channel, Sender};
use std::thread::{self, Thread};
use std::thunk::{Thunk, Invoke};
use std::time::Duration;
// to be used by rustc to compile tests in libtest
pub mod test {
    pub use {Bencher, TestName, TestResult, TestDesc,
             TestDescAndFn, TestOpts, TrFailed, TrIgnored, TrOk,
             Metric, MetricMap, MetricAdded, MetricRemoved,
             MetricChange, Improvement, Regression, LikelyNoise,
             StaticTestFn, StaticTestName, DynTestName, DynTestFn,
             run_test, test_main, test_main_static, filter_tests,
             parse_opts, StaticBenchFn, ShouldFail};
}

pub mod stats;
// The name of a test. By convention this follows the rules for rust
// paths; i.e. it should be a series of identifiers separated by double
// colons. This way if some test runner wants to arrange the tests
// hierarchically it may.

#[derive(Clone, PartialEq, Eq, Hash, Show)]
pub enum TestName {
    StaticTestName(&'static str),
    DynTestName(String)
}
impl TestName {
    fn as_slice<'a>(&'a self) -> &'a str {
        match *self {
            StaticTestName(s) => s,
            DynTestName(ref s) => s.as_slice()
        }
    }
}
impl fmt::String for TestName {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::String::fmt(self.as_slice(), f)
    }
}
#[derive(Clone, Copy)]
enum NamePadding {
    PadNone,
    PadOnLeft,
    PadOnRight,
}

impl TestDesc {
    fn padded_name(&self, column_count: uint, align: NamePadding) -> String {
        let mut name = String::from_str(self.name.as_slice());
        let fill = column_count.saturating_sub(name.len());
        let mut pad = repeat(" ").take(fill).collect::<String>();
        match align {
            PadNone => name,
            PadOnLeft => {
                pad.push_str(name.as_slice());
                pad
            }
            PadOnRight => {
                name.push_str(pad.as_slice());
                name
            }
        }
    }
}
/// Represents a benchmark function.
pub trait TDynBenchFn {
    fn run(&self, harness: &mut Bencher);
}
// A function that runs a test. If the function returns successfully,
// the test succeeds; if the function panics then the test fails. We
// may need to come up with a more clever definition of test in order
// to support isolation of tests into tasks.
pub enum TestFn {
    StaticTestFn(fn()),
    StaticBenchFn(fn(&mut Bencher)),
    StaticMetricFn(fn(&mut MetricMap)),
    DynTestFn(Thunk),
    DynMetricFn(Box<for<'a> Invoke<&'a mut MetricMap>+'static>),
    DynBenchFn(Box<TDynBenchFn+'static>)
}
impl TestFn {
    fn padding(&self) -> NamePadding {
        match self {
            &StaticTestFn(..) => PadNone,
            &StaticBenchFn(..) => PadOnRight,
            &StaticMetricFn(..) => PadOnRight,
            &DynTestFn(..) => PadNone,
            &DynMetricFn(..) => PadOnRight,
            &DynBenchFn(..) => PadOnRight,
        }
    }
}
impl fmt::Show for TestFn {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str(match *self {
            StaticTestFn(..) => "StaticTestFn(..)",
            StaticBenchFn(..) => "StaticBenchFn(..)",
            StaticMetricFn(..) => "StaticMetricFn(..)",
            DynTestFn(..) => "DynTestFn(..)",
            DynMetricFn(..) => "DynMetricFn(..)",
            DynBenchFn(..) => "DynBenchFn(..)"
        })
    }
}
/// Manager of the benchmarking runs.
///
/// This is fed into functions marked with `#[bench]` to allow for
/// set-up & tear-down before running a piece of code repeatedly via a
/// call to `iter`.
#[derive(Copy)]
pub struct Bencher {
    iterations: u64,
    dur: Duration,
    pub bytes: u64,
}
#[derive(Copy, Clone, Show, PartialEq, Eq, Hash)]
pub enum ShouldFail {
    No,
    Yes(Option<&'static str>)
}
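
// For example, the harness maps `#[should_fail(expected = "...")]` onto
// `ShouldFail::Yes(Some("..."))`; a sketch of the attribute in use:
//
//     #[test]
//     #[should_fail(expected = "assertion failed")]
//     fn checks_are_enforced() {
//         assert!(1 + 1 == 3);
//     }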
// The definition of a single test. A test runner will run a list of
// these.
#[derive(Clone, Show, PartialEq, Eq, Hash)]
pub struct TestDesc {
    pub name: TestName,
    pub ignore: bool,
    pub should_fail: ShouldFail,
}

unsafe impl Send for TestDesc {}
pub struct TestDescAndFn {
    pub desc: TestDesc,
    pub testfn: TestFn,
}
#[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Show, Copy)]
pub struct Metric {
    value: f64,
    noise: f64
}

impl Metric {
    pub fn new(value: f64, noise: f64) -> Metric {
        Metric {value: value, noise: noise}
    }
}
#[derive(PartialEq)]
pub struct MetricMap(BTreeMap<String,Metric>);
impl Clone for MetricMap {
    fn clone(&self) -> MetricMap {
        let MetricMap(ref map) = *self;
        MetricMap(map.clone())
    }
}
/// Analysis of a single change in metric
#[derive(Copy, PartialEq, Show)]
pub enum MetricChange {
    LikelyNoise,
    MetricAdded,
    MetricRemoved,
    Improvement(f64),
    Regression(f64)
}

pub type MetricDiff = BTreeMap<String,MetricChange>;
// The default console test runner. It accepts the command line
// arguments and a vector of test_descs.
pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>) {
    let opts =
        match parse_opts(args) {
            Some(Ok(o)) => o,
            Some(Err(msg)) => panic!("{:?}", msg),
            None => return
        };
    match run_tests_console(&opts, tests) {
        Ok(true) => {}
        Ok(false) => panic!("Some tests failed"),
        Err(e) => panic!("io error when running tests: {:?}", e),
    }
}
// A variant optimized for invocation with a static test vector.
// This will panic (intentionally) when fed any dynamic tests, because
// it is copying the static values out into a dynamic vector and cannot
// copy dynamic values. It is doing this because from this point on
// a ~[TestDescAndFn] is used in order to effect ownership-transfer
// semantics into parallel test runners, which in turn requires a ~[]
// rather than a &[].
pub fn test_main_static(args: &[String], tests: &[TestDescAndFn]) {
    let owned_tests = tests.iter().map(|t| {
        match t.testfn {
            StaticTestFn(f) => TestDescAndFn { testfn: StaticTestFn(f), desc: t.desc.clone() },
            StaticBenchFn(f) => TestDescAndFn { testfn: StaticBenchFn(f), desc: t.desc.clone() },
            _ => panic!("non-static tests passed to test::test_main_static")
        }
    }).collect();
    test_main(args, owned_tests)
}
#[derive(Copy)]
pub enum ColorConfig {
    AutoColor,
    AlwaysColor,
    NeverColor,
}
pub struct TestOpts {
    pub filter: Option<Regex>,
    pub run_ignored: bool,
    pub run_tests: bool,
    pub run_benchmarks: bool,
    pub logfile: Option<Path>,
    pub nocapture: bool,
    pub color: ColorConfig,
}
impl TestOpts {
    #[cfg(test)]
    fn new() -> TestOpts {
        TestOpts {
            filter: None,
            run_ignored: false,
            run_tests: false,
            run_benchmarks: false,
            logfile: None,
            nocapture: false,
            color: AutoColor,
        }
    }
}
/// Result of parsing the options.
pub type OptRes = Result<TestOpts, String>;
fn optgroups() -> Vec<getopts::OptGroup> {
    vec!(getopts::optflag("", "ignored", "Run ignored tests"),
      getopts::optflag("", "test", "Run tests and not benchmarks"),
      getopts::optflag("", "bench", "Run benchmarks instead of tests"),
      getopts::optflag("h", "help", "Display this message (longer with --help)"),
      getopts::optopt("", "logfile", "Write logs to the specified file instead \
                          of stdout", "PATH"),
      getopts::optflag("", "nocapture", "don't capture stdout/stderr of each \
                                         task, allow printing directly"),
      getopts::optopt("", "color", "Configure coloring of output:
            auto   = colorize if stdout is a tty and tests are run serially (default);
            always = always colorize output;
            never  = never colorize output;", "auto|always|never"))
}
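
// For example, a compiled test binary accepts invocations such as the
// following (binary name and filter regex are illustrative; the flags are
// those defined by `optgroups` above):
//
//     ./my-test-binary --ignored --logfile=tests.log 'parse::'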
fn usage(binary: &str) {
    let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
    println!(r#"{usage}

The FILTER regex is tested against the name of all tests to run, and
only those tests that match are run.

By default, all tests are run in parallel. This can be altered with the
RUST_TEST_TASKS environment variable when running tests (set it to 1).

All tests have their standard output and standard error captured by default.
This can be overridden with the --nocapture flag or the RUST_TEST_NOCAPTURE=1
environment variable. Logging is not captured by default.

Test Attributes:

    #[test]        - Indicates a function is a test to be run. This function
                     takes no arguments.
    #[bench]       - Indicates a function is a benchmark to be run. This
                     function takes one argument (test::Bencher).
    #[should_fail] - This function (also labeled with #[test]) will only pass if
                     the code causes a failure (an assertion failure or panic!)
                     A message may be provided, which the failure string must
                     contain: #[should_fail(expected = "foo")].
    #[ignore]      - When applied to a function which is already attributed as a
                     test, then the test runner will ignore these tests during
                     normal test runs. Running with --ignored will run these
                     tests."#,
             usage = getopts::usage(message.as_slice(),
                                    optgroups().as_slice()));
}
// Parses command line arguments into test options
pub fn parse_opts(args: &[String]) -> Option<OptRes> {
    let args_ = args.tail();
    let matches =
        match getopts::getopts(args_.as_slice(), optgroups().as_slice()) {
            Ok(m) => m,
            Err(f) => return Some(Err(f.to_string()))
        };

    if matches.opt_present("h") { usage(args[0].as_slice()); return None; }

    let filter = if matches.free.len() > 0 {
        let s = matches.free[0].as_slice();
        match Regex::new(s) {
            Ok(re) => Some(re),
            Err(e) => return Some(Err(format!("could not parse /{}/: {:?}", s, e)))
        }
    } else {
        None
    };

    let run_ignored = matches.opt_present("ignored");

    let logfile = matches.opt_str("logfile");
    let logfile = logfile.map(|s| Path::new(s));

    let run_benchmarks = matches.opt_present("bench");
    let run_tests = ! run_benchmarks ||
        matches.opt_present("test");

    let mut nocapture = matches.opt_present("nocapture");
    if !nocapture {
        nocapture = os::getenv("RUST_TEST_NOCAPTURE").is_some();
    }

    let color = match matches.opt_str("color").as_ref().map(|s| s.as_slice()) {
        Some("auto") | None => AutoColor,
        Some("always") => AlwaysColor,
        Some("never") => NeverColor,

        Some(v) => return Some(Err(format!("argument for --color must be \
                                            auto, always, or never (was {})",
                                           v)))
    };

    let test_opts = TestOpts {
        filter: filter,
        run_ignored: run_ignored,
        run_tests: run_tests,
        run_benchmarks: run_benchmarks,
        logfile: logfile,
        nocapture: nocapture,
        color: color,
    };

    Some(Ok(test_opts))
}
#[derive(Clone, PartialEq)]
pub struct BenchSamples {
    ns_iter_summ: stats::Summary<f64>,
    mb_s: uint,
}

#[derive(Clone, PartialEq)]
pub enum TestResult {
    TrOk,
    TrFailed,
    TrIgnored,
    TrMetrics(MetricMap),
    TrBench(BenchSamples),
}

unsafe impl Send for TestResult {}
enum OutputLocation<T> {
    Pretty(Box<term::Terminal<term::WriterWrapper> + Send>),
    Raw(T),
}

struct ConsoleTestState<T> {
    log_out: Option<File>,
    out: OutputLocation<T>,
    use_color: bool,
    show_boxplot: bool,
    boxplot_width: uint,
    show_all_stats: bool,
    total: uint,
    passed: uint,
    failed: uint,
    ignored: uint,
    measured: uint,
    metrics: MetricMap,
    failures: Vec<(TestDesc, Vec<u8>)>,
    max_name_len: uint, // number of columns to fill when aligning names
}
impl<T: Writer> ConsoleTestState<T> {
    pub fn new(opts: &TestOpts,
               _: Option<T>) -> io::IoResult<ConsoleTestState<StdWriter>> {
        let log_out = match opts.logfile {
            Some(ref path) => Some(try!(File::create(path))),
            None => None
        };
        let out = match term::stdout() {
            None => Raw(io::stdio::stdout_raw()),
            Some(t) => Pretty(t)
        };

        Ok(ConsoleTestState {
            out: out,
            log_out: log_out,
            use_color: use_color(opts),
            show_boxplot: false,
            boxplot_width: 50,
            show_all_stats: false,
            total: 0u,
            passed: 0u,
            failed: 0u,
            ignored: 0u,
            measured: 0u,
            metrics: MetricMap::new(),
            failures: Vec::new(),
            max_name_len: 0u,
        })
    }
    pub fn write_ok(&mut self) -> io::IoResult<()> {
        self.write_pretty("ok", term::color::GREEN)
    }

    pub fn write_failed(&mut self) -> io::IoResult<()> {
        self.write_pretty("FAILED", term::color::RED)
    }

    pub fn write_ignored(&mut self) -> io::IoResult<()> {
        self.write_pretty("ignored", term::color::YELLOW)
    }

    pub fn write_metric(&mut self) -> io::IoResult<()> {
        self.write_pretty("metric", term::color::CYAN)
    }

    pub fn write_bench(&mut self) -> io::IoResult<()> {
        self.write_pretty("bench", term::color::CYAN)
    }

    pub fn write_added(&mut self) -> io::IoResult<()> {
        self.write_pretty("added", term::color::GREEN)
    }

    pub fn write_improved(&mut self) -> io::IoResult<()> {
        self.write_pretty("improved", term::color::GREEN)
    }

    pub fn write_removed(&mut self) -> io::IoResult<()> {
        self.write_pretty("removed", term::color::YELLOW)
    }

    pub fn write_regressed(&mut self) -> io::IoResult<()> {
        self.write_pretty("regressed", term::color::RED)
    }
    pub fn write_pretty(&mut self,
                        word: &str,
                        color: term::color::Color) -> io::IoResult<()> {
        match self.out {
            Pretty(ref mut term) => {
                if self.use_color {
                    try!(term.fg(color));
                }
                try!(term.write(word.as_bytes()));
                if self.use_color {
                    try!(term.reset());
                }
                Ok(())
            }
            Raw(ref mut stdout) => stdout.write(word.as_bytes())
        }
    }

    pub fn write_plain(&mut self, s: &str) -> io::IoResult<()> {
        match self.out {
            Pretty(ref mut term) => term.write(s.as_bytes()),
            Raw(ref mut stdout) => stdout.write(s.as_bytes())
        }
    }
    pub fn write_run_start(&mut self, len: uint) -> io::IoResult<()> {
        self.total = len;
        let noun = if len != 1 { "tests" } else { "test" };
        self.write_plain(format!("\nrunning {} {}\n", len, noun).as_slice())
    }

    pub fn write_test_start(&mut self, test: &TestDesc,
                            align: NamePadding) -> io::IoResult<()> {
        let name = test.padded_name(self.max_name_len, align);
        self.write_plain(format!("test {} ... ", name).as_slice())
    }
    pub fn write_result(&mut self, result: &TestResult) -> io::IoResult<()> {
        try!(match *result {
            TrOk => self.write_ok(),
            TrFailed => self.write_failed(),
            TrIgnored => self.write_ignored(),
            TrMetrics(ref mm) => {
                try!(self.write_metric());
                self.write_plain(format!(": {}", fmt_metrics(mm)).as_slice())
            }
            TrBench(ref bs) => {
                try!(self.write_bench());

                if self.show_boxplot {
                    let mut wr = Vec::new();

                    try!(stats::write_boxplot(&mut wr, &bs.ns_iter_summ, self.boxplot_width));

                    let s = String::from_utf8(wr).unwrap();

                    try!(self.write_plain(format!(": {}", s).as_slice()));
                }

                if self.show_all_stats {
                    let mut wr = Vec::new();

                    try!(stats::write_5_number_summary(&mut wr, &bs.ns_iter_summ));

                    let s = String::from_utf8(wr).unwrap();

                    try!(self.write_plain(format!(": {}", s).as_slice()));
                } else {
                    try!(self.write_plain(format!(": {}",
                                                  fmt_bench_samples(bs)).as_slice()));
                }

                Ok(())
            }
        });
        self.write_plain("\n")
    }
    pub fn write_log(&mut self, test: &TestDesc,
                     result: &TestResult) -> io::IoResult<()> {
        match self.log_out {
            None => Ok(()),
            Some(ref mut o) => {
                let s = format!("{} {}\n", match *result {
                        TrOk => "ok".to_string(),
                        TrFailed => "failed".to_string(),
                        TrIgnored => "ignored".to_string(),
                        TrMetrics(ref mm) => fmt_metrics(mm),
                        TrBench(ref bs) => fmt_bench_samples(bs)
                    }, test.name.as_slice());
                o.write(s.as_bytes())
            }
        }
    }
    pub fn write_failures(&mut self) -> io::IoResult<()> {
        try!(self.write_plain("\nfailures:\n"));
        let mut failures = Vec::new();
        let mut fail_out = String::new();
        for &(ref f, ref stdout) in self.failures.iter() {
            failures.push(f.name.to_string());
            if stdout.len() > 0 {
                fail_out.push_str(format!("---- {} stdout ----\n\t",
                                          f.name.as_slice()).as_slice());
                let output = String::from_utf8_lossy(stdout.as_slice());
                fail_out.push_str(output.as_slice());
                fail_out.push_str("\n");
            }
        }
        if fail_out.len() > 0 {
            try!(self.write_plain("\n"));
            try!(self.write_plain(fail_out.as_slice()));
        }

        try!(self.write_plain("\nfailures:\n"));
        failures.sort();
        for name in failures.iter() {
            try!(self.write_plain(format!("    {}\n",
                                          name.as_slice()).as_slice()));
        }
        Ok(())
    }
    pub fn write_metric_diff(&mut self, diff: &MetricDiff) -> io::IoResult<()> {
        let mut noise = 0u;
        let mut improved = 0u;
        let mut regressed = 0u;
        let mut added = 0u;
        let mut removed = 0u;

        for (k, v) in diff.iter() {
            match *v {
                LikelyNoise => noise += 1,
                MetricAdded => {
                    added += 1;
                    try!(self.write_added());
                    try!(self.write_plain(format!(": {}\n", *k).as_slice()));
                }
                MetricRemoved => {
                    removed += 1;
                    try!(self.write_removed());
                    try!(self.write_plain(format!(": {}\n", *k).as_slice()));
                }
                Improvement(pct) => {
                    improved += 1;
                    try!(self.write_plain(format!(": {} ", *k).as_slice()));
                    try!(self.write_improved());
                    try!(self.write_plain(format!(" by {:.2}%\n",
                                                  pct as f64).as_slice()));
                }
                Regression(pct) => {
                    regressed += 1;
                    try!(self.write_plain(format!(": {} ", *k).as_slice()));
                    try!(self.write_regressed());
                    try!(self.write_plain(format!(" by {:.2}%\n",
                                                  pct as f64).as_slice()));
                }
            }
        }
        try!(self.write_plain(format!("result of ratchet: {} metrics added, \
                                       {} removed, {} improved, {} regressed, \
                                       {} noise\n",
                                      added, removed, improved, regressed,
                                      noise).as_slice()));
        if regressed == 0u {
            try!(self.write_plain("updated ratchet file\n"));
        } else {
            try!(self.write_plain("left ratchet file untouched\n"));
        }
        Ok(())
    }
    pub fn write_run_finish(&mut self,
                            ratchet_metrics: &Option<Path>,
                            ratchet_pct: Option<f64>) -> io::IoResult<bool> {
        assert!(self.passed + self.failed + self.ignored + self.measured == self.total);

        let ratchet_success = match *ratchet_metrics {
            None => true,
            Some(ref pth) => {
                try!(self.write_plain(format!("\nusing metrics ratchet: {:?}\n",
                                              pth.display()).as_slice()));
                match ratchet_pct {
                    None => (),
                    Some(pct) =>
                        try!(self.write_plain(format!("with noise-tolerance \
                                                       forced to: {}%\n",
                                                      pct).as_slice()))
                }
                let (diff, ok) = self.metrics.ratchet(pth, ratchet_pct);
                try!(self.write_metric_diff(&diff));
                ok
            }
        };

        let test_success = self.failed == 0u;
        if !test_success {
            try!(self.write_failures());
        }

        let success = ratchet_success && test_success;

        try!(self.write_plain("\ntest result: "));
        if success {
            // There's no parallelism at this point so it's safe to use color
            try!(self.write_ok());
        } else {
            try!(self.write_failed());
        }

        let s = format!(". {} passed; {} failed; {} ignored; {} measured\n\n",
                        self.passed, self.failed, self.ignored, self.measured);
        try!(self.write_plain(s.as_slice()));
        return Ok(success);
    }
}
pub fn fmt_metrics(mm: &MetricMap) -> String {
    let MetricMap(ref mm) = *mm;
    let v : Vec<String> = mm.iter()
        .map(|(k,v)| format!("{}: {} (+/- {})", *k,
                             v.value as f64, v.noise as f64))
        .collect();
    v.connect(", ")
}
pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
    if bs.mb_s != 0 {
        format!("{:>9} ns/iter (+/- {}) = {} MB/s",
                bs.ns_iter_summ.median as uint,
                (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint,
                bs.mb_s)
    } else {
        format!("{:>9} ns/iter (+/- {})",
                bs.ns_iter_summ.median as uint,
                (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint)
    }
}
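
// When combined with the `test NAME ... bench` prefix written by the console
// runner, the rendered line looks like this (values illustrative):
//
//     test bench_copy ... bench:      9506 ns/iter (+/- 537) = 42 MB/s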
// A simple console test runner
pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::IoResult<bool> {

    fn callback<T: Writer>(event: &TestEvent, st: &mut ConsoleTestState<T>) -> io::IoResult<()> {
        match (*event).clone() {
            TeFiltered(ref filtered_tests) => st.write_run_start(filtered_tests.len()),
            TeWait(ref test, padding) => st.write_test_start(test, padding),
            TeResult(test, result, stdout) => {
                try!(st.write_log(&test, &result));
                try!(st.write_result(&result));
                match result {
                    TrOk => st.passed += 1,
                    TrIgnored => st.ignored += 1,
                    TrMetrics(mm) => {
                        let tname = test.name.as_slice();
                        let MetricMap(mm) = mm;
                        for (k,v) in mm.iter() {
                            st.metrics
                              .insert_metric(format!("{}.{}",
                                                     tname,
                                                     k).as_slice(),
                                             v.value,
                                             v.noise);
                        }
                        st.measured += 1
                    }
                    TrBench(bs) => {
                        st.metrics.insert_metric(test.name.as_slice(),
                                                 bs.ns_iter_summ.median,
                                                 bs.ns_iter_summ.max - bs.ns_iter_summ.min);
                        st.measured += 1
                    }
                    TrFailed => {
                        st.failed += 1;
                        st.failures.push((test, stdout));
                    }
                }
                Ok(())
            }
        }
    }

    let mut st = try!(ConsoleTestState::new(opts, None::<StdWriter>));
    fn len_if_padded(t: &TestDescAndFn) -> uint {
        match t.testfn.padding() {
            PadNone => 0u,
            PadOnLeft | PadOnRight => t.desc.name.as_slice().len(),
        }
    }
    match tests.iter().max_by(|t| len_if_padded(*t)) {
        Some(t) => {
            let n = t.desc.name.as_slice();
            st.max_name_len = n.len();
        },
        None => {}
    }
    try!(run_tests(opts, tests, |x| callback(&x, &mut st)));
    return st.write_run_finish(&None, None);
}
#[test]
fn should_sort_failures_before_printing_them() {
    let test_a = TestDesc {
        name: StaticTestName("a"),
        ignore: false,
        should_fail: ShouldFail::No
    };

    let test_b = TestDesc {
        name: StaticTestName("b"),
        ignore: false,
        should_fail: ShouldFail::No
    };

    let mut st = ConsoleTestState {
        log_out: None,
        out: Raw(Vec::new()),
        use_color: false,
        show_boxplot: false,
        boxplot_width: 0,
        show_all_stats: false,
        total: 0u,
        passed: 0u,
        failed: 0u,
        ignored: 0u,
        measured: 0u,
        max_name_len: 10u,
        metrics: MetricMap::new(),
        failures: vec!((test_b, Vec::new()), (test_a, Vec::new()))
    };

    st.write_failures().unwrap();
    let s = match st.out {
        Raw(ref m) => String::from_utf8_lossy(&m[]),
        Pretty(_) => unreachable!()
    };

    let apos = s.find_str("a").unwrap();
    let bpos = s.find_str("b").unwrap();
    assert!(apos < bpos);
}
fn use_color(opts: &TestOpts) -> bool {
    match opts.color {
        AutoColor => get_concurrency() == 1 && io::stdout().get_ref().isatty(),
        AlwaysColor => true,
        NeverColor => false,
    }
}
#[derive(Clone)]
enum TestEvent {
    TeFiltered(Vec<TestDesc>),
    TeWait(TestDesc, NamePadding),
    TeResult(TestDesc, TestResult, Vec<u8>),
}

pub type MonitorMsg = (TestDesc, TestResult, Vec<u8>);
fn run_tests<F>(opts: &TestOpts,
                tests: Vec<TestDescAndFn>,
                mut callback: F) -> io::IoResult<()> where
    F: FnMut(TestEvent) -> io::IoResult<()>,
{
    let filtered_tests = filter_tests(opts, tests);
    let filtered_descs = filtered_tests.iter()
                                       .map(|t| t.desc.clone())
                                       .collect();

    try!(callback(TeFiltered(filtered_descs)));

    let (filtered_tests, filtered_benchs_and_metrics): (Vec<_>, _) =
        filtered_tests.into_iter().partition(|e| {
            match e.testfn {
                StaticTestFn(_) | DynTestFn(_) => true,
                _ => false
            }
        });

    // It's tempting to just spawn all the tests at once, but since we have
    // many tests that run in other processes we would be making a big mess.
    let concurrency = get_concurrency();

    let mut remaining = filtered_tests;
    remaining.reverse();
    let mut pending = 0;

    let (tx, rx) = channel::<MonitorMsg>();

    while pending > 0 || !remaining.is_empty() {
        while pending < concurrency && !remaining.is_empty() {
            let test = remaining.pop().unwrap();
            if concurrency == 1 {
                // We are doing one test at a time so we can print the name
                // of the test before we run it. Useful for debugging tests
                // that hang forever.
                try!(callback(TeWait(test.desc.clone(), test.testfn.padding())));
            }
            run_test(opts, !opts.run_tests, test, tx.clone());
            pending += 1;
        }

        let (desc, result, stdout) = rx.recv().unwrap();
        if concurrency != 1 {
            try!(callback(TeWait(desc.clone(), PadNone)));
        }
        try!(callback(TeResult(desc, result, stdout)));
        pending -= 1;
    }

    // All benchmarks run at the end, in serial.
    // (this includes metric fns)
    for b in filtered_benchs_and_metrics.into_iter() {
        try!(callback(TeWait(b.desc.clone(), b.testfn.padding())));
        run_test(opts, !opts.run_benchmarks, b, tx.clone());
        let (test, result, stdout) = rx.recv().unwrap();
        try!(callback(TeResult(test, result, stdout)));
    }
    Ok(())
}
fn get_concurrency() -> uint {
    use std::rt;
    match os::getenv("RUST_TEST_TASKS") {
        Some(s) => {
            let opt_n: Option<uint> = FromStr::from_str(s.as_slice());
            match opt_n {
                Some(n) if n > 0 => n,
                _ => panic!("RUST_TEST_TASKS is `{}`, should be a positive integer.", s)
            }
        }
        None => {
            rt::default_sched_threads()
        }
    }
}
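
// For example, forcing the serial path above (useful when a test hangs, and
// also a precondition for colored output under `AutoColor`; binary name
// illustrative):
//
//     RUST_TEST_TASKS=1 ./my-test-binary --nocapture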
pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
    let mut filtered = tests;

    // Remove tests that don't match the test filter
    filtered = match opts.filter {
        None => filtered,
        Some(ref re) => {
            filtered.into_iter()
                .filter(|test| re.is_match(test.desc.name.as_slice())).collect()
        }
    };

    // Maybe pull out the ignored tests and unignore them
    filtered = if !opts.run_ignored {
        filtered
    } else {
        fn filter(test: TestDescAndFn) -> Option<TestDescAndFn> {
            if test.desc.ignore {
                let TestDescAndFn {desc, testfn} = test;
                Some(TestDescAndFn {
                    desc: TestDesc {ignore: false, ..desc},
                    testfn: testfn
                })
            } else {
                None
            }
        }
        filtered.into_iter().filter_map(|x| filter(x)).collect()
    };

    // Sort the tests alphabetically
    filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));

    filtered
}
pub fn run_test(opts: &TestOpts,
                force_ignore: bool,
                test: TestDescAndFn,
                monitor_ch: Sender<MonitorMsg>) {

    let TestDescAndFn {desc, testfn} = test;

    if force_ignore || desc.ignore {
        monitor_ch.send((desc, TrIgnored, Vec::new())).unwrap();
        return;
    }

    fn run_test_inner(desc: TestDesc,
                      monitor_ch: Sender<MonitorMsg>,
                      nocapture: bool,
                      testfn: Thunk) {
        Thread::spawn(move || {
            let (tx, rx) = channel();
            let mut reader = ChanReader::new(rx);
            let stdout = ChanWriter::new(tx.clone());
            let stderr = ChanWriter::new(tx);
            let mut cfg = thread::Builder::new().name(match desc.name {
                DynTestName(ref name) => name.clone().to_string(),
                StaticTestName(name) => name.to_string(),
            });
            if nocapture {
                drop((stdout, stderr));
            } else {
                cfg = cfg.stdout(box stdout as Box<Writer + Send>);
                cfg = cfg.stderr(box stderr as Box<Writer + Send>);
            }

            let result_guard = cfg.scoped(move || { testfn.invoke(()) });
            let stdout = reader.read_to_end().unwrap().into_iter().collect();
            let test_result = calc_result(&desc, result_guard.join());
            monitor_ch.send((desc.clone(), test_result, stdout)).unwrap();
        });
    }

    match testfn {
        DynBenchFn(bencher) => {
            let bs = ::bench::benchmark(|harness| bencher.run(harness));
            monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
            return;
        }
        StaticBenchFn(benchfn) => {
            let bs = ::bench::benchmark(|harness| (benchfn.clone())(harness));
            monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
            return;
        }
        DynMetricFn(f) => {
            let mut mm = MetricMap::new();
            f.invoke(&mut mm);
            monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
            return;
        }
        StaticMetricFn(f) => {
            let mut mm = MetricMap::new();
            f(&mut mm);
            monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
            return;
        }
        DynTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture, f),
        StaticTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture,
                                          Thunk::new(move|| f()))
    }
}
fn calc_result(desc: &TestDesc, task_result: Result<(), Box<Any+Send>>) -> TestResult {
    match (&desc.should_fail, task_result) {
        (&ShouldFail::No, Ok(())) |
        (&ShouldFail::Yes(None), Err(_)) => TrOk,
        (&ShouldFail::Yes(Some(msg)), Err(ref err))
            if err.downcast_ref::<String>()
                  .map(|e| &**e)
                  .or_else(|| err.downcast_ref::<&'static str>().map(|e| *e))
                  .map(|e| e.contains(msg))
                  .unwrap_or(false) => TrOk,
        _ => TrFailed,
    }
}
impl MetricMap {

    pub fn new() -> MetricMap {
        MetricMap(BTreeMap::new())
    }
    /// Load MetricMap from a file.
    ///
    /// # Panics
    ///
    /// This function will panic if the path does not exist or the path does not
    /// contain a valid metric map.
    pub fn load(p: &Path) -> MetricMap {
        assert!(p.exists());
        let mut f = File::open(p).unwrap();
        let value = json::from_reader(&mut f as &mut io::Reader).unwrap();
        let mut decoder = json::Decoder::new(value);
        MetricMap(match Decodable::decode(&mut decoder) {
            Ok(t) => t,
            Err(e) => panic!("failure decoding JSON: {:?}", e)
        })
    }
    /// Write MetricMap to a file.
    pub fn save(&self, p: &Path) -> io::IoResult<()> {
        let mut file = try!(File::create(p));
        let MetricMap(ref map) = *self;
        write!(&mut file, "{}", json::as_json(map))
    }
    /// Compare against another MetricMap. Optionally compare all
    /// measurements in the maps using the provided `noise_pct` as a
    /// percentage of each value to consider noise. If `None`, each
    /// measurement's noise threshold is independently chosen as the
    /// maximum of that measurement's recorded noise quantity in either
    /// map.
    pub fn compare_to_old(&self, old: &MetricMap,
                          noise_pct: Option<f64>) -> MetricDiff {
        let mut diff : MetricDiff = BTreeMap::new();
        let MetricMap(ref selfmap) = *self;
        let MetricMap(ref old) = *old;
        for (k, vold) in old.iter() {
            let r = match selfmap.get(k) {
                None => MetricRemoved,
                Some(v) => {
                    let delta = v.value - vold.value;
                    let noise = match noise_pct {
                        None => vold.noise.abs().max(v.noise.abs()),
                        Some(pct) => vold.value * pct / 100.0
                    };
                    if delta.abs() <= noise {
                        LikelyNoise
                    } else {
                        let pct = delta.abs() / vold.value.max(f64::EPSILON) * 100.0;
                        if vold.noise < 0.0 {
                            // When 'noise' is negative, it means we want
                            // to see deltas that go up over time, and can
                            // only tolerate slight negative movement.
                            if delta < 0.0 {
                                Regression(pct)
                            } else {
                                Improvement(pct)
                            }
                        } else {
                            // When 'noise' is positive, it means we want
                            // to see deltas that go down over time, and
                            // can only tolerate slight positive movements.
                            if delta < 0.0 {
                                Improvement(pct)
                            } else {
                                Regression(pct)
                            }
                        }
                    }
                }
            };
            diff.insert((*k).clone(), r);
        }
        let MetricMap(ref map) = *self;
        for (k, _) in map.iter() {
            if !diff.contains_key(k) {
                diff.insert((*k).clone(), MetricAdded);
            }
        }
        diff
    }
    /// Insert a named `value` (+/- `noise`) metric into the map. The value
    /// must be non-negative. The `noise` indicates the uncertainty of the
    /// metric, which doubles as the "noise range" of acceptable
    /// pairwise-regressions on this named value, when comparing from one
    /// metric to the next using `compare_to_old`.
    ///
    /// If `noise` is positive, then it means this metric is of a value
    /// you want to see grow smaller, so a change larger than `noise` in the
    /// positive direction represents a regression.
    ///
    /// If `noise` is negative, then it means this metric is of a value
    /// you want to see grow larger, so a change larger than `noise` in the
    /// negative direction represents a regression.
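    ///
    /// For instance (an illustrative sketch; metric names are arbitrary):
    ///
    /// ```ignore
    /// let mut mm = MetricMap::new();
    /// // Time-like metric: smaller is better, so noise is positive.
    /// mm.insert_metric("ns_per_iter", 1200.0, 50.0);
    /// // Throughput-like metric: larger is better, so noise is negative.
    /// mm.insert_metric("mb_per_s", 80.0, -5.0);
    /// ```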
    pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
        let m = Metric {
            value: value,
            noise: noise
        };
        let MetricMap(ref mut map) = *self;
        map.insert(name.to_string(), m);
    }
1199 /// Attempt to "ratchet" an external metric file. This involves loading
1200 /// metrics from a metric file (if it exists), comparing against
1201 /// the metrics in `self` using `compare_to_old`, and rewriting the
1202 /// file to contain the metrics in `self` if none of the
1203 /// `MetricChange`s are `Regression`. Returns the diff as well
1204 /// as a boolean indicating whether the ratchet succeeded.
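    ///
    /// An illustrative sketch (path and noise tolerance are arbitrary):
    ///
    /// ```ignore
    /// // Compare current metrics against `metrics.json`, allowing 5% noise,
    /// // persisting them only if nothing regressed.
    /// let (diff, ok) = mm.ratchet(&Path::new("metrics.json"), Some(5.0));
    /// ```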
    pub fn ratchet(&self, p: &Path, pct: Option<f64>) -> (MetricDiff, bool) {
        let old = if p.exists() {
            MetricMap::load(p)
        } else {
            MetricMap::new()
        };

        let diff : MetricDiff = self.compare_to_old(&old, pct);
        let ok = diff.iter().all(|(_, v)| {
            match *v {
                Regression(_) => false,
                _ => true
            }
        });

        if ok {
            self.save(p).unwrap();
        }
        return (diff, ok);
    }
}
/// A function that is opaque to the optimizer, to allow benchmarks to
/// pretend to use outputs to assist in avoiding dead-code
/// elimination.
///
/// This function is a no-op, and does not even read from `dummy`.
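///
/// A sketch of typical use inside a benchmark closure (`v` is assumed to be
/// in scope):
///
/// ```ignore
/// b.iter(|| {
///     // keep the otherwise-unused sum from being optimized away
///     black_box(v.iter().fold(0u64, |a, &x| a + x))
/// });
/// ```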
pub fn black_box<T>(dummy: T) -> T {
    // we need to "use" the argument in some way LLVM can't
    // introspect.
    unsafe {asm!("" : : "r"(&dummy))}
    dummy
}
impl Bencher {
    /// Callback for benchmark functions to run in their body.
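    ///
    /// Only the closure passed to `iter` is timed: code before the call runs
    /// once per sample batch, while the closure runs `iterations` times. A
    /// sketch (`build_data` is illustrative):
    ///
    /// ```ignore
    /// let data = build_data();        // not timed
    /// b.iter(|| data.iter().max());   // timed
    /// ```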
    pub fn iter<T, F>(&mut self, mut inner: F) where F: FnMut() -> T {
        self.dur = Duration::span(|| {
            let k = self.iterations;
            for _ in range(0u64, k) {
                black_box(inner());
            }
        });
    }

    pub fn ns_elapsed(&mut self) -> u64 {
        self.dur.num_nanoseconds().unwrap() as u64
    }

    pub fn ns_per_iter(&mut self) -> u64 {
        if self.iterations == 0 {
            0
        } else {
            self.ns_elapsed() / cmp::max(self.iterations, 1)
        }
    }

    pub fn bench_n<F>(&mut self, n: u64, f: F) where F: FnOnce(&mut Bencher) {
        self.iterations = n;
        f(self);
    }
    // This is a more statistics-driven benchmark algorithm
    pub fn auto_bench<F>(&mut self, mut f: F) -> stats::Summary<f64> where F: FnMut(&mut Bencher) {
        // Initial bench run to get ballpark figure.
        let mut n = 1_u64;
        self.bench_n(n, |x| f(x));

        // Try to estimate iter count for 1ms falling back to 1m
        // iterations if first run took < 1ns.
        if self.ns_per_iter() == 0 {
            n = 1_000_000;
        } else {
            n = 1_000_000 / cmp::max(self.ns_per_iter(), 1);
        }
        // if the first run took more than 1ms we don't want to just
        // be left doing 0 iterations on every loop. The unfortunate
        // side effect of not being able to do as many runs is
        // automatically handled by the statistical analysis below
        // (i.e. larger error bars).
        if n == 0 { n = 1; }

        let mut total_run = Duration::nanoseconds(0);
        let samples : &mut [f64] = &mut [0.0_f64; 50];
        loop {
            let mut summ = None;
            let mut summ5 = None;

            let loop_run = Duration::span(|| {

                for p in samples.iter_mut() {
                    self.bench_n(n, |x| f(x));
                    *p = self.ns_per_iter() as f64;
                }

                stats::winsorize(samples, 5.0);
                summ = Some(stats::Summary::new(samples));

                for p in samples.iter_mut() {
                    self.bench_n(5 * n, |x| f(x));
                    *p = self.ns_per_iter() as f64;
                }

                stats::winsorize(samples, 5.0);
                summ5 = Some(stats::Summary::new(samples));
            });
            let summ = summ.unwrap();
            let summ5 = summ5.unwrap();

            // If we've run for 100ms and seem to have converged to a
            // stable median.
            if loop_run.num_milliseconds() > 100 &&
                summ.median_abs_dev_pct < 1.0 &&
                summ.median - summ5.median < summ5.median_abs_dev {
                return summ5;
            }

            total_run = total_run + loop_run;
            // Longest we ever run for is 3s.
            if total_run.num_seconds() > 3 {
                return summ5;
            }

            n *= 2;
        }
    }
}
pub mod bench {
    //! Benchmarking module.
    use std::cmp;
    use std::time::Duration;
    use super::{Bencher, BenchSamples};

    pub fn benchmark<F>(f: F) -> BenchSamples where F: FnMut(&mut Bencher) {
        let mut bs = Bencher {
            iterations: 0,
            dur: Duration::nanoseconds(0),
            bytes: 0
        };

        let ns_iter_summ = bs.auto_bench(f);

        let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
        let iter_s = 1_000_000_000 / ns_iter;
        let mb_s = (bs.bytes * iter_s) / 1_000_000;

        BenchSamples {
            ns_iter_summ: ns_iter_summ,
            mb_s: mb_s as uint
        }
    }
}
#[cfg(test)]
mod tests {
    use test::{TrFailed, TrIgnored, TrOk, filter_tests, parse_opts,
               TestDesc, TestDescAndFn, TestOpts, run_test,
               Metric, MetricMap, MetricAdded, MetricRemoved,
               Improvement, Regression, LikelyNoise,
               StaticTestName, DynTestName, DynTestFn, ShouldFail};
    use std::io::TempDir;
    use std::thunk::Thunk;
    use std::sync::mpsc::channel;
    #[test]
    pub fn do_not_run_ignored_tests() {
        fn f() { panic!(); }
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: true,
                should_fail: ShouldFail::No,
            },
            testfn: DynTestFn(Thunk::new(move|| f())),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res != TrOk);
    }
    #[test]
    pub fn ignored_tests_result_in_ignored() {
        fn f() { }
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: true,
                should_fail: ShouldFail::No,
            },
            testfn: DynTestFn(Thunk::new(move|| f())),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res == TrIgnored);
    }
    #[test]
    fn test_should_fail() {
        fn f() { panic!(); }
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: false,
                should_fail: ShouldFail::Yes(None)
            },
            testfn: DynTestFn(Thunk::new(move|| f())),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res == TrOk);
    }
    #[test]
    fn test_should_fail_good_message() {
        fn f() { panic!("an error message"); }
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: false,
                should_fail: ShouldFail::Yes(Some("error message"))
            },
            testfn: DynTestFn(Thunk::new(move|| f())),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res == TrOk);
    }
    #[test]
    fn test_should_fail_bad_message() {
        fn f() { panic!("an error message"); }
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: false,
                should_fail: ShouldFail::Yes(Some("foobar"))
            },
            testfn: DynTestFn(Thunk::new(move|| f())),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res == TrFailed);
    }
    #[test]
    fn test_should_fail_but_succeeds() {
        fn f() { }
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: false,
                should_fail: ShouldFail::Yes(None)
            },
            testfn: DynTestFn(Thunk::new(move|| f())),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res == TrFailed);
    }
    #[test]
    fn first_free_arg_should_be_a_filter() {
        let args = vec!("progname".to_string(), "some_regex_filter".to_string());
        let opts = match parse_opts(args.as_slice()) {
            Some(Ok(o)) => o,
            _ => panic!("Malformed arg in first_free_arg_should_be_a_filter")
        };
        assert!(opts.filter.expect("should've found filter").is_match("some_regex_filter"))
    }
    #[test]
    fn parse_ignored_flag() {
        let args = vec!("progname".to_string(),
                        "filter".to_string(),
                        "--ignored".to_string());
        let opts = match parse_opts(args.as_slice()) {
            Some(Ok(o)) => o,
            _ => panic!("Malformed arg in parse_ignored_flag")
        };
        assert!((opts.run_ignored));
    }
    #[test]
    pub fn filter_for_ignored_option() {
        // When we run ignored tests the test filter should filter out all the
        // unignored tests and flip the ignore flag on the rest to false

        let mut opts = TestOpts::new();
        opts.run_tests = true;
        opts.run_ignored = true;

        let tests = vec!(
            TestDescAndFn {
                desc: TestDesc {
                    name: StaticTestName("1"),
                    ignore: true,
                    should_fail: ShouldFail::No,
                },
                testfn: DynTestFn(Thunk::new(move|| {})),
            },
            TestDescAndFn {
                desc: TestDesc {
                    name: StaticTestName("2"),
                    ignore: false,
                    should_fail: ShouldFail::No,
                },
                testfn: DynTestFn(Thunk::new(move|| {})),
            });
        let filtered = filter_tests(&opts, tests);

        assert_eq!(filtered.len(), 1);
        assert_eq!(filtered[0].desc.name.to_string(),
                   "1");
        assert!(filtered[0].desc.ignore == false);
    }
    #[test]
    pub fn sort_tests() {
        let mut opts = TestOpts::new();
        opts.run_tests = true;

        let names =
            vec!("sha1::test".to_string(),
                 "int::test_to_str".to_string(),
                 "int::test_pow".to_string(),
                 "test::do_not_run_ignored_tests".to_string(),
                 "test::ignored_tests_result_in_ignored".to_string(),
                 "test::first_free_arg_should_be_a_filter".to_string(),
                 "test::parse_ignored_flag".to_string(),
                 "test::filter_for_ignored_option".to_string(),
                 "test::sort_tests".to_string());
        let tests =
        {
            fn testfn() { }
            let mut tests = Vec::new();
            for name in names.iter() {
                let test = TestDescAndFn {
                    desc: TestDesc {
                        name: DynTestName((*name).clone()),
                        ignore: false,
                        should_fail: ShouldFail::No,
                    },
                    testfn: DynTestFn(Thunk::new(testfn)),
                };
                tests.push(test);
            }
            tests
        };
        let filtered = filter_tests(&opts, tests);

        let expected =
            vec!("int::test_pow".to_string(),
                 "int::test_to_str".to_string(),
                 "sha1::test".to_string(),
                 "test::do_not_run_ignored_tests".to_string(),
                 "test::filter_for_ignored_option".to_string(),
                 "test::first_free_arg_should_be_a_filter".to_string(),
                 "test::ignored_tests_result_in_ignored".to_string(),
                 "test::parse_ignored_flag".to_string(),
                 "test::sort_tests".to_string());

        for (a, b) in expected.iter().zip(filtered.iter()) {
            assert!(*a == b.desc.name.to_string());
        }
    }
    #[test]
    pub fn filter_tests_regex() {
        let mut opts = TestOpts::new();
        opts.filter = Some(::regex::Regex::new("a.*b.+c").unwrap());

        let mut names = ["yes::abXc", "yes::aXXXbXXXXc",
                         "no::XYZ", "no::abc"];
        names.sort();

        fn test_fn() {}
        let tests = names.iter().map(|name| {
            TestDescAndFn {
                desc: TestDesc {
                    name: DynTestName(name.to_string()),
                    ignore: false,
                    should_fail: ShouldFail::No,
                },
                testfn: DynTestFn(Thunk::new(test_fn))
            }
        }).collect();
        let filtered = filter_tests(&opts, tests);

        let expected: Vec<&str> =
            names.iter().map(|&s| s).filter(|name| name.starts_with("yes")).collect();

        assert_eq!(filtered.len(), expected.len());
        for (test, expected_name) in filtered.iter().zip(expected.iter()) {
            assert_eq!(test.desc.name.as_slice(), *expected_name);
        }
    }
    #[test]
    pub fn test_metricmap_compare() {
        let mut m1 = MetricMap::new();
        let mut m2 = MetricMap::new();
        m1.insert_metric("in-both-noise", 1000.0, 200.0);
        m2.insert_metric("in-both-noise", 1100.0, 200.0);

        m1.insert_metric("in-first-noise", 1000.0, 2.0);
        m2.insert_metric("in-second-noise", 1000.0, 2.0);

        m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
        m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);

        m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
        m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);

        m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
        m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);

        m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
        m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);

        let diff1 = m2.compare_to_old(&m1, None);

        assert_eq!(*(diff1.get(&"in-both-noise".to_string()).unwrap()), LikelyNoise);
        assert_eq!(*(diff1.get(&"in-first-noise".to_string()).unwrap()), MetricRemoved);
        assert_eq!(*(diff1.get(&"in-second-noise".to_string()).unwrap()), MetricAdded);
        assert_eq!(*(diff1.get(&"in-both-want-downwards-but-regressed".to_string()).unwrap()),
                   Regression(100.0));
        assert_eq!(*(diff1.get(&"in-both-want-downwards-and-improved".to_string()).unwrap()),
                   Improvement(50.0));
        assert_eq!(*(diff1.get(&"in-both-want-upwards-but-regressed".to_string()).unwrap()),
                   Regression(50.0));
        assert_eq!(*(diff1.get(&"in-both-want-upwards-and-improved".to_string()).unwrap()),
                   Improvement(100.0));
        assert_eq!(diff1.len(), 7);

        let diff2 = m2.compare_to_old(&m1, Some(200.0));

        assert_eq!(*(diff2.get(&"in-both-noise".to_string()).unwrap()), LikelyNoise);
        assert_eq!(*(diff2.get(&"in-first-noise".to_string()).unwrap()), MetricRemoved);
        assert_eq!(*(diff2.get(&"in-second-noise".to_string()).unwrap()), MetricAdded);
        assert_eq!(*(diff2.get(&"in-both-want-downwards-but-regressed".to_string()).unwrap()),
                   LikelyNoise);
        assert_eq!(*(diff2.get(&"in-both-want-downwards-and-improved".to_string()).unwrap()),
                   LikelyNoise);
        assert_eq!(*(diff2.get(&"in-both-want-upwards-but-regressed".to_string()).unwrap()),
                   LikelyNoise);
        assert_eq!(*(diff2.get(&"in-both-want-upwards-and-improved".to_string()).unwrap()),
                   LikelyNoise);
        assert_eq!(diff2.len(), 7);
    }
    #[test]
    pub fn ratchet_test() {

        let dpth = TempDir::new("test-ratchet").ok().expect("missing test for ratchet");
        let pth = dpth.path().join("ratchet.json");

        let mut m1 = MetricMap::new();
        m1.insert_metric("runtime", 1000.0, 2.0);
        m1.insert_metric("throughput", 50.0, 2.0);

        let mut m2 = MetricMap::new();
        m2.insert_metric("runtime", 1100.0, 2.0);
        m2.insert_metric("throughput", 50.0, 2.0);

        m1.save(&pth).unwrap();

        // Ask for a ratchet that should fail to advance.
        let (diff1, ok1) = m2.ratchet(&pth, None);
        assert_eq!(ok1, false);
        assert_eq!(diff1.len(), 2);
        assert_eq!(*(diff1.get(&"runtime".to_string()).unwrap()), Regression(10.0));
        assert_eq!(*(diff1.get(&"throughput".to_string()).unwrap()), LikelyNoise);

        // Check that it was not rewritten.
        let m3 = MetricMap::load(&pth);
        let MetricMap(m3) = m3;
        assert_eq!(m3.len(), 2);
        assert_eq!(*(m3.get(&"runtime".to_string()).unwrap()), Metric::new(1000.0, 2.0));
        assert_eq!(*(m3.get(&"throughput".to_string()).unwrap()), Metric::new(50.0, 2.0));

        // Ask for a ratchet with an explicit noise-percentage override,
        // that should advance.
        let (diff2, ok2) = m2.ratchet(&pth, Some(10.0));
        assert_eq!(ok2, true);
        assert_eq!(diff2.len(), 2);
        assert_eq!(*(diff2.get(&"runtime".to_string()).unwrap()), LikelyNoise);
        assert_eq!(*(diff2.get(&"throughput".to_string()).unwrap()), LikelyNoise);

        // Check that it was rewritten.
        let m4 = MetricMap::load(&pth);
        let MetricMap(m4) = m4;
        assert_eq!(m4.len(), 2);
        assert_eq!(*(m4.get(&"runtime".to_string()).unwrap()), Metric::new(1100.0, 2.0));
        assert_eq!(*(m4.get(&"throughput".to_string()).unwrap()), Metric::new(50.0, 2.0));
    }
}