// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Support code for rustc's built-in unit-test and micro-benchmarking
//! library.
//!
//! Almost all user code will only be interested in `Bencher` and
//! `black_box`. All other interactions (such as writing tests and
//! benchmarks themselves) should be done via the `#[test]` and
//! `#[bench]` attributes.
//!
//! See the [Testing Guide](../guide-testing.html) for more details.
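//!
//! # Example
//!
//! An illustrative sketch (the function names are hypothetical) of the
//! kind of code this crate supports, as it would appear in a crate
//! compiled with `--test`:
//!
//! ```ignore
//! extern crate test;
//!
//! use test::{Bencher, black_box};
//!
//! #[test]
//! fn arithmetic_works() {
//!     assert_eq!(2u + 2, 4);
//! }
//!
//! #[bench]
//! fn bench_sum(b: &mut Bencher) {
//!     b.iter(|| {
//!         let mut sum = 0u64;
//!         for i in range(0u64, 1000) { sum += i; }
//!         black_box(sum)
//!     });
//! }
//! ```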
// Currently, not much of this is meant for users. It is intended to
// support the simplest interface possible for representing and
// running tests while providing a base that other test frameworks may
// build on.
#![crate_name = "test"]
#![crate_type = "rlib"]
#![crate_type = "dylib"]
#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
       html_favicon_url = "http://www.rust-lang.org/favicon.ico",
       html_root_url = "http://doc.rust-lang.org/nightly/")]
#![allow(unknown_features)]
#![feature(asm, slicing_syntax)]
#![feature(box_syntax)]
#![feature(int_uint)]
extern crate getopts;
extern crate regex;
extern crate serialize;
extern crate "serialize" as rustc_serialize;
extern crate term;
pub use self::TestFn::*;
pub use self::MetricChange::*;
pub use self::ColorConfig::*;
pub use self::TestResult::*;
pub use self::TestName::*;
use self::TestEvent::*;
use self::NamePadding::*;
use self::OutputLocation::*;
use getopts::{OptGroup, optflag, optopt};
use regex::Regex;
use serialize::{json, Decodable, Encodable};
use term::color::{Color, RED, YELLOW, GREEN, CYAN};

use std::any::Any;
use std::cmp;
use std::collections::BTreeMap;
use std::f64;
use std::fmt;
use std::io::fs::PathExtensions;
use std::io::stdio::StdWriter;
use std::io::{File, ChanReader, ChanWriter};
use std::io;
use std::iter::repeat;
use std::num::{Float, Int};
use std::os;
use std::str::FromStr;
use std::sync::mpsc::{channel, Sender};
use std::thread::{self, Thread};
use std::thunk::{Thunk, Invoke};
use std::time::Duration;
// to be used by rustc to compile tests in libtest
pub mod test {
    pub use {Bencher, TestName, TestResult, TestDesc,
             TestDescAndFn, TestOpts, TrFailed, TrIgnored, TrOk,
             Metric, MetricMap, MetricAdded, MetricRemoved,
             MetricChange, Improvement, Regression, LikelyNoise,
             StaticTestFn, StaticTestName, DynTestName, DynTestFn,
             run_test, test_main, test_main_static, filter_tests,
             parse_opts, StaticBenchFn, ShouldFail};
}

pub mod stats;
// The name of a test. By convention this follows the rules for rust
// paths; i.e. it should be a series of identifiers separated by double
// colons. This way if some test runner wants to arrange the tests
// hierarchically it may.
#[derive(Clone, PartialEq, Eq, Hash, Show)]
pub enum TestName {
    StaticTestName(&'static str),
    DynTestName(String)
}
impl TestName {
    fn as_slice<'a>(&'a self) -> &'a str {
        match *self {
            StaticTestName(s) => s,
            DynTestName(ref s) => s.as_slice()
        }
    }
}
impl fmt::Display for TestName {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Display::fmt(self.as_slice(), f)
    }
}
#[derive(Clone, Copy)]
enum NamePadding {
    PadNone,
    PadOnLeft,
    PadOnRight,
}

impl TestDesc {
    fn padded_name(&self, column_count: uint, align: NamePadding) -> String {
        let mut name = String::from_str(self.name.as_slice());
        let fill = column_count.saturating_sub(name.len());
        let mut pad = repeat(" ").take(fill).collect::<String>();
        match align {
            PadNone => name,
            PadOnLeft => {
                pad.push_str(name.as_slice());
                pad
            }
            PadOnRight => {
                name.push_str(pad.as_slice());
                name
            }
        }
    }
}
/// Represents a benchmark function.
pub trait TDynBenchFn {
    fn run(&self, harness: &mut Bencher);
}
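// For illustration only: a dynamic benchmark can be registered by
// implementing this trait; the type name below is hypothetical and not
// part of the crate.
//
//     struct SumBench;
//
//     impl TDynBenchFn for SumBench {
//         fn run(&self, harness: &mut Bencher) {
//             harness.iter(|| black_box(2u + 2));
//         }
//     }
//
// A `DynBenchFn(box SumBench)` can then be placed in a `TestDescAndFn`.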
// A function that runs a test. If the function returns successfully,
// the test succeeds; if the function panics then the test fails. We
// may need to come up with a more clever definition of test in order
// to support isolation of tests into tasks.
pub enum TestFn {
    StaticTestFn(fn()),
    StaticBenchFn(fn(&mut Bencher)),
    StaticMetricFn(fn(&mut MetricMap)),
    DynTestFn(Thunk),
    DynMetricFn(Box<for<'a> Invoke<&'a mut MetricMap>+'static>),
    DynBenchFn(Box<TDynBenchFn+'static>)
}
impl TestFn {
    fn padding(&self) -> NamePadding {
        match self {
            &StaticTestFn(..) => PadNone,
            &StaticBenchFn(..) => PadOnRight,
            &StaticMetricFn(..) => PadOnRight,
            &DynTestFn(..) => PadNone,
            &DynMetricFn(..) => PadOnRight,
            &DynBenchFn(..) => PadOnRight,
        }
    }
}
impl fmt::Debug for TestFn {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str(match *self {
            StaticTestFn(..) => "StaticTestFn(..)",
            StaticBenchFn(..) => "StaticBenchFn(..)",
            StaticMetricFn(..) => "StaticMetricFn(..)",
            DynTestFn(..) => "DynTestFn(..)",
            DynMetricFn(..) => "DynMetricFn(..)",
            DynBenchFn(..) => "DynBenchFn(..)"
        })
    }
}
/// Manager of the benchmarking runs.
///
/// This is fed into functions marked with `#[bench]` to allow for
/// set-up & tear-down before running a piece of code repeatedly via a
/// call to `iter`.
#[derive(Copy)]
pub struct Bencher {
    iterations: u64,
    dur: Duration,
    pub bytes: u64,
}
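// An illustrative `#[bench]` body (names hypothetical): setting `bytes`
// before `iter` runs makes the runner report MB/s throughput alongside
// ns/iter.
//
//     #[bench]
//     fn bench_copy(b: &mut Bencher) {
//         let src = [0u8; 1024];
//         b.bytes = src.len() as u64;
//         b.iter(|| black_box(src.to_vec()));
//     }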
/// Whether a test is expected to fail, optionally with a message the
/// panic must contain.
#[derive(Copy, Clone, Show, PartialEq, Eq, Hash)]
pub enum ShouldFail {
    No,
    Yes(Option<&'static str>)
}
// The definition of a single test. A test runner will run a list of
// these.
#[derive(Clone, Show, PartialEq, Eq, Hash)]
pub struct TestDesc {
    pub name: TestName,
    pub ignore: bool,
    pub should_fail: ShouldFail,
}

unsafe impl Send for TestDesc {}

pub struct TestDescAndFn {
    pub desc: TestDesc,
    pub testfn: TestFn,
}
#[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Show, Copy)]
pub struct Metric {
    value: f64,
    noise: f64
}

impl Metric {
    pub fn new(value: f64, noise: f64) -> Metric {
        Metric {value: value, noise: noise}
    }
}
#[derive(PartialEq)]
pub struct MetricMap(BTreeMap<String,Metric>);

impl Clone for MetricMap {
    fn clone(&self) -> MetricMap {
        let MetricMap(ref map) = *self;
        MetricMap(map.clone())
    }
}
/// Analysis of a single change in metric
#[derive(Copy, PartialEq, Show)]
pub enum MetricChange {
    LikelyNoise,
    MetricAdded,
    MetricRemoved,
    Improvement(f64),
    Regression(f64)
}

pub type MetricDiff = BTreeMap<String,MetricChange>;
// The default console test runner. It accepts the command line
// arguments and a vector of test_descs.
pub fn test_main(args: &[String], tests: Vec<TestDescAndFn> ) {
    let opts =
        match parse_opts(args) {
            Some(Ok(o)) => o,
            Some(Err(msg)) => panic!("{:?}", msg),
            None => return
        };
    match run_tests_console(&opts, tests) {
        Ok(true) => {}
        Ok(false) => panic!("Some tests failed"),
        Err(e) => panic!("io error when running tests: {:?}", e),
    }
}
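// A custom harness can drive the runner directly. A minimal sketch,
// with a hypothetical test name and a hand-built test vector:
//
//     fn smoke() { assert!(true); }
//
//     fn main() {
//         let args = std::os::args();
//         let tests = vec!(TestDescAndFn {
//             desc: TestDesc {
//                 name: StaticTestName("smoke"),
//                 ignore: false,
//                 should_fail: ShouldFail::No,
//             },
//             testfn: StaticTestFn(smoke),
//         });
//         test_main(args.as_slice(), tests);
//     }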
// A variant optimized for invocation with a static test vector.
// This will panic (intentionally) when fed any dynamic tests, because
// it is copying the static values out into a dynamic vector and cannot
// copy dynamic values. It is doing this because from this point on
// a Vec<TestDescAndFn> is used in order to effect ownership-transfer
// semantics into parallel test runners, which in turn requires an
// owned vector rather than a &[].
pub fn test_main_static(args: &[String], tests: &[TestDescAndFn]) {
    let owned_tests = tests.iter().map(|t| {
        match t.testfn {
            StaticTestFn(f) => TestDescAndFn { testfn: StaticTestFn(f), desc: t.desc.clone() },
            StaticBenchFn(f) => TestDescAndFn { testfn: StaticBenchFn(f), desc: t.desc.clone() },
            _ => panic!("non-static tests passed to test::test_main_static")
        }
    }).collect();
    test_main(args, owned_tests)
}
#[derive(Copy)]
pub enum ColorConfig {
    AutoColor,
    AlwaysColor,
    NeverColor,
}

pub struct TestOpts {
    pub filter: Option<Regex>,
    pub run_ignored: bool,
    pub run_tests: bool,
    pub run_benchmarks: bool,
    pub logfile: Option<Path>,
    pub nocapture: bool,
    pub color: ColorConfig,
}

impl TestOpts {
    #[cfg(test)]
    fn new() -> TestOpts {
        TestOpts {
            filter: None,
            run_ignored: false,
            run_tests: false,
            run_benchmarks: false,
            logfile: None,
            nocapture: false,
            color: AutoColor,
        }
    }
}

/// Result of parsing the options.
pub type OptRes = Result<TestOpts, String>;
fn optgroups() -> Vec<getopts::OptGroup> {
    vec!(getopts::optflag("", "ignored", "Run ignored tests"),
         getopts::optflag("", "test", "Run tests and not benchmarks"),
         getopts::optflag("", "bench", "Run benchmarks instead of tests"),
         getopts::optflag("h", "help", "Display this message (longer with --help)"),
         getopts::optopt("", "logfile", "Write logs to the specified file instead \
                                         of stdout", "PATH"),
         getopts::optflag("", "nocapture", "don't capture stdout/stderr of each \
                                            task, allow printing directly"),
         getopts::optopt("", "color", "Configure coloring of output:
            auto   = colorize if stdout is a tty and tests are run serially (default);
            always = always colorize output;
            never  = never colorize output;", "auto|always|never"))
}
fn usage(binary: &str) {
    let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
    println!(r#"{usage}

The FILTER regex is tested against the name of all tests to run, and
only those tests that match are run.

By default, all tests are run in parallel. This can be altered with the
RUST_TEST_TASKS environment variable when running tests (set it to 1).

All tests have their standard output and standard error captured by default.
This can be overridden with the --nocapture flag or the RUST_TEST_NOCAPTURE=1
environment variable. Logging is not captured by default.

Test Attributes:

    #[test]        - Indicates a function is a test to be run. This function
                     takes no arguments.
    #[bench]       - Indicates a function is a benchmark to be run. This
                     function takes one argument (test::Bencher).
    #[should_fail] - This function (also labeled with #[test]) will only pass if
                     the code causes a failure (an assertion failure or panic!)
                     A message may be provided, which the failure string must
                     contain: #[should_fail(expected = "foo")].
    #[ignore]      - When applied to a function which is already attributed as a
                     test, then the test runner will ignore these tests during
                     normal test runs. Running with --ignored will run these
                     tests."#,
             usage = getopts::usage(message.as_slice(),
                                    optgroups().as_slice()));
}
// Parses command line arguments into test options
pub fn parse_opts(args: &[String]) -> Option<OptRes> {
    let args_ = args.tail();
    let matches =
        match getopts::getopts(args_.as_slice(), optgroups().as_slice()) {
            Ok(m) => m,
            Err(f) => return Some(Err(f.to_string()))
        };

    if matches.opt_present("h") { usage(args[0].as_slice()); return None; }

    let filter = if matches.free.len() > 0 {
        let s = matches.free[0].as_slice();
        match Regex::new(s) {
            Ok(re) => Some(re),
            Err(e) => return Some(Err(format!("could not parse /{}/: {:?}", s, e)))
        }
    } else {
        None
    };

    let run_ignored = matches.opt_present("ignored");

    let logfile = matches.opt_str("logfile");
    let logfile = logfile.map(|s| Path::new(s));

    let run_benchmarks = matches.opt_present("bench");
    let run_tests = ! run_benchmarks ||
        matches.opt_present("test");

    let mut nocapture = matches.opt_present("nocapture");
    if !nocapture {
        nocapture = os::getenv("RUST_TEST_NOCAPTURE").is_some();
    }

    let color = match matches.opt_str("color").as_ref().map(|s| s.as_slice()) {
        Some("auto") | None => AutoColor,
        Some("always") => AlwaysColor,
        Some("never") => NeverColor,

        Some(v) => return Some(Err(format!("argument for --color must be \
                                            auto, always, or never (was {})",
                                           v))),
    };

    let test_opts = TestOpts {
        filter: filter,
        run_ignored: run_ignored,
        run_tests: run_tests,
        run_benchmarks: run_benchmarks,
        logfile: logfile,
        nocapture: nocapture,
        color: color,
    };

    Some(Ok(test_opts))
}
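// For example (argument values hypothetical), `./mytests --ignored web`
// parses into a `TestOpts` with `run_ignored: true` and a `web` filter:
//
//     let args = vec!("progname".to_string(),
//                     "--ignored".to_string(),
//                     "web".to_string());
//     match parse_opts(args.as_slice()) {
//         Some(Ok(opts)) => assert!(opts.run_ignored),
//         _ => panic!("expected a successful parse")
//     }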
#[derive(Clone, PartialEq)]
pub struct BenchSamples {
    ns_iter_summ: stats::Summary<f64>,
    mb_s: uint,
}

#[derive(Clone, PartialEq)]
pub enum TestResult {
    TrOk,
    TrFailed,
    TrIgnored,
    TrMetrics(MetricMap),
    TrBench(BenchSamples),
}

unsafe impl Send for TestResult {}
enum OutputLocation<T> {
    Pretty(Box<term::Terminal<term::WriterWrapper> + Send>),
    Raw(T),
}

struct ConsoleTestState<T> {
    log_out: Option<File>,
    out: OutputLocation<T>,
    use_color: bool,
    show_boxplot: bool,
    boxplot_width: uint,
    show_all_stats: bool,
    total: uint,
    passed: uint,
    failed: uint,
    ignored: uint,
    measured: uint,
    metrics: MetricMap,
    failures: Vec<(TestDesc, Vec<u8> )> ,
    max_name_len: uint, // number of columns to fill when aligning names
}
impl<T: Writer> ConsoleTestState<T> {
    pub fn new(opts: &TestOpts,
               _: Option<T>) -> io::IoResult<ConsoleTestState<StdWriter>> {
        let log_out = match opts.logfile {
            Some(ref path) => Some(try!(File::create(path))),
            None => None
        };
        let out = match term::stdout() {
            None => Raw(io::stdio::stdout_raw()),
            Some(t) => Pretty(t)
        };

        Ok(ConsoleTestState {
            out: out,
            log_out: log_out,
            use_color: use_color(opts),
            show_boxplot: false,
            boxplot_width: 50,
            show_all_stats: false,
            total: 0u,
            passed: 0u,
            failed: 0u,
            ignored: 0u,
            measured: 0u,
            metrics: MetricMap::new(),
            failures: Vec::new(),
            max_name_len: 0u,
        })
    }
    pub fn write_ok(&mut self) -> io::IoResult<()> {
        self.write_pretty("ok", term::color::GREEN)
    }

    pub fn write_failed(&mut self) -> io::IoResult<()> {
        self.write_pretty("FAILED", term::color::RED)
    }

    pub fn write_ignored(&mut self) -> io::IoResult<()> {
        self.write_pretty("ignored", term::color::YELLOW)
    }

    pub fn write_metric(&mut self) -> io::IoResult<()> {
        self.write_pretty("metric", term::color::CYAN)
    }

    pub fn write_bench(&mut self) -> io::IoResult<()> {
        self.write_pretty("bench", term::color::CYAN)
    }

    pub fn write_added(&mut self) -> io::IoResult<()> {
        self.write_pretty("added", term::color::GREEN)
    }

    pub fn write_improved(&mut self) -> io::IoResult<()> {
        self.write_pretty("improved", term::color::GREEN)
    }

    pub fn write_removed(&mut self) -> io::IoResult<()> {
        self.write_pretty("removed", term::color::YELLOW)
    }

    pub fn write_regressed(&mut self) -> io::IoResult<()> {
        self.write_pretty("regressed", term::color::RED)
    }

    pub fn write_pretty(&mut self,
                        word: &str,
                        color: term::color::Color) -> io::IoResult<()> {
        match self.out {
            Pretty(ref mut term) => {
                if self.use_color {
                    try!(term.fg(color));
                }
                try!(term.write(word.as_bytes()));
                if self.use_color {
                    try!(term.reset());
                }
                Ok(())
            }
            Raw(ref mut stdout) => stdout.write(word.as_bytes())
        }
    }

    pub fn write_plain(&mut self, s: &str) -> io::IoResult<()> {
        match self.out {
            Pretty(ref mut term) => term.write(s.as_bytes()),
            Raw(ref mut stdout) => stdout.write(s.as_bytes())
        }
    }

    pub fn write_run_start(&mut self, len: uint) -> io::IoResult<()> {
        self.total = len;
        let noun = if len != 1 { "tests" } else { "test" };
        self.write_plain(format!("\nrunning {} {}\n", len, noun).as_slice())
    }

    pub fn write_test_start(&mut self, test: &TestDesc,
                            align: NamePadding) -> io::IoResult<()> {
        let name = test.padded_name(self.max_name_len, align);
        self.write_plain(format!("test {} ... ", name).as_slice())
    }

    pub fn write_result(&mut self, result: &TestResult) -> io::IoResult<()> {
        try!(match *result {
            TrOk => self.write_ok(),
            TrFailed => self.write_failed(),
            TrIgnored => self.write_ignored(),
            TrMetrics(ref mm) => {
                try!(self.write_metric());
                self.write_plain(format!(": {}", fmt_metrics(mm)).as_slice())
            }
            TrBench(ref bs) => {
                try!(self.write_bench());

                if self.show_boxplot {
                    let mut wr = Vec::new();
                    try!(stats::write_boxplot(&mut wr, &bs.ns_iter_summ, self.boxplot_width));
                    let s = String::from_utf8(wr).unwrap();
                    try!(self.write_plain(format!(": {}", s).as_slice()));
                }

                if self.show_all_stats {
                    let mut wr = Vec::new();
                    try!(stats::write_5_number_summary(&mut wr, &bs.ns_iter_summ));
                    let s = String::from_utf8(wr).unwrap();
                    try!(self.write_plain(format!(": {}", s).as_slice()));
                } else {
                    try!(self.write_plain(format!(": {}",
                                                  fmt_bench_samples(bs)).as_slice()));
                }

                Ok(())
            }
        });
        self.write_plain("\n")
    }
    pub fn write_log(&mut self, test: &TestDesc,
                     result: &TestResult) -> io::IoResult<()> {
        match self.log_out {
            None => Ok(()),
            Some(ref mut o) => {
                let s = format!("{} {}\n", match *result {
                    TrOk => "ok".to_string(),
                    TrFailed => "failed".to_string(),
                    TrIgnored => "ignored".to_string(),
                    TrMetrics(ref mm) => fmt_metrics(mm),
                    TrBench(ref bs) => fmt_bench_samples(bs)
                }, test.name.as_slice());
                o.write(s.as_bytes())
            }
        }
    }
    pub fn write_failures(&mut self) -> io::IoResult<()> {
        try!(self.write_plain("\nfailures:\n"));
        let mut failures = Vec::new();
        let mut fail_out = String::new();
        for &(ref f, ref stdout) in self.failures.iter() {
            failures.push(f.name.to_string());
            if stdout.len() > 0 {
                fail_out.push_str(format!("---- {} stdout ----\n\t",
                                          f.name.as_slice()).as_slice());
                let output = String::from_utf8_lossy(stdout.as_slice());
                fail_out.push_str(output.as_slice());
                fail_out.push_str("\n");
            }
        }
        if fail_out.len() > 0 {
            try!(self.write_plain("\n"));
            try!(self.write_plain(fail_out.as_slice()));
        }

        try!(self.write_plain("\nfailures:\n"));
        failures.sort();
        for name in failures.iter() {
            try!(self.write_plain(format!("    {}\n",
                                          name.as_slice()).as_slice()));
        }
        Ok(())
    }
    pub fn write_metric_diff(&mut self, diff: &MetricDiff) -> io::IoResult<()> {
        let mut noise = 0u;
        let mut improved = 0u;
        let mut regressed = 0u;
        let mut added = 0u;
        let mut removed = 0u;

        for (k, v) in diff.iter() {
            match *v {
                LikelyNoise => noise += 1,
                MetricAdded => {
                    added += 1;
                    try!(self.write_added());
                    try!(self.write_plain(format!(": {}\n", *k).as_slice()));
                }
                MetricRemoved => {
                    removed += 1;
                    try!(self.write_removed());
                    try!(self.write_plain(format!(": {}\n", *k).as_slice()));
                }
                Improvement(pct) => {
                    improved += 1;
                    try!(self.write_plain(format!(": {} ", *k).as_slice()));
                    try!(self.write_improved());
                    try!(self.write_plain(format!(" by {:.2}%\n",
                                                  pct as f64).as_slice()));
                }
                Regression(pct) => {
                    regressed += 1;
                    try!(self.write_plain(format!(": {} ", *k).as_slice()));
                    try!(self.write_regressed());
                    try!(self.write_plain(format!(" by {:.2}%\n",
                                                  pct as f64).as_slice()));
                }
            }
        }
        try!(self.write_plain(format!("result of ratchet: {} metrics added, \
                                       {} removed, {} improved, {} regressed, \
                                       {} noise\n",
                                      added, removed, improved, regressed,
                                      noise).as_slice()));
        if regressed == 0u {
            try!(self.write_plain("updated ratchet file\n"));
        } else {
            try!(self.write_plain("left ratchet file untouched\n"));
        }
        Ok(())
    }
    pub fn write_run_finish(&mut self,
                            ratchet_metrics: &Option<Path>,
                            ratchet_pct: Option<f64>) -> io::IoResult<bool> {
        assert!(self.passed + self.failed + self.ignored + self.measured == self.total);

        let ratchet_success = match *ratchet_metrics {
            None => true,
            Some(ref pth) => {
                try!(self.write_plain(format!("\nusing metrics ratchet: {:?}\n",
                                              pth.display()).as_slice()));
                match ratchet_pct {
                    None => (),
                    Some(pct) =>
                        try!(self.write_plain(format!("with noise-tolerance \
                                                       forced to: {}%\n",
                                                      pct).as_slice()))
                }
                let (diff, ok) = self.metrics.ratchet(pth, ratchet_pct);
                try!(self.write_metric_diff(&diff));
                ok
            }
        };

        let test_success = self.failed == 0u;
        if !test_success {
            try!(self.write_failures());
        }

        let success = ratchet_success && test_success;

        try!(self.write_plain("\ntest result: "));
        if success {
            // There's no parallelism at this point so it's safe to use color
            try!(self.write_ok());
        } else {
            try!(self.write_failed());
        }
        let s = format!(". {} passed; {} failed; {} ignored; {} measured\n\n",
                        self.passed, self.failed, self.ignored, self.measured);
        try!(self.write_plain(s.as_slice()));
        return Ok(success);
    }
}
pub fn fmt_metrics(mm: &MetricMap) -> String {
    let MetricMap(ref mm) = *mm;
    let v : Vec<String> = mm.iter()
        .map(|(k,v)| format!("{}: {} (+/- {})", *k,
                             v.value as f64, v.noise as f64))
        .collect();
    v.connect(", ")
}
pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
    if bs.mb_s != 0 {
        format!("{:>9} ns/iter (+/- {}) = {} MB/s",
                bs.ns_iter_summ.median as uint,
                (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint,
                bs.mb_s)
    } else {
        format!("{:>9} ns/iter (+/- {})",
                bs.ns_iter_summ.median as uint,
                (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint)
    }
}
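// For example (values hypothetical), a benchmark with a median of 9240
// ns/iter, a max-min spread of 310 ns, and `bytes` set would render as:
//
//          9240 ns/iter (+/- 310) = 105 MB/s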
// A simple console test runner
pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn> ) -> io::IoResult<bool> {

    fn callback<T: Writer>(event: &TestEvent, st: &mut ConsoleTestState<T>) -> io::IoResult<()> {
        match (*event).clone() {
            TeFiltered(ref filtered_tests) => st.write_run_start(filtered_tests.len()),
            TeWait(ref test, padding) => st.write_test_start(test, padding),
            TeResult(test, result, stdout) => {
                try!(st.write_log(&test, &result));
                try!(st.write_result(&result));
                match result {
                    TrOk => st.passed += 1,
                    TrIgnored => st.ignored += 1,
                    TrMetrics(mm) => {
                        let tname = test.name.as_slice();
                        let MetricMap(mm) = mm;
                        for (k,v) in mm.iter() {
                            st.metrics
                              .insert_metric(format!("{}.{}",
                                                     tname,
                                                     k).as_slice(),
                                             v.value,
                                             v.noise);
                        }
                        st.measured += 1
                    }
                    TrBench(bs) => {
                        st.metrics.insert_metric(test.name.as_slice(),
                                                 bs.ns_iter_summ.median,
                                                 bs.ns_iter_summ.max - bs.ns_iter_summ.min);
                        st.measured += 1
                    }
                    TrFailed => {
                        st.failed += 1;
                        st.failures.push((test, stdout));
                    }
                }
                Ok(())
            }
        }
    }

    let mut st = try!(ConsoleTestState::new(opts, None::<StdWriter>));
    fn len_if_padded(t: &TestDescAndFn) -> uint {
        match t.testfn.padding() {
            PadNone => 0u,
            PadOnLeft | PadOnRight => t.desc.name.as_slice().len(),
        }
    }
    match tests.iter().max_by(|t|len_if_padded(*t)) {
        Some(t) => {
            let n = t.desc.name.as_slice();
            st.max_name_len = n.len();
        },
        None => {}
    }
    try!(run_tests(opts, tests, |x| callback(&x, &mut st)));
    return st.write_run_finish(&None, None);
}
#[test]
fn should_sort_failures_before_printing_them() {
    let test_a = TestDesc {
        name: StaticTestName("a"),
        ignore: false,
        should_fail: ShouldFail::No
    };

    let test_b = TestDesc {
        name: StaticTestName("b"),
        ignore: false,
        should_fail: ShouldFail::No
    };

    let mut st = ConsoleTestState {
        log_out: None,
        out: Raw(Vec::new()),
        use_color: false,
        show_boxplot: false,
        boxplot_width: 0,
        show_all_stats: false,
        total: 0u,
        passed: 0u,
        failed: 0u,
        ignored: 0u,
        measured: 0u,
        max_name_len: 10u,
        metrics: MetricMap::new(),
        failures: vec!((test_b, Vec::new()), (test_a, Vec::new()))
    };

    st.write_failures().unwrap();
    let s = match st.out {
        Raw(ref m) => String::from_utf8_lossy(&m[]),
        Pretty(_) => unreachable!()
    };

    let apos = s.find_str("a").unwrap();
    let bpos = s.find_str("b").unwrap();
    assert!(apos < bpos);
}
fn use_color(opts: &TestOpts) -> bool {
    match opts.color {
        AutoColor => get_concurrency() == 1 && io::stdout().get_ref().isatty(),
        AlwaysColor => true,
        NeverColor => false,
    }
}

#[derive(Clone)]
enum TestEvent {
    TeFiltered(Vec<TestDesc> ),
    TeWait(TestDesc, NamePadding),
    TeResult(TestDesc, TestResult, Vec<u8> ),
}

pub type MonitorMsg = (TestDesc, TestResult, Vec<u8> );
fn run_tests<F>(opts: &TestOpts,
                tests: Vec<TestDescAndFn> ,
                mut callback: F) -> io::IoResult<()> where
    F: FnMut(TestEvent) -> io::IoResult<()>,
{
    let filtered_tests = filter_tests(opts, tests);
    let filtered_descs = filtered_tests.iter()
                                       .map(|t| t.desc.clone())
                                       .collect();

    try!(callback(TeFiltered(filtered_descs)));

    let (filtered_tests, filtered_benchs_and_metrics): (Vec<_>, _) =
        filtered_tests.into_iter().partition(|e| {
            match e.testfn {
                StaticTestFn(_) | DynTestFn(_) => true,
                _ => false
            }
        });

    // It's tempting to just spawn all the tests at once, but since we have
    // many tests that run in other processes we would be making a big mess.
    let concurrency = get_concurrency();

    let mut remaining = filtered_tests;
    remaining.reverse();
    let mut pending = 0;

    let (tx, rx) = channel::<MonitorMsg>();

    while pending > 0 || !remaining.is_empty() {
        while pending < concurrency && !remaining.is_empty() {
            let test = remaining.pop().unwrap();
            if concurrency == 1 {
                // We are doing one test at a time so we can print the name
                // of the test before we run it. Useful for debugging tests
                // that hang forever.
                try!(callback(TeWait(test.desc.clone(), test.testfn.padding())));
            }
            run_test(opts, !opts.run_tests, test, tx.clone());
            pending += 1;
        }

        let (desc, result, stdout) = rx.recv().unwrap();
        if concurrency != 1 {
            try!(callback(TeWait(desc.clone(), PadNone)));
        }
        try!(callback(TeResult(desc, result, stdout)));
        pending -= 1;
    }

    // All benchmarks run at the end, in serial.
    // (this includes metric fns)
    for b in filtered_benchs_and_metrics.into_iter() {
        try!(callback(TeWait(b.desc.clone(), b.testfn.padding())));
        run_test(opts, !opts.run_benchmarks, b, tx.clone());
        let (test, result, stdout) = rx.recv().unwrap();
        try!(callback(TeResult(test, result, stdout)));
    }
    Ok(())
}
fn get_concurrency() -> uint {
    use std::rt;
    match os::getenv("RUST_TEST_TASKS") {
        Some(s) => {
            let opt_n: Option<uint> = FromStr::from_str(s.as_slice());
            match opt_n {
                Some(n) if n > 0 => n,
                _ => panic!("RUST_TEST_TASKS is `{}`, should be a positive integer.", s)
            }
        }
        None => {
            rt::default_sched_threads()
        }
    }
}
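// Usage note: the task count can be pinned from the environment, e.g.
// `RUST_TEST_TASKS=1 ./mytests` (binary name hypothetical), which also
// makes the runner print each test's name before running it.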
pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
    let mut filtered = tests;

    // Remove tests that don't match the test filter
    filtered = match opts.filter {
        None => filtered,
        Some(ref re) => {
            filtered.into_iter()
                .filter(|test| re.is_match(test.desc.name.as_slice())).collect()
        }
    };

    // Maybe pull out the ignored tests and unignore them
    filtered = if !opts.run_ignored {
        filtered
    } else {
        fn filter(test: TestDescAndFn) -> Option<TestDescAndFn> {
            if test.desc.ignore {
                let TestDescAndFn {desc, testfn} = test;
                Some(TestDescAndFn {
                    desc: TestDesc {ignore: false, ..desc},
                    testfn: testfn
                })
            } else {
                None
            }
        }
        filtered.into_iter().filter_map(|x| filter(x)).collect()
    };

    // Sort the tests alphabetically
    filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));

    filtered
}
pub fn run_test(opts: &TestOpts,
                force_ignore: bool,
                test: TestDescAndFn,
                monitor_ch: Sender<MonitorMsg>) {

    let TestDescAndFn {desc, testfn} = test;

    if force_ignore || desc.ignore {
        monitor_ch.send((desc, TrIgnored, Vec::new())).unwrap();
        return;
    }

    fn run_test_inner(desc: TestDesc,
                      monitor_ch: Sender<MonitorMsg>,
                      nocapture: bool,
                      testfn: Thunk) {
        Thread::spawn(move || {
            let (tx, rx) = channel();
            let mut reader = ChanReader::new(rx);
            let stdout = ChanWriter::new(tx.clone());
            let stderr = ChanWriter::new(tx);
            let mut cfg = thread::Builder::new().name(match desc.name {
                DynTestName(ref name) => name.clone().to_string(),
                StaticTestName(name) => name.to_string(),
            });
            if nocapture {
                drop((stdout, stderr));
            } else {
                cfg = cfg.stdout(box stdout as Box<Writer + Send>);
                cfg = cfg.stderr(box stderr as Box<Writer + Send>);
            }

            let result_guard = cfg.scoped(move || { testfn.invoke(()) });
            let stdout = reader.read_to_end().unwrap().into_iter().collect();
            let test_result = calc_result(&desc, result_guard.join());
            monitor_ch.send((desc.clone(), test_result, stdout)).unwrap();
        });
    }

    match testfn {
        DynBenchFn(bencher) => {
            let bs = ::bench::benchmark(|harness| bencher.run(harness));
            monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
            return;
        }
        StaticBenchFn(benchfn) => {
            let bs = ::bench::benchmark(|harness| (benchfn.clone())(harness));
            monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
            return;
        }
        DynMetricFn(f) => {
            let mut mm = MetricMap::new();
            f.invoke(&mut mm);
            monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
            return;
        }
        StaticMetricFn(f) => {
            let mut mm = MetricMap::new();
            f(&mut mm);
            monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
            return;
        }
        DynTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture, f),
        StaticTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture,
                                          Thunk::new(move|| f()))
    }
}
fn calc_result(desc: &TestDesc, task_result: Result<(), Box<Any+Send>>) -> TestResult {
    match (&desc.should_fail, task_result) {
        (&ShouldFail::No, Ok(())) |
        (&ShouldFail::Yes(None), Err(_)) => TrOk,
        (&ShouldFail::Yes(Some(msg)), Err(ref err))
            if err.downcast_ref::<String>()
                  .map(|e| &**e)
                  .or_else(|| err.downcast_ref::<&'static str>().map(|e| *e))
                  .map(|e| e.contains(msg))
                  .unwrap_or(false) => TrOk,
        _ => TrFailed,
    }
}
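// For example (hypothetical desc): with
// `should_fail: ShouldFail::Yes(Some("foo"))`,
//
//     panic!("foobar")  in the test => TrOk      ("foobar" contains "foo")
//     normal return     from a test => TrFailed  (it was expected to fail)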
impl MetricMap {

    pub fn new() -> MetricMap {
        MetricMap(BTreeMap::new())
    }
    /// Load a MetricMap from a file.
    ///
    /// # Panics
    ///
    /// This function will panic if the path does not exist or the path does not
    /// contain a valid metric map.
    pub fn load(p: &Path) -> MetricMap {
        assert!(p.exists());
        let mut f = File::open(p).unwrap();
        let value = json::from_reader(&mut f as &mut io::Reader).unwrap();
        let mut decoder = json::Decoder::new(value);
        MetricMap(match Decodable::decode(&mut decoder) {
            Ok(t) => t,
            Err(e) => panic!("failure decoding JSON: {:?}", e)
        })
    }
    /// Write a MetricMap to a file.
    pub fn save(&self, p: &Path) -> io::IoResult<()> {
        let mut file = try!(File::create(p));
        let MetricMap(ref map) = *self;
        write!(&mut file, "{}", json::as_json(map))
    }
    /// Compare against another MetricMap. Optionally compare all
    /// measurements in the maps using the provided `noise_pct` as a
    /// percentage of each value to consider noise. If `None`, each
    /// measurement's noise threshold is independently chosen as the
    /// maximum of that measurement's recorded noise quantity in either
    /// map.
    pub fn compare_to_old(&self, old: &MetricMap,
                          noise_pct: Option<f64>) -> MetricDiff {
        let mut diff : MetricDiff = BTreeMap::new();
        let MetricMap(ref selfmap) = *self;
        let MetricMap(ref old) = *old;
        for (k, vold) in old.iter() {
            let r = match selfmap.get(k) {
                None => MetricRemoved,
                Some(v) => {
                    let delta = v.value - vold.value;
                    let noise = match noise_pct {
                        None => vold.noise.abs().max(v.noise.abs()),
                        Some(pct) => vold.value * pct / 100.0
                    };
                    if delta.abs() <= noise {
                        LikelyNoise
                    } else {
                        let pct = delta.abs() / vold.value.max(f64::EPSILON) * 100.0;
                        if vold.noise < 0.0 {
                            // When 'noise' is negative, it means we want
                            // to see deltas that go up over time, and can
                            // only tolerate slight negative movement.
                            if delta > 0.0 {
                                Improvement(pct)
                            } else {
                                Regression(pct)
                            }
                        } else {
                            // When 'noise' is positive, it means we want
                            // to see deltas that go down over time, and
                            // can only tolerate slight positive movements.
                            if delta < 0.0 {
                                Improvement(pct)
                            } else {
                                Regression(pct)
                            }
                        }
                    }
                }
            };
            diff.insert((*k).clone(), r);
        }
        let MetricMap(ref map) = *self;
        for (k, _) in map.iter() {
            if !diff.contains_key(k) {
                diff.insert((*k).clone(), MetricAdded);
            }
        }
        diff
    }
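    // Worked example (mirrors `test_metricmap_compare` below): if `old`
    // records a metric as 1000.0 (+/- 10.0) and `self` records 2000.0,
    // the delta (+1000.0) exceeds the noise threshold, and with positive
    // noise an upward move is a regression of |delta|/old * 100 = 100%:
    //
    //     let mut old = MetricMap::new();
    //     let mut new = MetricMap::new();
    //     old.insert_metric("runtime", 1000.0, 10.0);
    //     new.insert_metric("runtime", 2000.0, 10.0);
    //     let diff = new.compare_to_old(&old, None);
    //     // diff["runtime"] == Regression(100.0)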
    /// Insert a named `value` (+/- `noise`) metric into the map. The value
    /// must be non-negative. The `noise` indicates the uncertainty of the
    /// metric, which doubles as the "noise range" of acceptable
    /// pairwise-regressions on this named value, when comparing from one
    /// metric to the next using `compare_to_old`.
    ///
    /// If `noise` is positive, then it means this metric is of a value
    /// you want to see grow smaller, so a change larger than `noise` in the
    /// positive direction represents a regression.
    ///
    /// If `noise` is negative, then it means this metric is of a value
    /// you want to see grow larger, so a change larger than `noise` in the
    /// negative direction represents a regression.
    pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
        let m = Metric {
            value: value,
            noise: noise
        };
        let MetricMap(ref mut map) = *self;
        map.insert(name.to_string(), m);
    }
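    // An illustrative metric-recording function (names hypothetical):
    //
    //     fn my_metrics(mm: &mut MetricMap) {
    //         // positive noise: this value should shrink over time
    //         mm.insert_metric("runtime_ns", 1000.0, 2.0);
    //         // negative noise: this value should grow over time
    //         mm.insert_metric("throughput_mb_s", 50.0, -2.0);
    //     }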
    /// Attempt to "ratchet" an external metric file. This involves loading
    /// metrics from a metric file (if it exists), comparing against
    /// the metrics in `self` using `compare_to_old`, and rewriting the
    /// file to contain the metrics in `self` if none of the
    /// `MetricChange`s are `Regression`. Returns the diff as well
    /// as a boolean indicating whether the ratchet succeeded.
    pub fn ratchet(&self, p: &Path, pct: Option<f64>) -> (MetricDiff, bool) {
        let old = if p.exists() {
            MetricMap::load(p)
        } else {
            MetricMap::new()
        };

        let diff : MetricDiff = self.compare_to_old(&old, pct);
        let ok = diff.iter().all(|(_, v)| {
            match *v {
                Regression(_) => false,
                _ => true
            }
        });

        if ok {
            self.save(p).unwrap();
        }
        return (diff, ok);
    }
}
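// A typical ratchet workflow, as a sketch (the file path is
// hypothetical): compare the current run's metrics against a saved
// baseline, failing loudly on any regression.
//
//     let (diff, ok) = metrics.ratchet(&Path::new("metrics.json"), None);
//     if !ok {
//         panic!("metrics regressed: {:?}", diff);
//     }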
/// A function that is opaque to the optimizer, to allow benchmarks to
/// pretend to use outputs to assist in avoiding dead-code
/// elimination.
///
/// This function is a no-op, and does not even read from `dummy`.
pub fn black_box<T>(dummy: T) -> T {
    // we need to "use" the argument in some way LLVM can't
    // introspect.
    unsafe {asm!("" : : "r"(&dummy))}
    dummy
}
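// Typical use, inside a benchmark body, to keep the result of a
// computation "live" (sketch; `expensive_sum` is hypothetical):
//
//     b.iter(|| {
//         let sum = expensive_sum();
//         black_box(sum)
//     });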
impl Bencher {
    /// Callback for benchmark functions to run in their body.
    pub fn iter<T, F>(&mut self, mut inner: F) where F: FnMut() -> T {
        self.dur = Duration::span(|| {
            let k = self.iterations;
            for _ in range(0u64, k) {
                black_box(inner());
            }
        });
    }
    pub fn ns_elapsed(&mut self) -> u64 {
        self.dur.num_nanoseconds().unwrap() as u64
    }

    pub fn ns_per_iter(&mut self) -> u64 {
        if self.iterations == 0 {
            0
        } else {
            self.ns_elapsed() / cmp::max(self.iterations, 1)
        }
    }
    pub fn bench_n<F>(&mut self, n: u64, f: F) where F: FnOnce(&mut Bencher) {
        self.iterations = n;
        f(self);
    }
    // This is a more statistics-driven benchmark algorithm
    pub fn auto_bench<F>(&mut self, mut f: F) -> stats::Summary<f64> where F: FnMut(&mut Bencher) {
        // Initial bench run to get ballpark figure.
        let mut n = 1_u64;
        self.bench_n(n, |x| f(x));

        // Try to estimate iter count for 1ms falling back to 1m
        // iterations if first run took < 1ns.
        if self.ns_per_iter() == 0 {
            n = 1_000_000;
        } else {
            n = 1_000_000 / cmp::max(self.ns_per_iter(), 1);
        }
        // if the first run took more than 1ms we don't want to just
        // be left doing 0 iterations on every loop. The unfortunate
        // side effect of not being able to do as many runs is
        // automatically handled by the statistical analysis below
        // (i.e. larger error bars).
        if n == 0 { n = 1; }

        let mut total_run = Duration::nanoseconds(0);
        let samples : &mut [f64] = &mut [0.0_f64; 50];
        loop {
            let mut summ = None;
            let mut summ5 = None;

            let loop_run = Duration::span(|| {

                for p in samples.iter_mut() {
                    self.bench_n(n, |x| f(x));
                    *p = self.ns_per_iter() as f64;
                }

                stats::winsorize(samples, 5.0);
                summ = Some(stats::Summary::new(samples));

                for p in samples.iter_mut() {
                    self.bench_n(5 * n, |x| f(x));
                    *p = self.ns_per_iter() as f64;
                }

                stats::winsorize(samples, 5.0);
                summ5 = Some(stats::Summary::new(samples));
            });
            let summ = summ.unwrap();
            let summ5 = summ5.unwrap();

            // If we've run for 100ms and seem to have converged to a
            // stable median.
            if loop_run.num_milliseconds() > 100 &&
                summ.median_abs_dev_pct < 1.0 &&
                summ.median - summ5.median < summ5.median_abs_dev {
                return summ5;
            }

            total_run = total_run + loop_run;
            // Longest we ever run for is 3s.
            if total_run.num_seconds() > 3 {
                return summ5;
            }

            n *= 2;
        }
    }
}
pub mod bench {
    //! Benchmarking module.

    use std::cmp;
    use std::time::Duration;
    use super::{Bencher, BenchSamples};

    pub fn benchmark<F>(f: F) -> BenchSamples where F: FnMut(&mut Bencher) {
        let mut bs = Bencher {
            iterations: 0,
            dur: Duration::nanoseconds(0),
            bytes: 0
        };

        let ns_iter_summ = bs.auto_bench(f);

        let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
        let iter_s = 1_000_000_000 / ns_iter;
        let mb_s = (bs.bytes * iter_s) / 1_000_000;

        BenchSamples {
            ns_iter_summ: ns_iter_summ,
            mb_s: mb_s as uint
        }
    }
}
#[cfg(test)]
mod tests {
    use test::{TrFailed, TrIgnored, TrOk, filter_tests, parse_opts,
               TestDesc, TestDescAndFn, TestOpts, run_test,
               Metric, MetricMap, MetricAdded, MetricRemoved,
               Improvement, Regression, LikelyNoise,
               StaticTestName, DynTestName, DynTestFn, ShouldFail};
    use std::io::TempDir;
    use std::thunk::Thunk;
    use std::sync::mpsc::channel;
    #[test]
    pub fn do_not_run_ignored_tests() {
        fn f() { panic!(); }
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: true,
                should_fail: ShouldFail::No,
            },
            testfn: DynTestFn(Thunk::new(move|| f())),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res != TrOk);
    }
    #[test]
    pub fn ignored_tests_result_in_ignored() {
        fn f() { }
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: true,
                should_fail: ShouldFail::No,
            },
            testfn: DynTestFn(Thunk::new(move|| f())),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res == TrIgnored);
    }
    #[test]
    fn test_should_fail() {
        fn f() { panic!(); }
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: false,
                should_fail: ShouldFail::Yes(None)
            },
            testfn: DynTestFn(Thunk::new(move|| f())),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res == TrOk);
    }
    #[test]
    fn test_should_fail_good_message() {
        fn f() { panic!("an error message"); }
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: false,
                should_fail: ShouldFail::Yes(Some("error message"))
            },
            testfn: DynTestFn(Thunk::new(move|| f())),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res == TrOk);
    }
    #[test]
    fn test_should_fail_bad_message() {
        fn f() { panic!("an error message"); }
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: false,
                should_fail: ShouldFail::Yes(Some("foobar"))
            },
            testfn: DynTestFn(Thunk::new(move|| f())),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res == TrFailed);
    }
    #[test]
    fn test_should_fail_but_succeeds() {
        fn f() { }
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: false,
                should_fail: ShouldFail::Yes(None)
            },
            testfn: DynTestFn(Thunk::new(move|| f())),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res == TrFailed);
    }
    #[test]
    fn first_free_arg_should_be_a_filter() {
        let args = vec!("progname".to_string(), "some_regex_filter".to_string());
        let opts = match parse_opts(args.as_slice()) {
            Some(Ok(o)) => o,
            _ => panic!("Malformed arg in first_free_arg_should_be_a_filter")
        };
        assert!(opts.filter.expect("should've found filter").is_match("some_regex_filter"))
    }
    #[test]
    fn parse_ignored_flag() {
        let args = vec!("progname".to_string(),
                        "filter".to_string(),
                        "--ignored".to_string());
        let opts = match parse_opts(args.as_slice()) {
            Some(Ok(o)) => o,
            _ => panic!("Malformed arg in parse_ignored_flag")
        };
        assert!(opts.run_ignored);
    }
    #[test]
    pub fn filter_for_ignored_option() {
        // When we run ignored tests the test filter should filter out all the
        // unignored tests and flip the ignore flag on the rest to false

        let mut opts = TestOpts::new();
        opts.run_tests = true;
        opts.run_ignored = true;

        let tests = vec!(
            TestDescAndFn {
                desc: TestDesc {
                    name: StaticTestName("1"),
                    ignore: true,
                    should_fail: ShouldFail::No,
                },
                testfn: DynTestFn(Thunk::new(move|| {})),
            },
            TestDescAndFn {
                desc: TestDesc {
                    name: StaticTestName("2"),
                    ignore: false,
                    should_fail: ShouldFail::No,
                },
                testfn: DynTestFn(Thunk::new(move|| {})),
            });
        let filtered = filter_tests(&opts, tests);

        assert_eq!(filtered.len(), 1);
        assert_eq!(filtered[0].desc.name.to_string(),
                   "1");
        assert!(filtered[0].desc.ignore == false);
    }
    #[test]
    pub fn sort_tests() {
        let mut opts = TestOpts::new();
        opts.run_tests = true;

        let names =
            vec!("sha1::test".to_string(),
                 "int::test_to_str".to_string(),
                 "int::test_pow".to_string(),
                 "test::do_not_run_ignored_tests".to_string(),
                 "test::ignored_tests_result_in_ignored".to_string(),
                 "test::first_free_arg_should_be_a_filter".to_string(),
                 "test::parse_ignored_flag".to_string(),
                 "test::filter_for_ignored_option".to_string(),
                 "test::sort_tests".to_string());
        let tests =
        {
            fn testfn() { }
            let mut tests = Vec::new();
            for name in names.iter() {
                let test = TestDescAndFn {
                    desc: TestDesc {
                        name: DynTestName((*name).clone()),
                        ignore: false,
                        should_fail: ShouldFail::No,
                    },
                    testfn: DynTestFn(Thunk::new(testfn)),
                };
                tests.push(test);
            }
            tests
        };
        let filtered = filter_tests(&opts, tests);

        let expected =
            vec!("int::test_pow".to_string(),
                 "int::test_to_str".to_string(),
                 "sha1::test".to_string(),
                 "test::do_not_run_ignored_tests".to_string(),
                 "test::filter_for_ignored_option".to_string(),
                 "test::first_free_arg_should_be_a_filter".to_string(),
                 "test::ignored_tests_result_in_ignored".to_string(),
                 "test::parse_ignored_flag".to_string(),
                 "test::sort_tests".to_string());

        for (a, b) in expected.iter().zip(filtered.iter()) {
            assert!(*a == b.desc.name.to_string());
        }
    }
    #[test]
    pub fn filter_tests_regex() {
        let mut opts = TestOpts::new();
        opts.filter = Some(::regex::Regex::new("a.*b.+c").unwrap());

        let mut names = ["yes::abXc", "yes::aXXXbXXXXc",
                         "no::XYZ", "no::abc"];
        names.sort();

        fn test_fn() {}
        let tests = names.iter().map(|name| {
            TestDescAndFn {
                desc: TestDesc {
                    name: DynTestName(name.to_string()),
                    ignore: false,
                    should_fail: ShouldFail::No,
                },
                testfn: DynTestFn(Thunk::new(test_fn))
            }
        }).collect();
        let filtered = filter_tests(&opts, tests);

        let expected: Vec<&str> =
            names.iter().map(|&s| s).filter(|name| name.starts_with("yes")).collect();

        assert_eq!(filtered.len(), expected.len());
        for (test, expected_name) in filtered.iter().zip(expected.iter()) {
            assert_eq!(test.desc.name.as_slice(), *expected_name);
        }
    }
    #[test]
    pub fn test_metricmap_compare() {
        let mut m1 = MetricMap::new();
        let mut m2 = MetricMap::new();
        m1.insert_metric("in-both-noise", 1000.0, 200.0);
        m2.insert_metric("in-both-noise", 1100.0, 200.0);

        m1.insert_metric("in-first-noise", 1000.0, 2.0);
        m2.insert_metric("in-second-noise", 1000.0, 2.0);

        m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
        m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);

        m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
        m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);

        m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
        m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);

        m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
        m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);

        let diff1 = m2.compare_to_old(&m1, None);

        assert_eq!(*(diff1.get(&"in-both-noise".to_string()).unwrap()), LikelyNoise);
        assert_eq!(*(diff1.get(&"in-first-noise".to_string()).unwrap()), MetricRemoved);
        assert_eq!(*(diff1.get(&"in-second-noise".to_string()).unwrap()), MetricAdded);
        assert_eq!(*(diff1.get(&"in-both-want-downwards-but-regressed".to_string()).unwrap()),
                   Regression(100.0));
        assert_eq!(*(diff1.get(&"in-both-want-downwards-and-improved".to_string()).unwrap()),
                   Improvement(50.0));
        assert_eq!(*(diff1.get(&"in-both-want-upwards-but-regressed".to_string()).unwrap()),
                   Regression(50.0));
        assert_eq!(*(diff1.get(&"in-both-want-upwards-and-improved".to_string()).unwrap()),
                   Improvement(100.0));
        assert_eq!(diff1.len(), 7);

        let diff2 = m2.compare_to_old(&m1, Some(200.0));

        assert_eq!(*(diff2.get(&"in-both-noise".to_string()).unwrap()), LikelyNoise);
        assert_eq!(*(diff2.get(&"in-first-noise".to_string()).unwrap()), MetricRemoved);
        assert_eq!(*(diff2.get(&"in-second-noise".to_string()).unwrap()), MetricAdded);
        assert_eq!(*(diff2.get(&"in-both-want-downwards-but-regressed".to_string()).unwrap()),
                   LikelyNoise);
        assert_eq!(*(diff2.get(&"in-both-want-downwards-and-improved".to_string()).unwrap()),
                   LikelyNoise);
        assert_eq!(*(diff2.get(&"in-both-want-upwards-but-regressed".to_string()).unwrap()),
                   LikelyNoise);
        assert_eq!(*(diff2.get(&"in-both-want-upwards-and-improved".to_string()).unwrap()),
                   LikelyNoise);
        assert_eq!(diff2.len(), 7);
    }
    #[test]
    pub fn ratchet_test() {

        let dpth = TempDir::new("test-ratchet").ok().expect("missing test for ratchet");
        let pth = dpth.path().join("ratchet.json");

        let mut m1 = MetricMap::new();
        m1.insert_metric("runtime", 1000.0, 2.0);
        m1.insert_metric("throughput", 50.0, 2.0);

        let mut m2 = MetricMap::new();
        m2.insert_metric("runtime", 1100.0, 2.0);
        m2.insert_metric("throughput", 50.0, 2.0);

        m1.save(&pth).unwrap();

        // Ask for a ratchet that should fail to advance.
        let (diff1, ok1) = m2.ratchet(&pth, None);
        assert_eq!(ok1, false);
        assert_eq!(diff1.len(), 2);
        assert_eq!(*(diff1.get(&"runtime".to_string()).unwrap()), Regression(10.0));
        assert_eq!(*(diff1.get(&"throughput".to_string()).unwrap()), LikelyNoise);

        // Check that it was not rewritten.
        let m3 = MetricMap::load(&pth);
        let MetricMap(m3) = m3;
        assert_eq!(m3.len(), 2);
        assert_eq!(*(m3.get(&"runtime".to_string()).unwrap()), Metric::new(1000.0, 2.0));
        assert_eq!(*(m3.get(&"throughput".to_string()).unwrap()), Metric::new(50.0, 2.0));

        // Ask for a ratchet with an explicit noise-percentage override,
        // that should advance.
        let (diff2, ok2) = m2.ratchet(&pth, Some(10.0));
        assert_eq!(ok2, true);
        assert_eq!(diff2.len(), 2);
        assert_eq!(*(diff2.get(&"runtime".to_string()).unwrap()), LikelyNoise);
        assert_eq!(*(diff2.get(&"throughput".to_string()).unwrap()), LikelyNoise);

        // Check that it was rewritten.
        let m4 = MetricMap::load(&pth);
        let MetricMap(m4) = m4;
        assert_eq!(m4.len(), 2);
        assert_eq!(*(m4.get(&"runtime".to_string()).unwrap()), Metric::new(1100.0, 2.0));
        assert_eq!(*(m4.get(&"throughput".to_string()).unwrap()), Metric::new(50.0, 2.0));
    }
}