1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 //! Support code for rustc's built-in unit-test and micro-benchmarking framework.
14 //! Almost all user code will only be interested in `Bencher` and
15 //! `black_box`. All other interactions (such as writing tests and
16 //! benchmarks themselves) should be done via the `#[test]` and
17 //! `#[bench]` attributes.
19 //! See the [Testing Guide](../guide-testing.html) for more details.
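//!
//! A minimal usage sketch (editor's illustration; `add_two` is a hypothetical
//! function, not part of this crate):
//!
//! ```ignore
//! extern crate test;
//!
//! use test::Bencher;
//!
//! fn add_two(n: u64) -> u64 { n + 2 }
//!
//! #[test]
//! fn it_adds_two() {
//!     assert_eq!(add_two(2), 4);
//! }
//!
//! #[bench]
//! fn bench_add_two(b: &mut Bencher) {
//!     // `iter` times the closure; `black_box` keeps the optimizer from
//!     // discarding the computation.
//!     b.iter(|| test::black_box(add_two(2)));
//! }
//! ```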
21 // Currently, not much of this is meant for users. It is intended to
22 // support the simplest interface possible for representing and
23 // running tests while providing a base that other test frameworks may build on.
26 #![crate_name = "test"]
28 #![crate_type = "rlib"]
29 #![crate_type = "dylib"]
30 #![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
31 html_favicon_url = "http://www.rust-lang.org/favicon.ico",
32 html_root_url = "http://doc.rust-lang.org/nightly/")]
34 #![feature(asm, macro_rules, phase, globs, slicing_syntax)]
38 extern crate serialize;
41 pub use self::TestFn::*;
42 pub use self::MetricChange::*;
43 pub use self::ColorConfig::*;
44 pub use self::TestResult::*;
45 pub use self::TestName::*;
46 use self::TestEvent::*;
47 use self::NamePadding::*;
48 use self::OutputLocation::*;
50 use std::any::{Any, AnyRefExt};
51 use std::collections::TreeMap;
53 use getopts::{OptGroup, optflag, optopt};
55 use serialize::{json, Decodable, Encodable};
57 use term::color::{Color, RED, YELLOW, GREEN, CYAN};
63 use std::io::fs::PathExtensions;
64 use std::io::stdio::StdWriter;
65 use std::io::{File, ChanReader, ChanWriter};
67 use std::num::{Float, FloatMath, Int};
69 use std::str::FromStr;
70 use std::string::String;
71 use std::task::TaskBuilder;
72 use std::time::Duration;
74 // to be used by rustc to compile tests in libtest
76 pub use {Bencher, TestName, TestResult, TestDesc,
77 TestDescAndFn, TestOpts, TrFailed, TrIgnored, TrOk,
78 Metric, MetricMap, MetricAdded, MetricRemoved,
79 MetricChange, Improvement, Regression, LikelyNoise,
80 StaticTestFn, StaticTestName, DynTestName, DynTestFn,
81 run_test, test_main, test_main_static, filter_tests,
82 parse_opts, StaticBenchFn, ShouldFail};
87 // The name of a test. By convention this follows the rules for rust
88 // paths; i.e. it should be a series of identifiers separated by double
89 // colons. This way if some test runner wants to arrange the tests
90 // hierarchically it may.
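// For example, a test function `test_push` inside the module
// `collections::vec` would conventionally be named
// "collections::vec::test_push".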
92 #[deriving(Clone, PartialEq, Eq, Hash)]
94 StaticTestName(&'static str),
98 fn as_slice<'a>(&'a self) -> &'a str {
100 StaticTestName(s) => s,
101 DynTestName(ref s) => s.as_slice()
105 impl Show for TestName {
106 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
107 self.as_slice().fmt(f)
118 impl Copy for NamePadding {}
121 fn padded_name(&self, column_count: uint, align: NamePadding) -> String {
122 let mut name = String::from_str(self.name.as_slice());
123 let fill = column_count.saturating_sub(name.len());
124 let mut pad = " ".repeat(fill);
128 pad.push_str(name.as_slice());
132 name.push_str(pad.as_slice());
139 /// Represents a benchmark function.
140 pub trait TDynBenchFn {
141 fn run(&self, harness: &mut Bencher);
144 // A function that runs a test. If the function returns successfully,
145 // the test succeeds; if the function panics then the test fails. We
146 // may need to come up with a more clever definition of test in order
147 // to support isolation of tests into tasks.
150 StaticBenchFn(fn(&mut Bencher)),
151 StaticMetricFn(proc(&mut MetricMap):'static),
152 DynTestFn(proc():Send),
153 DynMetricFn(proc(&mut MetricMap):'static),
154 DynBenchFn(Box<TDynBenchFn+'static>)
158 fn padding(&self) -> NamePadding {
160 &StaticTestFn(..) => PadNone,
161 &StaticBenchFn(..) => PadOnRight,
162 &StaticMetricFn(..) => PadOnRight,
163 &DynTestFn(..) => PadNone,
164 &DynMetricFn(..) => PadOnRight,
165 &DynBenchFn(..) => PadOnRight,
170 impl fmt::Show for TestFn {
171 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
172 f.write(match *self {
173 StaticTestFn(..) => "StaticTestFn(..)",
174 StaticBenchFn(..) => "StaticBenchFn(..)",
175 StaticMetricFn(..) => "StaticMetricFn(..)",
176 DynTestFn(..) => "DynTestFn(..)",
177 DynMetricFn(..) => "DynMetricFn(..)",
178 DynBenchFn(..) => "DynBenchFn(..)"
183 /// Manager of the benchmarking runs.
185 /// This is fed into functions marked with `#[bench]` to allow for
186 /// set-up & tear-down before running a piece of code repeatedly via a call to `iter`.
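///
/// A sketch of a typical `#[bench]` body (illustrative; `expensive_setup` and
/// `routine` are hypothetical):
///
/// ```ignore
/// #[bench]
/// fn bench_routine(b: &mut Bencher) {
///     let data = expensive_setup(); // not timed
///     b.iter(|| routine(&data));    // only this closure is timed
/// }
/// ```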
195 #[deriving(Copy, Clone, Show, PartialEq, Eq, Hash)]
196 pub enum ShouldFail {
198 Yes(Option<&'static str>)
201 // The definition of a single test. A test runner will run a list of
203 #[deriving(Clone, Show, PartialEq, Eq, Hash)]
204 pub struct TestDesc {
207 pub should_fail: ShouldFail,
211 pub struct TestDescAndFn {
216 #[deriving(Clone, Encodable, Decodable, PartialEq, Show)]
222 impl Copy for Metric {}
225 pub fn new(value: f64, noise: f64) -> Metric {
226 Metric {value: value, noise: noise}
230 #[deriving(PartialEq)]
231 pub struct MetricMap(TreeMap<String,Metric>);
233 impl Clone for MetricMap {
234 fn clone(&self) -> MetricMap {
235 let MetricMap(ref map) = *self;
236 MetricMap(map.clone())
240 /// Analysis of a single change in metric
241 #[deriving(PartialEq, Show)]
242 pub enum MetricChange {
250 impl Copy for MetricChange {}
252 pub type MetricDiff = TreeMap<String,MetricChange>;
254 // The default console test runner. It accepts the command line
255 // arguments and a vector of test_descs.
256 pub fn test_main(args: &[String], tests: Vec<TestDescAndFn> ) {
258 match parse_opts(args) {
260 Some(Err(msg)) => panic!("{}", msg),
263 match run_tests_console(&opts, tests) {
265 Ok(false) => panic!("Some tests failed"),
266 Err(e) => panic!("io error when running tests: {}", e),
270 // A variant optimized for invocation with a static test vector.
271 // This will panic (intentionally) when fed any dynamic tests, because
272 // it is copying the static values out into a dynamic vector and cannot
273 // copy dynamic values. It is doing this because from this point on
274 // a ~[TestDescAndFn] is used in order to effect ownership-transfer
275 // semantics into parallel test runners, which in turn requires a ~[]
276 // rather than a &[].
277 pub fn test_main_static(args: &[String], tests: &[TestDescAndFn]) {
278 let owned_tests = tests.iter().map(|t| {
280 StaticTestFn(f) => TestDescAndFn { testfn: StaticTestFn(f), desc: t.desc.clone() },
281 StaticBenchFn(f) => TestDescAndFn { testfn: StaticBenchFn(f), desc: t.desc.clone() },
282 _ => panic!("non-static tests passed to test::test_main_static")
285 test_main(args, owned_tests)
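// Roughly, the harness that rustc generates for a `--test` build amounts to
// something like the following (editor's sketch, not the exact generated
// code; `TESTS` stands for the compiler-generated static test vector):
//
//     pub fn main() {
//         let args = std::os::args();
//         test_main_static(args.as_slice(), TESTS);
//     }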
288 pub enum ColorConfig {
294 impl Copy for ColorConfig {}
296 pub struct TestOpts {
297 pub filter: Option<Regex>,
298 pub run_ignored: bool,
300 pub run_benchmarks: bool,
301 pub ratchet_metrics: Option<Path>,
302 pub ratchet_noise_percent: Option<f64>,
303 pub save_metrics: Option<Path>,
304 pub test_shard: Option<(uint,uint)>,
305 pub logfile: Option<Path>,
307 pub color: ColorConfig,
308 pub show_boxplot: bool,
309 pub boxplot_width: uint,
310 pub show_all_stats: bool,
315 fn new() -> TestOpts {
320 run_benchmarks: false,
321 ratchet_metrics: None,
322 ratchet_noise_percent: None,
330 show_all_stats: false,
335 /// Result of parsing the options.
336 pub type OptRes = Result<TestOpts, String>;
338 fn optgroups() -> Vec<getopts::OptGroup> {
339 vec!(getopts::optflag("", "ignored", "Run ignored tests"),
340 getopts::optflag("", "test", "Run tests and not benchmarks"),
341 getopts::optflag("", "bench", "Run benchmarks instead of tests"),
342 getopts::optflag("h", "help", "Display this message (longer with --help)"),
343 getopts::optopt("", "save-metrics", "Location to save bench metrics",
345 getopts::optopt("", "ratchet-metrics",
346 "Location to load and save metrics from. The metrics \
347 loaded will cause benchmarks to fail if they run too \
349 getopts::optopt("", "ratchet-noise-percent",
350 "Tests within N% of the recorded metrics will be \
351 considered as passing", "PERCENTAGE"),
352 getopts::optopt("", "logfile", "Write logs to the specified file instead \
354 getopts::optopt("", "test-shard", "Run shard A of B shards of the test suite",
356 getopts::optflag("", "nocapture", "don't capture stdout/stderr of each \
357 task, allow printing directly"),
358 getopts::optopt("", "color", "Configure coloring of output:
359 auto = colorize if stdout is a tty and tests are run serially (default);
360 always = always colorize output;
361 never = never colorize output;", "auto|always|never"),
362 getopts::optflag("", "boxplot", "Display a boxplot of the benchmark statistics"),
363 getopts::optopt("", "boxplot-width", "Set the boxplot width (default 50)", "WIDTH"),
364 getopts::optflag("", "stats", "Display the benchmark min, max, and quartiles"))
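// Example invocations of a test binary built with `--test` (illustrative):
//
//     ./mytests some_filter_regex     # run only the tests matching the regex
//     ./mytests --ignored             # run only #[ignore]d tests
//     ./mytests --bench --stats       # run benchmarks and show full summaries
//     ./mytests --test-shard=2.4      # run shard 2 of 4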
367 fn usage(binary: &str) {
368 let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
371 The FILTER regex is tested against the name of all tests to run, and
372 only those tests that match are run.
374 By default, all tests are run in parallel. This can be altered with the
375 RUST_TEST_TASKS environment variable when running tests (set it to 1).
377 All tests have their standard output and standard error captured by default.
378 This can be overridden with the --nocapture flag or the RUST_TEST_NOCAPTURE=1
379 environment variable. Logging is not captured by default.
383 #[test] - Indicates a function is a test to be run. This function takes no arguments.
385 #[bench] - Indicates a function is a benchmark to be run. This
386 function takes one argument (test::Bencher).
387 #[should_fail] - This function (also labeled with #[test]) will only pass if
388 the code causes a failure (an assertion failure or panic!)
389 A message may be provided, which the failure string must
390 contain: #[should_fail(expected = "foo")].
391 #[ignore] - When applied to a function which is already attributed as a
392 test, then the test runner will ignore these tests during
393 normal test runs. Running with --ignored will run these
395 usage = getopts::usage(message.as_slice(),
396 optgroups().as_slice()));
399 // Parses command line arguments into test options
400 pub fn parse_opts(args: &[String]) -> Option<OptRes> {
401 let args_ = args.tail();
403 match getopts::getopts(args_.as_slice(), optgroups().as_slice()) {
405 Err(f) => return Some(Err(f.to_string()))
408 if matches.opt_present("h") { usage(args[0].as_slice()); return None; }
410 let filter = if matches.free.len() > 0 {
411 let s = matches.free[0].as_slice();
412 match Regex::new(s) {
414 Err(e) => return Some(Err(format!("could not parse /{}/: {}", s, e)))
420 let run_ignored = matches.opt_present("ignored");
422 let logfile = matches.opt_str("logfile");
423 let logfile = logfile.map(|s| Path::new(s));
425 let run_benchmarks = matches.opt_present("bench");
426 let run_tests = ! run_benchmarks ||
427 matches.opt_present("test");
429 let ratchet_metrics = matches.opt_str("ratchet-metrics");
430 let ratchet_metrics = ratchet_metrics.map(|s| Path::new(s));
432 let ratchet_noise_percent = matches.opt_str("ratchet-noise-percent");
433 let ratchet_noise_percent =
434 ratchet_noise_percent.map(|s| from_str::<f64>(s.as_slice()).unwrap());
436 let save_metrics = matches.opt_str("save-metrics");
437 let save_metrics = save_metrics.map(|s| Path::new(s));
439 let test_shard = matches.opt_str("test-shard");
440 let test_shard = opt_shard(test_shard);
442 let mut nocapture = matches.opt_present("nocapture");
444 nocapture = os::getenv("RUST_TEST_NOCAPTURE").is_some();
447 let color = match matches.opt_str("color").as_ref().map(|s| s.as_slice()) {
448 Some("auto") | None => AutoColor,
449 Some("always") => AlwaysColor,
450 Some("never") => NeverColor,
452 Some(v) => return Some(Err(format!("argument for --color must be \
453 auto, always, or never (was {})",
457 let show_boxplot = matches.opt_present("boxplot");
458 let boxplot_width = match matches.opt_str("boxplot-width") {
460 match FromStr::from_str(width.as_slice()) {
461 Some(width) => width,
463 return Some(Err(format!("argument for --boxplot-width must be a uint")));
470 let show_all_stats = matches.opt_present("stats");
472 let test_opts = TestOpts {
474 run_ignored: run_ignored,
475 run_tests: run_tests,
476 run_benchmarks: run_benchmarks,
477 ratchet_metrics: ratchet_metrics,
478 ratchet_noise_percent: ratchet_noise_percent,
479 save_metrics: save_metrics,
480 test_shard: test_shard,
482 nocapture: nocapture,
484 show_boxplot: show_boxplot,
485 boxplot_width: boxplot_width,
486 show_all_stats: show_all_stats,
492 pub fn opt_shard(maybestr: Option<String>) -> Option<(uint,uint)> {
496 let mut it = s.split('.');
497 match (it.next().and_then(from_str::<uint>), it.next().and_then(from_str::<uint>),
499 (Some(a), Some(b), None) => {
501 panic!("tried to run shard {a}.{b}, but {a} is out of bounds \
502 (should be between 1 and {b})", a=a, b=b)
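// For example, `--test-shard=2.5` parses to `Some((2, 5))`: run the second of
// five shards (shards are numbered starting at 1).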
513 #[deriving(Clone, PartialEq)]
514 pub struct BenchSamples {
515 ns_iter_summ: stats::Summary<f64>,
519 #[deriving(Clone, PartialEq)]
520 pub enum TestResult {
524 TrMetrics(MetricMap),
525 TrBench(BenchSamples),
528 enum OutputLocation<T> {
529 Pretty(Box<term::Terminal<term::WriterWrapper> + Send>),
533 struct ConsoleTestState<T> {
534 log_out: Option<File>,
535 out: OutputLocation<T>,
539 show_all_stats: bool,
546 failures: Vec<(TestDesc, Vec<u8> )> ,
547 max_name_len: uint, // number of columns to fill when aligning names
550 impl<T: Writer> ConsoleTestState<T> {
551 pub fn new(opts: &TestOpts,
552 _: Option<T>) -> io::IoResult<ConsoleTestState<StdWriter>> {
553 let log_out = match opts.logfile {
554 Some(ref path) => Some(try!(File::create(path))),
557 let out = match term::stdout() {
558 None => Raw(io::stdio::stdout_raw()),
562 Ok(ConsoleTestState {
565 use_color: use_color(opts),
566 show_boxplot: opts.show_boxplot,
567 boxplot_width: opts.boxplot_width,
568 show_all_stats: opts.show_all_stats,
574 metrics: MetricMap::new(),
575 failures: Vec::new(),
580 pub fn write_ok(&mut self) -> io::IoResult<()> {
581 self.write_pretty("ok", term::color::GREEN)
584 pub fn write_failed(&mut self) -> io::IoResult<()> {
585 self.write_pretty("FAILED", term::color::RED)
588 pub fn write_ignored(&mut self) -> io::IoResult<()> {
589 self.write_pretty("ignored", term::color::YELLOW)
592 pub fn write_metric(&mut self) -> io::IoResult<()> {
593 self.write_pretty("metric", term::color::CYAN)
596 pub fn write_bench(&mut self) -> io::IoResult<()> {
597 self.write_pretty("bench", term::color::CYAN)
600 pub fn write_added(&mut self) -> io::IoResult<()> {
601 self.write_pretty("added", term::color::GREEN)
604 pub fn write_improved(&mut self) -> io::IoResult<()> {
605 self.write_pretty("improved", term::color::GREEN)
608 pub fn write_removed(&mut self) -> io::IoResult<()> {
609 self.write_pretty("removed", term::color::YELLOW)
612 pub fn write_regressed(&mut self) -> io::IoResult<()> {
613 self.write_pretty("regressed", term::color::RED)
616 pub fn write_pretty(&mut self,
618 color: term::color::Color) -> io::IoResult<()> {
620 Pretty(ref mut term) => {
622 try!(term.fg(color));
624 try!(term.write(word.as_bytes()));
630 Raw(ref mut stdout) => stdout.write(word.as_bytes())
634 pub fn write_plain(&mut self, s: &str) -> io::IoResult<()> {
636 Pretty(ref mut term) => term.write(s.as_bytes()),
637 Raw(ref mut stdout) => stdout.write(s.as_bytes())
641 pub fn write_run_start(&mut self, len: uint) -> io::IoResult<()> {
643 let noun = if len != 1 { "tests" } else { "test" };
644 self.write_plain(format!("\nrunning {} {}\n", len, noun).as_slice())
647 pub fn write_test_start(&mut self, test: &TestDesc,
648 align: NamePadding) -> io::IoResult<()> {
649 let name = test.padded_name(self.max_name_len, align);
650 self.write_plain(format!("test {} ... ", name).as_slice())
653 pub fn write_result(&mut self, result: &TestResult) -> io::IoResult<()> {
655 TrOk => self.write_ok(),
656 TrFailed => self.write_failed(),
657 TrIgnored => self.write_ignored(),
658 TrMetrics(ref mm) => {
659 try!(self.write_metric());
660 self.write_plain(format!(": {}", fmt_metrics(mm)).as_slice())
663 try!(self.write_bench());
665 if self.show_boxplot {
666 let mut wr = Vec::new();
668 try!(stats::write_boxplot(&mut wr, &bs.ns_iter_summ, self.boxplot_width));
670 let s = String::from_utf8(wr).unwrap();
672 try!(self.write_plain(format!(": {}", s).as_slice()));
675 if self.show_all_stats {
676 let mut wr = Vec::new();
678 try!(stats::write_5_number_summary(&mut wr, &bs.ns_iter_summ));
680 let s = String::from_utf8(wr).unwrap();
682 try!(self.write_plain(format!(": {}", s).as_slice()));
684 try!(self.write_plain(format!(": {}",
685 fmt_bench_samples(bs)).as_slice()));
691 self.write_plain("\n")
694 pub fn write_log(&mut self, test: &TestDesc,
695 result: &TestResult) -> io::IoResult<()> {
699 let s = format!("{} {}\n", match *result {
700 TrOk => "ok".to_string(),
701 TrFailed => "failed".to_string(),
702 TrIgnored => "ignored".to_string(),
703 TrMetrics(ref mm) => fmt_metrics(mm),
704 TrBench(ref bs) => fmt_bench_samples(bs)
705 }, test.name.as_slice());
706 o.write(s.as_bytes())
711 pub fn write_failures(&mut self) -> io::IoResult<()> {
712 try!(self.write_plain("\nfailures:\n"));
713 let mut failures = Vec::new();
714 let mut fail_out = String::new();
715 for &(ref f, ref stdout) in self.failures.iter() {
716 failures.push(f.name.to_string());
717 if stdout.len() > 0 {
718 fail_out.push_str(format!("---- {} stdout ----\n\t",
719 f.name.as_slice()).as_slice());
720 let output = String::from_utf8_lossy(stdout.as_slice());
721 fail_out.push_str(output.as_slice());
722 fail_out.push_str("\n");
725 if fail_out.len() > 0 {
726 try!(self.write_plain("\n"));
727 try!(self.write_plain(fail_out.as_slice()));
730 try!(self.write_plain("\nfailures:\n"));
732 for name in failures.iter() {
733 try!(self.write_plain(format!(" {}\n",
734 name.as_slice()).as_slice()));
739 pub fn write_metric_diff(&mut self, diff: &MetricDiff) -> io::IoResult<()> {
741 let mut improved = 0u;
742 let mut regressed = 0u;
744 let mut removed = 0u;
746 for (k, v) in diff.iter() {
748 LikelyNoise => noise += 1,
751 try!(self.write_added());
752 try!(self.write_plain(format!(": {}\n", *k).as_slice()));
756 try!(self.write_removed());
757 try!(self.write_plain(format!(": {}\n", *k).as_slice()));
759 Improvement(pct) => {
761 try!(self.write_plain(format!(": {} ", *k).as_slice()));
762 try!(self.write_improved());
763 try!(self.write_plain(format!(" by {:.2}%\n",
764 pct as f64).as_slice()));
768 try!(self.write_plain(format!(": {} ", *k).as_slice()));
769 try!(self.write_regressed());
770 try!(self.write_plain(format!(" by {:.2}%\n",
771 pct as f64).as_slice()));
775 try!(self.write_plain(format!("result of ratchet: {} metrics added, \
776 {} removed, {} improved, {} regressed, \
778 added, removed, improved, regressed,
781 try!(self.write_plain("updated ratchet file\n"));
783 try!(self.write_plain("left ratchet file untouched\n"));
788 pub fn write_run_finish(&mut self,
789 ratchet_metrics: &Option<Path>,
790 ratchet_pct: Option<f64>) -> io::IoResult<bool> {
791 assert!(self.passed + self.failed + self.ignored + self.measured == self.total);
793 let ratchet_success = match *ratchet_metrics {
796 try!(self.write_plain(format!("\nusing metrics ratchet: {}\n",
797 pth.display()).as_slice()));
801 try!(self.write_plain(format!("with noise-tolerance \
805 let (diff, ok) = self.metrics.ratchet(pth, ratchet_pct);
806 try!(self.write_metric_diff(&diff));
811 let test_success = self.failed == 0u;
813 try!(self.write_failures());
816 let success = ratchet_success && test_success;
818 try!(self.write_plain("\ntest result: "));
820 // There's no parallelism at this point so it's safe to use color
821 try!(self.write_ok());
823 try!(self.write_failed());
825 let s = format!(". {} passed; {} failed; {} ignored; {} measured\n\n",
826 self.passed, self.failed, self.ignored, self.measured);
827 try!(self.write_plain(s.as_slice()));
832 pub fn fmt_metrics(mm: &MetricMap) -> String {
833 let MetricMap(ref mm) = *mm;
834 let v : Vec<String> = mm.iter()
835 .map(|(k,v)| format!("{}: {} (+/- {})", *k,
836 v.value as f64, v.noise as f64))
841 pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
843 format!("{:>9} ns/iter (+/- {}) = {} MB/s",
844 bs.ns_iter_summ.median as uint,
845 (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint,
848 format!("{:>9} ns/iter (+/- {})",
849 bs.ns_iter_summ.median as uint,
850 (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint)
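// Example of the resulting string (illustrative numbers):
//
//          1235 ns/iter (+/- 117) = 215 MB/s
//
// The +/- figure is the max-min spread of the per-iteration timings; the MB/s
// part is only shown when the benchmark reported a byte count via `bytes`.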
854 // A simple console test runner
855 pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn> ) -> io::IoResult<bool> {
857 fn callback<T: Writer>(event: &TestEvent, st: &mut ConsoleTestState<T>) -> io::IoResult<()> {
858 match (*event).clone() {
859 TeFiltered(ref filtered_tests) => st.write_run_start(filtered_tests.len()),
860 TeWait(ref test, padding) => st.write_test_start(test, padding),
861 TeResult(test, result, stdout) => {
862 try!(st.write_log(&test, &result));
863 try!(st.write_result(&result));
865 TrOk => st.passed += 1,
866 TrIgnored => st.ignored += 1,
868 let tname = test.name.as_slice();
869 let MetricMap(mm) = mm;
870 for (k,v) in mm.iter() {
872 .insert_metric(format!("{}.{}",
881 st.metrics.insert_metric(test.name.as_slice(),
882 bs.ns_iter_summ.median,
883 bs.ns_iter_summ.max - bs.ns_iter_summ.min);
888 st.failures.push((test, stdout));
896 let mut st = try!(ConsoleTestState::new(opts, None::<StdWriter>));
897 fn len_if_padded(t: &TestDescAndFn) -> uint {
898 match t.testfn.padding() {
900 PadOnLeft | PadOnRight => t.desc.name.as_slice().len(),
903 match tests.iter().max_by(|t|len_if_padded(*t)) {
905 let n = t.desc.name.as_slice();
906 st.max_name_len = n.len();
910 try!(run_tests(opts, tests, |x| callback(&x, &mut st)));
911 match opts.save_metrics {
914 try!(st.metrics.save(pth));
915 try!(st.write_plain(format!("\nmetrics saved to: {}",
916 pth.display()).as_slice()));
919 return st.write_run_finish(&opts.ratchet_metrics, opts.ratchet_noise_percent);
923 fn should_sort_failures_before_printing_them() {
924 let test_a = TestDesc {
925 name: StaticTestName("a"),
927 should_fail: ShouldFail::No
930 let test_b = TestDesc {
931 name: StaticTestName("b"),
933 should_fail: ShouldFail::No
936 let mut st = ConsoleTestState {
938 out: Raw(Vec::new()),
942 show_all_stats: false,
949 metrics: MetricMap::new(),
950 failures: vec!((test_b, Vec::new()), (test_a, Vec::new()))
953 st.write_failures().unwrap();
954 let s = match st.out {
955 Raw(ref m) => String::from_utf8_lossy(m[]),
956 Pretty(_) => unreachable!()
959 let apos = s.find_str("a").unwrap();
960 let bpos = s.find_str("b").unwrap();
961 assert!(apos < bpos);
964 fn use_color(opts: &TestOpts) -> bool {
966 AutoColor => get_concurrency() == 1 && io::stdout().get_ref().isatty(),
974 TeFiltered(Vec<TestDesc> ),
975 TeWait(TestDesc, NamePadding),
976 TeResult(TestDesc, TestResult, Vec<u8> ),
979 pub type MonitorMsg = (TestDesc, TestResult, Vec<u8> );
981 fn run_tests(opts: &TestOpts,
982 tests: Vec<TestDescAndFn> ,
983 callback: |e: TestEvent| -> io::IoResult<()>) -> io::IoResult<()> {
984 let filtered_tests = filter_tests(opts, tests);
985 let filtered_descs = filtered_tests.iter()
986 .map(|t| t.desc.clone())
989 try!(callback(TeFiltered(filtered_descs)));
991 let (filtered_tests, filtered_benchs_and_metrics) =
992 filtered_tests.partition(|e| {
994 StaticTestFn(_) | DynTestFn(_) => true,
999 // It's tempting to just spawn all the tests at once, but since we have
1000 // many tests that run in other processes we would be making a big mess.
1001 let concurrency = get_concurrency();
1003 let mut remaining = filtered_tests;
1004 remaining.reverse();
1005 let mut pending = 0;
1007 let (tx, rx) = channel::<MonitorMsg>();
1009 while pending > 0 || !remaining.is_empty() {
1010 while pending < concurrency && !remaining.is_empty() {
1011 let test = remaining.pop().unwrap();
1012 if concurrency == 1 {
1013 // We are doing one test at a time so we can print the name
1014 // of the test before we run it. Useful for debugging tests
1015 // that hang forever.
1016 try!(callback(TeWait(test.desc.clone(), test.testfn.padding())));
1018 run_test(opts, !opts.run_tests, test, tx.clone());
1022 let (desc, result, stdout) = rx.recv();
1023 if concurrency != 1 {
1024 try!(callback(TeWait(desc.clone(), PadNone)));
1026 try!(callback(TeResult(desc, result, stdout)));
1030 // All benchmarks run at the end, in serial.
1031 // (this includes metric fns)
1032 for b in filtered_benchs_and_metrics.into_iter() {
1033 try!(callback(TeWait(b.desc.clone(), b.testfn.padding())));
1034 run_test(opts, !opts.run_benchmarks, b, tx.clone());
1035 let (test, result, stdout) = rx.recv();
1036 try!(callback(TeResult(test, result, stdout)));
1041 fn get_concurrency() -> uint {
1043 match os::getenv("RUST_TEST_TASKS") {
1045 let opt_n: Option<uint> = FromStr::from_str(s.as_slice());
1047 Some(n) if n > 0 => n,
1048 _ => panic!("RUST_TEST_TASKS is `{}`, should be a positive integer.", s)
1052 rt::default_sched_threads()
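// For example, `RUST_TEST_TASKS=1 ./mytests` forces tests to run one at a
// time (handy when debugging interleaved output); leaving the variable unset
// falls back to the default scheduler thread count.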
1057 pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
1058 let mut filtered = tests;
1060 // Remove tests that don't match the test filter
1061 filtered = match opts.filter {
1064 filtered.into_iter()
1065 .filter(|test| re.is_match(test.desc.name.as_slice())).collect()
1069 // Maybe pull out the ignored tests and unignore them
1070 filtered = if !opts.run_ignored {
1073 fn filter(test: TestDescAndFn) -> Option<TestDescAndFn> {
1074 if test.desc.ignore {
1075 let TestDescAndFn {desc, testfn} = test;
1076 Some(TestDescAndFn {
1077 desc: TestDesc {ignore: false, ..desc},
1084 filtered.into_iter().filter_map(|x| filter(x)).collect()
1087 // Sort the tests alphabetically
1088 filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));
1090 // Shard the remaining tests, if sharding requested.
1091 match opts.test_shard {
1094 filtered.into_iter().enumerate()
1095 // note: using a - 1 so that the valid shards, for example, are
1096 // 1.2 and 2.2 instead of 0.2 and 1.2
1097 .filter(|&(i,_)| i % b == (a - 1))
1104 pub fn run_test(opts: &TestOpts,
1106 test: TestDescAndFn,
1107 monitor_ch: Sender<MonitorMsg>) {
1109 let TestDescAndFn {desc, testfn} = test;
1111 if force_ignore || desc.ignore {
1112 monitor_ch.send((desc, TrIgnored, Vec::new()));
1116 fn run_test_inner(desc: TestDesc,
1117 monitor_ch: Sender<MonitorMsg>,
1119 testfn: proc():Send) {
1121 let (tx, rx) = channel();
1122 let mut reader = ChanReader::new(rx);
1123 let stdout = ChanWriter::new(tx.clone());
1124 let stderr = ChanWriter::new(tx);
1125 let mut task = TaskBuilder::new().named(match desc.name {
1126 DynTestName(ref name) => name.clone().to_string(),
1127 StaticTestName(name) => name.to_string(),
1130 drop((stdout, stderr));
1132 task = task.stdout(box stdout as Box<Writer + Send>);
1133 task = task.stderr(box stderr as Box<Writer + Send>);
1135 let result_future = task.try_future(testfn);
1137 let stdout = reader.read_to_end().unwrap().into_iter().collect();
1138 let task_result = result_future.into_inner();
1139 let test_result = calc_result(&desc, task_result);
1140 monitor_ch.send((desc.clone(), test_result, stdout));
1145 DynBenchFn(bencher) => {
1146 let bs = ::bench::benchmark(|harness| bencher.run(harness));
1147 monitor_ch.send((desc, TrBench(bs), Vec::new()));
1150 StaticBenchFn(benchfn) => {
1151 let bs = ::bench::benchmark(|harness| (benchfn.clone())(harness));
1152 monitor_ch.send((desc, TrBench(bs), Vec::new()));
1156 let mut mm = MetricMap::new();
1158 monitor_ch.send((desc, TrMetrics(mm), Vec::new()));
1161 StaticMetricFn(f) => {
1162 let mut mm = MetricMap::new();
1164 monitor_ch.send((desc, TrMetrics(mm), Vec::new()));
1167 DynTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture, f),
1168 StaticTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture,
1173 fn calc_result(desc: &TestDesc, task_result: Result<(), Box<Any+Send>>) -> TestResult {
1174 match (&desc.should_fail, task_result) {
1175 (&ShouldFail::No, Ok(())) |
1176 (&ShouldFail::Yes(None), Err(_)) => TrOk,
1177 (&ShouldFail::Yes(Some(msg)), Err(ref err))
1178 if err.downcast_ref::<String>()
1180 .or_else(|| err.downcast_ref::<&'static str>().map(|e| *e))
1181 .map(|e| e.contains(msg))
1182 .unwrap_or(false) => TrOk,
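// Summarizing the arms above (the remaining case falls through to a failure):
//
//   should_fail       task result                          test result
//   ---------------   ----------------------------------   -----------
//   No                Ok(())                               TrOk
//   Yes(None)         Err(..)                              TrOk
//   Yes(Some(msg))    Err(..) whose message contains msg   TrOk
//   anything else                                          TrFailed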
1189 pub fn new() -> MetricMap {
1190 MetricMap(TreeMap::new())
1193 /// Load a MetricMap from a file.
1197 /// This function will panic if the path does not exist or the path does not
1198 /// contain a valid metric map.
1199 pub fn load(p: &Path) -> MetricMap {
1200 assert!(p.exists());
1201 let mut f = File::open(p).unwrap();
1202 let value = json::from_reader(&mut f as &mut io::Reader).unwrap();
1203 let mut decoder = json::Decoder::new(value);
1204 MetricMap(match Decodable::decode(&mut decoder) {
1206 Err(e) => panic!("failure decoding JSON: {}", e)
1210 /// Write a MetricMap to a file.
1211 pub fn save(&self, p: &Path) -> io::IoResult<()> {
1212 let mut file = try!(File::create(p));
1213 let MetricMap(ref map) = *self;
1214 let mut enc = json::PrettyEncoder::new(&mut file);
1215 map.encode(&mut enc)
1218 /// Compare against another MetricMap. Optionally compare all
1219 /// measurements in the maps using the provided `noise_pct` as a
1220 /// percentage of each value to consider noise. If `None`, each
1221 /// measurement's noise threshold is independently chosen as the
1222 /// maximum of that measurement's recorded noise quantity in either map.
1224 pub fn compare_to_old(&self, old: &MetricMap,
1225 noise_pct: Option<f64>) -> MetricDiff {
1226 let mut diff : MetricDiff = TreeMap::new();
1227 let MetricMap(ref selfmap) = *self;
1228 let MetricMap(ref old) = *old;
1229 for (k, vold) in old.iter() {
1230 let r = match selfmap.get(k) {
1231 None => MetricRemoved,
1233 let delta = v.value - vold.value;
1234 let noise = match noise_pct {
1235 None => vold.noise.abs().max(v.noise.abs()),
1236 Some(pct) => vold.value * pct / 100.0
1238 if delta.abs() <= noise {
1241 let pct = delta.abs() / vold.value.max(f64::EPSILON) * 100.0;
1242 if vold.noise < 0.0 {
1243 // When 'noise' is negative, it means we want
1244 // to see deltas that go up over time, and can
1245 // only tolerate slight negative movement.
1252 // When 'noise' is positive, it means we want
1253 // to see deltas that go down over time, and
1254 // can only tolerate slight positive movements.
1264 diff.insert((*k).clone(), r);
1266 let MetricMap(ref map) = *self;
1267 for (k, _) in map.iter() {
1268 if !diff.contains_key(k) {
1269 diff.insert((*k).clone(), MetricAdded);
1275 /// Insert a named `value` (+/- `noise`) metric into the map. The value
1276 /// must be non-negative. The `noise` indicates the uncertainty of the
1277 /// metric, which doubles as the "noise range" of acceptable
1278 /// pairwise-regressions on this named value, when comparing from one
1279 /// metric to the next using `compare_to_old`.
1281 /// If `noise` is positive, then it means this metric is of a value
1282 /// you want to see grow smaller, so a change larger than `noise` in the
1283 /// positive direction represents a regression.
1285 /// If `noise` is negative, then it means this metric is of a value
1286 /// you want to see grow larger, so a change larger than `noise` in the
1287 /// negative direction represents a regression.
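///
/// A sketch of the two conventions (illustrative values):
///
/// ```ignore
/// let mut mm = MetricMap::new();
/// // "lower is better": a rise of more than 5.0 counts as a regression.
/// mm.insert_metric("runtime-ns", 1000.0, 5.0);
/// // "higher is better": a drop of more than 2.0 counts as a regression.
/// mm.insert_metric("throughput-mbps", 250.0, -2.0);
/// ```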
1288 pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
1293 let MetricMap(ref mut map) = *self;
1294 map.insert(name.to_string(), m);
1297 /// Attempt to "ratchet" an external metric file. This involves loading
1298 /// metrics from a metric file (if it exists), comparing against
1299 /// the metrics in `self` using `compare_to_old`, and rewriting the
1300 /// file to contain the metrics in `self` if none of the
1301 /// `MetricChange`s are `Regression`. Returns the diff as well
1302 /// as a boolean indicating whether the ratchet succeeded.
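///
/// A sketch of typical use (the file name is hypothetical):
///
/// ```ignore
/// let (diff, ok) = metrics.ratchet(&Path::new("metrics.json"), Some(5.0));
/// if !ok {
///     // at least one Regression was found; the ratchet file was left untouched
/// }
/// ```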
1303 pub fn ratchet(&self, p: &Path, pct: Option<f64>) -> (MetricDiff, bool) {
1304 let old = if p.exists() {
1310 let diff : MetricDiff = self.compare_to_old(&old, pct);
1311 let ok = diff.iter().all(|(_, v)| {
1313 Regression(_) => false,
1319 self.save(p).unwrap();
1328 /// A function that is opaque to the optimizer, to allow benchmarks to
1329 /// pretend to use outputs to assist in avoiding dead-code elimination.
1332 /// This function is a no-op, and does not even read from `dummy`.
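///
/// A minimal sketch of its use inside a benchmark (illustrative):
///
/// ```ignore
/// #[bench]
/// fn bench_sum(b: &mut Bencher) {
///     b.iter(|| {
///         let s = range(0u64, 1000).fold(0, |a, x| a + x);
///         // Without black_box the whole computation could be optimized away.
///         test::black_box(s)
///     });
/// }
/// ```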
1333 pub fn black_box<T>(dummy: T) {
1334 // we need to "use" the argument in some way LLVM can't introspect.
1336 unsafe {asm!("" : : "r"(&dummy))}
1341 /// Callback for benchmark functions to run in their body.
1342 pub fn iter<T>(&mut self, inner: || -> T) {
1343 self.dur = Duration::span(|| {
1344 let k = self.iterations;
1345 for _ in range(0u64, k) {
1351 pub fn ns_elapsed(&mut self) -> u64 {
1352 self.dur.num_nanoseconds().unwrap() as u64
1355 pub fn ns_per_iter(&mut self) -> u64 {
1356 if self.iterations == 0 {
1359 self.ns_elapsed() / cmp::max(self.iterations, 1)
1363 pub fn bench_n(&mut self, n: u64, f: |&mut Bencher|) {
1364 self.iterations = n;
1368 // This is a more statistics-driven benchmark algorithm
1369 pub fn auto_bench(&mut self, f: |&mut Bencher|) -> stats::Summary<f64> {
1371 // Initial bench run to get ballpark figure.
1373 self.bench_n(n, |x| f(x));
1375 // Try to estimate iter count for 1ms falling back to 1m
1376 // iterations if first run took < 1ns.
1377 if self.ns_per_iter() == 0 {
1380 n = 1_000_000 / cmp::max(self.ns_per_iter(), 1);
1382 // if the first run took more than 1ms we don't want to just
1383 // be left doing 0 iterations on every loop. The unfortunate
1384 // side effect of not being able to do as many runs is
1385 // automatically handled by the statistical analysis below
1386 // (i.e. larger error bars).
1387 if n == 0 { n = 1; }
1389 let mut total_run = Duration::nanoseconds(0);
1390 let samples : &mut [f64] = &mut [0.0_f64, ..50];
1392 let mut summ = None;
1393 let mut summ5 = None;
1395 let loop_run = Duration::span(|| {
1397 for p in samples.iter_mut() {
1398 self.bench_n(n, |x| f(x));
1399 *p = self.ns_per_iter() as f64;
1402 stats::winsorize(samples, 5.0);
1403 summ = Some(stats::Summary::new(samples));
1405 for p in samples.iter_mut() {
1406 self.bench_n(5 * n, |x| f(x));
1407 *p = self.ns_per_iter() as f64;
1410 stats::winsorize(samples, 5.0);
1411 summ5 = Some(stats::Summary::new(samples));
1413 let summ = summ.unwrap();
1414 let summ5 = summ5.unwrap();
1416 // If we've run for 100ms and seem to have converged to a stable median.
1418 if loop_run.num_milliseconds() > 100 &&
1419 summ.median_abs_dev_pct < 1.0 &&
1420 summ.median - summ5.median < summ5.median_abs_dev {
1424 total_run = total_run + loop_run;
1425 // Longest we ever run for is 3s.
1426 if total_run.num_seconds() > 3 {
1437 use std::time::Duration;
1438 use super::{Bencher, BenchSamples};
1440 pub fn benchmark(f: |&mut Bencher|) -> BenchSamples {
1441 let mut bs = Bencher {
1443 dur: Duration::nanoseconds(0),
1447 let ns_iter_summ = bs.auto_bench(f);
1449 let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
1450 let iter_s = 1_000_000_000 / ns_iter;
1451 let mb_s = (bs.bytes * iter_s) / 1_000_000;
1454 ns_iter_summ: ns_iter_summ,
1462 use test::{TrFailed, TrIgnored, TrOk, filter_tests, parse_opts,
1463 TestDesc, TestDescAndFn, TestOpts, run_test,
1464 Metric, MetricMap, MetricAdded, MetricRemoved,
1465 Improvement, Regression, LikelyNoise,
1466 StaticTestName, DynTestName, DynTestFn, ShouldFail};
1467 use std::io::TempDir;
1470 pub fn do_not_run_ignored_tests() {
1471 fn f() { panic!(); }
1472 let desc = TestDescAndFn {
1474 name: StaticTestName("whatever"),
1476 should_fail: ShouldFail::No,
1478 testfn: DynTestFn(proc() f()),
1480 let (tx, rx) = channel();
1481 run_test(&TestOpts::new(), false, desc, tx);
1482 let (_, res, _) = rx.recv();
1483 assert!(res != TrOk);
1487 pub fn ignored_tests_result_in_ignored() {
1489 let desc = TestDescAndFn {
1491 name: StaticTestName("whatever"),
1493 should_fail: ShouldFail::No,
1495 testfn: DynTestFn(proc() f()),
1497 let (tx, rx) = channel();
1498 run_test(&TestOpts::new(), false, desc, tx);
1499 let (_, res, _) = rx.recv();
1500 assert!(res == TrIgnored);
1504 fn test_should_fail() {
1505 fn f() { panic!(); }
1506 let desc = TestDescAndFn {
1508 name: StaticTestName("whatever"),
1510 should_fail: ShouldFail::Yes(None)
1512 testfn: DynTestFn(proc() f()),
1514 let (tx, rx) = channel();
1515 run_test(&TestOpts::new(), false, desc, tx);
1516 let (_, res, _) = rx.recv();
1517 assert!(res == TrOk);
1521 fn test_should_fail_good_message() {
1522 fn f() { panic!("an error message"); }
1523 let desc = TestDescAndFn {
1525 name: StaticTestName("whatever"),
1527 should_fail: ShouldFail::Yes(Some("error message"))
1529 testfn: DynTestFn(proc() f()),
1531 let (tx, rx) = channel();
1532 run_test(&TestOpts::new(), false, desc, tx);
1533 let (_, res, _) = rx.recv();
1534 assert!(res == TrOk);
1538 fn test_should_fail_bad_message() {
1539 fn f() { panic!("an error message"); }
1540 let desc = TestDescAndFn {
1542 name: StaticTestName("whatever"),
1544 should_fail: ShouldFail::Yes(Some("foobar"))
1546 testfn: DynTestFn(proc() f()),
1548 let (tx, rx) = channel();
1549 run_test(&TestOpts::new(), false, desc, tx);
1550 let (_, res, _) = rx.recv();
1551 assert!(res == TrFailed);
1555 fn test_should_fail_but_succeeds() {
1557 let desc = TestDescAndFn {
1559 name: StaticTestName("whatever"),
1561 should_fail: ShouldFail::Yes(None)
1563 testfn: DynTestFn(proc() f()),
1565 let (tx, rx) = channel();
1566 run_test(&TestOpts::new(), false, desc, tx);
1567 let (_, res, _) = rx.recv();
1568 assert!(res == TrFailed);
1572 fn first_free_arg_should_be_a_filter() {
1573 let args = vec!("progname".to_string(), "some_regex_filter".to_string());
1574 let opts = match parse_opts(args.as_slice()) {
1576 _ => panic!("Malformed arg in first_free_arg_should_be_a_filter")
1578 assert!(opts.filter.expect("should've found filter").is_match("some_regex_filter"))
1582 fn parse_ignored_flag() {
1583 let args = vec!("progname".to_string(),
1584 "filter".to_string(),
1585 "--ignored".to_string());
1586 let opts = match parse_opts(args.as_slice()) {
1588 _ => panic!("Malformed arg in parse_ignored_flag")
1590 assert!((opts.run_ignored));
1594 pub fn filter_for_ignored_option() {
1595 // When we run ignored tests the test filter should filter out all the
1596 // unignored tests and flip the ignore flag on the rest to false
1598 let mut opts = TestOpts::new();
1599 opts.run_tests = true;
1600 opts.run_ignored = true;
1605 name: StaticTestName("1"),
1607 should_fail: ShouldFail::No,
1609 testfn: DynTestFn(proc() {}),
1613 name: StaticTestName("2"),
1615 should_fail: ShouldFail::No,
1617 testfn: DynTestFn(proc() {}),
1619 let filtered = filter_tests(&opts, tests);
1621 assert_eq!(filtered.len(), 1);
1622 assert_eq!(filtered[0].desc.name.to_string(),
1624 assert!(filtered[0].desc.ignore == false);
1628 pub fn sort_tests() {
1629 let mut opts = TestOpts::new();
1630 opts.run_tests = true;
1633 vec!("sha1::test".to_string(),
1634 "int::test_to_str".to_string(),
1635 "int::test_pow".to_string(),
1636 "test::do_not_run_ignored_tests".to_string(),
1637 "test::ignored_tests_result_in_ignored".to_string(),
1638 "test::first_free_arg_should_be_a_filter".to_string(),
1639 "test::parse_ignored_flag".to_string(),
1640 "test::filter_for_ignored_option".to_string(),
1641 "test::sort_tests".to_string());
1645 let mut tests = Vec::new();
1646 for name in names.iter() {
1647 let test = TestDescAndFn {
1649 name: DynTestName((*name).clone()),
1651 should_fail: ShouldFail::No,
1653 testfn: DynTestFn(testfn),
1659 let filtered = filter_tests(&opts, tests);
1662 vec!("int::test_pow".to_string(),
1663 "int::test_to_str".to_string(),
1664 "sha1::test".to_string(),
1665 "test::do_not_run_ignored_tests".to_string(),
1666 "test::filter_for_ignored_option".to_string(),
1667 "test::first_free_arg_should_be_a_filter".to_string(),
1668 "test::ignored_tests_result_in_ignored".to_string(),
1669 "test::parse_ignored_flag".to_string(),
1670 "test::sort_tests".to_string());
1672 for (a, b) in expected.iter().zip(filtered.iter()) {
1673 assert!(*a == b.desc.name.to_string());
1678 pub fn filter_tests_regex() {
1679 let mut opts = TestOpts::new();
1680 opts.filter = Some(::regex::Regex::new("a.*b.+c").unwrap());
1682 let mut names = ["yes::abXc", "yes::aXXXbXXXXc",
1683 "no::XYZ", "no::abc"];
1687 let tests = names.iter().map(|name| {
1690 name: DynTestName(name.to_string()),
1692 should_fail: ShouldFail::No,
1694 testfn: DynTestFn(test_fn)
1697 let filtered = filter_tests(&opts, tests);
1699 let expected: Vec<&str> =
1700 names.iter().map(|&s| s).filter(|name| name.starts_with("yes")).collect();
1702 assert_eq!(filtered.len(), expected.len());
1703 for (test, expected_name) in filtered.iter().zip(expected.iter()) {
1704 assert_eq!(test.desc.name.as_slice(), *expected_name);
1709 pub fn test_metricmap_compare() {
1710 let mut m1 = MetricMap::new();
1711 let mut m2 = MetricMap::new();
1712 m1.insert_metric("in-both-noise", 1000.0, 200.0);
1713 m2.insert_metric("in-both-noise", 1100.0, 200.0);
1715 m1.insert_metric("in-first-noise", 1000.0, 2.0);
1716 m2.insert_metric("in-second-noise", 1000.0, 2.0);
1718 m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
1719 m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);
1721 m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
1722 m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);
1724 m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
1725 m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);
1727 m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
1728 m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);
1730 let diff1 = m2.compare_to_old(&m1, None);
1732 assert_eq!(*(diff1.get(&"in-both-noise".to_string()).unwrap()), LikelyNoise);
1733 assert_eq!(*(diff1.get(&"in-first-noise".to_string()).unwrap()), MetricRemoved);
1734 assert_eq!(*(diff1.get(&"in-second-noise".to_string()).unwrap()), MetricAdded);
1735 assert_eq!(*(diff1.get(&"in-both-want-downwards-but-regressed".to_string()).unwrap()),
1737 assert_eq!(*(diff1.get(&"in-both-want-downwards-and-improved".to_string()).unwrap()),
1739 assert_eq!(*(diff1.get(&"in-both-want-upwards-but-regressed".to_string()).unwrap()),
1741 assert_eq!(*(diff1.get(&"in-both-want-upwards-and-improved".to_string()).unwrap()),
1742 Improvement(100.0));
1743 assert_eq!(diff1.len(), 7);
1745 let diff2 = m2.compare_to_old(&m1, Some(200.0));
1747 assert_eq!(*(diff2.get(&"in-both-noise".to_string()).unwrap()), LikelyNoise);
1748 assert_eq!(*(diff2.get(&"in-first-noise".to_string()).unwrap()), MetricRemoved);
1749 assert_eq!(*(diff2.get(&"in-second-noise".to_string()).unwrap()), MetricAdded);
1750 assert_eq!(*(diff2.get(&"in-both-want-downwards-but-regressed".to_string()).unwrap()),
1752 assert_eq!(*(diff2.get(&"in-both-want-downwards-and-improved".to_string()).unwrap()),
1754 assert_eq!(*(diff2.get(&"in-both-want-upwards-but-regressed".to_string()).unwrap()),
1756 assert_eq!(*(diff2.get(&"in-both-want-upwards-and-improved".to_string()).unwrap()),
1758 assert_eq!(diff2.len(), 7);
1762 pub fn ratchet_test() {
1764 let dpth = TempDir::new("test-ratchet").ok().expect("missing test for ratchet");
1765 let pth = dpth.path().join("ratchet.json");
1767 let mut m1 = MetricMap::new();
1768 m1.insert_metric("runtime", 1000.0, 2.0);
1769 m1.insert_metric("throughput", 50.0, 2.0);
1771 let mut m2 = MetricMap::new();
1772 m2.insert_metric("runtime", 1100.0, 2.0);
1773 m2.insert_metric("throughput", 50.0, 2.0);
1775 m1.save(&pth).unwrap();
1777 // Ask for a ratchet that should fail to advance.
1778 let (diff1, ok1) = m2.ratchet(&pth, None);
1779 assert_eq!(ok1, false);
1780 assert_eq!(diff1.len(), 2);
1781 assert_eq!(*(diff1.get(&"runtime".to_string()).unwrap()), Regression(10.0));
1782 assert_eq!(*(diff1.get(&"throughput".to_string()).unwrap()), LikelyNoise);
1784 // Check that it was not rewritten.
1785 let m3 = MetricMap::load(&pth);
1786 let MetricMap(m3) = m3;
1787 assert_eq!(m3.len(), 2);
1788 assert_eq!(*(m3.get(&"runtime".to_string()).unwrap()), Metric::new(1000.0, 2.0));
1789 assert_eq!(*(m3.get(&"throughput".to_string()).unwrap()), Metric::new(50.0, 2.0));
1791 // Ask for a ratchet with an explicit noise-percentage override,
1792 // that should advance.
1793 let (diff2, ok2) = m2.ratchet(&pth, Some(10.0));
1794 assert_eq!(ok2, true);
1795 assert_eq!(diff2.len(), 2);
1796 assert_eq!(*(diff2.get(&"runtime".to_string()).unwrap()), LikelyNoise);
1797 assert_eq!(*(diff2.get(&"throughput".to_string()).unwrap()), LikelyNoise);
1799 // Check that it was rewritten.
1800 let m4 = MetricMap::load(&pth);
1801 let MetricMap(m4) = m4;
1802 assert_eq!(m4.len(), 2);
1803 assert_eq!(*(m4.get(&"runtime".to_string()).unwrap()), Metric::new(1100.0, 2.0));
1804 assert_eq!(*(m4.get(&"throughput".to_string()).unwrap()), Metric::new(50.0, 2.0));