// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! Support code for rustc's built in unit-test and micro-benchmarking
//! framework.
//!
//! Almost all user code will only be interested in `Bencher` and
//! `black_box`. All other interactions (such as writing tests and
//! benchmarks themselves) should be done via the `#[test]` and
//! `#[bench]` attributes.
//!
//! See the [Testing Guide](../guide-testing.html) for more details.
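//!
//! # Example (sketch)
//!
//! A typical benchmark as it would appear in a user crate; `b.iter`
//! runs the closure repeatedly under the harness's timing loop, and
//! `black_box` keeps the optimizer from deleting the work:
//!
//! ```ignore
//! extern crate test;
//!
//! use test::{Bencher, black_box};
//!
//! #[bench]
//! fn bench_xor_1000_ints(b: &mut Bencher) {
//!     b.iter(|| {
//!         black_box(range(0u, 1000).fold(0, |old, new| old ^ new));
//!     });
//! }
//! ```
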
// Currently, not much of this is meant for users. It is intended to
// support the simplest interface possible for representing and
// running tests while providing a base that other test frameworks may
// build on.

#![crate_name = "test"]
#![crate_type = "rlib"]
#![crate_type = "dylib"]
#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
       html_favicon_url = "http://www.rust-lang.org/favicon.ico",
       html_root_url = "http://doc.rust-lang.org/nightly/")]
#![allow(unknown_features)]
#![feature(asm, slicing_syntax)]
#![feature(box_syntax)]
#![feature(int_uint)]

extern crate getopts;
extern crate regex;
extern crate serialize;
extern crate "serialize" as rustc_serialize;
extern crate term;

pub use self::TestFn::*;
pub use self::MetricChange::*;
pub use self::ColorConfig::*;
pub use self::TestResult::*;
pub use self::TestName::*;
use self::TestEvent::*;
use self::NamePadding::*;
use self::OutputLocation::*;

use getopts::{OptGroup, optflag, optopt};
use regex::Regex;
use serialize::{json, Decodable, Encodable};
use term::color::{Color, RED, YELLOW, GREEN, CYAN};

use std::any::Any;
use std::cmp;
use std::collections::BTreeMap;
use std::f64;
use std::fmt;
use std::io::fs::PathExtensions;
use std::io::stdio::StdWriter;
use std::io::{File, ChanReader, ChanWriter};
use std::io;
use std::iter::repeat;
use std::num::{Float, Int};
use std::os;
use std::str::FromStr;
use std::sync::mpsc::{channel, Sender};
use std::thread::{self, Thread};
use std::thunk::{Thunk, Invoke};
use std::time::Duration;

// to be used by rustc to compile tests in libtest
pub mod test {
    pub use {Bencher, TestName, TestResult, TestDesc,
             TestDescAndFn, TestOpts, TrFailed, TrIgnored, TrOk,
             Metric, MetricMap, MetricAdded, MetricRemoved,
             MetricChange, Improvement, Regression, LikelyNoise,
             StaticTestFn, StaticTestName, DynTestName, DynTestFn,
             run_test, test_main, test_main_static, filter_tests,
             parse_opts, StaticBenchFn, ShouldFail};
}

pub mod stats;

// The name of a test. By convention this follows the rules for Rust
// paths; i.e., it should be a series of identifiers separated by double
// colons. This way, if some test runner wants to arrange the tests
// hierarchically, it may.

#[derive(Clone, PartialEq, Eq, Hash, Show)]
pub enum TestName {
    StaticTestName(&'static str),
    DynTestName(String)
}
impl TestName {
    fn as_slice<'a>(&'a self) -> &'a str {
        match *self {
            StaticTestName(s) => s,
            DynTestName(ref s) => s.as_slice()
        }
    }
}

impl fmt::Display for TestName {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Display::fmt(self.as_slice(), f)
    }
}

#[derive(Clone, Copy)]
enum NamePadding {
    PadNone,
    PadOnLeft,
    PadOnRight,
}

impl TestDesc {
    fn padded_name(&self, column_count: uint, align: NamePadding) -> String {
        let mut name = String::from_str(self.name.as_slice());
        let fill = column_count.saturating_sub(name.len());
        let mut pad = repeat(" ").take(fill).collect::<String>();
        match align {
            PadNone => name,
            PadOnLeft => {
                pad.push_str(name.as_slice());
                pad
            }
            PadOnRight => {
                name.push_str(pad.as_slice());
                name
            }
        }
    }
}

/// Represents a benchmark function.
pub trait TDynBenchFn {
    fn run(&self, harness: &mut Bencher);
}

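// A hedged sketch of the dynamic-bench path: any value implementing
// `TDynBenchFn` can be boxed into the `DynBenchFn` variant below.
// `SumBench` is illustrative only, not part of the library:
//
//     struct SumBench { upto: u64 }
//
//     impl TDynBenchFn for SumBench {
//         fn run(&self, harness: &mut Bencher) {
//             let n = self.upto;
//             harness.iter(|| range(0, n).fold(0u64, |a, b| a + b));
//         }
//     }
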
// A function that runs a test. If the function returns successfully,
// the test succeeds; if the function panics then the test fails. We
// may need to come up with a more clever definition of test in order
// to support isolation of tests into tasks.
pub enum TestFn {
    StaticTestFn(fn()),
    StaticBenchFn(fn(&mut Bencher)),
    StaticMetricFn(fn(&mut MetricMap)),
    DynTestFn(Thunk),
    DynMetricFn(Box<for<'a> Invoke<&'a mut MetricMap>+'static>),
    DynBenchFn(Box<TDynBenchFn+'static>)
}

impl TestFn {
    fn padding(&self) -> NamePadding {
        match self {
            &StaticTestFn(..) => PadNone,
            &StaticBenchFn(..) => PadOnRight,
            &StaticMetricFn(..) => PadOnRight,
            &DynTestFn(..) => PadNone,
            &DynMetricFn(..) => PadOnRight,
            &DynBenchFn(..) => PadOnRight,
        }
    }
}

impl fmt::Debug for TestFn {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str(match *self {
            StaticTestFn(..) => "StaticTestFn(..)",
            StaticBenchFn(..) => "StaticBenchFn(..)",
            StaticMetricFn(..) => "StaticMetricFn(..)",
            DynTestFn(..) => "DynTestFn(..)",
            DynMetricFn(..) => "DynMetricFn(..)",
            DynBenchFn(..) => "DynBenchFn(..)"
        })
    }
}

/// Manager of the benchmarking runs.
///
/// This is fed into functions marked with `#[bench]` to allow for
/// set-up & tear-down before running a piece of code repeatedly via a
/// call to `iter`.
#[derive(Copy)]
pub struct Bencher {
    iterations: u64,
    dur: Duration,
    pub bytes: u64,
}

#[derive(Copy, Clone, Show, PartialEq, Eq, Hash)]
pub enum ShouldFail {
    No,
    Yes(Option<&'static str>)
}

// The definition of a single test. A test runner will run a list of
// these.
#[derive(Clone, Show, PartialEq, Eq, Hash)]
pub struct TestDesc {
    pub name: TestName,
    pub ignore: bool,
    pub should_fail: ShouldFail,
}

unsafe impl Send for TestDesc {}

pub struct TestDescAndFn {
    pub desc: TestDesc,
    pub testfn: TestFn,
}

#[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Show, Copy)]
pub struct Metric {
    value: f64,
    noise: f64
}

impl Metric {
    pub fn new(value: f64, noise: f64) -> Metric {
        Metric {value: value, noise: noise}
    }
}

#[derive(PartialEq)]
pub struct MetricMap(BTreeMap<String,Metric>);

impl Clone for MetricMap {
    fn clone(&self) -> MetricMap {
        let MetricMap(ref map) = *self;
        MetricMap(map.clone())
    }
}

/// Analysis of a single change in a metric.
#[derive(Copy, PartialEq, Show)]
pub enum MetricChange {
    LikelyNoise,
    MetricAdded,
    MetricRemoved,
    Improvement(f64),
    Regression(f64)
}

pub type MetricDiff = BTreeMap<String,MetricChange>;

// The default console test runner. It accepts the command line
// arguments and a vector of test_descs.
pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>) {
    let opts =
        match parse_opts(args) {
            Some(Ok(o)) => o,
            Some(Err(msg)) => panic!("{:?}", msg),
            None => return
        };
    match run_tests_console(&opts, tests) {
        Ok(true) => {}
        Ok(false) => panic!("Some tests failed"),
        Err(e) => panic!("io error when running tests: {:?}", e),
    }
}

// A variant optimized for invocation with a static test vector.
// This will panic (intentionally) when fed any dynamic tests, because
// it is copying the static values out into a dynamic vector and cannot
// copy dynamic values. It is doing this because from this point on
// a Vec<TestDescAndFn> is used in order to effect ownership-transfer
// semantics into parallel test runners, which in turn requires a Vec<>
// rather than a &[].
pub fn test_main_static(args: &[String], tests: &[TestDescAndFn]) {
    let owned_tests = tests.iter().map(|t| {
        match t.testfn {
            StaticTestFn(f) => TestDescAndFn { testfn: StaticTestFn(f), desc: t.desc.clone() },
            StaticBenchFn(f) => TestDescAndFn { testfn: StaticBenchFn(f), desc: t.desc.clone() },
            _ => panic!("non-static tests passed to test::test_main_static")
        }
    }).collect();
    test_main(args, owned_tests)
}

#[derive(Copy)]
pub enum ColorConfig {
    AutoColor,
    AlwaysColor,
    NeverColor,
}

pub struct TestOpts {
    pub filter: Option<Regex>,
    pub run_ignored: bool,
    pub run_tests: bool,
    pub run_benchmarks: bool,
    pub ratchet_metrics: Option<Path>,
    pub ratchet_noise_percent: Option<f64>,
    pub save_metrics: Option<Path>,
    pub test_shard: Option<(uint,uint)>,
    pub logfile: Option<Path>,
    pub nocapture: bool,
    pub color: ColorConfig,
    pub show_boxplot: bool,
    pub boxplot_width: uint,
    pub show_all_stats: bool,
}

impl TestOpts {
    #[cfg(test)]
    fn new() -> TestOpts {
        TestOpts {
            filter: None,
            run_ignored: false,
            run_tests: false,
            run_benchmarks: false,
            ratchet_metrics: None,
            ratchet_noise_percent: None,
            save_metrics: None,
            test_shard: None,
            logfile: None,
            nocapture: false,
            color: AutoColor,
            show_boxplot: false,
            boxplot_width: 50,
            show_all_stats: false,
        }
    }
}

/// Result of parsing the options.
pub type OptRes = Result<TestOpts, String>;

fn optgroups() -> Vec<getopts::OptGroup> {
    vec!(getopts::optflag("", "ignored", "Run ignored tests"),
      getopts::optflag("", "test", "Run tests and not benchmarks"),
      getopts::optflag("", "bench", "Run benchmarks instead of tests"),
      getopts::optflag("h", "help", "Display this message (longer with --help)"),
      getopts::optopt("", "save-metrics", "Location to save bench metrics",
                     "PATH"),
      getopts::optopt("", "ratchet-metrics",
                     "Location to load and save metrics from. The metrics \
                      loaded can cause benchmarks to fail if they run too \
                      slowly", "PATH"),
      getopts::optopt("", "ratchet-noise-percent",
                     "Tests within N% of the recorded metrics will be \
                      considered as passing", "PERCENTAGE"),
      getopts::optopt("", "logfile", "Write logs to the specified file instead \
                          of stdout", "PATH"),
      getopts::optopt("", "test-shard", "run shard A, of B shards, worth of the testsuite",
                     "A.B"),
      getopts::optflag("", "nocapture", "don't capture stdout/stderr of each \
                                         task, allow printing directly"),
      getopts::optopt("", "color", "Configure coloring of output:
            auto   = colorize if stdout is a tty and tests are run serially (default);
            always = always colorize output;
            never  = never colorize output;", "auto|always|never"),
      getopts::optflag("", "boxplot", "Display a boxplot of the benchmark statistics"),
      getopts::optopt("", "boxplot-width", "Set the boxplot width (default 50)", "WIDTH"),
      getopts::optflag("", "stats", "Display the benchmark min, max, and quartiles"))
}

fn usage(binary: &str) {
    let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
    println!(r#"{usage}

The FILTER regex is tested against the name of all tests to run, and
only those tests that match are run.

By default, all tests are run in parallel. This can be altered with the
RUST_TEST_TASKS environment variable when running tests (set it to 1).

All tests have their standard output and standard error captured by default.
This can be overridden with the --nocapture flag or the RUST_TEST_NOCAPTURE=1
environment variable. Logging is not captured by default.

Test Attributes:

    #[test]        - Indicates a function is a test to be run. This function
                     takes no arguments.
    #[bench]       - Indicates a function is a benchmark to be run. This
                     function takes one argument (test::Bencher).
    #[should_fail] - This function (also labeled with #[test]) will only pass if
                     the code causes a failure (an assertion failure or panic!)
                     A message may be provided, which the failure string must
                     contain: #[should_fail(expected = "foo")].
    #[ignore]      - When applied to a function which is already attributed as a
                     test, then the test runner will ignore these tests during
                     normal test runs. Running with --ignored will run these
                     tests."#,
             usage = getopts::usage(message.as_slice(),
                                    optgroups().as_slice()));
}

// Parses command line arguments into test options
pub fn parse_opts(args: &[String]) -> Option<OptRes> {
    let args_ = args.tail();
    let matches =
        match getopts::getopts(args_.as_slice(), optgroups().as_slice()) {
          Ok(m) => m,
          Err(f) => return Some(Err(f.to_string()))
        };

    if matches.opt_present("h") { usage(args[0].as_slice()); return None; }

    let filter = if matches.free.len() > 0 {
        let s = matches.free[0].as_slice();
        match Regex::new(s) {
            Ok(re) => Some(re),
            Err(e) => return Some(Err(format!("could not parse /{}/: {:?}", s, e)))
        }
    } else {
        None
    };

    let run_ignored = matches.opt_present("ignored");

    let logfile = matches.opt_str("logfile");
    let logfile = logfile.map(|s| Path::new(s));

    let run_benchmarks = matches.opt_present("bench");
    let run_tests = ! run_benchmarks ||
        matches.opt_present("test");

    let ratchet_metrics = matches.opt_str("ratchet-metrics");
    let ratchet_metrics = ratchet_metrics.map(|s| Path::new(s));

    let ratchet_noise_percent = matches.opt_str("ratchet-noise-percent");
    let ratchet_noise_percent =
        ratchet_noise_percent.map(|s| s.as_slice().parse::<f64>().unwrap());

    let save_metrics = matches.opt_str("save-metrics");
    let save_metrics = save_metrics.map(|s| Path::new(s));

    let test_shard = matches.opt_str("test-shard");
    let test_shard = opt_shard(test_shard);

    let mut nocapture = matches.opt_present("nocapture");
    if !nocapture {
        nocapture = os::getenv("RUST_TEST_NOCAPTURE").is_some();
    }

    let color = match matches.opt_str("color").as_ref().map(|s| s.as_slice()) {
        Some("auto") | None => AutoColor,
        Some("always") => AlwaysColor,
        Some("never") => NeverColor,

        Some(v) => return Some(Err(format!("argument for --color must be \
                                            auto, always, or never (was {})",
                                           v)))
    };

    let show_boxplot = matches.opt_present("boxplot");
    let boxplot_width = match matches.opt_str("boxplot-width") {
        Some(width) => {
            match FromStr::from_str(width.as_slice()) {
                Some(width) => width,
                None => {
                    return Some(Err(format!("argument for --boxplot-width must be a uint")));
                }
            }
        }
        None => 50,
    };

    let show_all_stats = matches.opt_present("stats");

    let test_opts = TestOpts {
        filter: filter,
        run_ignored: run_ignored,
        run_tests: run_tests,
        run_benchmarks: run_benchmarks,
        ratchet_metrics: ratchet_metrics,
        ratchet_noise_percent: ratchet_noise_percent,
        save_metrics: save_metrics,
        test_shard: test_shard,
        logfile: logfile,
        nocapture: nocapture,
        color: color,
        show_boxplot: show_boxplot,
        boxplot_width: boxplot_width,
        show_all_stats: show_all_stats,
    };

    Some(Ok(test_opts))
}

pub fn opt_shard(maybestr: Option<String>) -> Option<(uint,uint)> {
    match maybestr {
        None => None,
        Some(s) => {
            let mut it = s.split('.');
            match (it.next().and_then(|s| s.parse::<uint>()),
                   it.next().and_then(|s| s.parse::<uint>()),
                   it.next()) {
                (Some(a), Some(b), None) => {
                    if a <= 0 || a > b {
                        panic!("tried to run shard {a}.{b}, but {a} is out of bounds \
                              (should be between 1 and {b})", a=a, b=b)
                    }
                    Some((a, b))
                }
                _ => None,
            }
        }
    }
}

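// A minimal sketch of the expected parse behavior; this test is
// illustrative and was not part of the original suite.
#[test]
fn opt_shard_parses_a_dot_b() {
    assert_eq!(opt_shard(Some("2.8".to_string())), Some((2u, 8u)));
    assert!(opt_shard(Some("not-a-shard".to_string())).is_none());
    assert!(opt_shard(None).is_none());
}
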
#[derive(Clone, PartialEq)]
pub struct BenchSamples {
    ns_iter_summ: stats::Summary<f64>,
    mb_s: uint,
}

#[derive(Clone, PartialEq)]
pub enum TestResult {
    TrOk,
    TrFailed,
    TrIgnored,
    TrMetrics(MetricMap),
    TrBench(BenchSamples),
}

unsafe impl Send for TestResult {}

enum OutputLocation<T> {
    Pretty(Box<term::Terminal<term::WriterWrapper> + Send>),
    Raw(T),
}

struct ConsoleTestState<T> {
    log_out: Option<File>,
    out: OutputLocation<T>,
    use_color: bool,
    show_boxplot: bool,
    boxplot_width: uint,
    show_all_stats: bool,
    total: uint,
    passed: uint,
    failed: uint,
    ignored: uint,
    measured: uint,
    metrics: MetricMap,
    failures: Vec<(TestDesc, Vec<u8>)>,
    max_name_len: uint, // number of columns to fill when aligning names
}

impl<T: Writer> ConsoleTestState<T> {
    pub fn new(opts: &TestOpts,
               _: Option<T>) -> io::IoResult<ConsoleTestState<StdWriter>> {
        let log_out = match opts.logfile {
            Some(ref path) => Some(try!(File::create(path))),
            None => None
        };
        let out = match term::stdout() {
            None => Raw(io::stdio::stdout_raw()),
            Some(t) => Pretty(t)
        };

        Ok(ConsoleTestState {
            out: out,
            log_out: log_out,
            use_color: use_color(opts),
            show_boxplot: opts.show_boxplot,
            boxplot_width: opts.boxplot_width,
            show_all_stats: opts.show_all_stats,
            total: 0u,
            passed: 0u,
            failed: 0u,
            ignored: 0u,
            measured: 0u,
            metrics: MetricMap::new(),
            failures: Vec::new(),
            max_name_len: 0u,
        })
    }

    pub fn write_ok(&mut self) -> io::IoResult<()> {
        self.write_pretty("ok", term::color::GREEN)
    }

    pub fn write_failed(&mut self) -> io::IoResult<()> {
        self.write_pretty("FAILED", term::color::RED)
    }

    pub fn write_ignored(&mut self) -> io::IoResult<()> {
        self.write_pretty("ignored", term::color::YELLOW)
    }

    pub fn write_metric(&mut self) -> io::IoResult<()> {
        self.write_pretty("metric", term::color::CYAN)
    }

    pub fn write_bench(&mut self) -> io::IoResult<()> {
        self.write_pretty("bench", term::color::CYAN)
    }

    pub fn write_added(&mut self) -> io::IoResult<()> {
        self.write_pretty("added", term::color::GREEN)
    }

    pub fn write_improved(&mut self) -> io::IoResult<()> {
        self.write_pretty("improved", term::color::GREEN)
    }

    pub fn write_removed(&mut self) -> io::IoResult<()> {
        self.write_pretty("removed", term::color::YELLOW)
    }

    pub fn write_regressed(&mut self) -> io::IoResult<()> {
        self.write_pretty("regressed", term::color::RED)
    }

    pub fn write_pretty(&mut self,
                        word: &str,
                        color: term::color::Color) -> io::IoResult<()> {
        match self.out {
            Pretty(ref mut term) => {
                if self.use_color {
                    try!(term.fg(color));
                }
                try!(term.write(word.as_bytes()));
                if self.use_color {
                    try!(term.reset());
                }
                Ok(())
            }
            Raw(ref mut stdout) => stdout.write(word.as_bytes())
        }
    }

    pub fn write_plain(&mut self, s: &str) -> io::IoResult<()> {
        match self.out {
            Pretty(ref mut term) => term.write(s.as_bytes()),
            Raw(ref mut stdout) => stdout.write(s.as_bytes())
        }
    }

    pub fn write_run_start(&mut self, len: uint) -> io::IoResult<()> {
        self.total = len;
        let noun = if len != 1 { "tests" } else { "test" };
        self.write_plain(format!("\nrunning {} {}\n", len, noun).as_slice())
    }

    pub fn write_test_start(&mut self, test: &TestDesc,
                            align: NamePadding) -> io::IoResult<()> {
        let name = test.padded_name(self.max_name_len, align);
        self.write_plain(format!("test {} ... ", name).as_slice())
    }

    pub fn write_result(&mut self, result: &TestResult) -> io::IoResult<()> {
        try!(match *result {
            TrOk => self.write_ok(),
            TrFailed => self.write_failed(),
            TrIgnored => self.write_ignored(),
            TrMetrics(ref mm) => {
                try!(self.write_metric());
                self.write_plain(format!(": {}", fmt_metrics(mm)).as_slice())
            }
            TrBench(ref bs) => {
                try!(self.write_bench());

                if self.show_boxplot {
                    let mut wr = Vec::new();

                    try!(stats::write_boxplot(&mut wr, &bs.ns_iter_summ, self.boxplot_width));

                    let s = String::from_utf8(wr).unwrap();

                    try!(self.write_plain(format!(": {}", s).as_slice()));
                }

                if self.show_all_stats {
                    let mut wr = Vec::new();

                    try!(stats::write_5_number_summary(&mut wr, &bs.ns_iter_summ));

                    let s = String::from_utf8(wr).unwrap();

                    try!(self.write_plain(format!(": {}", s).as_slice()));
                } else {
                    try!(self.write_plain(format!(": {}",
                                                  fmt_bench_samples(bs)).as_slice()));
                }

                Ok(())
            }
        });
        self.write_plain("\n")
    }

    pub fn write_log(&mut self, test: &TestDesc,
                     result: &TestResult) -> io::IoResult<()> {
        match self.log_out {
            None => Ok(()),
            Some(ref mut o) => {
                let s = format!("{} {}\n", match *result {
                    TrOk => "ok".to_string(),
                    TrFailed => "failed".to_string(),
                    TrIgnored => "ignored".to_string(),
                    TrMetrics(ref mm) => fmt_metrics(mm),
                    TrBench(ref bs) => fmt_bench_samples(bs)
                }, test.name.as_slice());
                o.write(s.as_bytes())
            }
        }
    }

    pub fn write_failures(&mut self) -> io::IoResult<()> {
        try!(self.write_plain("\nfailures:\n"));
        let mut failures = Vec::new();
        let mut fail_out = String::new();
        for &(ref f, ref stdout) in self.failures.iter() {
            failures.push(f.name.to_string());
            if stdout.len() > 0 {
                fail_out.push_str(format!("---- {} stdout ----\n\t",
                                          f.name.as_slice()).as_slice());
                let output = String::from_utf8_lossy(stdout.as_slice());
                fail_out.push_str(output.as_slice());
                fail_out.push_str("\n");
            }
        }
        if fail_out.len() > 0 {
            try!(self.write_plain("\n"));
            try!(self.write_plain(fail_out.as_slice()));
        }

        try!(self.write_plain("\nfailures:\n"));
        failures.sort();
        for name in failures.iter() {
            try!(self.write_plain(format!("    {}\n",
                                          name.as_slice()).as_slice()));
        }
        Ok(())
    }

    pub fn write_metric_diff(&mut self, diff: &MetricDiff) -> io::IoResult<()> {
        let mut noise = 0u;
        let mut improved = 0u;
        let mut regressed = 0u;
        let mut added = 0u;
        let mut removed = 0u;

        for (k, v) in diff.iter() {
            match *v {
                LikelyNoise => noise += 1,
                MetricAdded => {
                    added += 1;
                    try!(self.write_added());
                    try!(self.write_plain(format!(": {}\n", *k).as_slice()));
                }
                MetricRemoved => {
                    removed += 1;
                    try!(self.write_removed());
                    try!(self.write_plain(format!(": {}\n", *k).as_slice()));
                }
                Improvement(pct) => {
                    improved += 1;
                    try!(self.write_plain(format!(": {} ", *k).as_slice()));
                    try!(self.write_improved());
                    try!(self.write_plain(format!(" by {:.2}%\n",
                                                  pct as f64).as_slice()));
                }
                Regression(pct) => {
                    regressed += 1;
                    try!(self.write_plain(format!(": {} ", *k).as_slice()));
                    try!(self.write_regressed());
                    try!(self.write_plain(format!(" by {:.2}%\n",
                                                  pct as f64).as_slice()));
                }
            }
        }
        try!(self.write_plain(format!("result of ratchet: {} metrics added, \
                                       {} removed, {} improved, {} regressed, \
                                       {} noise\n",
                                      added, removed, improved, regressed,
                                      noise).as_slice()));
        if regressed == 0u {
            try!(self.write_plain("updated ratchet file\n"));
        } else {
            try!(self.write_plain("left ratchet file untouched\n"));
        }
        Ok(())
    }

    pub fn write_run_finish(&mut self,
                            ratchet_metrics: &Option<Path>,
                            ratchet_pct: Option<f64>) -> io::IoResult<bool> {
        assert!(self.passed + self.failed + self.ignored + self.measured == self.total);

        let ratchet_success = match *ratchet_metrics {
            None => true,
            Some(ref pth) => {
                try!(self.write_plain(format!("\nusing metrics ratchet: {:?}\n",
                                              pth.display()).as_slice()));
                match ratchet_pct {
                    None => (),
                    Some(pct) =>
                        try!(self.write_plain(format!("with noise-tolerance \
                                                       forced to: {}%\n",
                                                      pct).as_slice()))
                }
                let (diff, ok) = self.metrics.ratchet(pth, ratchet_pct);
                try!(self.write_metric_diff(&diff));
                ok
            }
        };

        let test_success = self.failed == 0u;
        if !test_success {
            try!(self.write_failures());
        }

        let success = ratchet_success && test_success;

        try!(self.write_plain("\ntest result: "));
        if success {
            // There's no parallelism at this point so it's safe to use color
            try!(self.write_ok());
        } else {
            try!(self.write_failed());
        }
        let s = format!(". {} passed; {} failed; {} ignored; {} measured\n\n",
                        self.passed, self.failed, self.ignored, self.measured);
        try!(self.write_plain(s.as_slice()));
        return Ok(success);
    }
}

pub fn fmt_metrics(mm: &MetricMap) -> String {
    let MetricMap(ref mm) = *mm;
    let v : Vec<String> = mm.iter()
        .map(|(k,v)| format!("{}: {} (+/- {})", *k,
                             v.value as f64, v.noise as f64))
        .collect();
    v.connect(", ")
}

pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
    if bs.mb_s != 0 {
        format!("{:>9} ns/iter (+/- {}) = {} MB/s",
                bs.ns_iter_summ.median as uint,
                (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint,
                bs.mb_s)
    } else {
        format!("{:>9} ns/iter (+/- {})",
                bs.ns_iter_summ.median as uint,
                (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint)
    }
}

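// Illustrative output shapes (numbers invented):
//
//       1234 ns/iter (+/- 56) = 128 MB/s    (bytes set on the Bencher)
//       1234 ns/iter (+/- 56)               (no throughput configured)
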
// A simple console test runner
pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::IoResult<bool> {

    fn callback<T: Writer>(event: &TestEvent, st: &mut ConsoleTestState<T>) -> io::IoResult<()> {
        match (*event).clone() {
            TeFiltered(ref filtered_tests) => st.write_run_start(filtered_tests.len()),
            TeWait(ref test, padding) => st.write_test_start(test, padding),
            TeResult(test, result, stdout) => {
                try!(st.write_log(&test, &result));
                try!(st.write_result(&result));
                match result {
                    TrOk => st.passed += 1,
                    TrIgnored => st.ignored += 1,
                    TrMetrics(mm) => {
                        let tname = test.name.as_slice();
                        let MetricMap(mm) = mm;
                        for (k,v) in mm.iter() {
                            st.metrics
                              .insert_metric(format!("{}.{}",
                                                     tname,
                                                     k).as_slice(),
                                             v.value,
                                             v.noise);
                        }
                        st.measured += 1
                    }
                    TrBench(bs) => {
                        st.metrics.insert_metric(test.name.as_slice(),
                                                 bs.ns_iter_summ.median,
                                                 bs.ns_iter_summ.max - bs.ns_iter_summ.min);
                        st.measured += 1
                    }
                    TrFailed => {
                        st.failed += 1;
                        st.failures.push((test, stdout));
                    }
                }
                Ok(())
            }
        }
    }

    let mut st = try!(ConsoleTestState::new(opts, None::<StdWriter>));
    fn len_if_padded(t: &TestDescAndFn) -> uint {
        match t.testfn.padding() {
            PadNone => 0u,
            PadOnLeft | PadOnRight => t.desc.name.as_slice().len(),
        }
    }
    match tests.iter().max_by(|t|len_if_padded(*t)) {
        Some(t) => {
            let n = t.desc.name.as_slice();
            st.max_name_len = n.len();
        },
        None => {}
    }
    try!(run_tests(opts, tests, |x| callback(&x, &mut st)));
    match opts.save_metrics {
        None => (),
        Some(ref pth) => {
            try!(st.metrics.save(pth));
            try!(st.write_plain(format!("\nmetrics saved to: {:?}",
                                        pth.display()).as_slice()));
        }
    }
    return st.write_run_finish(&opts.ratchet_metrics, opts.ratchet_noise_percent);
}

#[test]
fn should_sort_failures_before_printing_them() {
    let test_a = TestDesc {
        name: StaticTestName("a"),
        ignore: false,
        should_fail: ShouldFail::No
    };

    let test_b = TestDesc {
        name: StaticTestName("b"),
        ignore: false,
        should_fail: ShouldFail::No
    };

    let mut st = ConsoleTestState {
        log_out: None,
        out: Raw(Vec::new()),
        use_color: false,
        show_boxplot: false,
        boxplot_width: 0,
        show_all_stats: false,
        total: 0u,
        passed: 0u,
        failed: 0u,
        ignored: 0u,
        measured: 0u,
        max_name_len: 10u,
        metrics: MetricMap::new(),
        failures: vec!((test_b, Vec::new()), (test_a, Vec::new()))
    };

    st.write_failures().unwrap();
    let s = match st.out {
        Raw(ref m) => String::from_utf8_lossy(&m[]),
        Pretty(_) => unreachable!()
    };

    let apos = s.find_str("a").unwrap();
    let bpos = s.find_str("b").unwrap();
    assert!(apos < bpos);
}

fn use_color(opts: &TestOpts) -> bool {
    match opts.color {
        AutoColor => get_concurrency() == 1 && io::stdout().get_ref().isatty(),
        AlwaysColor => true,
        NeverColor => false,
    }
}

#[derive(Clone)]
enum TestEvent {
    TeFiltered(Vec<TestDesc>),
    TeWait(TestDesc, NamePadding),
    TeResult(TestDesc, TestResult, Vec<u8>),
}

pub type MonitorMsg = (TestDesc, TestResult, Vec<u8>);

fn run_tests<F>(opts: &TestOpts,
                tests: Vec<TestDescAndFn>,
                mut callback: F) -> io::IoResult<()> where
    F: FnMut(TestEvent) -> io::IoResult<()>,
{
    let filtered_tests = filter_tests(opts, tests);
    let filtered_descs = filtered_tests.iter()
                                       .map(|t| t.desc.clone())
                                       .collect();

    try!(callback(TeFiltered(filtered_descs)));

    let (filtered_tests, filtered_benchs_and_metrics): (Vec<_>, _) =
        filtered_tests.into_iter().partition(|e| {
            match e.testfn {
                StaticTestFn(_) | DynTestFn(_) => true,
                _ => false
            }
        });

    // It's tempting to just spawn all the tests at once, but since we have
    // many tests that run in other processes we would be making a big mess.
    let concurrency = get_concurrency();

    let mut remaining = filtered_tests;
    remaining.reverse();
    let mut pending = 0;

    let (tx, rx) = channel::<MonitorMsg>();

    while pending > 0 || !remaining.is_empty() {
        while pending < concurrency && !remaining.is_empty() {
            let test = remaining.pop().unwrap();
            if concurrency == 1 {
                // We are doing one test at a time so we can print the name
                // of the test before we run it. Useful for debugging tests
                // that hang forever.
                try!(callback(TeWait(test.desc.clone(), test.testfn.padding())));
            }
            run_test(opts, !opts.run_tests, test, tx.clone());
            pending += 1;
        }

        let (desc, result, stdout) = rx.recv().unwrap();
        if concurrency != 1 {
            try!(callback(TeWait(desc.clone(), PadNone)));
        }
        try!(callback(TeResult(desc, result, stdout)));
        pending -= 1;
    }

    // All benchmarks run at the end, in serial.
    // (this includes metric fns)
    for b in filtered_benchs_and_metrics.into_iter() {
        try!(callback(TeWait(b.desc.clone(), b.testfn.padding())));
        run_test(opts, !opts.run_benchmarks, b, tx.clone());
        let (test, result, stdout) = rx.recv().unwrap();
        try!(callback(TeResult(test, result, stdout)));
    }
    Ok(())
}

fn get_concurrency() -> uint {
    use std::rt;
    match os::getenv("RUST_TEST_TASKS") {
        Some(s) => {
            let opt_n: Option<uint> = FromStr::from_str(s.as_slice());
            match opt_n {
                Some(n) if n > 0 => n,
                _ => panic!("RUST_TEST_TASKS is `{}`, should be a positive integer.", s)
            }
        }
        None => {
            rt::default_sched_threads()
        }
    }
}

pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
    let mut filtered = tests;

    // Remove tests that don't match the test filter
    filtered = match opts.filter {
        None => filtered,
        Some(ref re) => {
            filtered.into_iter()
                .filter(|test| re.is_match(test.desc.name.as_slice())).collect()
        }
    };

    // Maybe pull out the ignored tests and unignore them
    filtered = if !opts.run_ignored {
        filtered
    } else {
        fn filter(test: TestDescAndFn) -> Option<TestDescAndFn> {
            if test.desc.ignore {
                let TestDescAndFn {desc, testfn} = test;
                Some(TestDescAndFn {
                    desc: TestDesc {ignore: false, ..desc},
                    testfn: testfn
                })
            } else {
                None
            }
        }
        filtered.into_iter().filter_map(|x| filter(x)).collect()
    };

    // Sort the tests alphabetically
    filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));

    // Shard the remaining tests, if sharding requested.
    match opts.test_shard {
        None => filtered,
        Some((a,b)) => {
            filtered.into_iter().enumerate()
            // note: using a - 1 so that the valid shards, for example, are
            // 1.2 and 2.2 instead of 0.2 and 1.2
            .filter(|&(i,_)| i % b == (a - 1))
            .map(|(_,t)| t)
            .collect()
        }
    }
}

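// Worked example for the shard filter above: with `--test-shard=2.3`
// and seven tests after sorting, the kept indices satisfy
// i % 3 == 2 - 1, i.e. zero-based indices 1 and 4; shards are 1-based
// on the command line, hence the `a - 1`.
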
pub fn run_test(opts: &TestOpts,
                force_ignore: bool,
                test: TestDescAndFn,
                monitor_ch: Sender<MonitorMsg>) {

    let TestDescAndFn {desc, testfn} = test;

    if force_ignore || desc.ignore {
        monitor_ch.send((desc, TrIgnored, Vec::new())).unwrap();
        return;
    }

    fn run_test_inner(desc: TestDesc,
                      monitor_ch: Sender<MonitorMsg>,
                      nocapture: bool,
                      testfn: Thunk) {
        Thread::spawn(move || {
            let (tx, rx) = channel();
            let mut reader = ChanReader::new(rx);
            let stdout = ChanWriter::new(tx.clone());
            let stderr = ChanWriter::new(tx);
            let mut cfg = thread::Builder::new().name(match desc.name {
                DynTestName(ref name) => name.clone().to_string(),
                StaticTestName(name) => name.to_string(),
            });
            if nocapture {
                drop((stdout, stderr));
            } else {
                cfg = cfg.stdout(box stdout as Box<Writer + Send>);
                cfg = cfg.stderr(box stderr as Box<Writer + Send>);
            }

            let result_guard = cfg.scoped(move || { testfn.invoke(()) });
            let stdout = reader.read_to_end().unwrap().into_iter().collect();
            let test_result = calc_result(&desc, result_guard.join());
            monitor_ch.send((desc.clone(), test_result, stdout)).unwrap();
        });
    }

    match testfn {
        DynBenchFn(bencher) => {
            let bs = ::bench::benchmark(|harness| bencher.run(harness));
            monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
            return;
        }
        StaticBenchFn(benchfn) => {
            let bs = ::bench::benchmark(|harness| (benchfn.clone())(harness));
            monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
            return;
        }
        DynMetricFn(f) => {
            let mut mm = MetricMap::new();
            f.invoke(&mut mm);
            monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
            return;
        }
        StaticMetricFn(f) => {
            let mut mm = MetricMap::new();
            f(&mut mm);
            monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
            return;
        }
        DynTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture, f),
        StaticTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture,
                                          Thunk::new(move|| f()))
    }
}

fn calc_result(desc: &TestDesc, task_result: Result<(), Box<Any+Send>>) -> TestResult {
    match (&desc.should_fail, task_result) {
        (&ShouldFail::No, Ok(())) |
        (&ShouldFail::Yes(None), Err(_)) => TrOk,
        (&ShouldFail::Yes(Some(msg)), Err(ref err))
            if err.downcast_ref::<String>()
                  .map(|e| &**e)
                  .or_else(|| err.downcast_ref::<&'static str>().map(|e| *e))
                  .map(|e| e.contains(msg))
                  .unwrap_or(false) => TrOk,
        _ => TrFailed,
    }
}

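// A compact summary of the matrix above (a sketch, not exhaustive):
// a plain test passes iff it returns; a `should_fail` test passes iff
// it panics; and `should_fail(expected = "msg")` additionally requires
// the panic payload (a `String` or `&'static str`) to contain "msg".
// Every other combination is `TrFailed`.
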
impl MetricMap {

    pub fn new() -> MetricMap {
        MetricMap(BTreeMap::new())
    }

    /// Load a MetricMap from a file.
    ///
    /// # Panics
    ///
    /// This function will panic if the path does not exist or the path does not
    /// contain a valid metric map.
    pub fn load(p: &Path) -> MetricMap {
        assert!(p.exists());
        let mut f = File::open(p).unwrap();
        let value = json::from_reader(&mut f as &mut io::Reader).unwrap();
        let mut decoder = json::Decoder::new(value);
        MetricMap(match Decodable::decode(&mut decoder) {
            Ok(t) => t,
            Err(e) => panic!("failure decoding JSON: {:?}", e)
        })
    }

    /// Write a MetricMap to a file.
    pub fn save(&self, p: &Path) -> io::IoResult<()> {
        let mut file = try!(File::create(p));
        let MetricMap(ref map) = *self;
        write!(&mut file, "{}", json::as_json(map))
    }

    /// Compare against another MetricMap. Optionally compare all
    /// measurements in the maps using the provided `noise_pct` as a
    /// percentage of each value to consider noise. If `None`, each
    /// measurement's noise threshold is independently chosen as the
    /// maximum of that measurement's recorded noise quantity in either
    /// map.
    pub fn compare_to_old(&self, old: &MetricMap,
                          noise_pct: Option<f64>) -> MetricDiff {
        let mut diff : MetricDiff = BTreeMap::new();
        let MetricMap(ref selfmap) = *self;
        let MetricMap(ref old) = *old;
        for (k, vold) in old.iter() {
            let r = match selfmap.get(k) {
                None => MetricRemoved,
                Some(v) => {
                    let delta = v.value - vold.value;
                    let noise = match noise_pct {
                        None => vold.noise.abs().max(v.noise.abs()),
                        Some(pct) => vold.value * pct / 100.0
                    };
                    if delta.abs() <= noise {
                        LikelyNoise
                    } else {
                        let pct = delta.abs() / vold.value.max(f64::EPSILON) * 100.0;
                        if vold.noise < 0.0 {
                            // When 'noise' is negative, it means we want
                            // to see deltas that go up over time, and can
                            // only tolerate slight negative movement.
                            if delta < 0.0 {
                                Regression(pct)
                            } else {
                                Improvement(pct)
                            }
                        } else {
                            // When 'noise' is positive, it means we want
                            // to see deltas that go down over time, and
                            // can only tolerate slight positive movements.
                            if delta < 0.0 {
                                Improvement(pct)
                            } else {
                                Regression(pct)
                            }
                        }
                    }
                }
            };
            diff.insert((*k).clone(), r);
        }
        let MetricMap(ref map) = *self;
        for (k, _) in map.iter() {
            if !diff.contains_key(k) {
                diff.insert((*k).clone(), MetricAdded);
            }
        }
        diff
    }

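    // Worked example, mirroring `test_metricmap_compare` in the test
    // module below: old "in-both-noise" is 1000 (+/- 200) and new is
    // 1100 (+/- 200), so delta = 100 <= noise = 200 and the result is
    // `LikelyNoise`; had the new value been 2000, pct would be
    // 1000 / 1000 * 100 = 100 and, with positive noise, the result
    // would be `Regression(100.0)`.
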
    /// Insert a named `value` (+/- `noise`) metric into the map. The value
    /// must be non-negative. The `noise` indicates the uncertainty of the
    /// metric, which doubles as the "noise range" of acceptable
    /// pairwise-regressions on this named value, when comparing from one
    /// metric to the next using `compare_to_old`.
    ///
    /// If `noise` is positive, then it means this metric is of a value
    /// you want to see grow smaller, so a change larger than `noise` in the
    /// positive direction represents a regression.
    ///
    /// If `noise` is negative, then it means this metric is of a value
    /// you want to see grow larger, so a change larger than `noise` in the
    /// negative direction represents a regression.
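    ///
    /// # Example (sketch)
    ///
    /// Illustrative values only:
    ///
    /// ```ignore
    /// let mut mm = MetricMap::new();
    /// // Runtime: smaller is better, so `noise` is positive.
    /// mm.insert_metric("runtime", 1000.0, 10.0);
    /// // Throughput: larger is better, so `noise` is negative.
    /// mm.insert_metric("throughput", 50.0, -5.0);
    /// ```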
    pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
        let m = Metric {
            value: value,
            noise: noise
        };
        let MetricMap(ref mut map) = *self;
        map.insert(name.to_string(), m);
    }

    /// Attempt to "ratchet" an external metric file. This involves loading
    /// metrics from a metric file (if it exists), comparing against
    /// the metrics in `self` using `compare_to_old`, and rewriting the
    /// file to contain the metrics in `self` if none of the
    /// `MetricChange`s are `Regression`. Returns the diff as well
    /// as a boolean indicating whether the ratchet succeeded.
    pub fn ratchet(&self, p: &Path, pct: Option<f64>) -> (MetricDiff, bool) {
        let old = if p.exists() {
            MetricMap::load(p)
        } else {
            MetricMap::new()
        };

        let diff : MetricDiff = self.compare_to_old(&old, pct);
        let ok = diff.iter().all(|(_, v)| {
            match *v {
                Regression(_) => false,
                _ => true
            }
        });

        if ok {
            self.save(p).unwrap();
        }
        return (diff, ok);
    }
}

/// A function that is opaque to the optimizer, to allow benchmarks to
/// pretend to use outputs to assist in avoiding dead-code
/// elimination.
///
/// This function is a no-op, and does not even read from `dummy`.
pub fn black_box<T>(dummy: T) -> T {
    // we need to "use" the argument in some way LLVM can't
    // introspect.
    unsafe {asm!("" : : "r"(&dummy))}
    dummy
}

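// Typical use inside a benchmark body (a sketch; `expensive_computation`
// is a hypothetical stand-in): without `black_box`, LLVM may const-fold
// the whole expression and the timing loop would measure nothing.
//
//     b.iter(|| {
//         black_box(expensive_computation())
//     });
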
impl Bencher {
    /// Callback for benchmark functions to run in their body.
    pub fn iter<T, F>(&mut self, mut inner: F) where F: FnMut() -> T {
        self.dur = Duration::span(|| {
            let k = self.iterations;
            for _ in range(0u64, k) {
                black_box(inner());
            }
        });
    }

    pub fn ns_elapsed(&mut self) -> u64 {
        self.dur.num_nanoseconds().unwrap() as u64
    }

    pub fn ns_per_iter(&mut self) -> u64 {
        if self.iterations == 0 {
            0
        } else {
            self.ns_elapsed() / cmp::max(self.iterations, 1)
        }
    }

    pub fn bench_n<F>(&mut self, n: u64, f: F) where F: FnOnce(&mut Bencher) {
        self.iterations = n;
        f(self);
    }

    // This is a more statistics-driven benchmark algorithm.
    pub fn auto_bench<F>(&mut self, mut f: F) -> stats::Summary<f64> where F: FnMut(&mut Bencher) {
        // Initial bench run to get ballpark figure.
        let mut n = 1_u64;
        self.bench_n(n, |x| f(x));

        // Try to estimate iter count for 1ms falling back to 1m
        // iterations if first run took < 1ns.
        if self.ns_per_iter() == 0 {
            n = 1_000_000;
        } else {
            n = 1_000_000 / cmp::max(self.ns_per_iter(), 1);
        }

        // if the first run took more than 1ms we don't want to just
        // be left doing 0 iterations on every loop. The unfortunate
        // side effect of not being able to do as many runs is
        // automatically handled by the statistical analysis below
        // (i.e. larger error bars).
        if n == 0 { n = 1; }

        let mut total_run = Duration::nanoseconds(0);
        let samples : &mut [f64] = &mut [0.0_f64; 50];
        loop {
            let mut summ = None;
            let mut summ5 = None;

            let loop_run = Duration::span(|| {

                for p in samples.iter_mut() {
                    self.bench_n(n, |x| f(x));
                    *p = self.ns_per_iter() as f64;
                }

                stats::winsorize(samples, 5.0);
                summ = Some(stats::Summary::new(samples));

                for p in samples.iter_mut() {
                    self.bench_n(5 * n, |x| f(x));
                    *p = self.ns_per_iter() as f64;
                }

                stats::winsorize(samples, 5.0);
                summ5 = Some(stats::Summary::new(samples));
            });
            let summ = summ.unwrap();
            let summ5 = summ5.unwrap();

            // If we've run for 100ms and seem to have converged to a
            // stable median.
            if loop_run.num_milliseconds() > 100 &&
                summ.median_abs_dev_pct < 1.0 &&
                summ.median - summ5.median < summ5.median_abs_dev {
                return summ5;
            }

            total_run = total_run + loop_run;
            // Longest we ever run for is 3s.
            if total_run.num_seconds() > 3 {
                return summ5;
            }

            n *= 2;
        }
    }
}

pub mod bench {
    // Benchmarking

    use std::cmp;
    use std::time::Duration;
    use super::{Bencher, BenchSamples};

    pub fn benchmark<F>(f: F) -> BenchSamples where F: FnMut(&mut Bencher) {
        let mut bs = Bencher {
            iterations: 0,
            dur: Duration::nanoseconds(0),
            bytes: 0
        };

        let ns_iter_summ = bs.auto_bench(f);

        let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
        let iter_s = 1_000_000_000 / ns_iter;
        let mb_s = (bs.bytes * iter_s) / 1_000_000;

        BenchSamples {
            ns_iter_summ: ns_iter_summ,
            mb_s: mb_s as uint
        }
    }
}

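// Worked example of the MB/s figure computed in `benchmark` above
// (numbers invented): with a median of 1_000 ns/iter and
// `bs.bytes = 1_024`, iter_s = 1_000_000_000 / 1_000 = 1_000_000
// iterations per second, so mb_s = (1_024 * 1_000_000) / 1_000_000
// = 1_024 MB/s.
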
#[cfg(test)]
mod tests {
    use test::{TrFailed, TrIgnored, TrOk, filter_tests, parse_opts,
               TestDesc, TestDescAndFn, TestOpts, run_test,
               Metric, MetricMap, MetricAdded, MetricRemoved,
               Improvement, Regression, LikelyNoise,
               StaticTestName, DynTestName, DynTestFn, ShouldFail};
    use std::io::TempDir;
    use std::thunk::Thunk;
    use std::sync::mpsc::channel;

    #[test]
    pub fn do_not_run_ignored_tests() {
        fn f() { panic!(); }
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: true,
                should_fail: ShouldFail::No,
            },
            testfn: DynTestFn(Thunk::new(move|| f())),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res != TrOk);
    }

    #[test]
    pub fn ignored_tests_result_in_ignored() {
        fn f() { }
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: true,
                should_fail: ShouldFail::No,
            },
            testfn: DynTestFn(Thunk::new(move|| f())),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res == TrIgnored);
    }

    #[test]
    fn test_should_fail() {
        fn f() { panic!(); }
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: false,
                should_fail: ShouldFail::Yes(None)
            },
            testfn: DynTestFn(Thunk::new(move|| f())),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res == TrOk);
    }

    #[test]
    fn test_should_fail_good_message() {
        fn f() { panic!("an error message"); }
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: false,
                should_fail: ShouldFail::Yes(Some("error message"))
            },
            testfn: DynTestFn(Thunk::new(move|| f())),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res == TrOk);
    }

    #[test]
    fn test_should_fail_bad_message() {
        fn f() { panic!("an error message"); }
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: false,
                should_fail: ShouldFail::Yes(Some("foobar"))
            },
            testfn: DynTestFn(Thunk::new(move|| f())),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res == TrFailed);
    }

    #[test]
    fn test_should_fail_but_succeeds() {
        fn f() { }
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: false,
                should_fail: ShouldFail::Yes(None)
            },
            testfn: DynTestFn(Thunk::new(move|| f())),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res == TrFailed);
    }

    #[test]
    fn first_free_arg_should_be_a_filter() {
        let args = vec!("progname".to_string(), "some_regex_filter".to_string());
        let opts = match parse_opts(args.as_slice()) {
            Some(Ok(o)) => o,
            _ => panic!("Malformed arg in first_free_arg_should_be_a_filter")
        };
        assert!(opts.filter.expect("should've found filter").is_match("some_regex_filter"))
    }

    #[test]
    fn parse_ignored_flag() {
        let args = vec!("progname".to_string(),
                        "filter".to_string(),
                        "--ignored".to_string());
        let opts = match parse_opts(args.as_slice()) {
            Some(Ok(o)) => o,
            _ => panic!("Malformed arg in parse_ignored_flag")
        };
        assert!(opts.run_ignored);
    }

    #[test]
    pub fn filter_for_ignored_option() {
        // When we run ignored tests the test filter should filter out all the
        // unignored tests and flip the ignore flag on the rest to false

        let mut opts = TestOpts::new();
        opts.run_tests = true;
        opts.run_ignored = true;

        let tests = vec!(
            TestDescAndFn {
                desc: TestDesc {
                    name: StaticTestName("1"),
                    ignore: true,
                    should_fail: ShouldFail::No,
                },
                testfn: DynTestFn(Thunk::new(move|| {})),
            },
            TestDescAndFn {
                desc: TestDesc {
                    name: StaticTestName("2"),
                    ignore: false,
                    should_fail: ShouldFail::No,
                },
                testfn: DynTestFn(Thunk::new(move|| {})),
            });
        let filtered = filter_tests(&opts, tests);

        assert_eq!(filtered.len(), 1);
        assert_eq!(filtered[0].desc.name.to_string(),
                   "1");
        assert!(filtered[0].desc.ignore == false);
    }

    #[test]
    pub fn sort_tests() {
        let mut opts = TestOpts::new();
        opts.run_tests = true;

        let names =
            vec!("sha1::test".to_string(),
                 "int::test_to_str".to_string(),
                 "int::test_pow".to_string(),
                 "test::do_not_run_ignored_tests".to_string(),
                 "test::ignored_tests_result_in_ignored".to_string(),
                 "test::first_free_arg_should_be_a_filter".to_string(),
                 "test::parse_ignored_flag".to_string(),
                 "test::filter_for_ignored_option".to_string(),
                 "test::sort_tests".to_string());
        let tests =
        {
            fn testfn() { }
            let mut tests = Vec::new();
            for name in names.iter() {
                let test = TestDescAndFn {
                    desc: TestDesc {
                        name: DynTestName((*name).clone()),
                        ignore: false,
                        should_fail: ShouldFail::No,
                    },
                    testfn: DynTestFn(Thunk::new(testfn)),
                };
                tests.push(test);
            }
            tests
        };
        let filtered = filter_tests(&opts, tests);

        let expected =
            vec!("int::test_pow".to_string(),
                 "int::test_to_str".to_string(),
                 "sha1::test".to_string(),
                 "test::do_not_run_ignored_tests".to_string(),
                 "test::filter_for_ignored_option".to_string(),
                 "test::first_free_arg_should_be_a_filter".to_string(),
                 "test::ignored_tests_result_in_ignored".to_string(),
                 "test::parse_ignored_flag".to_string(),
                 "test::sort_tests".to_string());

        for (a, b) in expected.iter().zip(filtered.iter()) {
            assert!(*a == b.desc.name.to_string());
        }
    }

    #[test]
    pub fn filter_tests_regex() {
        let mut opts = TestOpts::new();
        opts.filter = Some(::regex::Regex::new("a.*b.+c").unwrap());

        let mut names = ["yes::abXc", "yes::aXXXbXXXXc",
                         "no::XYZ", "no::abc"];
        names.sort();

        fn test_fn() {}
        let tests = names.iter().map(|name| {
            TestDescAndFn {
                desc: TestDesc {
                    name: DynTestName(name.to_string()),
                    ignore: false,
                    should_fail: ShouldFail::No,
                },
                testfn: DynTestFn(Thunk::new(test_fn))
            }
        }).collect();
        let filtered = filter_tests(&opts, tests);

        let expected: Vec<&str> =
            names.iter().map(|&s| s).filter(|name| name.starts_with("yes")).collect();

        assert_eq!(filtered.len(), expected.len());
        for (test, expected_name) in filtered.iter().zip(expected.iter()) {
            assert_eq!(test.desc.name.as_slice(), *expected_name);
        }
    }

    #[test]
    pub fn test_metricmap_compare() {
        let mut m1 = MetricMap::new();
        let mut m2 = MetricMap::new();
        m1.insert_metric("in-both-noise", 1000.0, 200.0);
        m2.insert_metric("in-both-noise", 1100.0, 200.0);

        m1.insert_metric("in-first-noise", 1000.0, 2.0);
        m2.insert_metric("in-second-noise", 1000.0, 2.0);

        m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
        m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);

        m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
        m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);

        m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
        m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);

        m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
        m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);

        let diff1 = m2.compare_to_old(&m1, None);

        assert_eq!(*(diff1.get(&"in-both-noise".to_string()).unwrap()), LikelyNoise);
        assert_eq!(*(diff1.get(&"in-first-noise".to_string()).unwrap()), MetricRemoved);
        assert_eq!(*(diff1.get(&"in-second-noise".to_string()).unwrap()), MetricAdded);
        assert_eq!(*(diff1.get(&"in-both-want-downwards-but-regressed".to_string()).unwrap()),
                   Regression(100.0));
        assert_eq!(*(diff1.get(&"in-both-want-downwards-and-improved".to_string()).unwrap()),
                   Improvement(50.0));
        assert_eq!(*(diff1.get(&"in-both-want-upwards-but-regressed".to_string()).unwrap()),
                   Regression(50.0));
        assert_eq!(*(diff1.get(&"in-both-want-upwards-and-improved".to_string()).unwrap()),
                   Improvement(100.0));
        assert_eq!(diff1.len(), 7);

        let diff2 = m2.compare_to_old(&m1, Some(200.0));

        assert_eq!(*(diff2.get(&"in-both-noise".to_string()).unwrap()), LikelyNoise);
        assert_eq!(*(diff2.get(&"in-first-noise".to_string()).unwrap()), MetricRemoved);
        assert_eq!(*(diff2.get(&"in-second-noise".to_string()).unwrap()), MetricAdded);
        assert_eq!(*(diff2.get(&"in-both-want-downwards-but-regressed".to_string()).unwrap()),
                   LikelyNoise);
        assert_eq!(*(diff2.get(&"in-both-want-downwards-and-improved".to_string()).unwrap()),
                   LikelyNoise);
        assert_eq!(*(diff2.get(&"in-both-want-upwards-but-regressed".to_string()).unwrap()),
                   LikelyNoise);
        assert_eq!(*(diff2.get(&"in-both-want-upwards-and-improved".to_string()).unwrap()),
                   LikelyNoise);
        assert_eq!(diff2.len(), 7);
    }

    #[test]
    pub fn ratchet_test() {

        let dpth = TempDir::new("test-ratchet").ok().expect("missing test for ratchet");
        let pth = dpth.path().join("ratchet.json");

        let mut m1 = MetricMap::new();
        m1.insert_metric("runtime", 1000.0, 2.0);
        m1.insert_metric("throughput", 50.0, 2.0);

        let mut m2 = MetricMap::new();
        m2.insert_metric("runtime", 1100.0, 2.0);
        m2.insert_metric("throughput", 50.0, 2.0);

        m1.save(&pth).unwrap();

        // Ask for a ratchet that should fail to advance.
        let (diff1, ok1) = m2.ratchet(&pth, None);
        assert_eq!(ok1, false);
        assert_eq!(diff1.len(), 2);
        assert_eq!(*(diff1.get(&"runtime".to_string()).unwrap()), Regression(10.0));
        assert_eq!(*(diff1.get(&"throughput".to_string()).unwrap()), LikelyNoise);

        // Check that it was not rewritten.
        let m3 = MetricMap::load(&pth);
        let MetricMap(m3) = m3;
        assert_eq!(m3.len(), 2);
        assert_eq!(*(m3.get(&"runtime".to_string()).unwrap()), Metric::new(1000.0, 2.0));
        assert_eq!(*(m3.get(&"throughput".to_string()).unwrap()), Metric::new(50.0, 2.0));

        // Ask for a ratchet with an explicit noise-percentage override,
        // that should advance.
        let (diff2, ok2) = m2.ratchet(&pth, Some(10.0));
        assert_eq!(ok2, true);
        assert_eq!(diff2.len(), 2);
        assert_eq!(*(diff2.get(&"runtime".to_string()).unwrap()), LikelyNoise);
        assert_eq!(*(diff2.get(&"throughput".to_string()).unwrap()), LikelyNoise);

        // Check that it was rewritten.
        let m4 = MetricMap::load(&pth);
        let MetricMap(m4) = m4;
        assert_eq!(m4.len(), 2);
        assert_eq!(*(m4.get(&"runtime".to_string()).unwrap()), Metric::new(1100.0, 2.0));
        assert_eq!(*(m4.get(&"throughput".to_string()).unwrap()), Metric::new(50.0, 2.0));
    }
}