// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Support code for rustc's built in unit-test and micro-benchmarking
//! framework.
//!
//! Almost all user code will only be interested in `Bencher` and
//! `black_box`. All other interactions (such as writing tests and
//! benchmarks themselves) should be done via the `#[test]` and
//! `#[bench]` attributes.
//!
//! See the [Testing Guide](../guide-testing.html) for more details.
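//!
//! For illustration, a hedged sketch of how user code typically drives this
//! crate through the attributes (the `add` function and the test/bench names
//! here are hypothetical examples, not part of this crate):
//!
//! ```rust
//! extern crate test;
//!
//! use test::Bencher;
//!
//! fn add(a: u64, b: u64) -> u64 { a + b }
//!
//! #[test]
//! fn add_works() {
//!     assert_eq!(add(2, 2), 4);
//! }
//!
//! #[bench]
//! fn bench_add(b: &mut Bencher) {
//!     // `black_box` keeps the optimizer from eliding the work being timed.
//!     b.iter(|| test::black_box(add(2, 2)));
//! }
//! ```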
// Currently, not much of this is meant for users. It is intended to
// support the simplest interface possible for representing and
// running tests while providing a base that other test frameworks may
// build on.
#![crate_name = "test"]
#![crate_type = "rlib"]
#![crate_type = "dylib"]
#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
       html_favicon_url = "http://www.rust-lang.org/favicon.ico",
       html_root_url = "http://doc.rust-lang.org/nightly/")]
#![allow(unknown_features)]
#![feature(asm, slicing_syntax)]
#![feature(box_syntax)]
#![feature(int_uint)]
extern crate serialize;
extern crate "serialize" as rustc_serialize;
pub use self::TestFn::*;
pub use self::MetricChange::*;
pub use self::ColorConfig::*;
pub use self::TestResult::*;
pub use self::TestName::*;
use self::TestEvent::*;
use self::NamePadding::*;
use self::OutputLocation::*;

use getopts::{OptGroup, optflag, optopt};
use serialize::{json, Decodable, Encodable};
use term::color::{Color, RED, YELLOW, GREEN, CYAN};

use std::collections::BTreeMap;
use std::io::fs::PathExtensions;
use std::io::stdio::StdWriter;
use std::io::{File, ChanReader, ChanWriter};
use std::iter::repeat;
use std::num::{Float, Int};
use std::str::FromStr;
use std::sync::mpsc::{channel, Sender};
use std::thread::{self, Thread};
use std::thunk::{Thunk, Invoke};
use std::time::Duration;
// to be used by rustc to compile tests in libtest
pub use {Bencher, TestName, TestResult, TestDesc,
         TestDescAndFn, TestOpts, TrFailed, TrIgnored, TrOk,
         Metric, MetricMap, MetricAdded, MetricRemoved,
         MetricChange, Improvement, Regression, LikelyNoise,
         StaticTestFn, StaticTestName, DynTestName, DynTestFn,
         run_test, test_main, test_main_static, filter_tests,
         parse_opts, StaticBenchFn, ShouldFail};
// The name of a test. By convention this follows the rules for rust
// paths; i.e. it should be a series of identifiers separated by double
// colons. This way if some test runner wants to arrange the tests
// hierarchically it may.
#[derive(Clone, PartialEq, Eq, Hash, Show)]
    StaticTestName(&'static str),
    fn as_slice<'a>(&'a self) -> &'a str {
            StaticTestName(s) => s,
            DynTestName(ref s) => s.as_slice()

impl fmt::String for TestName {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::String::fmt(self.as_slice(), f)
#[derive(Clone, Copy)]

    fn padded_name(&self, column_count: uint, align: NamePadding) -> String {
        let mut name = String::from_str(self.name.as_slice());
        let fill = column_count.saturating_sub(name.len());
        let mut pad = repeat(" ").take(fill).collect::<String>();
            pad.push_str(name.as_slice());
            name.push_str(pad.as_slice());
/// Represents a benchmark function.
pub trait TDynBenchFn {
    fn run(&self, harness: &mut Bencher);

// A function that runs a test. If the function returns successfully,
// the test succeeds; if the function panics then the test fails. We
// may need to come up with a more clever definition of test in order
// to support isolation of tests into tasks.
    StaticBenchFn(fn(&mut Bencher)),
    StaticMetricFn(fn(&mut MetricMap)),
    DynMetricFn(Box<for<'a> Invoke<&'a mut MetricMap>+'static>),
    DynBenchFn(Box<TDynBenchFn+'static>)

    fn padding(&self) -> NamePadding {
            &StaticTestFn(..) => PadNone,
            &StaticBenchFn(..) => PadOnRight,
            &StaticMetricFn(..) => PadOnRight,
            &DynTestFn(..) => PadNone,
            &DynMetricFn(..) => PadOnRight,
            &DynBenchFn(..) => PadOnRight,

impl fmt::Show for TestFn {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str(match *self {
            StaticTestFn(..) => "StaticTestFn(..)",
            StaticBenchFn(..) => "StaticBenchFn(..)",
            StaticMetricFn(..) => "StaticMetricFn(..)",
            DynTestFn(..) => "DynTestFn(..)",
            DynMetricFn(..) => "DynMetricFn(..)",
            DynBenchFn(..) => "DynBenchFn(..)"
/// Manager of the benchmarking runs.
///
/// This is fed into functions marked with `#[bench]` to allow for
/// set-up & tear-down before running a piece of code repeatedly via a
/// call to `iter`.
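///
/// A hedged sketch of the usual pattern (the function and data below are
/// illustrative placeholders, not part of this crate):
///
/// ```rust
/// # use test::Bencher;
/// #[bench]
/// fn bench_sum(b: &mut Bencher) {
///     // set-up runs once, outside the measured closure
///     let data = [1u64, 2, 3, 4, 5];
///     // only the closure passed to `iter` is timed
///     b.iter(|| data.iter().fold(0u64, |a, &x| a + x));
/// }
/// ```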
#[derive(Copy, Clone, Show, PartialEq, Eq, Hash)]
pub enum ShouldFail {
    Yes(Option<&'static str>)

// The definition of a single test. A test runner will run a list of
// these.
#[derive(Clone, Show, PartialEq, Eq, Hash)]
pub struct TestDesc {
    pub should_fail: ShouldFail,

unsafe impl Send for TestDesc {}

pub struct TestDescAndFn {

#[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Show, Copy)]
    pub fn new(value: f64, noise: f64) -> Metric {
        Metric {value: value, noise: noise}

pub struct MetricMap(BTreeMap<String,Metric>);

impl Clone for MetricMap {
    fn clone(&self) -> MetricMap {
        let MetricMap(ref map) = *self;
        MetricMap(map.clone())

/// Analysis of a single change in metric
#[derive(Copy, PartialEq, Show)]
pub enum MetricChange {

pub type MetricDiff = BTreeMap<String,MetricChange>;
// The default console test runner. It accepts the command line
// arguments and a vector of test_descs.
pub fn test_main(args: &[String], tests: Vec<TestDescAndFn> ) {
    match parse_opts(args) {
        Some(Err(msg)) => panic!("{:?}", msg),
    match run_tests_console(&opts, tests) {
        Ok(false) => panic!("Some tests failed"),
        Err(e) => panic!("io error when running tests: {:?}", e),

// A variant optimized for invocation with a static test vector.
// This will panic (intentionally) when fed any dynamic tests, because
// it is copying the static values out into a dynamic vector and cannot
// copy dynamic values. It is doing this because from this point on
// a ~[TestDescAndFn] is used in order to effect ownership-transfer
// semantics into parallel test runners, which in turn requires a ~[]
// rather than a &[].
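//
// For orientation, the compiler-generated harness `main` ends up calling
// something like the following (a hedged sketch; `TESTS` is a hypothetical
// name for the generated static slice of `TestDescAndFn`):
//
//     fn main() {
//         let args = std::os::args();
//         test::test_main_static(args.as_slice(), TESTS);
//     }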
pub fn test_main_static(args: &[String], tests: &[TestDescAndFn]) {
    let owned_tests = tests.iter().map(|t| {
        StaticTestFn(f) => TestDescAndFn { testfn: StaticTestFn(f), desc: t.desc.clone() },
        StaticBenchFn(f) => TestDescAndFn { testfn: StaticBenchFn(f), desc: t.desc.clone() },
        _ => panic!("non-static tests passed to test::test_main_static")
    test_main(args, owned_tests)
pub enum ColorConfig {

pub struct TestOpts {
    pub filter: Option<Regex>,
    pub run_ignored: bool,
    pub run_benchmarks: bool,
    pub ratchet_metrics: Option<Path>,
    pub ratchet_noise_percent: Option<f64>,
    pub save_metrics: Option<Path>,
    pub test_shard: Option<(uint,uint)>,
    pub logfile: Option<Path>,
    pub color: ColorConfig,
    pub show_boxplot: bool,
    pub boxplot_width: uint,
    pub show_all_stats: bool,

    fn new() -> TestOpts {
            run_benchmarks: false,
            ratchet_metrics: None,
            ratchet_noise_percent: None,
            show_all_stats: false,
/// Result of parsing the options.
pub type OptRes = Result<TestOpts, String>;

fn optgroups() -> Vec<getopts::OptGroup> {
    vec!(getopts::optflag("", "ignored", "Run ignored tests"),
      getopts::optflag("", "test", "Run tests and not benchmarks"),
      getopts::optflag("", "bench", "Run benchmarks instead of tests"),
      getopts::optflag("h", "help", "Display this message (longer with --help)"),
      getopts::optopt("", "save-metrics", "Location to save bench metrics",
      getopts::optopt("", "ratchet-metrics",
                     "Location to load and save metrics from. The metrics \
                      loaded will cause benchmarks to fail if they run too \
      getopts::optopt("", "ratchet-noise-percent",
                     "Tests within N% of the recorded metrics will be \
                      considered as passing", "PERCENTAGE"),
      getopts::optopt("", "logfile", "Write logs to the specified file instead \
      getopts::optopt("", "test-shard", "Run shard A of B shards of the testsuite",
      getopts::optflag("", "nocapture", "don't capture stdout/stderr of each \
                                         task, allow printing directly"),
      getopts::optopt("", "color", "Configure coloring of output:
            auto   = colorize if stdout is a tty and tests are run serially (default);
            always = always colorize output;
            never  = never colorize output;", "auto|always|never"),
      getopts::optflag("", "boxplot", "Display a boxplot of the benchmark statistics"),
      getopts::optopt("", "boxplot-width", "Set the boxplot width (default 50)", "WIDTH"),
      getopts::optflag("", "stats", "Display the benchmark min, max, and quartiles"))
fn usage(binary: &str) {
    let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);

The FILTER regex is tested against the name of all tests to run, and
only those tests that match are run.

By default, all tests are run in parallel. This can be altered with the
RUST_TEST_TASKS environment variable when running tests (set it to 1).

All tests have their standard output and standard error captured by default.
This can be overridden with the --nocapture flag or the RUST_TEST_NOCAPTURE=1
environment variable. Logging is not captured by default.

    #[test]        - Indicates a function is a test to be run. This function
                     takes no arguments.
    #[bench]       - Indicates a function is a benchmark to be run. This
                     function takes one argument (test::Bencher).
    #[should_fail] - This function (also labeled with #[test]) will only pass if
                     the code causes a failure (an assertion failure or panic!)
                     A message may be provided, which the failure string must
                     contain: #[should_fail(expected = "foo")].
    #[ignore]      - When applied to a function which is already attributed as a
                     test, then the test runner will ignore these tests during
                     normal test runs. Running with --ignored will run these
                     tests.
             usage = getopts::usage(message.as_slice(),
                                    optgroups().as_slice()));
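// For example, typical invocations of a compiled test binary look like the
// following (a hedged sketch using only the flags declared in `optgroups`
// above; `mytests` is a placeholder binary name):
//
//     ./mytests 'vec::'                      # run tests whose names match the regex
//     ./mytests --bench --logfile bench.log  # run benchmarks, logging results
//     RUST_TEST_TASKS=1 ./mytests --nocapture --color always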
// Parses command line arguments into test options
pub fn parse_opts(args: &[String]) -> Option<OptRes> {
    let args_ = args.tail();
    match getopts::getopts(args_.as_slice(), optgroups().as_slice()) {
        Err(f) => return Some(Err(f.to_string()))

    if matches.opt_present("h") { usage(args[0].as_slice()); return None; }

    let filter = if matches.free.len() > 0 {
        let s = matches.free[0].as_slice();
        match Regex::new(s) {
            Err(e) => return Some(Err(format!("could not parse /{}/: {:?}", s, e)))

    let run_ignored = matches.opt_present("ignored");

    let logfile = matches.opt_str("logfile");
    let logfile = logfile.map(|s| Path::new(s));

    let run_benchmarks = matches.opt_present("bench");
    let run_tests = ! run_benchmarks ||
        matches.opt_present("test");

    let ratchet_metrics = matches.opt_str("ratchet-metrics");
    let ratchet_metrics = ratchet_metrics.map(|s| Path::new(s));

    let ratchet_noise_percent = matches.opt_str("ratchet-noise-percent");
    let ratchet_noise_percent =
        ratchet_noise_percent.map(|s| s.as_slice().parse::<f64>().unwrap());

    let save_metrics = matches.opt_str("save-metrics");
    let save_metrics = save_metrics.map(|s| Path::new(s));

    let test_shard = matches.opt_str("test-shard");
    let test_shard = opt_shard(test_shard);

    let mut nocapture = matches.opt_present("nocapture");
        nocapture = os::getenv("RUST_TEST_NOCAPTURE").is_some();

    let color = match matches.opt_str("color").as_ref().map(|s| s.as_slice()) {
        Some("auto") | None => AutoColor,
        Some("always") => AlwaysColor,
        Some("never") => NeverColor,
        Some(v) => return Some(Err(format!("argument for --color must be \
                                            auto, always, or never (was {})",

    let show_boxplot = matches.opt_present("boxplot");
    let boxplot_width = match matches.opt_str("boxplot-width") {
            match FromStr::from_str(width.as_slice()) {
                Some(width) => width,
                    return Some(Err(format!("argument for --boxplot-width must be a uint")));

    let show_all_stats = matches.opt_present("stats");

    let test_opts = TestOpts {
        run_ignored: run_ignored,
        run_tests: run_tests,
        run_benchmarks: run_benchmarks,
        ratchet_metrics: ratchet_metrics,
        ratchet_noise_percent: ratchet_noise_percent,
        save_metrics: save_metrics,
        test_shard: test_shard,
        nocapture: nocapture,
        show_boxplot: show_boxplot,
        boxplot_width: boxplot_width,
        show_all_stats: show_all_stats,
pub fn opt_shard(maybestr: Option<String>) -> Option<(uint,uint)> {
            let mut it = s.split('.');
            match (it.next().and_then(|s| s.parse::<uint>()),
                   it.next().and_then(|s| s.parse::<uint>()),
                (Some(a), Some(b), None) => {
                        panic!("tried to run shard {a}.{b}, but {a} is out of bounds \
                                (should be between 1 and {b})", a=a, b=b)
#[derive(Clone, PartialEq)]
pub struct BenchSamples {
    ns_iter_summ: stats::Summary<f64>,

#[derive(Clone, PartialEq)]
pub enum TestResult {
    TrMetrics(MetricMap),
    TrBench(BenchSamples),

unsafe impl Send for TestResult {}

enum OutputLocation<T> {
    Pretty(Box<term::Terminal<term::WriterWrapper> + Send>),

struct ConsoleTestState<T> {
    log_out: Option<File>,
    out: OutputLocation<T>,
    show_all_stats: bool,
    failures: Vec<(TestDesc, Vec<u8> )> ,
    max_name_len: uint, // number of columns to fill when aligning names
impl<T: Writer> ConsoleTestState<T> {
    pub fn new(opts: &TestOpts,
               _: Option<T>) -> io::IoResult<ConsoleTestState<StdWriter>> {
        let log_out = match opts.logfile {
            Some(ref path) => Some(try!(File::create(path))),
        let out = match term::stdout() {
            None => Raw(io::stdio::stdout_raw()),
        Ok(ConsoleTestState {
            use_color: use_color(opts),
            show_boxplot: opts.show_boxplot,
            boxplot_width: opts.boxplot_width,
            show_all_stats: opts.show_all_stats,
            metrics: MetricMap::new(),
            failures: Vec::new(),

    pub fn write_ok(&mut self) -> io::IoResult<()> {
        self.write_pretty("ok", term::color::GREEN)

    pub fn write_failed(&mut self) -> io::IoResult<()> {
        self.write_pretty("FAILED", term::color::RED)

    pub fn write_ignored(&mut self) -> io::IoResult<()> {
        self.write_pretty("ignored", term::color::YELLOW)

    pub fn write_metric(&mut self) -> io::IoResult<()> {
        self.write_pretty("metric", term::color::CYAN)

    pub fn write_bench(&mut self) -> io::IoResult<()> {
        self.write_pretty("bench", term::color::CYAN)

    pub fn write_added(&mut self) -> io::IoResult<()> {
        self.write_pretty("added", term::color::GREEN)

    pub fn write_improved(&mut self) -> io::IoResult<()> {
        self.write_pretty("improved", term::color::GREEN)

    pub fn write_removed(&mut self) -> io::IoResult<()> {
        self.write_pretty("removed", term::color::YELLOW)

    pub fn write_regressed(&mut self) -> io::IoResult<()> {
        self.write_pretty("regressed", term::color::RED)

    pub fn write_pretty(&mut self,
                        color: term::color::Color) -> io::IoResult<()> {
            Pretty(ref mut term) => {
                    try!(term.fg(color));
                try!(term.write(word.as_bytes()));
            Raw(ref mut stdout) => stdout.write(word.as_bytes())

    pub fn write_plain(&mut self, s: &str) -> io::IoResult<()> {
            Pretty(ref mut term) => term.write(s.as_bytes()),
            Raw(ref mut stdout) => stdout.write(s.as_bytes())
    pub fn write_run_start(&mut self, len: uint) -> io::IoResult<()> {
        let noun = if len != 1 { "tests" } else { "test" };
        self.write_plain(format!("\nrunning {} {}\n", len, noun).as_slice())

    pub fn write_test_start(&mut self, test: &TestDesc,
                            align: NamePadding) -> io::IoResult<()> {
        let name = test.padded_name(self.max_name_len, align);
        self.write_plain(format!("test {} ... ", name).as_slice())

    pub fn write_result(&mut self, result: &TestResult) -> io::IoResult<()> {
            TrOk => self.write_ok(),
            TrFailed => self.write_failed(),
            TrIgnored => self.write_ignored(),
            TrMetrics(ref mm) => {
                try!(self.write_metric());
                self.write_plain(format!(": {}", fmt_metrics(mm)).as_slice())
                try!(self.write_bench());

                if self.show_boxplot {
                    let mut wr = Vec::new();
                    try!(stats::write_boxplot(&mut wr, &bs.ns_iter_summ, self.boxplot_width));
                    let s = String::from_utf8(wr).unwrap();
                    try!(self.write_plain(format!(": {}", s).as_slice()));

                if self.show_all_stats {
                    let mut wr = Vec::new();
                    try!(stats::write_5_number_summary(&mut wr, &bs.ns_iter_summ));
                    let s = String::from_utf8(wr).unwrap();
                    try!(self.write_plain(format!(": {}", s).as_slice()));
                    try!(self.write_plain(format!(": {}",
                                                  fmt_bench_samples(bs)).as_slice()));

        self.write_plain("\n")
    pub fn write_log(&mut self, test: &TestDesc,
                     result: &TestResult) -> io::IoResult<()> {
                let s = format!("{} {}\n", match *result {
                        TrOk => "ok".to_string(),
                        TrFailed => "failed".to_string(),
                        TrIgnored => "ignored".to_string(),
                        TrMetrics(ref mm) => fmt_metrics(mm),
                        TrBench(ref bs) => fmt_bench_samples(bs)
                    }, test.name.as_slice());
                o.write(s.as_bytes())

    pub fn write_failures(&mut self) -> io::IoResult<()> {
        try!(self.write_plain("\nfailures:\n"));
        let mut failures = Vec::new();
        let mut fail_out = String::new();
        for &(ref f, ref stdout) in self.failures.iter() {
            failures.push(f.name.to_string());
            if stdout.len() > 0 {
                fail_out.push_str(format!("---- {} stdout ----\n\t",
                                          f.name.as_slice()).as_slice());
                let output = String::from_utf8_lossy(stdout.as_slice());
                fail_out.push_str(output.as_slice());
                fail_out.push_str("\n");
        if fail_out.len() > 0 {
            try!(self.write_plain("\n"));
            try!(self.write_plain(fail_out.as_slice()));

        try!(self.write_plain("\nfailures:\n"));
        for name in failures.iter() {
            try!(self.write_plain(format!(" {}\n",
                                          name.as_slice()).as_slice()));
    pub fn write_metric_diff(&mut self, diff: &MetricDiff) -> io::IoResult<()> {
        let mut improved = 0u;
        let mut regressed = 0u;
        let mut removed = 0u;
        for (k, v) in diff.iter() {
                LikelyNoise => noise += 1,
                    try!(self.write_added());
                    try!(self.write_plain(format!(": {}\n", *k).as_slice()));
                    try!(self.write_removed());
                    try!(self.write_plain(format!(": {}\n", *k).as_slice()));
                Improvement(pct) => {
                    try!(self.write_plain(format!(": {} ", *k).as_slice()));
                    try!(self.write_improved());
                    try!(self.write_plain(format!(" by {:.2}%\n",
                                                  pct as f64).as_slice()));
                    try!(self.write_plain(format!(": {} ", *k).as_slice()));
                    try!(self.write_regressed());
                    try!(self.write_plain(format!(" by {:.2}%\n",
                                                  pct as f64).as_slice()));
        try!(self.write_plain(format!("result of ratchet: {} metrics added, \
                                       {} removed, {} improved, {} regressed, \
                                      added, removed, improved, regressed,
            try!(self.write_plain("updated ratchet file\n"));
            try!(self.write_plain("left ratchet file untouched\n"));
    pub fn write_run_finish(&mut self,
                            ratchet_metrics: &Option<Path>,
                            ratchet_pct: Option<f64>) -> io::IoResult<bool> {
        assert!(self.passed + self.failed + self.ignored + self.measured == self.total);

        let ratchet_success = match *ratchet_metrics {
                try!(self.write_plain(format!("\nusing metrics ratchet: {:?}\n",
                                              pth.display()).as_slice()));
                        try!(self.write_plain(format!("with noise-tolerance \
                let (diff, ok) = self.metrics.ratchet(pth, ratchet_pct);
                try!(self.write_metric_diff(&diff));

        let test_success = self.failed == 0u;
            try!(self.write_failures());

        let success = ratchet_success && test_success;

        try!(self.write_plain("\ntest result: "));
            // There's no parallelism at this point so it's safe to use color
            try!(self.write_ok());
            try!(self.write_failed());
        let s = format!(". {} passed; {} failed; {} ignored; {} measured\n\n",
                        self.passed, self.failed, self.ignored, self.measured);
        try!(self.write_plain(s.as_slice()));
pub fn fmt_metrics(mm: &MetricMap) -> String {
    let MetricMap(ref mm) = *mm;
    let v : Vec<String> = mm.iter()
        .map(|(k,v)| format!("{}: {} (+/- {})", *k,
                             v.value as f64, v.noise as f64))

pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
        format!("{:>9} ns/iter (+/- {}) = {} MB/s",
                bs.ns_iter_summ.median as uint,
                (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint,
        format!("{:>9} ns/iter (+/- {})",
                bs.ns_iter_summ.median as uint,
                (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint)
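// For reference, the two formats above render roughly as the following
// (hedged illustrative numbers, not actual measurements):
//
//     123456 ns/iter (+/- 789) = 215 MB/s
//     123456 ns/iter (+/- 789)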
// A simple console test runner
pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn> ) -> io::IoResult<bool> {

    fn callback<T: Writer>(event: &TestEvent, st: &mut ConsoleTestState<T>) -> io::IoResult<()> {
        match (*event).clone() {
            TeFiltered(ref filtered_tests) => st.write_run_start(filtered_tests.len()),
            TeWait(ref test, padding) => st.write_test_start(test, padding),
            TeResult(test, result, stdout) => {
                try!(st.write_log(&test, &result));
                try!(st.write_result(&result));
                    TrOk => st.passed += 1,
                    TrIgnored => st.ignored += 1,
                        let tname = test.name.as_slice();
                        let MetricMap(mm) = mm;
                        for (k,v) in mm.iter() {
                                .insert_metric(format!("{}.{}",
                        st.metrics.insert_metric(test.name.as_slice(),
                                                 bs.ns_iter_summ.median,
                                                 bs.ns_iter_summ.max - bs.ns_iter_summ.min);
                        st.failures.push((test, stdout));

    let mut st = try!(ConsoleTestState::new(opts, None::<StdWriter>));
    fn len_if_padded(t: &TestDescAndFn) -> uint {
        match t.testfn.padding() {
            PadOnLeft | PadOnRight => t.desc.name.as_slice().len(),
    match tests.iter().max_by(|t|len_if_padded(*t)) {
            let n = t.desc.name.as_slice();
            st.max_name_len = n.len();
    try!(run_tests(opts, tests, |x| callback(&x, &mut st)));
    match opts.save_metrics {
            try!(st.metrics.save(pth));
            try!(st.write_plain(format!("\nmetrics saved to: {:?}",
                                        pth.display()).as_slice()));
    return st.write_run_finish(&opts.ratchet_metrics, opts.ratchet_noise_percent);
fn should_sort_failures_before_printing_them() {
    let test_a = TestDesc {
        name: StaticTestName("a"),
        should_fail: ShouldFail::No

    let test_b = TestDesc {
        name: StaticTestName("b"),
        should_fail: ShouldFail::No

    let mut st = ConsoleTestState {
        out: Raw(Vec::new()),
        show_all_stats: false,
        metrics: MetricMap::new(),
        failures: vec!((test_b, Vec::new()), (test_a, Vec::new()))

    st.write_failures().unwrap();
    let s = match st.out {
        Raw(ref m) => String::from_utf8_lossy(&m[]),
        Pretty(_) => unreachable!()

    let apos = s.find_str("a").unwrap();
    let bpos = s.find_str("b").unwrap();
    assert!(apos < bpos);
fn use_color(opts: &TestOpts) -> bool {
        AutoColor => get_concurrency() == 1 && io::stdout().get_ref().isatty(),

    TeFiltered(Vec<TestDesc> ),
    TeWait(TestDesc, NamePadding),
    TeResult(TestDesc, TestResult, Vec<u8> ),

pub type MonitorMsg = (TestDesc, TestResult, Vec<u8> );
fn run_tests<F>(opts: &TestOpts,
                tests: Vec<TestDescAndFn> ,
                mut callback: F) -> io::IoResult<()> where
    F: FnMut(TestEvent) -> io::IoResult<()>,

    let filtered_tests = filter_tests(opts, tests);
    let filtered_descs = filtered_tests.iter()
                                       .map(|t| t.desc.clone())

    try!(callback(TeFiltered(filtered_descs)));

    let (filtered_tests, filtered_benchs_and_metrics): (Vec<_>, _) =
        filtered_tests.into_iter().partition(|e| {
                StaticTestFn(_) | DynTestFn(_) => true,

    // It's tempting to just spawn all the tests at once, but since we have
    // many tests that run in other processes we would be making a big mess.
    let concurrency = get_concurrency();

    let mut remaining = filtered_tests;
    remaining.reverse();
    let mut pending = 0;

    let (tx, rx) = channel::<MonitorMsg>();

    while pending > 0 || !remaining.is_empty() {
        while pending < concurrency && !remaining.is_empty() {
            let test = remaining.pop().unwrap();
            if concurrency == 1 {
                // We are doing one test at a time so we can print the name
                // of the test before we run it. Useful for debugging tests
                // that hang forever.
                try!(callback(TeWait(test.desc.clone(), test.testfn.padding())));
            run_test(opts, !opts.run_tests, test, tx.clone());

        let (desc, result, stdout) = rx.recv().unwrap();
        if concurrency != 1 {
            try!(callback(TeWait(desc.clone(), PadNone)));
        try!(callback(TeResult(desc, result, stdout)));

    // All benchmarks run at the end, in serial.
    // (this includes metric fns)
    for b in filtered_benchs_and_metrics.into_iter() {
        try!(callback(TeWait(b.desc.clone(), b.testfn.padding())));
        run_test(opts, !opts.run_benchmarks, b, tx.clone());
        let (test, result, stdout) = rx.recv().unwrap();
        try!(callback(TeResult(test, result, stdout)));

fn get_concurrency() -> uint {
    match os::getenv("RUST_TEST_TASKS") {
            let opt_n: Option<uint> = FromStr::from_str(s.as_slice());
                Some(n) if n > 0 => n,
                _ => panic!("RUST_TEST_TASKS is `{}`, should be a positive integer.", s)
            rt::default_sched_threads()
pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
    let mut filtered = tests;

    // Remove tests that don't match the test filter
    filtered = match opts.filter {
            filtered.into_iter()
                    .filter(|test| re.is_match(test.desc.name.as_slice())).collect()
    // Maybe pull out the ignored tests and unignore them
    filtered = if !opts.run_ignored {
        fn filter(test: TestDescAndFn) -> Option<TestDescAndFn> {
            if test.desc.ignore {
                let TestDescAndFn {desc, testfn} = test;
                Some(TestDescAndFn {
                    desc: TestDesc {ignore: false, ..desc},
        filtered.into_iter().filter_map(|x| filter(x)).collect()

    // Sort the tests alphabetically
    filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));

    // Shard the remaining tests, if sharding requested.
    match opts.test_shard {
            filtered.into_iter().enumerate()
                    // note: using a - 1 so that the valid shards, for example, are
                    // 1.2 and 2.2 instead of 0.2 and 1.2
                    .filter(|&(i,_)| i % b == (a - 1))
pub fn run_test(opts: &TestOpts,
                test: TestDescAndFn,
                monitor_ch: Sender<MonitorMsg>) {

    let TestDescAndFn {desc, testfn} = test;

    if force_ignore || desc.ignore {
        monitor_ch.send((desc, TrIgnored, Vec::new())).unwrap();

    fn run_test_inner(desc: TestDesc,
                      monitor_ch: Sender<MonitorMsg>,
        Thread::spawn(move || {
            let (tx, rx) = channel();
            let mut reader = ChanReader::new(rx);
            let stdout = ChanWriter::new(tx.clone());
            let stderr = ChanWriter::new(tx);
            let mut cfg = thread::Builder::new().name(match desc.name {
                DynTestName(ref name) => name.clone().to_string(),
                StaticTestName(name) => name.to_string(),
                drop((stdout, stderr));
                cfg = cfg.stdout(box stdout as Box<Writer + Send>);
                cfg = cfg.stderr(box stderr as Box<Writer + Send>);

            let result_guard = cfg.scoped(move || { testfn.invoke(()) });
            let stdout = reader.read_to_end().unwrap().into_iter().collect();
            let test_result = calc_result(&desc, result_guard.join());
            monitor_ch.send((desc.clone(), test_result, stdout)).unwrap();

        DynBenchFn(bencher) => {
            let bs = ::bench::benchmark(|harness| bencher.run(harness));
            monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
        StaticBenchFn(benchfn) => {
            let bs = ::bench::benchmark(|harness| (benchfn.clone())(harness));
            monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
            let mut mm = MetricMap::new();
            monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
        StaticMetricFn(f) => {
            let mut mm = MetricMap::new();
            monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
        DynTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture, f),
        StaticTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture,
                                          Thunk::new(move|| f()))

fn calc_result(desc: &TestDesc, task_result: Result<(), Box<Any+Send>>) -> TestResult {
    match (&desc.should_fail, task_result) {
        (&ShouldFail::No, Ok(())) |
        (&ShouldFail::Yes(None), Err(_)) => TrOk,
        (&ShouldFail::Yes(Some(msg)), Err(ref err))
            if err.downcast_ref::<String>()
                  .or_else(|| err.downcast_ref::<&'static str>().map(|e| *e))
                  .map(|e| e.contains(msg))
                  .unwrap_or(false) => TrOk,
    pub fn new() -> MetricMap {
        MetricMap(BTreeMap::new())

    /// Load a MetricMap from a file.
    ///
    /// This function will panic if the path does not exist or the path does not
    /// contain a valid metric map.
    pub fn load(p: &Path) -> MetricMap {
        assert!(p.exists());
        let mut f = File::open(p).unwrap();
        let value = json::from_reader(&mut f as &mut io::Reader).unwrap();
        let mut decoder = json::Decoder::new(value);
        MetricMap(match Decodable::decode(&mut decoder) {
            Err(e) => panic!("failure decoding JSON: {:?}", e)

    /// Write a MetricMap to a file.
    pub fn save(&self, p: &Path) -> io::IoResult<()> {
        let mut file = try!(File::create(p));
        let MetricMap(ref map) = *self;
        write!(&mut file, "{}", json::as_json(map))
    /// Compare against another MetricMap. Optionally compare all
    /// measurements in the maps using the provided `noise_pct` as a
    /// percentage of each value to consider noise. If `None`, each
    /// measurement's noise threshold is independently chosen as the
    /// maximum of that measurement's recorded noise quantity in either
    /// map.
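    ///
    /// A hedged sketch of how the comparison plays out (the metric name is
    /// illustrative only):
    ///
    /// ```rust
    /// let mut old = test::MetricMap::new();
    /// let mut new = test::MetricMap::new();
    /// old.insert_metric("runtime", 1000.0, 2.0); // positive noise: smaller is better
    /// new.insert_metric("runtime", 1100.0, 2.0);
    ///
    /// // 10% slower and outside the +/- 2.0 noise band, so the diff reports
    /// // Regression(10.0) for "runtime".
    /// let diff = new.compare_to_old(&old, None);
    /// ```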
    pub fn compare_to_old(&self, old: &MetricMap,
                          noise_pct: Option<f64>) -> MetricDiff {
        let mut diff : MetricDiff = BTreeMap::new();
        let MetricMap(ref selfmap) = *self;
        let MetricMap(ref old) = *old;
        for (k, vold) in old.iter() {
            let r = match selfmap.get(k) {
                None => MetricRemoved,
                    let delta = v.value - vold.value;
                    let noise = match noise_pct {
                        None => vold.noise.abs().max(v.noise.abs()),
                        Some(pct) => vold.value * pct / 100.0
                    if delta.abs() <= noise {
                        let pct = delta.abs() / vold.value.max(f64::EPSILON) * 100.0;
                        if vold.noise < 0.0 {
                            // When 'noise' is negative, it means we want
                            // to see deltas that go up over time, and can
                            // only tolerate slight negative movement.
                            // When 'noise' is positive, it means we want
                            // to see deltas that go down over time, and
                            // can only tolerate slight positive movements.
            diff.insert((*k).clone(), r);
        let MetricMap(ref map) = *self;
        for (k, _) in map.iter() {
            if !diff.contains_key(k) {
                diff.insert((*k).clone(), MetricAdded);
    /// Insert a named `value` (+/- `noise`) metric into the map. The value
    /// must be non-negative. The `noise` indicates the uncertainty of the
    /// metric, which doubles as the "noise range" of acceptable
    /// pairwise-regressions on this named value, when comparing from one
    /// metric to the next using `compare_to_old`.
    ///
    /// If `noise` is positive, then it means this metric is of a value
    /// you want to see grow smaller, so a change larger than `noise` in the
    /// positive direction represents a regression.
    ///
    /// If `noise` is negative, then it means this metric is of a value
    /// you want to see grow larger, so a change larger than `noise` in the
    /// negative direction represents a regression.
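    ///
    /// For example (a hedged sketch; the metric names are illustrative):
    ///
    /// ```rust
    /// let mut mm = test::MetricMap::new();
    /// // Smaller is better (e.g. runtime in ns), so noise is positive.
    /// mm.insert_metric("runtime-ns", 1500.0, 20.0);
    /// // Larger is better (e.g. throughput in MB/s), so noise is negative.
    /// mm.insert_metric("throughput-mbs", 50.0, -2.0);
    /// ```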
    pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
        let MetricMap(ref mut map) = *self;
        map.insert(name.to_string(), m);

    /// Attempt to "ratchet" an external metric file. This involves loading
    /// metrics from a metric file (if it exists), comparing against
    /// the metrics in `self` using `compare_to_old`, and rewriting the
    /// file to contain the metrics in `self` if none of the
    /// `MetricChange`s are `Regression`. Returns the diff as well
    /// as a boolean indicating whether the ratchet succeeded.
    pub fn ratchet(&self, p: &Path, pct: Option<f64>) -> (MetricDiff, bool) {
        let old = if p.exists() {
        let diff : MetricDiff = self.compare_to_old(&old, pct);
        let ok = diff.iter().all(|(_, v)| {
                Regression(_) => false,
            self.save(p).unwrap();
/// A function that is opaque to the optimizer, to allow benchmarks to
/// pretend to use outputs to assist in avoiding dead-code
/// elimination.
///
/// This function is a no-op, and does not even read from `dummy`.
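///
/// A minimal sketch of its usual use inside a benchmark body (the
/// `expensive` function is a hypothetical placeholder):
///
/// ```rust
/// # use test::{Bencher, black_box};
/// # fn expensive(n: u64) -> u64 { n * 2 }
/// # fn demo(b: &mut Bencher) {
/// // Hide both the input and the result from the optimizer.
/// b.iter(|| black_box(expensive(black_box(42))));
/// # }
/// ```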
pub fn black_box<T>(dummy: T) -> T {
    // we need to "use" the argument in some way LLVM can't introspect.
    unsafe {asm!("" : : "r"(&dummy))}
    /// Callback for benchmark functions to run in their body.
    pub fn iter<T, F>(&mut self, mut inner: F) where F: FnMut() -> T {
        self.dur = Duration::span(|| {
            let k = self.iterations;
            for _ in range(0u64, k) {

    pub fn ns_elapsed(&mut self) -> u64 {
        self.dur.num_nanoseconds().unwrap() as u64

    pub fn ns_per_iter(&mut self) -> u64 {
        if self.iterations == 0 {
            self.ns_elapsed() / cmp::max(self.iterations, 1)

    pub fn bench_n<F>(&mut self, n: u64, f: F) where F: FnOnce(&mut Bencher) {
        self.iterations = n;
    // This is a more statistics-driven benchmark algorithm
    pub fn auto_bench<F>(&mut self, mut f: F) -> stats::Summary<f64> where F: FnMut(&mut Bencher) {
        // Initial bench run to get ballpark figure.
        self.bench_n(n, |x| f(x));

        // Try to estimate iter count for 1ms falling back to 1m
        // iterations if first run took < 1ns.
        if self.ns_per_iter() == 0 {
            n = 1_000_000 / cmp::max(self.ns_per_iter(), 1);

        // if the first run took more than 1ms we don't want to just
        // be left doing 0 iterations on every loop. The unfortunate
        // side effect of not being able to do as many runs is
        // automatically handled by the statistical analysis below
        // (i.e. larger error bars).
        if n == 0 { n = 1; }

        let mut total_run = Duration::nanoseconds(0);
        let samples : &mut [f64] = &mut [0.0_f64; 50];
            let mut summ = None;
            let mut summ5 = None;

            let loop_run = Duration::span(|| {
                for p in samples.iter_mut() {
                    self.bench_n(n, |x| f(x));
                    *p = self.ns_per_iter() as f64;

                stats::winsorize(samples, 5.0);
                summ = Some(stats::Summary::new(samples));

                for p in samples.iter_mut() {
                    self.bench_n(5 * n, |x| f(x));
                    *p = self.ns_per_iter() as f64;

                stats::winsorize(samples, 5.0);
                summ5 = Some(stats::Summary::new(samples));

            let summ = summ.unwrap();
            let summ5 = summ5.unwrap();
            // If we've run for 100ms and seem to have converged to a
            // stable median.
            if loop_run.num_milliseconds() > 100 &&
                summ.median_abs_dev_pct < 1.0 &&
                summ.median - summ5.median < summ5.median_abs_dev {

            total_run = total_run + loop_run;
            // Longest we ever run for is 3s.
            if total_run.num_seconds() > 3 {
    use std::time::Duration;
    use super::{Bencher, BenchSamples};

    pub fn benchmark<F>(f: F) -> BenchSamples where F: FnMut(&mut Bencher) {
        let mut bs = Bencher {
            dur: Duration::nanoseconds(0),

        let ns_iter_summ = bs.auto_bench(f);

        let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
        let iter_s = 1_000_000_000 / ns_iter;
        let mb_s = (bs.bytes * iter_s) / 1_000_000;

            ns_iter_summ: ns_iter_summ,
    use test::{TrFailed, TrIgnored, TrOk, filter_tests, parse_opts,
               TestDesc, TestDescAndFn, TestOpts, run_test,
               Metric, MetricMap, MetricAdded, MetricRemoved,
               Improvement, Regression, LikelyNoise,
               StaticTestName, DynTestName, DynTestFn, ShouldFail};
    use std::io::TempDir;
    use std::thunk::Thunk;
    use std::sync::mpsc::channel;
    pub fn do_not_run_ignored_tests() {
        fn f() { panic!(); }
        let desc = TestDescAndFn {
                name: StaticTestName("whatever"),
                should_fail: ShouldFail::No,
            testfn: DynTestFn(Thunk::new(move|| f())),
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res != TrOk);

    pub fn ignored_tests_result_in_ignored() {
        let desc = TestDescAndFn {
                name: StaticTestName("whatever"),
                should_fail: ShouldFail::No,
            testfn: DynTestFn(Thunk::new(move|| f())),
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res == TrIgnored);

    fn test_should_fail() {
        fn f() { panic!(); }
        let desc = TestDescAndFn {
                name: StaticTestName("whatever"),
                should_fail: ShouldFail::Yes(None)
            testfn: DynTestFn(Thunk::new(move|| f())),
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res == TrOk);

    fn test_should_fail_good_message() {
        fn f() { panic!("an error message"); }
        let desc = TestDescAndFn {
                name: StaticTestName("whatever"),
                should_fail: ShouldFail::Yes(Some("error message"))
            testfn: DynTestFn(Thunk::new(move|| f())),
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res == TrOk);

    fn test_should_fail_bad_message() {
        fn f() { panic!("an error message"); }
        let desc = TestDescAndFn {
                name: StaticTestName("whatever"),
                should_fail: ShouldFail::Yes(Some("foobar"))
            testfn: DynTestFn(Thunk::new(move|| f())),
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res == TrFailed);

    fn test_should_fail_but_succeeds() {
        let desc = TestDescAndFn {
                name: StaticTestName("whatever"),
                should_fail: ShouldFail::Yes(None)
            testfn: DynTestFn(Thunk::new(move|| f())),
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res == TrFailed);

    fn first_free_arg_should_be_a_filter() {
        let args = vec!("progname".to_string(), "some_regex_filter".to_string());
        let opts = match parse_opts(args.as_slice()) {
            _ => panic!("Malformed arg in first_free_arg_should_be_a_filter")
        assert!(opts.filter.expect("should've found filter").is_match("some_regex_filter"))

    fn parse_ignored_flag() {
        let args = vec!("progname".to_string(),
                        "filter".to_string(),
                        "--ignored".to_string());
        let opts = match parse_opts(args.as_slice()) {
            _ => panic!("Malformed arg in parse_ignored_flag")
        assert!((opts.run_ignored));
    pub fn filter_for_ignored_option() {
        // When we run ignored tests the test filter should filter out all the
        // unignored tests and flip the ignore flag on the rest to false
        let mut opts = TestOpts::new();
        opts.run_tests = true;
        opts.run_ignored = true;

                    name: StaticTestName("1"),
                    should_fail: ShouldFail::No,
                testfn: DynTestFn(Thunk::new(move|| {})),
                    name: StaticTestName("2"),
                    should_fail: ShouldFail::No,
                testfn: DynTestFn(Thunk::new(move|| {})),

        let filtered = filter_tests(&opts, tests);

        assert_eq!(filtered.len(), 1);
        assert_eq!(filtered[0].desc.name.to_string(),
        assert!(filtered[0].desc.ignore == false);

    pub fn sort_tests() {
        let mut opts = TestOpts::new();
        opts.run_tests = true;

            vec!("sha1::test".to_string(),
                 "int::test_to_str".to_string(),
                 "int::test_pow".to_string(),
                 "test::do_not_run_ignored_tests".to_string(),
                 "test::ignored_tests_result_in_ignored".to_string(),
                 "test::first_free_arg_should_be_a_filter".to_string(),
                 "test::parse_ignored_flag".to_string(),
                 "test::filter_for_ignored_option".to_string(),
                 "test::sort_tests".to_string());

        let mut tests = Vec::new();
        for name in names.iter() {
            let test = TestDescAndFn {
                    name: DynTestName((*name).clone()),
                    should_fail: ShouldFail::No,
                testfn: DynTestFn(Thunk::new(testfn)),

        let filtered = filter_tests(&opts, tests);

            vec!("int::test_pow".to_string(),
                 "int::test_to_str".to_string(),
                 "sha1::test".to_string(),
                 "test::do_not_run_ignored_tests".to_string(),
                 "test::filter_for_ignored_option".to_string(),
                 "test::first_free_arg_should_be_a_filter".to_string(),
                 "test::ignored_tests_result_in_ignored".to_string(),
                 "test::parse_ignored_flag".to_string(),
                 "test::sort_tests".to_string());

        for (a, b) in expected.iter().zip(filtered.iter()) {
            assert!(*a == b.desc.name.to_string());
    pub fn filter_tests_regex() {
        let mut opts = TestOpts::new();
        opts.filter = Some(::regex::Regex::new("a.*b.+c").unwrap());

        let mut names = ["yes::abXc", "yes::aXXXbXXXXc",
                         "no::XYZ", "no::abc"];

        let tests = names.iter().map(|name| {
                    name: DynTestName(name.to_string()),
                    should_fail: ShouldFail::No,
                testfn: DynTestFn(Thunk::new(test_fn))

        let filtered = filter_tests(&opts, tests);

        let expected: Vec<&str> =
            names.iter().map(|&s| s).filter(|name| name.starts_with("yes")).collect();

        assert_eq!(filtered.len(), expected.len());
        for (test, expected_name) in filtered.iter().zip(expected.iter()) {
            assert_eq!(test.desc.name.as_slice(), *expected_name);
    pub fn test_metricmap_compare() {
        let mut m1 = MetricMap::new();
        let mut m2 = MetricMap::new();
        m1.insert_metric("in-both-noise", 1000.0, 200.0);
        m2.insert_metric("in-both-noise", 1100.0, 200.0);

        m1.insert_metric("in-first-noise", 1000.0, 2.0);
        m2.insert_metric("in-second-noise", 1000.0, 2.0);

        m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
        m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);

        m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
        m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);

        m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
        m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);

        m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
        m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);

        let diff1 = m2.compare_to_old(&m1, None);

        assert_eq!(*(diff1.get(&"in-both-noise".to_string()).unwrap()), LikelyNoise);
        assert_eq!(*(diff1.get(&"in-first-noise".to_string()).unwrap()), MetricRemoved);
        assert_eq!(*(diff1.get(&"in-second-noise".to_string()).unwrap()), MetricAdded);
        assert_eq!(*(diff1.get(&"in-both-want-downwards-but-regressed".to_string()).unwrap()),
        assert_eq!(*(diff1.get(&"in-both-want-downwards-and-improved".to_string()).unwrap()),
        assert_eq!(*(diff1.get(&"in-both-want-upwards-but-regressed".to_string()).unwrap()),
        assert_eq!(*(diff1.get(&"in-both-want-upwards-and-improved".to_string()).unwrap()),
                   Improvement(100.0));
        assert_eq!(diff1.len(), 7);

        let diff2 = m2.compare_to_old(&m1, Some(200.0));

        assert_eq!(*(diff2.get(&"in-both-noise".to_string()).unwrap()), LikelyNoise);
        assert_eq!(*(diff2.get(&"in-first-noise".to_string()).unwrap()), MetricRemoved);
        assert_eq!(*(diff2.get(&"in-second-noise".to_string()).unwrap()), MetricAdded);
        assert_eq!(*(diff2.get(&"in-both-want-downwards-but-regressed".to_string()).unwrap()),
        assert_eq!(*(diff2.get(&"in-both-want-downwards-and-improved".to_string()).unwrap()),
        assert_eq!(*(diff2.get(&"in-both-want-upwards-but-regressed".to_string()).unwrap()),
        assert_eq!(*(diff2.get(&"in-both-want-upwards-and-improved".to_string()).unwrap()),
        assert_eq!(diff2.len(), 7);
    pub fn ratchet_test() {

        let dpth = TempDir::new("test-ratchet").ok().expect("missing test for ratchet");
        let pth = dpth.path().join("ratchet.json");

        let mut m1 = MetricMap::new();
        m1.insert_metric("runtime", 1000.0, 2.0);
        m1.insert_metric("throughput", 50.0, 2.0);

        let mut m2 = MetricMap::new();
        m2.insert_metric("runtime", 1100.0, 2.0);
        m2.insert_metric("throughput", 50.0, 2.0);

        m1.save(&pth).unwrap();

        // Ask for a ratchet that should fail to advance.
        let (diff1, ok1) = m2.ratchet(&pth, None);
        assert_eq!(ok1, false);
        assert_eq!(diff1.len(), 2);
        assert_eq!(*(diff1.get(&"runtime".to_string()).unwrap()), Regression(10.0));
        assert_eq!(*(diff1.get(&"throughput".to_string()).unwrap()), LikelyNoise);

        // Check that it was not rewritten.
        let m3 = MetricMap::load(&pth);
        let MetricMap(m3) = m3;
        assert_eq!(m3.len(), 2);
        assert_eq!(*(m3.get(&"runtime".to_string()).unwrap()), Metric::new(1000.0, 2.0));
        assert_eq!(*(m3.get(&"throughput".to_string()).unwrap()), Metric::new(50.0, 2.0));

        // Ask for a ratchet with an explicit noise-percentage override,
        // that should advance.
        let (diff2, ok2) = m2.ratchet(&pth, Some(10.0));
        assert_eq!(ok2, true);
        assert_eq!(diff2.len(), 2);
        assert_eq!(*(diff2.get(&"runtime".to_string()).unwrap()), LikelyNoise);
        assert_eq!(*(diff2.get(&"throughput".to_string()).unwrap()), LikelyNoise);

        // Check that it was rewritten.
        let m4 = MetricMap::load(&pth);
        let MetricMap(m4) = m4;
        assert_eq!(m4.len(), 2);
        assert_eq!(*(m4.get(&"runtime".to_string()).unwrap()), Metric::new(1100.0, 2.0));
        assert_eq!(*(m4.get(&"throughput".to_string()).unwrap()), Metric::new(50.0, 2.0));