// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Support code for rustc's built-in unit-test and micro-benchmarking
//! framework.
//!
//! Almost all user code will only be interested in `Bencher` and
//! `black_box`. All other interactions (such as writing tests and
//! benchmarks themselves) should be done via the `#[test]` and
//! `#[bench]` attributes.
//!
//! See the [Testing Guide](../guide-testing.html) for more details.
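//!
//! A minimal sketch of the intended usage (the function below is
//! illustrative, not part of this crate):
//!
//! ```rust
//! extern crate test;
//!
//! use test::Bencher;
//!
//! #[bench]
//! fn bench_xor_1000_ints(b: &mut Bencher) {
//!     b.iter(|| {
//!         // `black_box` keeps the optimizer from discarding the result.
//!         test::black_box(range(0u, 1000).fold(0, |old, new| old ^ new));
//!     });
//! }
//! ```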
// Currently, not much of this is meant for users. It is intended to
// support the simplest interface possible for representing and
// running tests while providing a base that other test frameworks may
// build on.
#![crate_name = "test"]
#![crate_type = "rlib"]
#![crate_type = "dylib"]
#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
       html_favicon_url = "http://www.rust-lang.org/favicon.ico",
       html_root_url = "http://doc.rust-lang.org/nightly/")]
#![allow(unknown_features)]
#![feature(asm, slicing_syntax)]
#![feature(box_syntax)]
#![feature(int_uint)]
extern crate serialize;
extern crate "serialize" as rustc_serialize;
pub use self::TestFn::*;
pub use self::MetricChange::*;
pub use self::ColorConfig::*;
pub use self::TestResult::*;
pub use self::TestName::*;
use self::TestEvent::*;
use self::NamePadding::*;
use self::OutputLocation::*;
use getopts::{OptGroup, optflag, optopt};
use serialize::{json, Decodable, Encodable};
use term::color::{Color, RED, YELLOW, GREEN, CYAN};
use std::collections::BTreeMap;
use std::io::fs::PathExtensions;
use std::io::stdio::StdWriter;
use std::io::{File, ChanReader, ChanWriter};
use std::iter::repeat;
use std::num::{Float, Int};
use std::str::FromStr;
use std::sync::mpsc::{channel, Sender};
use std::thread::{self, Thread};
use std::thunk::{Thunk, Invoke};
use std::time::Duration;
// to be used by rustc to compile tests in libtest
pub use {Bencher, TestName, TestResult, TestDesc,
         TestDescAndFn, TestOpts, TrFailed, TrIgnored, TrOk,
         Metric, MetricMap, MetricAdded, MetricRemoved,
         MetricChange, Improvement, Regression, LikelyNoise,
         StaticTestFn, StaticTestName, DynTestName, DynTestFn,
         run_test, test_main, test_main_static, filter_tests,
         parse_opts, StaticBenchFn, ShouldFail};
// The name of a test. By convention this follows the rules for Rust
// paths; i.e. it should be a series of identifiers separated by double
// colons (e.g. `int::test_pow`). This way, if some test runner wants to
// arrange the tests hierarchically, it may.
#[derive(Clone, PartialEq, Eq, Hash, Show)]
    StaticTestName(&'static str),
    fn as_slice<'a>(&'a self) -> &'a str {
            StaticTestName(s) => s,
            DynTestName(ref s) => s.as_slice()
impl fmt::String for TestName {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::String::fmt(self.as_slice(), f)
#[derive(Clone, Copy)]
    fn padded_name(&self, column_count: uint, align: NamePadding) -> String {
        let mut name = String::from_str(self.name.as_slice());
        let fill = column_count.saturating_sub(name.len());
        let mut pad = repeat(" ").take(fill).collect::<String>();
            pad.push_str(name.as_slice());
            name.push_str(pad.as_slice());
/// Represents a benchmark function.
pub trait TDynBenchFn {
    fn run(&self, harness: &mut Bencher);
// A function that runs a test. If the function returns successfully,
// the test succeeds; if the function panics then the test fails. We
// may need to come up with a more clever definition of test in order
// to support isolation of tests into tasks.
    StaticBenchFn(fn(&mut Bencher)),
    StaticMetricFn(fn(&mut MetricMap)),
    DynMetricFn(Box<for<'a> Invoke<&'a mut MetricMap>+'static>),
    DynBenchFn(Box<TDynBenchFn+'static>)
    fn padding(&self) -> NamePadding {
            &StaticTestFn(..) => PadNone,
            &StaticBenchFn(..) => PadOnRight,
            &StaticMetricFn(..) => PadOnRight,
            &DynTestFn(..) => PadNone,
            &DynMetricFn(..) => PadOnRight,
            &DynBenchFn(..) => PadOnRight,
impl fmt::Show for TestFn {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str(match *self {
            StaticTestFn(..) => "StaticTestFn(..)",
            StaticBenchFn(..) => "StaticBenchFn(..)",
            StaticMetricFn(..) => "StaticMetricFn(..)",
            DynTestFn(..) => "DynTestFn(..)",
            DynMetricFn(..) => "DynMetricFn(..)",
            DynBenchFn(..) => "DynBenchFn(..)"
/// Manager of the benchmarking runs.
///
/// This is fed into functions marked with `#[bench]` to allow for
/// set-up & tear-down before running a piece of code repeatedly via a
#[derive(Copy, Clone, Show, PartialEq, Eq, Hash)]
pub enum ShouldFail {
    Yes(Option<&'static str>)
// The definition of a single test. A test runner will run a list of
#[derive(Clone, Show, PartialEq, Eq, Hash)]
pub struct TestDesc {
    pub should_fail: ShouldFail,
pub struct TestDescAndFn {
#[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Show, Copy)]
    pub fn new(value: f64, noise: f64) -> Metric {
        Metric {value: value, noise: noise}
pub struct MetricMap(BTreeMap<String,Metric>);
impl Clone for MetricMap {
    fn clone(&self) -> MetricMap {
        let MetricMap(ref map) = *self;
        MetricMap(map.clone())
/// Analysis of a single change in a metric
#[derive(Copy, PartialEq, Show)]
pub enum MetricChange {
pub type MetricDiff = BTreeMap<String,MetricChange>;
// The default console test runner. It accepts the command line
// arguments and a vector of test_descs.
pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>) {
    match parse_opts(args) {
        Some(Err(msg)) => panic!("{:?}", msg),
    match run_tests_console(&opts, tests) {
        Ok(false) => panic!("Some tests failed"),
        Err(e) => panic!("io error when running tests: {:?}", e),
// A variant optimized for invocation with a static test vector.
// This will panic (intentionally) when fed any dynamic tests, because
// it is copying the static values out into a dynamic vector and cannot
// copy dynamic values. It is doing this because from this point on
// a `Vec<TestDescAndFn>` is used in order to effect ownership-transfer
// semantics into parallel test runners, which in turn requires an owned
// vector rather than a borrowed `&[]` slice.
pub fn test_main_static(args: &[String], tests: &[TestDescAndFn]) {
    let owned_tests = tests.iter().map(|t| {
            StaticTestFn(f) => TestDescAndFn { testfn: StaticTestFn(f), desc: t.desc.clone() },
            StaticBenchFn(f) => TestDescAndFn { testfn: StaticBenchFn(f), desc: t.desc.clone() },
            _ => panic!("non-static tests passed to test::test_main_static")
    test_main(args, owned_tests)
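// A sketch of the kind of harness `rustc --test` is expected to generate
// around this entry point (illustrative only; the real generated code
// differs in detail):
//
//     pub fn main() {
//         let tests = [/* one TestDescAndFn per #[test] function */];
//         test_main_static(std::os::args().as_slice(), &tests);
//     }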
pub enum ColorConfig {
pub struct TestOpts {
    pub filter: Option<Regex>,
    pub run_ignored: bool,
    pub run_benchmarks: bool,
    pub ratchet_metrics: Option<Path>,
    pub ratchet_noise_percent: Option<f64>,
    pub save_metrics: Option<Path>,
    pub test_shard: Option<(uint,uint)>,
    pub logfile: Option<Path>,
    pub color: ColorConfig,
    pub show_boxplot: bool,
    pub boxplot_width: uint,
    pub show_all_stats: bool,
    fn new() -> TestOpts {
            run_benchmarks: false,
            ratchet_metrics: None,
            ratchet_noise_percent: None,
            show_all_stats: false,
/// Result of parsing the options.
pub type OptRes = Result<TestOpts, String>;
fn optgroups() -> Vec<getopts::OptGroup> {
    vec!(getopts::optflag("", "ignored", "Run ignored tests"),
         getopts::optflag("", "test", "Run tests and not benchmarks"),
         getopts::optflag("", "bench", "Run benchmarks instead of tests"),
         getopts::optflag("h", "help", "Display this message (longer with --help)"),
         getopts::optopt("", "save-metrics", "Location to save bench metrics",
         getopts::optopt("", "ratchet-metrics",
                         "Location to load and save metrics from. The metrics \
                          loaded cause benchmarks to fail if they run too \
         getopts::optopt("", "ratchet-noise-percent",
                         "Tests within N% of the recorded metrics will be \
                          considered as passing", "PERCENTAGE"),
         getopts::optopt("", "logfile", "Write logs to the specified file instead \
         getopts::optopt("", "test-shard", "Run shard A of B shards of the test suite",
         getopts::optflag("", "nocapture", "don't capture stdout/stderr of each \
                                            task, allow printing directly"),
         getopts::optopt("", "color", "Configure coloring of output:
auto = colorize if stdout is a tty and tests are run serially (default);
always = always colorize output;
never = never colorize output;", "auto|always|never"),
         getopts::optflag("", "boxplot", "Display a boxplot of the benchmark statistics"),
         getopts::optopt("", "boxplot-width", "Set the boxplot width (default 50)", "WIDTH"),
         getopts::optflag("", "stats", "Display the benchmark min, max, and quartiles"))
fn usage(binary: &str) {
    let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
The FILTER regex is tested against the name of all tests to run, and
only those tests that match are run.
By default, all tests are run in parallel. This can be altered with the
RUST_TEST_TASKS environment variable when running tests (set it to 1).
All tests have their standard output and standard error captured by default.
This can be overridden with the --nocapture flag or the RUST_TEST_NOCAPTURE=1
environment variable. Logging is not captured by default.
    #[test]        - Indicates a function is a test to be run. This function
    #[bench]       - Indicates a function is a benchmark to be run. This
                     function takes one argument (test::Bencher).
    #[should_fail] - This function (also labeled with #[test]) will only pass if
                     the code causes a failure (an assertion failure or panic!)
                     A message may be provided, which the failure string must
                     contain: #[should_fail(expected = "foo")].
    #[ignore]      - When applied to a function which is already attributed as a
                     test, then the test runner will ignore these tests during
                     normal test runs. Running with --ignored will run these
    usage = getopts::usage(message.as_slice(),
                           optgroups().as_slice()));
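// For reference, the attributes described in the help text above look like
// this in user code (a sketch; `Bencher` comes from this crate and the
// function bodies are placeholders):
//
//     #[test]
//     #[ignore]
//     fn expensive_test() { /* only runs with --ignored */ }
//
//     #[test]
//     #[should_fail(expected = "foo")]
//     fn fails_with_foo() { panic!("foo bar baz"); }
//
//     #[bench]
//     fn bench_something(b: &mut Bencher) { b.iter(|| black_box(1 + 1)); }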
// Parses command line arguments into test options
pub fn parse_opts(args: &[String]) -> Option<OptRes> {
    let args_ = args.tail();
    match getopts::getopts(args_.as_slice(), optgroups().as_slice()) {
        Err(f) => return Some(Err(f.to_string()))
    if matches.opt_present("h") { usage(args[0].as_slice()); return None; }
    let filter = if matches.free.len() > 0 {
        let s = matches.free[0].as_slice();
        match Regex::new(s) {
            Err(e) => return Some(Err(format!("could not parse /{}/: {:?}", s, e)))
    let run_ignored = matches.opt_present("ignored");
    let logfile = matches.opt_str("logfile");
    let logfile = logfile.map(|s| Path::new(s));
    let run_benchmarks = matches.opt_present("bench");
    let run_tests = !run_benchmarks ||
                    matches.opt_present("test");
    let ratchet_metrics = matches.opt_str("ratchet-metrics");
    let ratchet_metrics = ratchet_metrics.map(|s| Path::new(s));
    let ratchet_noise_percent = matches.opt_str("ratchet-noise-percent");
    let ratchet_noise_percent =
        ratchet_noise_percent.map(|s| s.as_slice().parse::<f64>().unwrap());
    let save_metrics = matches.opt_str("save-metrics");
    let save_metrics = save_metrics.map(|s| Path::new(s));
    let test_shard = matches.opt_str("test-shard");
    let test_shard = opt_shard(test_shard);
    let mut nocapture = matches.opt_present("nocapture");
    nocapture = os::getenv("RUST_TEST_NOCAPTURE").is_some();
    let color = match matches.opt_str("color").as_ref().map(|s| s.as_slice()) {
        Some("auto") | None => AutoColor,
        Some("always") => AlwaysColor,
        Some("never") => NeverColor,
        Some(v) => return Some(Err(format!("argument for --color must be \
                                            auto, always, or never (was {})",
    let show_boxplot = matches.opt_present("boxplot");
    let boxplot_width = match matches.opt_str("boxplot-width") {
        match FromStr::from_str(width.as_slice()) {
            Some(width) => width,
                return Some(Err(format!("argument for --boxplot-width must be a uint")));
    let show_all_stats = matches.opt_present("stats");
    let test_opts = TestOpts {
        run_ignored: run_ignored,
        run_tests: run_tests,
        run_benchmarks: run_benchmarks,
        ratchet_metrics: ratchet_metrics,
        ratchet_noise_percent: ratchet_noise_percent,
        save_metrics: save_metrics,
        test_shard: test_shard,
        nocapture: nocapture,
        show_boxplot: show_boxplot,
        boxplot_width: boxplot_width,
        show_all_stats: show_all_stats,
pub fn opt_shard(maybestr: Option<String>) -> Option<(uint,uint)> {
            let mut it = s.split('.');
            match (it.next().and_then(|s| s.parse::<uint>()),
                   it.next().and_then(|s| s.parse::<uint>()),
                (Some(a), Some(b), None) => {
                        panic!("tried to run shard {a}.{b}, but {a} is out of bounds \
                                (should be between 1 and {b})", a=a, b=b)
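// For example, `--test-shard=2.4` selects the second of four shards:
// after the tests are sorted, those at indices 1, 5, 9, ... are kept
// (see the `i % b == (a - 1)` filter in `filter_tests` below).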
#[derive(Clone, PartialEq)]
pub struct BenchSamples {
    ns_iter_summ: stats::Summary<f64>,
#[derive(Clone, PartialEq)]
pub enum TestResult {
    TrMetrics(MetricMap),
    TrBench(BenchSamples),
enum OutputLocation<T> {
    Pretty(Box<term::Terminal<term::WriterWrapper> + Send>),
struct ConsoleTestState<T> {
    log_out: Option<File>,
    out: OutputLocation<T>,
    show_all_stats: bool,
    failures: Vec<(TestDesc, Vec<u8>)>,
    max_name_len: uint, // number of columns to fill when aligning names
impl<T: Writer> ConsoleTestState<T> {
    pub fn new(opts: &TestOpts,
               _: Option<T>) -> io::IoResult<ConsoleTestState<StdWriter>> {
        let log_out = match opts.logfile {
            Some(ref path) => Some(try!(File::create(path))),
        let out = match term::stdout() {
            None => Raw(io::stdio::stdout_raw()),
        Ok(ConsoleTestState {
            use_color: use_color(opts),
            show_boxplot: opts.show_boxplot,
            boxplot_width: opts.boxplot_width,
            show_all_stats: opts.show_all_stats,
            metrics: MetricMap::new(),
            failures: Vec::new(),
    pub fn write_ok(&mut self) -> io::IoResult<()> {
        self.write_pretty("ok", term::color::GREEN)
    pub fn write_failed(&mut self) -> io::IoResult<()> {
        self.write_pretty("FAILED", term::color::RED)
    pub fn write_ignored(&mut self) -> io::IoResult<()> {
        self.write_pretty("ignored", term::color::YELLOW)
    pub fn write_metric(&mut self) -> io::IoResult<()> {
        self.write_pretty("metric", term::color::CYAN)
    pub fn write_bench(&mut self) -> io::IoResult<()> {
        self.write_pretty("bench", term::color::CYAN)
    pub fn write_added(&mut self) -> io::IoResult<()> {
        self.write_pretty("added", term::color::GREEN)
    pub fn write_improved(&mut self) -> io::IoResult<()> {
        self.write_pretty("improved", term::color::GREEN)
    pub fn write_removed(&mut self) -> io::IoResult<()> {
        self.write_pretty("removed", term::color::YELLOW)
    pub fn write_regressed(&mut self) -> io::IoResult<()> {
        self.write_pretty("regressed", term::color::RED)
    pub fn write_pretty(&mut self,
                        color: term::color::Color) -> io::IoResult<()> {
            Pretty(ref mut term) => {
                try!(term.fg(color));
                try!(term.write(word.as_bytes()));
            Raw(ref mut stdout) => stdout.write(word.as_bytes())
    pub fn write_plain(&mut self, s: &str) -> io::IoResult<()> {
            Pretty(ref mut term) => term.write(s.as_bytes()),
            Raw(ref mut stdout) => stdout.write(s.as_bytes())
643 let noun = if len != 1 { "tests" } else { "test" };
644 self.write_plain(format!("\nrunning {} {}\n", len, noun).as_slice())
647 pub fn write_test_start(&mut self, test: &TestDesc,
648 align: NamePadding) -> io::IoResult<()> {
649 let name = test.padded_name(self.max_name_len, align);
650 self.write_plain(format!("test {} ... ", name).as_slice())
653 pub fn write_result(&mut self, result: &TestResult) -> io::IoResult<()> {
655 TrOk => self.write_ok(),
656 TrFailed => self.write_failed(),
657 TrIgnored => self.write_ignored(),
658 TrMetrics(ref mm) => {
659 try!(self.write_metric());
660 self.write_plain(format!(": {}", fmt_metrics(mm)).as_slice())
663 try!(self.write_bench());
665 if self.show_boxplot {
666 let mut wr = Vec::new();
668 try!(stats::write_boxplot(&mut wr, &bs.ns_iter_summ, self.boxplot_width));
670 let s = String::from_utf8(wr).unwrap();
672 try!(self.write_plain(format!(": {}", s).as_slice()));
675 if self.show_all_stats {
676 let mut wr = Vec::new();
678 try!(stats::write_5_number_summary(&mut wr, &bs.ns_iter_summ));
680 let s = String::from_utf8(wr).unwrap();
682 try!(self.write_plain(format!(": {}", s).as_slice()));
684 try!(self.write_plain(format!(": {}",
685 fmt_bench_samples(bs)).as_slice()));
691 self.write_plain("\n")
694 pub fn write_log(&mut self, test: &TestDesc,
695 result: &TestResult) -> io::IoResult<()> {
699 let s = format!("{} {}\n", match *result {
700 TrOk => "ok".to_string(),
701 TrFailed => "failed".to_string(),
702 TrIgnored => "ignored".to_string(),
703 TrMetrics(ref mm) => fmt_metrics(mm),
704 TrBench(ref bs) => fmt_bench_samples(bs)
705 }, test.name.as_slice());
706 o.write(s.as_bytes())
    pub fn write_failures(&mut self) -> io::IoResult<()> {
        try!(self.write_plain("\nfailures:\n"));
        let mut failures = Vec::new();
        let mut fail_out = String::new();
        for &(ref f, ref stdout) in self.failures.iter() {
            failures.push(f.name.to_string());
            if stdout.len() > 0 {
                fail_out.push_str(format!("---- {} stdout ----\n\t",
                                          f.name.as_slice()).as_slice());
                let output = String::from_utf8_lossy(stdout.as_slice());
                fail_out.push_str(output.as_slice());
                fail_out.push_str("\n");
        if fail_out.len() > 0 {
            try!(self.write_plain("\n"));
            try!(self.write_plain(fail_out.as_slice()));
        try!(self.write_plain("\nfailures:\n"));
        for name in failures.iter() {
            try!(self.write_plain(format!(" {}\n",
                                          name.as_slice()).as_slice()));
    pub fn write_metric_diff(&mut self, diff: &MetricDiff) -> io::IoResult<()> {
        let mut improved = 0u;
        let mut regressed = 0u;
        let mut removed = 0u;
        for (k, v) in diff.iter() {
                LikelyNoise => noise += 1,
                    try!(self.write_added());
                    try!(self.write_plain(format!(": {}\n", *k).as_slice()));
                    try!(self.write_removed());
                    try!(self.write_plain(format!(": {}\n", *k).as_slice()));
                Improvement(pct) => {
                    try!(self.write_plain(format!(": {} ", *k).as_slice()));
                    try!(self.write_improved());
                    try!(self.write_plain(format!(" by {:.2}%\n",
                                                  pct as f64).as_slice()));
                    try!(self.write_plain(format!(": {} ", *k).as_slice()));
                    try!(self.write_regressed());
                    try!(self.write_plain(format!(" by {:.2}%\n",
                                                  pct as f64).as_slice()));
        try!(self.write_plain(format!("result of ratchet: {} metrics added, \
                                       {} removed, {} improved, {} regressed, \
                                      added, removed, improved, regressed,
            try!(self.write_plain("updated ratchet file\n"));
            try!(self.write_plain("left ratchet file untouched\n"));
    pub fn write_run_finish(&mut self,
                            ratchet_metrics: &Option<Path>,
                            ratchet_pct: Option<f64>) -> io::IoResult<bool> {
        assert!(self.passed + self.failed + self.ignored + self.measured == self.total);
        let ratchet_success = match *ratchet_metrics {
                try!(self.write_plain(format!("\nusing metrics ratchet: {:?}\n",
                                              pth.display()).as_slice()));
                        try!(self.write_plain(format!("with noise-tolerance \
                let (diff, ok) = self.metrics.ratchet(pth, ratchet_pct);
                try!(self.write_metric_diff(&diff));
        let test_success = self.failed == 0u;
            try!(self.write_failures());
        let success = ratchet_success && test_success;
        try!(self.write_plain("\ntest result: "));
            // There's no parallelism at this point so it's safe to use color
            try!(self.write_ok());
            try!(self.write_failed());
        let s = format!(". {} passed; {} failed; {} ignored; {} measured\n\n",
                        self.passed, self.failed, self.ignored, self.measured);
        try!(self.write_plain(s.as_slice()));
pub fn fmt_metrics(mm: &MetricMap) -> String {
    let MetricMap(ref mm) = *mm;
    let v : Vec<String> = mm.iter()
                            .map(|(k,v)| format!("{}: {} (+/- {})", *k,
                                                 v.value as f64, v.noise as f64))
pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
        format!("{:>9} ns/iter (+/- {}) = {} MB/s",
                bs.ns_iter_summ.median as uint,
                (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint,
        format!("{:>9} ns/iter (+/- {})",
                bs.ns_iter_summ.median as uint,
                (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint)
// A simple console test runner
pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::IoResult<bool> {
    fn callback<T: Writer>(event: &TestEvent, st: &mut ConsoleTestState<T>) -> io::IoResult<()> {
        match (*event).clone() {
            TeFiltered(ref filtered_tests) => st.write_run_start(filtered_tests.len()),
            TeWait(ref test, padding) => st.write_test_start(test, padding),
            TeResult(test, result, stdout) => {
                try!(st.write_log(&test, &result));
                try!(st.write_result(&result));
                    TrOk => st.passed += 1,
                    TrIgnored => st.ignored += 1,
                        let tname = test.name.as_slice();
                        let MetricMap(mm) = mm;
                        for (k,v) in mm.iter() {
                            .insert_metric(format!("{}.{}",
                        st.metrics.insert_metric(test.name.as_slice(),
                                                 bs.ns_iter_summ.median,
                                                 bs.ns_iter_summ.max - bs.ns_iter_summ.min);
                        st.failures.push((test, stdout));
    let mut st = try!(ConsoleTestState::new(opts, None::<StdWriter>));
    fn len_if_padded(t: &TestDescAndFn) -> uint {
        match t.testfn.padding() {
            PadOnLeft | PadOnRight => t.desc.name.as_slice().len(),
    match tests.iter().max_by(|t| len_if_padded(*t)) {
            let n = t.desc.name.as_slice();
            st.max_name_len = n.len();
    try!(run_tests(opts, tests, |x| callback(&x, &mut st)));
    match opts.save_metrics {
            try!(st.metrics.save(pth));
            try!(st.write_plain(format!("\nmetrics saved to: {:?}",
                                        pth.display()).as_slice()));
    return st.write_run_finish(&opts.ratchet_metrics, opts.ratchet_noise_percent);
fn should_sort_failures_before_printing_them() {
    let test_a = TestDesc {
        name: StaticTestName("a"),
        should_fail: ShouldFail::No
    let test_b = TestDesc {
        name: StaticTestName("b"),
        should_fail: ShouldFail::No
    let mut st = ConsoleTestState {
        out: Raw(Vec::new()),
        show_all_stats: false,
        metrics: MetricMap::new(),
        failures: vec!((test_b, Vec::new()), (test_a, Vec::new()))
    st.write_failures().unwrap();
    let s = match st.out {
        Raw(ref m) => String::from_utf8_lossy(&m[]),
        Pretty(_) => unreachable!()
    let apos = s.find_str("a").unwrap();
    let bpos = s.find_str("b").unwrap();
    assert!(apos < bpos);
fn use_color(opts: &TestOpts) -> bool {
        AutoColor => get_concurrency() == 1 && io::stdout().get_ref().isatty(),
    TeFiltered(Vec<TestDesc>),
    TeWait(TestDesc, NamePadding),
    TeResult(TestDesc, TestResult, Vec<u8>),
pub type MonitorMsg = (TestDesc, TestResult, Vec<u8>);
unsafe impl Send for MonitorMsg {}
fn run_tests<F>(opts: &TestOpts,
                tests: Vec<TestDescAndFn>,
                mut callback: F) -> io::IoResult<()> where
    F: FnMut(TestEvent) -> io::IoResult<()>,
    let filtered_tests = filter_tests(opts, tests);
    let filtered_descs = filtered_tests.iter()
                                       .map(|t| t.desc.clone())
    try!(callback(TeFiltered(filtered_descs)));
    let (filtered_tests, filtered_benchs_and_metrics): (Vec<_>, _) =
        filtered_tests.into_iter().partition(|e| {
            StaticTestFn(_) | DynTestFn(_) => true,
    // It's tempting to just spawn all the tests at once, but since we have
    // many tests that run in other processes we would be making a big mess.
    let concurrency = get_concurrency();
    let mut remaining = filtered_tests;
    remaining.reverse();
    let mut pending = 0;
    let (tx, rx) = channel::<MonitorMsg>();
    while pending > 0 || !remaining.is_empty() {
        while pending < concurrency && !remaining.is_empty() {
            let test = remaining.pop().unwrap();
            if concurrency == 1 {
                // We are doing one test at a time so we can print the name
                // of the test before we run it. Useful for debugging tests
                // that hang forever.
                try!(callback(TeWait(test.desc.clone(), test.testfn.padding())));
            run_test(opts, !opts.run_tests, test, tx.clone());
        let (desc, result, stdout) = rx.recv().unwrap();
        if concurrency != 1 {
            try!(callback(TeWait(desc.clone(), PadNone)));
        try!(callback(TeResult(desc, result, stdout)));
    // All benchmarks run at the end, in serial.
    // (this includes metric fns)
    for b in filtered_benchs_and_metrics.into_iter() {
        try!(callback(TeWait(b.desc.clone(), b.testfn.padding())));
        run_test(opts, !opts.run_benchmarks, b, tx.clone());
        let (test, result, stdout) = rx.recv().unwrap();
        try!(callback(TeResult(test, result, stdout)));
fn get_concurrency() -> uint {
    match os::getenv("RUST_TEST_TASKS") {
            let opt_n: Option<uint> = FromStr::from_str(s.as_slice());
                Some(n) if n > 0 => n,
                _ => panic!("RUST_TEST_TASKS is `{}`, should be a positive integer.", s)
    rt::default_sched_threads()
pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
    let mut filtered = tests;
    // Remove tests that don't match the test filter
    filtered = match opts.filter {
            filtered.into_iter()
                    .filter(|test| re.is_match(test.desc.name.as_slice())).collect()
    // Maybe pull out the ignored tests and unignore them
    filtered = if !opts.run_ignored {
        fn filter(test: TestDescAndFn) -> Option<TestDescAndFn> {
            if test.desc.ignore {
                let TestDescAndFn {desc, testfn} = test;
                Some(TestDescAndFn {
                    desc: TestDesc {ignore: false, ..desc},
        filtered.into_iter().filter_map(|x| filter(x)).collect()
    // Sort the tests alphabetically
    filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));
    // Shard the remaining tests, if sharding was requested.
    match opts.test_shard {
            filtered.into_iter().enumerate()
                    // note: using a - 1 so that the valid shards, for example, are
                    // 1.2 and 2.2 instead of 0.2 and 1.2
                    .filter(|&(i,_)| i % b == (a - 1))
pub fn run_test(opts: &TestOpts,
                test: TestDescAndFn,
                monitor_ch: Sender<MonitorMsg>) {
    let TestDescAndFn {desc, testfn} = test;
    if force_ignore || desc.ignore {
        monitor_ch.send((desc, TrIgnored, Vec::new())).unwrap();
    fn run_test_inner(desc: TestDesc,
                      monitor_ch: Sender<MonitorMsg>,
        Thread::spawn(move || {
            let (tx, rx) = channel();
            let mut reader = ChanReader::new(rx);
            let stdout = ChanWriter::new(tx.clone());
            let stderr = ChanWriter::new(tx);
            let mut cfg = thread::Builder::new().name(match desc.name {
                DynTestName(ref name) => name.clone().to_string(),
                StaticTestName(name) => name.to_string(),
                drop((stdout, stderr));
                cfg = cfg.stdout(box stdout as Box<Writer + Send>);
                cfg = cfg.stderr(box stderr as Box<Writer + Send>);
            let result_guard = cfg.scoped(move || { testfn.invoke(()) });
            let stdout = reader.read_to_end().unwrap().into_iter().collect();
            let test_result = calc_result(&desc, result_guard.join());
            monitor_ch.send((desc.clone(), test_result, stdout)).unwrap();
        DynBenchFn(bencher) => {
            let bs = ::bench::benchmark(|harness| bencher.run(harness));
            monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
        StaticBenchFn(benchfn) => {
            let bs = ::bench::benchmark(|harness| (benchfn.clone())(harness));
            monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
            let mut mm = MetricMap::new();
            monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
        StaticMetricFn(f) => {
            let mut mm = MetricMap::new();
            monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
        DynTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture, f),
        StaticTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture,
                                          Thunk::new(move|| f()))
fn calc_result(desc: &TestDesc, task_result: Result<(), Box<Any+Send>>) -> TestResult {
    match (&desc.should_fail, task_result) {
        (&ShouldFail::No, Ok(())) |
        (&ShouldFail::Yes(None), Err(_)) => TrOk,
        (&ShouldFail::Yes(Some(msg)), Err(ref err))
            if err.downcast_ref::<String>()
                  .or_else(|| err.downcast_ref::<&'static str>().map(|e| *e))
                  .map(|e| e.contains(msg))
                  .unwrap_or(false) => TrOk,
    pub fn new() -> MetricMap {
        MetricMap(BTreeMap::new())
    /// Load a MetricMap from a file.
    /// This function will panic if the path does not exist or the path does not
    /// contain a valid metric map.
    pub fn load(p: &Path) -> MetricMap {
        assert!(p.exists());
        let mut f = File::open(p).unwrap();
        let value = json::from_reader(&mut f as &mut io::Reader).unwrap();
        let mut decoder = json::Decoder::new(value);
        MetricMap(match Decodable::decode(&mut decoder) {
            Err(e) => panic!("failure decoding JSON: {:?}", e)
    /// Write a MetricMap to a file.
    pub fn save(&self, p: &Path) -> io::IoResult<()> {
        let mut file = try!(File::create(p));
        let MetricMap(ref map) = *self;
        write!(&mut file, "{}", json::as_json(map))
    /// Compare against another MetricMap. Optionally compare all
    /// measurements in the maps using the provided `noise_pct` as a
    /// percentage of each value to consider noise. If `None`, each
    /// measurement's noise threshold is independently chosen as the
    /// maximum of that measurement's recorded noise quantity in either
    pub fn compare_to_old(&self, old: &MetricMap,
                          noise_pct: Option<f64>) -> MetricDiff {
        let mut diff : MetricDiff = BTreeMap::new();
        let MetricMap(ref selfmap) = *self;
        let MetricMap(ref old) = *old;
        for (k, vold) in old.iter() {
            let r = match selfmap.get(k) {
                None => MetricRemoved,
                    let delta = v.value - vold.value;
                    let noise = match noise_pct {
                        None => vold.noise.abs().max(v.noise.abs()),
                        Some(pct) => vold.value * pct / 100.0
                    if delta.abs() <= noise {
                        let pct = delta.abs() / vold.value.max(f64::EPSILON) * 100.0;
                        if vold.noise < 0.0 {
                            // When 'noise' is negative, it means we want
                            // to see deltas that go up over time, and can
                            // only tolerate slight negative movement.
                            // When 'noise' is positive, it means we want
                            // to see deltas that go down over time, and
                            // can only tolerate slight positive movements.
            diff.insert((*k).clone(), r);
        let MetricMap(ref map) = *self;
        for (k, _) in map.iter() {
            if !diff.contains_key(k) {
                diff.insert((*k).clone(), MetricAdded);
    /// Insert a named `value` (+/- `noise`) metric into the map. The value
    /// must be non-negative. The `noise` indicates the uncertainty of the
    /// metric, which doubles as the "noise range" of acceptable
    /// pairwise-regressions on this named value, when comparing from one
    /// metric to the next using `compare_to_old`.
    ///
    /// If `noise` is positive, then it means this metric is of a value
    /// you want to see grow smaller, so a change larger than `noise` in the
    /// positive direction represents a regression.
    ///
    /// If `noise` is negative, then it means this metric is of a value
    /// you want to see grow larger, so a change larger than `noise` in the
    /// negative direction represents a regression.
    pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
        let MetricMap(ref mut map) = *self;
        map.insert(name.to_string(), m);
    /// Attempt to "ratchet" an external metric file. This involves loading
    /// metrics from a metric file (if it exists), comparing against
    /// the metrics in `self` using `compare_to_old`, and rewriting the
    /// file to contain the metrics in `self` if none of the
    /// `MetricChange`s are `Regression`. Returns the diff as well
    /// as a boolean indicating whether the ratchet succeeded.
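    ///
    /// A sketch of the expected flow (it mirrors `ratchet_test` in the test
    /// module below; the path and the `current` map are illustrative):
    ///
    /// ```rust
    /// let (diff, ok) = current.ratchet(&Path::new("ratchet.json"), Some(10.0));
    /// if ok {
    ///     // No `Regression` entries: the file now holds the metrics in `current`.
    /// }
    /// ```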
    pub fn ratchet(&self, p: &Path, pct: Option<f64>) -> (MetricDiff, bool) {
        let old = if p.exists() {
        let diff : MetricDiff = self.compare_to_old(&old, pct);
        let ok = diff.iter().all(|(_, v)| {
                Regression(_) => false,
            self.save(p).unwrap();
/// A function that is opaque to the optimizer, to allow benchmarks to
/// pretend to use outputs to assist in avoiding dead-code
/// elimination.
///
/// This function is a no-op, and does not even read from `dummy`.
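///
/// Typical use wraps the benchmarked expression so its result cannot be
/// proven dead (a sketch; `b` is the `&mut Bencher` argument and `compute()`
/// is a stand-in for the work being measured):
///
/// ```rust
/// b.iter(|| black_box(compute()));
/// ```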
pub fn black_box<T>(dummy: T) -> T {
    // we need to "use" the argument in some way LLVM can't
    unsafe {asm!("" : : "r"(&dummy))}
    /// Callback for benchmark functions to run in their body.
    pub fn iter<T, F>(&mut self, mut inner: F) where F: FnMut() -> T {
        self.dur = Duration::span(|| {
            let k = self.iterations;
            for _ in range(0u64, k) {
    pub fn ns_elapsed(&mut self) -> u64 {
        self.dur.num_nanoseconds().unwrap() as u64
    pub fn ns_per_iter(&mut self) -> u64 {
        if self.iterations == 0 {
        self.ns_elapsed() / cmp::max(self.iterations, 1)
    pub fn bench_n<F>(&mut self, n: u64, f: F) where F: FnOnce(&mut Bencher) {
        self.iterations = n;
    // This is a more statistics-driven benchmark algorithm
    pub fn auto_bench<F>(&mut self, mut f: F) -> stats::Summary<f64> where F: FnMut(&mut Bencher) {
        // Initial bench run to get ballpark figure.
        self.bench_n(n, |x| f(x));
        // Try to estimate iter count for 1ms falling back to 1m
        // iterations if first run took < 1ns.
        if self.ns_per_iter() == 0 {
            n = 1_000_000 / cmp::max(self.ns_per_iter(), 1);
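            // For example, a calibration pass at ~250 ns/iter gives
            // n = 1_000_000 / 250 = 4_000 iterations, i.e. roughly 1 ms per sample.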
        // if the first run took more than 1ms we don't want to just
        // be left doing 0 iterations on every loop. The unfortunate
        // side effect of not being able to do as many runs is
        // automatically handled by the statistical analysis below
        // (i.e. larger error bars).
        if n == 0 { n = 1; }
        let mut total_run = Duration::nanoseconds(0);
        let samples : &mut [f64] = &mut [0.0_f64; 50];
            let mut summ = None;
            let mut summ5 = None;
            let loop_run = Duration::span(|| {
                for p in samples.iter_mut() {
                    self.bench_n(n, |x| f(x));
                    *p = self.ns_per_iter() as f64;
                stats::winsorize(samples, 5.0);
                summ = Some(stats::Summary::new(samples));
                for p in samples.iter_mut() {
                    self.bench_n(5 * n, |x| f(x));
                    *p = self.ns_per_iter() as f64;
                stats::winsorize(samples, 5.0);
                summ5 = Some(stats::Summary::new(samples));
            let summ = summ.unwrap();
            let summ5 = summ5.unwrap();
            // If we've run for 100ms and seem to have converged to a
            if loop_run.num_milliseconds() > 100 &&
                summ.median_abs_dev_pct < 1.0 &&
                summ.median - summ5.median < summ5.median_abs_dev {
            total_run = total_run + loop_run;
            // Longest we ever run for is 3s.
            if total_run.num_seconds() > 3 {
use std::time::Duration;
use super::{Bencher, BenchSamples};
pub fn benchmark<F>(f: F) -> BenchSamples where F: FnMut(&mut Bencher) {
    let mut bs = Bencher {
        dur: Duration::nanoseconds(0),
    let ns_iter_summ = bs.auto_bench(f);
    let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
    let iter_s = 1_000_000_000 / ns_iter;
    let mb_s = (bs.bytes * iter_s) / 1_000_000;
        ns_iter_summ: ns_iter_summ,
use test::{TrFailed, TrIgnored, TrOk, filter_tests, parse_opts,
           TestDesc, TestDescAndFn, TestOpts, run_test,
           Metric, MetricMap, MetricAdded, MetricRemoved,
           Improvement, Regression, LikelyNoise,
           StaticTestName, DynTestName, DynTestFn, ShouldFail};
use std::io::TempDir;
use std::thunk::Thunk;
use std::sync::mpsc::channel;
pub fn do_not_run_ignored_tests() {
    fn f() { panic!(); }
    let desc = TestDescAndFn {
            name: StaticTestName("whatever"),
            should_fail: ShouldFail::No,
        testfn: DynTestFn(Thunk::new(move|| f())),
    let (tx, rx) = channel();
    run_test(&TestOpts::new(), false, desc, tx);
    let (_, res, _) = rx.recv().unwrap();
    assert!(res != TrOk);
pub fn ignored_tests_result_in_ignored() {
    let desc = TestDescAndFn {
            name: StaticTestName("whatever"),
            should_fail: ShouldFail::No,
        testfn: DynTestFn(Thunk::new(move|| f())),
    let (tx, rx) = channel();
    run_test(&TestOpts::new(), false, desc, tx);
    let (_, res, _) = rx.recv().unwrap();
    assert!(res == TrIgnored);
fn test_should_fail() {
    fn f() { panic!(); }
    let desc = TestDescAndFn {
            name: StaticTestName("whatever"),
            should_fail: ShouldFail::Yes(None)
        testfn: DynTestFn(Thunk::new(move|| f())),
    let (tx, rx) = channel();
    run_test(&TestOpts::new(), false, desc, tx);
    let (_, res, _) = rx.recv().unwrap();
    assert!(res == TrOk);
fn test_should_fail_good_message() {
    fn f() { panic!("an error message"); }
    let desc = TestDescAndFn {
            name: StaticTestName("whatever"),
            should_fail: ShouldFail::Yes(Some("error message"))
        testfn: DynTestFn(Thunk::new(move|| f())),
    let (tx, rx) = channel();
    run_test(&TestOpts::new(), false, desc, tx);
    let (_, res, _) = rx.recv().unwrap();
    assert!(res == TrOk);
fn test_should_fail_bad_message() {
    fn f() { panic!("an error message"); }
    let desc = TestDescAndFn {
            name: StaticTestName("whatever"),
            should_fail: ShouldFail::Yes(Some("foobar"))
        testfn: DynTestFn(Thunk::new(move|| f())),
    let (tx, rx) = channel();
    run_test(&TestOpts::new(), false, desc, tx);
    let (_, res, _) = rx.recv().unwrap();
    assert!(res == TrFailed);
fn test_should_fail_but_succeeds() {
    let desc = TestDescAndFn {
            name: StaticTestName("whatever"),
            should_fail: ShouldFail::Yes(None)
        testfn: DynTestFn(Thunk::new(move|| f())),
    let (tx, rx) = channel();
    run_test(&TestOpts::new(), false, desc, tx);
    let (_, res, _) = rx.recv().unwrap();
    assert!(res == TrFailed);
fn first_free_arg_should_be_a_filter() {
    let args = vec!("progname".to_string(), "some_regex_filter".to_string());
    let opts = match parse_opts(args.as_slice()) {
        _ => panic!("Malformed arg in first_free_arg_should_be_a_filter")
    assert!(opts.filter.expect("should've found filter").is_match("some_regex_filter"))
fn parse_ignored_flag() {
    let args = vec!("progname".to_string(),
                    "filter".to_string(),
                    "--ignored".to_string());
    let opts = match parse_opts(args.as_slice()) {
        _ => panic!("Malformed arg in parse_ignored_flag")
    assert!((opts.run_ignored));
pub fn filter_for_ignored_option() {
    // When we run ignored tests the test filter should filter out all the
    // unignored tests and flip the ignore flag on the rest to false
    let mut opts = TestOpts::new();
    opts.run_tests = true;
    opts.run_ignored = true;
            name: StaticTestName("1"),
            should_fail: ShouldFail::No,
        testfn: DynTestFn(Thunk::new(move|| {})),
            name: StaticTestName("2"),
            should_fail: ShouldFail::No,
        testfn: DynTestFn(Thunk::new(move|| {})),
    let filtered = filter_tests(&opts, tests);
    assert_eq!(filtered.len(), 1);
    assert_eq!(filtered[0].desc.name.to_string(),
    assert!(filtered[0].desc.ignore == false);
pub fn sort_tests() {
    let mut opts = TestOpts::new();
    opts.run_tests = true;
        vec!("sha1::test".to_string(),
             "int::test_to_str".to_string(),
             "int::test_pow".to_string(),
             "test::do_not_run_ignored_tests".to_string(),
             "test::ignored_tests_result_in_ignored".to_string(),
             "test::first_free_arg_should_be_a_filter".to_string(),
             "test::parse_ignored_flag".to_string(),
             "test::filter_for_ignored_option".to_string(),
             "test::sort_tests".to_string());
    let mut tests = Vec::new();
    for name in names.iter() {
        let test = TestDescAndFn {
                name: DynTestName((*name).clone()),
                should_fail: ShouldFail::No,
            testfn: DynTestFn(Thunk::new(testfn)),
    let filtered = filter_tests(&opts, tests);
        vec!("int::test_pow".to_string(),
             "int::test_to_str".to_string(),
             "sha1::test".to_string(),
             "test::do_not_run_ignored_tests".to_string(),
             "test::filter_for_ignored_option".to_string(),
             "test::first_free_arg_should_be_a_filter".to_string(),
             "test::ignored_tests_result_in_ignored".to_string(),
             "test::parse_ignored_flag".to_string(),
             "test::sort_tests".to_string());
    for (a, b) in expected.iter().zip(filtered.iter()) {
        assert!(*a == b.desc.name.to_string());
pub fn filter_tests_regex() {
    let mut opts = TestOpts::new();
    opts.filter = Some(::regex::Regex::new("a.*b.+c").unwrap());
    let mut names = ["yes::abXc", "yes::aXXXbXXXXc",
                     "no::XYZ", "no::abc"];
    let tests = names.iter().map(|name| {
            name: DynTestName(name.to_string()),
            should_fail: ShouldFail::No,
        testfn: DynTestFn(Thunk::new(test_fn))
    let filtered = filter_tests(&opts, tests);
    let expected: Vec<&str> =
        names.iter().map(|&s| s).filter(|name| name.starts_with("yes")).collect();
    assert_eq!(filtered.len(), expected.len());
    for (test, expected_name) in filtered.iter().zip(expected.iter()) {
        assert_eq!(test.desc.name.as_slice(), *expected_name);
pub fn test_metricmap_compare() {
    let mut m1 = MetricMap::new();
    let mut m2 = MetricMap::new();
    m1.insert_metric("in-both-noise", 1000.0, 200.0);
    m2.insert_metric("in-both-noise", 1100.0, 200.0);
    m1.insert_metric("in-first-noise", 1000.0, 2.0);
    m2.insert_metric("in-second-noise", 1000.0, 2.0);
    m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
    m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);
    m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
    m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);
    m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
    m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);
    m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
    m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);
    let diff1 = m2.compare_to_old(&m1, None);
    assert_eq!(*(diff1.get(&"in-both-noise".to_string()).unwrap()), LikelyNoise);
    assert_eq!(*(diff1.get(&"in-first-noise".to_string()).unwrap()), MetricRemoved);
    assert_eq!(*(diff1.get(&"in-second-noise".to_string()).unwrap()), MetricAdded);
    assert_eq!(*(diff1.get(&"in-both-want-downwards-but-regressed".to_string()).unwrap()),
    assert_eq!(*(diff1.get(&"in-both-want-downwards-and-improved".to_string()).unwrap()),
    assert_eq!(*(diff1.get(&"in-both-want-upwards-but-regressed".to_string()).unwrap()),
    assert_eq!(*(diff1.get(&"in-both-want-upwards-and-improved".to_string()).unwrap()),
               Improvement(100.0));
    assert_eq!(diff1.len(), 7);
    let diff2 = m2.compare_to_old(&m1, Some(200.0));
    assert_eq!(*(diff2.get(&"in-both-noise".to_string()).unwrap()), LikelyNoise);
    assert_eq!(*(diff2.get(&"in-first-noise".to_string()).unwrap()), MetricRemoved);
    assert_eq!(*(diff2.get(&"in-second-noise".to_string()).unwrap()), MetricAdded);
    assert_eq!(*(diff2.get(&"in-both-want-downwards-but-regressed".to_string()).unwrap()),
    assert_eq!(*(diff2.get(&"in-both-want-downwards-and-improved".to_string()).unwrap()),
    assert_eq!(*(diff2.get(&"in-both-want-upwards-but-regressed".to_string()).unwrap()),
    assert_eq!(*(diff2.get(&"in-both-want-upwards-and-improved".to_string()).unwrap()),
    assert_eq!(diff2.len(), 7);
pub fn ratchet_test() {
    let dpth = TempDir::new("test-ratchet").ok().expect("missing test for ratchet");
    let pth = dpth.path().join("ratchet.json");
    let mut m1 = MetricMap::new();
    m1.insert_metric("runtime", 1000.0, 2.0);
    m1.insert_metric("throughput", 50.0, 2.0);
    let mut m2 = MetricMap::new();
    m2.insert_metric("runtime", 1100.0, 2.0);
    m2.insert_metric("throughput", 50.0, 2.0);
    m1.save(&pth).unwrap();
    // Ask for a ratchet that should fail to advance.
    let (diff1, ok1) = m2.ratchet(&pth, None);
    assert_eq!(ok1, false);
    assert_eq!(diff1.len(), 2);
    assert_eq!(*(diff1.get(&"runtime".to_string()).unwrap()), Regression(10.0));
    assert_eq!(*(diff1.get(&"throughput".to_string()).unwrap()), LikelyNoise);
    // Check that it was not rewritten.
    let m3 = MetricMap::load(&pth);
    let MetricMap(m3) = m3;
    assert_eq!(m3.len(), 2);
    assert_eq!(*(m3.get(&"runtime".to_string()).unwrap()), Metric::new(1000.0, 2.0));
    assert_eq!(*(m3.get(&"throughput".to_string()).unwrap()), Metric::new(50.0, 2.0));
    // Ask for a ratchet with an explicit noise-percentage override,
    // that should advance.
    let (diff2, ok2) = m2.ratchet(&pth, Some(10.0));
    assert_eq!(ok2, true);
    assert_eq!(diff2.len(), 2);
    assert_eq!(*(diff2.get(&"runtime".to_string()).unwrap()), LikelyNoise);
    assert_eq!(*(diff2.get(&"throughput".to_string()).unwrap()), LikelyNoise);
    // Check that it was rewritten.
    let m4 = MetricMap::load(&pth);
    let MetricMap(m4) = m4;
    assert_eq!(m4.len(), 2);
    assert_eq!(*(m4.get(&"runtime".to_string()).unwrap()), Metric::new(1100.0, 2.0));
    assert_eq!(*(m4.get(&"throughput".to_string()).unwrap()), Metric::new(50.0, 2.0));