1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 //! Support code for rustc's built-in unit-test and micro-benchmarking
14 //! Almost all user code will only be interested in `Bencher` and
15 //! `black_box`. All other interactions (such as writing tests and
16 //! benchmarks themselves) should be done via the `#[test]` and
17 //! `#[bench]` attributes.
19 //! See the [Testing Guide](../guide-testing.html) for more details.
21 // Currently, not much of this is meant for users. It is intended to
22 // support the simplest interface possible for representing and
23 // running tests while providing a base that other test frameworks may
26 #![crate_name = "test"]
28 #![crate_type = "rlib"]
29 #![crate_type = "dylib"]
30 #![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
31 html_favicon_url = "http://www.rust-lang.org/favicon.ico",
32 html_root_url = "http://doc.rust-lang.org/nightly/")]
34 #![allow(unknown_features)]
35 #![feature(asm, macro_rules, phase, globs, slicing_syntax)]
36 #![feature(unboxed_closures, default_type_params)]
37 #![feature(old_orphan_check)]
41 extern crate serialize;
42 extern crate "serialize" as rustc_serialize;
45 pub use self::TestFn::*;
46 pub use self::MetricChange::*;
47 pub use self::ColorConfig::*;
48 pub use self::TestResult::*;
49 pub use self::TestName::*;
50 use self::TestEvent::*;
51 use self::NamePadding::*;
52 use self::OutputLocation::*;
55 use getopts::{OptGroup, optflag, optopt};
57 use serialize::{json, Decodable, Encodable};
59 use term::color::{Color, RED, YELLOW, GREEN, CYAN};
61 use std::any::{Any, AnyRefExt};
63 use std::collections::BTreeMap;
67 use std::io::fs::PathExtensions;
68 use std::io::stdio::StdWriter;
69 use std::io::{File, ChanReader, ChanWriter};
71 use std::iter::repeat;
72 use std::num::{Float, FloatMath, Int};
74 use std::str::{FromStr, from_str};
75 use std::sync::mpsc::{channel, Sender};
76 use std::thread::{mod, Thread};
77 use std::thunk::{Thunk, Invoke};
78 use std::time::Duration;
80 // to be used by rustc to compile tests in libtest
82 pub use {Bencher, TestName, TestResult, TestDesc,
83 TestDescAndFn, TestOpts, TrFailed, TrIgnored, TrOk,
84 Metric, MetricMap, MetricAdded, MetricRemoved,
85 MetricChange, Improvement, Regression, LikelyNoise,
86 StaticTestFn, StaticTestName, DynTestName, DynTestFn,
87 run_test, test_main, test_main_static, filter_tests,
88 parse_opts, StaticBenchFn, ShouldFail};
93 // The name of a test. By convention this follows the rules for rust
94 // paths; i.e. it should be a series of identifiers separated by double
95 // colons. This way if some test runner wants to arrange the tests
96 // hierarchically it may.
// TestName (fragment): the `pub enum TestName {` header and the
// `DynTestName(String)` variant line are missing from this excerpt of the
// listing — only the static variant and the as_slice accessor survive here.
98 #[deriving(Clone, PartialEq, Eq, Hash)]
100 StaticTestName(&'static str),
// Borrow the test's name as a &str regardless of variant.
104 fn as_slice<'a>(&'a self) -> &'a str {
106 StaticTestName(s) => s,
107 DynTestName(ref s) => s.as_slice()
// Display-style formatting for TestName: delegates straight to the underlying
// &str name via as_slice(). (Closing braces fall outside this excerpt.)
111 impl Show for TestName {
112 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
113 self.as_slice().fmt(f)
117 #[deriving(Clone, Copy)]
// Pad this test's name to `column_count` characters so console output aligns.
// The `match align` dispatch (original lines 129-135) is missing from this
// excerpt; line 132 appears to be the left-pad arm and 136 the right-pad arm
// — TODO confirm against the full source.
125 fn padded_name(&self, column_count: uint, align: NamePadding) -> String {
126 let mut name = String::from_str(self.name.as_slice());
// saturating_sub: fill is 0 (no underflow) when the name already overflows the column.
127 let fill = column_count.saturating_sub(name.len());
128 let mut pad = repeat(" ").take(fill).collect::<String>();
// presumably the PadOnLeft arm: padding goes in front of the name.
132 pad.push_str(name.as_slice());
// presumably the PadOnRight arm: padding goes after the name.
136 name.push_str(pad.as_slice());
143 /// Represents a benchmark function.
144 pub trait TDynBenchFn {
145 fn run(&self, harness: &mut Bencher);
148 // A function that runs a test. If the function returns successfully,
149 // the test succeeds; if the function panics then the test fails. We
150 // may need to come up with a more clever definition of test in order
151 // to support isolation of tests into tasks.
154 StaticBenchFn(fn(&mut Bencher)),
155 StaticMetricFn(fn(&mut MetricMap)),
157 DynMetricFn(Box<for<'a> Invoke<&'a mut MetricMap>+'static>),
158 DynBenchFn(Box<TDynBenchFn+'static>)
// Map each kind of test function to its name-padding style: bench and metric
// fns pad on the right, plain test fns get no padding. (The `match *self {`
// line and closing braces are missing from this excerpt.)
162 fn padding(&self) -> NamePadding {
164 &StaticTestFn(..) => PadNone,
165 &StaticBenchFn(..) => PadOnRight,
166 &StaticMetricFn(..) => PadOnRight,
167 &DynTestFn(..) => PadNone,
168 &DynMetricFn(..) => PadOnRight,
169 &DynBenchFn(..) => PadOnRight,
// Debug-style formatting for TestFn: prints only the variant name, with the
// contained function/closure payload elided as `(..)`.
174 impl fmt::Show for TestFn {
175 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
176 f.write_str(match *self {
177 StaticTestFn(..) => "StaticTestFn(..)",
178 StaticBenchFn(..) => "StaticBenchFn(..)",
179 StaticMetricFn(..) => "StaticMetricFn(..)",
180 DynTestFn(..) => "DynTestFn(..)",
181 DynMetricFn(..) => "DynMetricFn(..)",
182 DynBenchFn(..) => "DynBenchFn(..)"
187 /// Manager of the benchmarking runs.
189 /// This is fed into functions marked with `#[bench]` to allow for
190 /// set-up & tear-down before running a piece of code repeatedly via a
199 #[deriving(Copy, Clone, Show, PartialEq, Eq, Hash)]
200 pub enum ShouldFail {
202 Yes(Option<&'static str>)
205 // The definition of a single test. A test runner will run a list of
207 #[deriving(Clone, Show, PartialEq, Eq, Hash)]
208 pub struct TestDesc {
211 pub should_fail: ShouldFail,
215 pub struct TestDescAndFn {
220 #[deriving(Clone, RustcEncodable, RustcDecodable, PartialEq, Show, Copy)]
// Construct a Metric from a measured value and its noise (uncertainty) bound.
227 pub fn new(value: f64, noise: f64) -> Metric {
228 Metric {value: value, noise: noise}
232 #[deriving(PartialEq)]
233 pub struct MetricMap(BTreeMap<String,Metric>);
// Manual Clone impl: destructure the MetricMap newtype and clone the inner
// BTreeMap of name -> Metric.
235 impl Clone for MetricMap {
236 fn clone(&self) -> MetricMap {
237 let MetricMap(ref map) = *self;
238 MetricMap(map.clone())
242 /// Analysis of a single change in metric
243 #[deriving(Copy, PartialEq, Show)]
244 pub enum MetricChange {
252 pub type MetricDiff = BTreeMap<String,MetricChange>;
254 // The default console test runner. It accepts the command line
255 // arguments and a vector of test_descs.
// Default console test runner: parse CLI args, run the tests, and panic (i.e.
// fail the test binary) on an option error, on any test failure, or on an I/O
// error. The successful-parse arm binding `opts` (original line ~259/261-262)
// is missing from this excerpt.
256 pub fn test_main(args: &[String], tests: Vec<TestDescAndFn> ) {
258 match parse_opts(args) {
260 Some(Err(msg)) => panic!("{}", msg),
263 match run_tests_console(&opts, tests) {
265 Ok(false) => panic!("Some tests failed"),
266 Err(e) => panic!("io error when running tests: {}", e),
270 // A variant optimized for invocation with a static test vector.
271 // This will panic (intentionally) when fed any dynamic tests, because
272 // it is copying the static values out into a dynamic vector and cannot
273 // copy dynamic values. It is doing this because from this point on
274 // a ~[TestDescAndFn] is used in order to effect ownership-transfer
275 // semantics into parallel test runners, which in turn requires a ~[]
276 // rather than a &[].
// Variant of test_main for a static test slice: copies static entries into an
// owned Vec (panicking, intentionally, if fed any non-static test) and then
// delegates to test_main. The `match t.testfn {` line (original ~279) and the
// collect/closing lines are missing from this excerpt.
277 pub fn test_main_static(args: &[String], tests: &[TestDescAndFn]) {
278 let owned_tests = tests.iter().map(|t| {
280 StaticTestFn(f) => TestDescAndFn { testfn: StaticTestFn(f), desc: t.desc.clone() },
281 StaticBenchFn(f) => TestDescAndFn { testfn: StaticBenchFn(f), desc: t.desc.clone() },
282 _ => panic!("non-static tests passed to test::test_main_static")
285 test_main(args, owned_tests)
289 pub enum ColorConfig {
295 pub struct TestOpts {
296 pub filter: Option<Regex>,
297 pub run_ignored: bool,
299 pub run_benchmarks: bool,
300 pub ratchet_metrics: Option<Path>,
301 pub ratchet_noise_percent: Option<f64>,
302 pub save_metrics: Option<Path>,
303 pub test_shard: Option<(uint,uint)>,
304 pub logfile: Option<Path>,
306 pub color: ColorConfig,
307 pub show_boxplot: bool,
308 pub boxplot_width: uint,
309 pub show_all_stats: bool,
314 fn new() -> TestOpts {
319 run_benchmarks: false,
320 ratchet_metrics: None,
321 ratchet_noise_percent: None,
329 show_all_stats: false,
334 /// Result of parsing the options.
335 pub type OptRes = Result<TestOpts, String>;
337 fn optgroups() -> Vec<getopts::OptGroup> {
338 vec!(getopts::optflag("", "ignored", "Run ignored tests"),
339 getopts::optflag("", "test", "Run tests and not benchmarks"),
340 getopts::optflag("", "bench", "Run benchmarks instead of tests"),
341 getopts::optflag("h", "help", "Display this message (longer with --help)"),
342 getopts::optopt("", "save-metrics", "Location to save bench metrics",
344 getopts::optopt("", "ratchet-metrics",
345 "Location to load and save metrics from. The metrics \
346 loaded are cause benchmarks to fail if they run too \
348 getopts::optopt("", "ratchet-noise-percent",
349 "Tests within N% of the recorded metrics will be \
350 considered as passing", "PERCENTAGE"),
351 getopts::optopt("", "logfile", "Write logs to the specified file instead \
353 getopts::optopt("", "test-shard", "run shard A, of B shards, worth of the testsuite",
355 getopts::optflag("", "nocapture", "don't capture stdout/stderr of each \
356 task, allow printing directly"),
357 getopts::optopt("", "color", "Configure coloring of output:
358 auto = colorize if stdout is a tty and tests are run on serially (default);
359 always = always colorize output;
360 never = never colorize output;", "auto|always|never"),
361 getopts::optflag("", "boxplot", "Display a boxplot of the benchmark statistics"),
362 getopts::optopt("", "boxplot-width", "Set the boxplot width (default 50)", "WIDTH"),
363 getopts::optflag("", "stats", "Display the benchmark min, max, and quartiles"))
366 fn usage(binary: &str) {
367 let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
370 The FILTER regex is tested against the name of all tests to run, and
371 only those tests that match are run.
373 By default, all tests are run in parallel. This can be altered with the
374 RUST_TEST_TASKS environment variable when running tests (set it to 1).
376 All tests have their standard output and standard error captured by default.
377 This can be overridden with the --nocapture flag or the RUST_TEST_NOCAPTURE=1
378 environment variable. Logging is not captured by default.
382 #[test] - Indicates a function is a test to be run. This function
384 #[bench] - Indicates a function is a benchmark to be run. This
385 function takes one argument (test::Bencher).
386 #[should_fail] - This function (also labeled with #[test]) will only pass if
387 the code causes a failure (an assertion failure or panic!)
388 A message may be provided, which the failure string must
389 contain: #[should_fail(expected = "foo")].
390 #[ignore] - When applied to a function which is already attributed as a
391 test, then the test runner will ignore these tests during
392 normal test runs. Running with --ignored will run these
394 usage = getopts::usage(message.as_slice(),
395 optgroups().as_slice()));
398 // Parses command line arguments into test options
399 pub fn parse_opts(args: &[String]) -> Option<OptRes> {
400 let args_ = args.tail();
402 match getopts::getopts(args_.as_slice(), optgroups().as_slice()) {
404 Err(f) => return Some(Err(f.to_string()))
407 if matches.opt_present("h") { usage(args[0].as_slice()); return None; }
409 let filter = if matches.free.len() > 0 {
410 let s = matches.free[0].as_slice();
411 match Regex::new(s) {
413 Err(e) => return Some(Err(format!("could not parse /{}/: {}", s, e)))
419 let run_ignored = matches.opt_present("ignored");
421 let logfile = matches.opt_str("logfile");
422 let logfile = logfile.map(|s| Path::new(s));
424 let run_benchmarks = matches.opt_present("bench");
425 let run_tests = ! run_benchmarks ||
426 matches.opt_present("test");
428 let ratchet_metrics = matches.opt_str("ratchet-metrics");
429 let ratchet_metrics = ratchet_metrics.map(|s| Path::new(s));
431 let ratchet_noise_percent = matches.opt_str("ratchet-noise-percent");
432 let ratchet_noise_percent =
433 ratchet_noise_percent.map(|s| s.as_slice().parse::<f64>().unwrap());
435 let save_metrics = matches.opt_str("save-metrics");
436 let save_metrics = save_metrics.map(|s| Path::new(s));
438 let test_shard = matches.opt_str("test-shard");
439 let test_shard = opt_shard(test_shard);
441 let mut nocapture = matches.opt_present("nocapture");
443 nocapture = os::getenv("RUST_TEST_NOCAPTURE").is_some();
446 let color = match matches.opt_str("color").as_ref().map(|s| s.as_slice()) {
447 Some("auto") | None => AutoColor,
448 Some("always") => AlwaysColor,
449 Some("never") => NeverColor,
451 Some(v) => return Some(Err(format!("argument for --color must be \
452 auto, always, or never (was {})",
456 let show_boxplot = matches.opt_present("boxplot");
457 let boxplot_width = match matches.opt_str("boxplot-width") {
459 match FromStr::from_str(width.as_slice()) {
460 Some(width) => width,
462 return Some(Err(format!("argument for --boxplot-width must be a uint")));
469 let show_all_stats = matches.opt_present("stats");
471 let test_opts = TestOpts {
473 run_ignored: run_ignored,
474 run_tests: run_tests,
475 run_benchmarks: run_benchmarks,
476 ratchet_metrics: ratchet_metrics,
477 ratchet_noise_percent: ratchet_noise_percent,
478 save_metrics: save_metrics,
479 test_shard: test_shard,
481 nocapture: nocapture,
483 show_boxplot: show_boxplot,
484 boxplot_width: boxplot_width,
485 show_all_stats: show_all_stats,
// Parse a test-shard spec of the form "A.B" (run shard A of B shards).
// The None-input arm and the in-bounds guard (original lines 492-494, 500,
// 503+) are missing from this excerpt; the visible arm panics when A is out
// of the 1..B range.
// NOTE(review): the panic message appears to be missing a closing ')' after
// "{b}" — confirm against upstream; cannot be fixed in a comment-only edit.
491 pub fn opt_shard(maybestr: Option<String>) -> Option<(uint,uint)> {
495 let mut it = s.split('.');
// Parse both dot-separated components as uints; a third component means malformed input.
496 match (it.next().and_then(|s| s.parse::<uint>()),
497 it.next().and_then(|s| s.parse::<uint>()),
499 (Some(a), Some(b), None) => {
501 panic!("tried to run shard {a}.{b}, but {a} is out of bounds \
502 (should be between 1 and {b}", a=a, b=b)
513 #[deriving(Clone, PartialEq)]
514 pub struct BenchSamples {
515 ns_iter_summ: stats::Summary<f64>,
519 #[deriving(Clone, PartialEq)]
520 pub enum TestResult {
524 TrMetrics(MetricMap),
525 TrBench(BenchSamples),
528 enum OutputLocation<T> {
529 Pretty(Box<term::Terminal<term::WriterWrapper> + Send>),
533 struct ConsoleTestState<T> {
534 log_out: Option<File>,
535 out: OutputLocation<T>,
539 show_all_stats: bool,
546 failures: Vec<(TestDesc, Vec<u8> )> ,
547 max_name_len: uint, // number of columns to fill when aligning names
550 impl<T: Writer> ConsoleTestState<T> {
551 pub fn new(opts: &TestOpts,
552 _: Option<T>) -> io::IoResult<ConsoleTestState<StdWriter>> {
553 let log_out = match opts.logfile {
554 Some(ref path) => Some(try!(File::create(path))),
557 let out = match term::stdout() {
558 None => Raw(io::stdio::stdout_raw()),
562 Ok(ConsoleTestState {
565 use_color: use_color(opts),
566 show_boxplot: opts.show_boxplot,
567 boxplot_width: opts.boxplot_width,
568 show_all_stats: opts.show_all_stats,
574 metrics: MetricMap::new(),
575 failures: Vec::new(),
// Status-word writers: each prints a fixed word through write_pretty in a
// conventional color (green = good: ok/added/improved; red = bad:
// FAILED/regressed; yellow = neutral: ignored/removed; cyan = measurement:
// metric/bench). Closing braces fall between the sampled lines.
580 pub fn write_ok(&mut self) -> io::IoResult<()> {
581 self.write_pretty("ok", term::color::GREEN)
584 pub fn write_failed(&mut self) -> io::IoResult<()> {
585 self.write_pretty("FAILED", term::color::RED)
588 pub fn write_ignored(&mut self) -> io::IoResult<()> {
589 self.write_pretty("ignored", term::color::YELLOW)
592 pub fn write_metric(&mut self) -> io::IoResult<()> {
593 self.write_pretty("metric", term::color::CYAN)
596 pub fn write_bench(&mut self) -> io::IoResult<()> {
597 self.write_pretty("bench", term::color::CYAN)
600 pub fn write_added(&mut self) -> io::IoResult<()> {
601 self.write_pretty("added", term::color::GREEN)
604 pub fn write_improved(&mut self) -> io::IoResult<()> {
605 self.write_pretty("improved", term::color::GREEN)
608 pub fn write_removed(&mut self) -> io::IoResult<()> {
609 self.write_pretty("removed", term::color::YELLOW)
612 pub fn write_regressed(&mut self) -> io::IoResult<()> {
613 self.write_pretty("regressed", term::color::RED)
616 pub fn write_pretty(&mut self,
618 color: term::color::Color) -> io::IoResult<()> {
620 Pretty(ref mut term) => {
622 try!(term.fg(color));
624 try!(term.write(word.as_bytes()));
630 Raw(ref mut stdout) => stdout.write(word.as_bytes())
634 pub fn write_plain(&mut self, s: &str) -> io::IoResult<()> {
636 Pretty(ref mut term) => term.write(s.as_bytes()),
637 Raw(ref mut stdout) => stdout.write(s.as_bytes())
641 pub fn write_run_start(&mut self, len: uint) -> io::IoResult<()> {
643 let noun = if len != 1 { "tests" } else { "test" };
644 self.write_plain(format!("\nrunning {} {}\n", len, noun).as_slice())
647 pub fn write_test_start(&mut self, test: &TestDesc,
648 align: NamePadding) -> io::IoResult<()> {
649 let name = test.padded_name(self.max_name_len, align);
650 self.write_plain(format!("test {} ... ", name).as_slice())
653 pub fn write_result(&mut self, result: &TestResult) -> io::IoResult<()> {
655 TrOk => self.write_ok(),
656 TrFailed => self.write_failed(),
657 TrIgnored => self.write_ignored(),
658 TrMetrics(ref mm) => {
659 try!(self.write_metric());
660 self.write_plain(format!(": {}", fmt_metrics(mm)).as_slice())
663 try!(self.write_bench());
665 if self.show_boxplot {
666 let mut wr = Vec::new();
668 try!(stats::write_boxplot(&mut wr, &bs.ns_iter_summ, self.boxplot_width));
670 let s = String::from_utf8(wr).unwrap();
672 try!(self.write_plain(format!(": {}", s).as_slice()));
675 if self.show_all_stats {
676 let mut wr = Vec::new();
678 try!(stats::write_5_number_summary(&mut wr, &bs.ns_iter_summ));
680 let s = String::from_utf8(wr).unwrap();
682 try!(self.write_plain(format!(": {}", s).as_slice()));
684 try!(self.write_plain(format!(": {}",
685 fmt_bench_samples(bs)).as_slice()));
691 self.write_plain("\n")
694 pub fn write_log(&mut self, test: &TestDesc,
695 result: &TestResult) -> io::IoResult<()> {
699 let s = format!("{} {}\n", match *result {
700 TrOk => "ok".to_string(),
701 TrFailed => "failed".to_string(),
702 TrIgnored => "ignored".to_string(),
703 TrMetrics(ref mm) => fmt_metrics(mm),
704 TrBench(ref bs) => fmt_bench_samples(bs)
705 }, test.name.as_slice());
706 o.write(s.as_bytes())
711 pub fn write_failures(&mut self) -> io::IoResult<()> {
712 try!(self.write_plain("\nfailures:\n"));
713 let mut failures = Vec::new();
714 let mut fail_out = String::new();
715 for &(ref f, ref stdout) in self.failures.iter() {
716 failures.push(f.name.to_string());
717 if stdout.len() > 0 {
718 fail_out.push_str(format!("---- {} stdout ----\n\t",
719 f.name.as_slice()).as_slice());
720 let output = String::from_utf8_lossy(stdout.as_slice());
721 fail_out.push_str(output.as_slice());
722 fail_out.push_str("\n");
725 if fail_out.len() > 0 {
726 try!(self.write_plain("\n"));
727 try!(self.write_plain(fail_out.as_slice()));
730 try!(self.write_plain("\nfailures:\n"));
732 for name in failures.iter() {
733 try!(self.write_plain(format!(" {}\n",
734 name.as_slice()).as_slice()));
739 pub fn write_metric_diff(&mut self, diff: &MetricDiff) -> io::IoResult<()> {
741 let mut improved = 0u;
742 let mut regressed = 0u;
744 let mut removed = 0u;
746 for (k, v) in diff.iter() {
748 LikelyNoise => noise += 1,
751 try!(self.write_added());
752 try!(self.write_plain(format!(": {}\n", *k).as_slice()));
756 try!(self.write_removed());
757 try!(self.write_plain(format!(": {}\n", *k).as_slice()));
759 Improvement(pct) => {
761 try!(self.write_plain(format!(": {} ", *k).as_slice()));
762 try!(self.write_improved());
763 try!(self.write_plain(format!(" by {:.2}%\n",
764 pct as f64).as_slice()));
768 try!(self.write_plain(format!(": {} ", *k).as_slice()));
769 try!(self.write_regressed());
770 try!(self.write_plain(format!(" by {:.2}%\n",
771 pct as f64).as_slice()));
775 try!(self.write_plain(format!("result of ratchet: {} metrics added, \
776 {} removed, {} improved, {} regressed, \
778 added, removed, improved, regressed,
781 try!(self.write_plain("updated ratchet file\n"));
783 try!(self.write_plain("left ratchet file untouched\n"));
788 pub fn write_run_finish(&mut self,
789 ratchet_metrics: &Option<Path>,
790 ratchet_pct: Option<f64>) -> io::IoResult<bool> {
791 assert!(self.passed + self.failed + self.ignored + self.measured == self.total);
793 let ratchet_success = match *ratchet_metrics {
796 try!(self.write_plain(format!("\nusing metrics ratchet: {}\n",
797 pth.display()).as_slice()));
801 try!(self.write_plain(format!("with noise-tolerance \
805 let (diff, ok) = self.metrics.ratchet(pth, ratchet_pct);
806 try!(self.write_metric_diff(&diff));
811 let test_success = self.failed == 0u;
813 try!(self.write_failures());
816 let success = ratchet_success && test_success;
818 try!(self.write_plain("\ntest result: "));
820 // There's no parallelism at this point so it's safe to use color
821 try!(self.write_ok());
823 try!(self.write_failed());
825 let s = format!(". {} passed; {} failed; {} ignored; {} measured\n\n",
826 self.passed, self.failed, self.ignored, self.measured);
827 try!(self.write_plain(s.as_slice()));
832 pub fn fmt_metrics(mm: &MetricMap) -> String {
833 let MetricMap(ref mm) = *mm;
834 let v : Vec<String> = mm.iter()
835 .map(|(k,v)| format!("{}: {} (+/- {})", *k,
836 v.value as f64, v.noise as f64))
841 pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
843 format!("{:>9} ns/iter (+/- {}) = {} MB/s",
844 bs.ns_iter_summ.median as uint,
845 (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint,
848 format!("{:>9} ns/iter (+/- {})",
849 bs.ns_iter_summ.median as uint,
850 (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint)
854 // A simple console test runner
855 pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn> ) -> io::IoResult<bool> {
857 fn callback<T: Writer>(event: &TestEvent, st: &mut ConsoleTestState<T>) -> io::IoResult<()> {
858 match (*event).clone() {
859 TeFiltered(ref filtered_tests) => st.write_run_start(filtered_tests.len()),
860 TeWait(ref test, padding) => st.write_test_start(test, padding),
861 TeResult(test, result, stdout) => {
862 try!(st.write_log(&test, &result));
863 try!(st.write_result(&result));
865 TrOk => st.passed += 1,
866 TrIgnored => st.ignored += 1,
868 let tname = test.name.as_slice();
869 let MetricMap(mm) = mm;
870 for (k,v) in mm.iter() {
872 .insert_metric(format!("{}.{}",
881 st.metrics.insert_metric(test.name.as_slice(),
882 bs.ns_iter_summ.median,
883 bs.ns_iter_summ.max - bs.ns_iter_summ.min);
888 st.failures.push((test, stdout));
896 let mut st = try!(ConsoleTestState::new(opts, None::<StdWriter>));
897 fn len_if_padded(t: &TestDescAndFn) -> uint {
898 match t.testfn.padding() {
900 PadOnLeft | PadOnRight => t.desc.name.as_slice().len(),
903 match tests.iter().max_by(|t|len_if_padded(*t)) {
905 let n = t.desc.name.as_slice();
906 st.max_name_len = n.len();
910 try!(run_tests(opts, tests, |x| callback(&x, &mut st)));
911 match opts.save_metrics {
914 try!(st.metrics.save(pth));
915 try!(st.write_plain(format!("\nmetrics saved to: {}",
916 pth.display()).as_slice()));
919 return st.write_run_finish(&opts.ratchet_metrics, opts.ratchet_noise_percent);
923 fn should_sort_failures_before_printing_them() {
924 let test_a = TestDesc {
925 name: StaticTestName("a"),
927 should_fail: ShouldFail::No
930 let test_b = TestDesc {
931 name: StaticTestName("b"),
933 should_fail: ShouldFail::No
936 let mut st = ConsoleTestState {
938 out: Raw(Vec::new()),
942 show_all_stats: false,
949 metrics: MetricMap::new(),
950 failures: vec!((test_b, Vec::new()), (test_a, Vec::new()))
953 st.write_failures().unwrap();
954 let s = match st.out {
955 Raw(ref m) => String::from_utf8_lossy(m[]),
956 Pretty(_) => unreachable!()
959 let apos = s.find_str("a").unwrap();
960 let bpos = s.find_str("b").unwrap();
961 assert!(apos < bpos);
// Decide whether console output should be colorized. The AutoColor arm
// colorizes only when tests run serially (one task) AND stdout is a tty.
// (The `match opts.color {` line and the Always/Never arms are missing from
// this excerpt.)
964 fn use_color(opts: &TestOpts) -> bool {
966 AutoColor => get_concurrency() == 1 && io::stdout().get_ref().isatty(),
974 TeFiltered(Vec<TestDesc> ),
975 TeWait(TestDesc, NamePadding),
976 TeResult(TestDesc, TestResult, Vec<u8> ),
979 pub type MonitorMsg = (TestDesc, TestResult, Vec<u8> );
981 unsafe impl Send for MonitorMsg {}
983 fn run_tests<F>(opts: &TestOpts,
984 tests: Vec<TestDescAndFn> ,
985 mut callback: F) -> io::IoResult<()> where
986 F: FnMut(TestEvent) -> io::IoResult<()>,
988 let filtered_tests = filter_tests(opts, tests);
989 let filtered_descs = filtered_tests.iter()
990 .map(|t| t.desc.clone())
993 try!(callback(TeFiltered(filtered_descs)));
995 let (filtered_tests, filtered_benchs_and_metrics): (Vec<_>, _) =
996 filtered_tests.into_iter().partition(|e| {
998 StaticTestFn(_) | DynTestFn(_) => true,
1003 // It's tempting to just spawn all the tests at once, but since we have
1004 // many tests that run in other processes we would be making a big mess.
1005 let concurrency = get_concurrency();
1007 let mut remaining = filtered_tests;
1008 remaining.reverse();
1009 let mut pending = 0;
1011 let (tx, rx) = channel::<MonitorMsg>();
1013 while pending > 0 || !remaining.is_empty() {
1014 while pending < concurrency && !remaining.is_empty() {
1015 let test = remaining.pop().unwrap();
1016 if concurrency == 1 {
1017 // We are doing one test at a time so we can print the name
1018 // of the test before we run it. Useful for debugging tests
1019 // that hang forever.
1020 try!(callback(TeWait(test.desc.clone(), test.testfn.padding())));
1022 run_test(opts, !opts.run_tests, test, tx.clone());
1026 let (desc, result, stdout) = rx.recv().unwrap();
1027 if concurrency != 1 {
1028 try!(callback(TeWait(desc.clone(), PadNone)));
1030 try!(callback(TeResult(desc, result, stdout)));
1034 // All benchmarks run at the end, in serial.
1035 // (this includes metric fns)
1036 for b in filtered_benchs_and_metrics.into_iter() {
1037 try!(callback(TeWait(b.desc.clone(), b.testfn.padding())));
1038 run_test(opts, !opts.run_benchmarks, b, tx.clone());
1039 let (test, result, stdout) = rx.recv().unwrap();
1040 try!(callback(TeResult(test, result, stdout)));
// Number of tests to run concurrently: honor the RUST_TEST_TASKS environment
// variable (must parse as a positive integer, else panic), falling back to
// the runtime's default scheduler thread count when unset. Intervening match
// scaffolding lines are missing from this excerpt.
1045 fn get_concurrency() -> uint {
1047 match os::getenv("RUST_TEST_TASKS") {
1049 let opt_n: Option<uint> = FromStr::from_str(s.as_slice());
1051 Some(n) if n > 0 => n,
1052 _ => panic!("RUST_TEST_TASKS is `{}`, should be a positive integer.", s)
1056 rt::default_sched_threads()
// Filter pipeline applied before running: (1) keep only tests whose name
// matches the --filter regex, (2) handle --ignored (the visible local
// `filter` fn flips ignore:false on ignored tests; which branch of the
// if/else it lives in falls in a gap of this excerpt — presumably the
// run_ignored branch), (3) sort alphabetically by name, (4) keep only this
// shard's slice when --test-shard was given.
1061 pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
1062 let mut filtered = tests;
1064 // Remove tests that don't match the test filter
1065 filtered = match opts.filter {
1068 filtered.into_iter()
1069 .filter(|test| re.is_match(test.desc.name.as_slice())).collect()
1073 // Maybe pull out the ignored test and unignore them
1074 filtered = if !opts.run_ignored {
1077 fn filter(test: TestDescAndFn) -> Option<TestDescAndFn> {
1078 if test.desc.ignore {
1079 let TestDescAndFn {desc, testfn} = test;
1080 Some(TestDescAndFn {
// Same test, but with the ignore flag cleared so it will actually run.
1081 desc: TestDesc {ignore: false, ..desc},
1088 filtered.into_iter().filter_map(|x| filter(x)).collect()
1091 // Sort the tests alphabetically
1092 filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));
1094 // Shard the remaining tests, if sharding requested.
1095 match opts.test_shard {
1098 filtered.into_iter().enumerate()
1099 // note: using a - 1 so that the valid shards, for example, are
1100 // 1.2 and 2.2 instead of 0.2 and 1.2
1101 .filter(|&(i,_)| i % b == (a - 1))
1108 pub fn run_test(opts: &TestOpts,
1110 test: TestDescAndFn,
1111 monitor_ch: Sender<MonitorMsg>) {
1113 let TestDescAndFn {desc, testfn} = test;
1115 if force_ignore || desc.ignore {
1116 monitor_ch.send((desc, TrIgnored, Vec::new())).unwrap();
1120 fn run_test_inner(desc: TestDesc,
1121 monitor_ch: Sender<MonitorMsg>,
1124 Thread::spawn(move || {
1125 let (tx, rx) = channel();
1126 let mut reader = ChanReader::new(rx);
1127 let stdout = ChanWriter::new(tx.clone());
1128 let stderr = ChanWriter::new(tx);
1129 let mut cfg = thread::Builder::new().name(match desc.name {
1130 DynTestName(ref name) => name.clone().to_string(),
1131 StaticTestName(name) => name.to_string(),
1134 drop((stdout, stderr));
1136 cfg = cfg.stdout(box stdout as Box<Writer + Send>);
1137 cfg = cfg.stderr(box stderr as Box<Writer + Send>);
1140 let result_guard = cfg.spawn(move || { testfn.invoke(()) });
1141 let stdout = reader.read_to_end().unwrap().into_iter().collect();
1142 let test_result = calc_result(&desc, result_guard.join());
1143 monitor_ch.send((desc.clone(), test_result, stdout)).unwrap();
1148 DynBenchFn(bencher) => {
1149 let bs = ::bench::benchmark(|harness| bencher.run(harness));
1150 monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
1153 StaticBenchFn(benchfn) => {
1154 let bs = ::bench::benchmark(|harness| (benchfn.clone())(harness));
1155 monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
1159 let mut mm = MetricMap::new();
1161 monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
1164 StaticMetricFn(f) => {
1165 let mut mm = MetricMap::new();
1167 monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
1170 DynTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture, f),
1171 StaticTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture,
1172 Thunk::new(move|| f()))
// Translate a finished test task's Result into a TestResult, honoring
// should_fail: Ok + ShouldFail::No passes; Err + ShouldFail::Yes(None) passes
// (any panic was expected); Err + ShouldFail::Yes(Some(msg)) passes only when
// the panic payload (downcast as String, else &'static str) contains `msg`.
// The catch-all failure arm(s) after line 1185 are missing from this excerpt.
1176 fn calc_result(desc: &TestDesc, task_result: Result<(), Box<Any+Send>>) -> TestResult {
1177 match (&desc.should_fail, task_result) {
1178 (&ShouldFail::No, Ok(())) |
1179 (&ShouldFail::Yes(None), Err(_)) => TrOk,
1180 (&ShouldFail::Yes(Some(msg)), Err(ref err))
1181 if err.downcast_ref::<String>()
1183 .or_else(|| err.downcast_ref::<&'static str>().map(|e| *e))
1184 .map(|e| e.contains(msg))
1185 .unwrap_or(false) => TrOk,
// Construct an empty MetricMap.
1192 pub fn new() -> MetricMap {
1193 MetricMap(BTreeMap::new())
1196 /// Load a MetricMap (JSON-encoded) from a file.
1200 /// This function will panic if the path does not exist or the path does not
1201 /// contain a valid metric map.
1202 pub fn load(p: &Path) -> MetricMap {
1203 assert!(p.exists());
1204 let mut f = File::open(p).unwrap();
1205 let value = json::from_reader(&mut f as &mut io::Reader).unwrap();
1206 let mut decoder = json::Decoder::new(value);
1207 MetricMap(match Decodable::decode(&mut decoder) {
1209 Err(e) => panic!("failure decoding JSON: {}", e)
1213 /// Write this MetricMap to a file as JSON.
1214 pub fn save(&self, p: &Path) -> io::IoResult<()> {
1215 let mut file = try!(File::create(p));
1216 let MetricMap(ref map) = *self;
1217 write!(&mut file, "{}", json::as_json(map))
1220 /// Compare against another MetricMap. Optionally compare all
1221 /// measurements in the maps using the provided `noise_pct` as a
1222 /// percentage of each value to consider noise. If `None`, each
1223 /// measurement's noise threshold is independently chosen as the
1224 /// maximum of that measurement's recorded noise quantity in either
// NOTE(review): the doc sentence is cut mid-clause (original line 1225 is
// missing from this excerpt), as are the lines that actually construct the
// Improvement/Regression results (originals ~1234, 1239-1265).
1226 pub fn compare_to_old(&self, old: &MetricMap,
1227 noise_pct: Option<f64>) -> MetricDiff {
1228 let mut diff : MetricDiff = BTreeMap::new();
1229 let MetricMap(ref selfmap) = *self;
1230 let MetricMap(ref old) = *old;
// First pass: classify every metric present in the old map.
1231 for (k, vold) in old.iter() {
1232 let r = match selfmap.get(k) {
// Present before, absent now.
1233 None => MetricRemoved,
1235 let delta = v.value - vold.value;
1236 let noise = match noise_pct {
// No explicit tolerance: use the larger recorded noise of the two samples.
1237 None => vold.noise.abs().max(v.noise.abs()),
// Explicit tolerance: a percentage of the old value.
1238 Some(pct) => vold.value * pct / 100.0
1240 if delta.abs() <= noise {
// max(EPSILON) guards the division when the old value is zero.
1243 let pct = delta.abs() / vold.value.max(f64::EPSILON) * 100.0;
1244 if vold.noise < 0.0 {
1245 // When 'noise' is negative, it means we want
1246 // to see deltas that go up over time, and can
1247 // only tolerate slight negative movement.
1254 // When 'noise' is positive, it means we want
1255 // to see deltas that go down over time, and
1256 // can only tolerate slight positive movements.
1266 diff.insert((*k).clone(), r);
// Second pass: anything only in the new map was added since the old snapshot.
1268 let MetricMap(ref map) = *self;
1269 for (k, _) in map.iter() {
1270 if !diff.contains_key(k) {
1271 diff.insert((*k).clone(), MetricAdded);
1277 /// Insert a named `value` (+/- `noise`) metric into the map. The value
1278 /// must be non-negative. The `noise` indicates the uncertainty of the
1279 /// metric, which doubles as the "noise range" of acceptable
1280 /// pairwise-regressions on this named value, when comparing from one
1281 /// metric to the next using `compare_to_old`.
1283 /// If `noise` is positive, then it means this metric is of a value
1284 /// you want to see grow smaller, so a change larger than `noise` in the
1285 /// positive direction represents a regression.
1287 /// If `noise` is negative, then it means this metric is of a value
1288 /// you want to see grow larger, so a change larger than `noise` in the
1289 /// negative direction represents a regression.
1290 pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
// (elided lines: `m` is presumably a Metric built from `value` and
// `noise` — TODO confirm against the full source)
1295 let MetricMap(ref mut map) = *self;
// Replaces any existing entry stored under the same name.
1296 map.insert(name.to_string(), m);
1299 /// Attempt to "ratchet" an external metric file. This involves loading
1300 /// metrics from a metric file (if it exists), comparing against
1301 /// the metrics in `self` using `compare_to_old`, and rewriting the
1302 /// file to contain the metrics in `self` if none of the
1303 /// `MetricChange`s are `Regression`. Returns the diff as well
1304 /// as a boolean indicating whether the ratchet succeeded.
1305 pub fn ratchet(&self, p: &Path, pct: Option<f64>) -> (MetricDiff, bool) {
// Load the previous metrics only when the file already exists.
1306 let old = if p.exists() {
// (elided lines: the load call and the `else` branch are not visible)
1312 let diff : MetricDiff = self.compare_to_old(&old, pct);
// The ratchet succeeds only if no metric regressed.
1313 let ok = diff.iter().all(|(_, v)| {
1314 // (elided: the surrounding match — only the Regression arm is visible)
1315 Regression(_) => false,
// On success, persist the current metrics over the old file.
// NOTE(review): `unwrap` panics on I/O failure rather than reporting it.
1321 self.save(p).unwrap();
1330 /// A function that is opaque to the optimizer, to allow benchmarks to
1331 /// pretend to use outputs to assist in avoiding dead-code
1334 /// This function is a no-op, and does not even read from `dummy`.
1335 pub fn black_box<T>(dummy: T) {
1336 // we need to "use" the argument in some way LLVM can't
// Empty inline asm that claims to consume the address of `dummy` in a
// register, so LLVM cannot prove the value is unused and delete the
// computation that produced it.
1338 unsafe {asm!("" : : "r"(&dummy))}
1343 /// Callback for benchmark functions to run in their body.
1344 pub fn iter<T, F>(&mut self, mut inner: F) where F: FnMut() -> T {
// Time `self.iterations` invocations of the closure as a single span
// and store the total in `self.dur`.
1345 self.dur = Duration::span(|| {
1346 let k = self.iterations;
1347 for _ in range(0u64, k) {
// (elided lines: the `inner()` call and the loop/closure closers
// are not visible in this extract)
// Total wall-clock time of the last timed run, in nanoseconds.
// NOTE(review): `unwrap` assumes the duration is representable as
// nanoseconds (num_nanoseconds returns None on overflow).
1353 pub fn ns_elapsed(&mut self) -> u64 {
1354 self.dur.num_nanoseconds().unwrap() as u64
// Average nanoseconds per iteration of the last run.
1357 pub fn ns_per_iter(&mut self) -> u64 {
// Zero iterations is handled specially (branch body elided here).
1358 if self.iterations == 0 {
// The `max(.., 1)` keeps the division safe even though the zero
// case is filtered above — belt and braces.
1361 self.ns_elapsed() / cmp::max(self.iterations, 1)
// Run one benchmark pass of exactly `n` iterations: store the count
// and hand `self` to `f` (the call to `f` is in an elided line —
// TODO confirm against the full source).
1365 pub fn bench_n<F>(&mut self, n: u64, f: F) where F: FnOnce(&mut Bencher) {
1366 self.iterations = n;
1370 // This is a more statistics-driven benchmark algorithm
// NOTE(review): several interior lines (loop headers, closers, the
// early-return result) are elided in this extract.
1371 pub fn auto_bench<F>(&mut self, mut f: F) -> stats::Summary<f64> where F: FnMut(&mut Bencher) {
1372 // Initial bench run to get ballpark figure.
1374 self.bench_n(n, |x| f(x));
1376 // Try to estimate iter count for 1ms falling back to 1m
1377 // iterations if first run took < 1ns.
1378 if self.ns_per_iter() == 0 {
// Scale the iteration count so one sample takes roughly 1ms.
1381 n = 1_000_000 / cmp::max(self.ns_per_iter(), 1);
1383 // if the first run took more than 1ms we don't want to just
1384 // be left doing 0 iterations on every loop. The unfortunate
1385 // side effect of not being able to do as many runs is
1386 // automatically handled by the statistical analysis below
1387 // (i.e. larger error bars).
1388 if n == 0 { n = 1; }
1390 let mut total_run = Duration::nanoseconds(0);
// Fixed-size sample buffer: 50 timing samples per round.
1391 let samples : &mut [f64] = &mut [0.0_f64; 50];
1393 let mut summ = None;
1394 let mut summ5 = None;
// Each round is timed so we can enforce the convergence/time limits.
1396 let loop_run = Duration::span(|| {
// Round 1: 50 samples of `n` iterations each.
1398 for p in samples.iter_mut() {
1399 self.bench_n(n, |x| f(x));
1400 *p = self.ns_per_iter() as f64;
// Clip the extreme 5% tails before summarizing, to tame outliers.
1403 stats::winsorize(samples, 5.0);
1404 summ = Some(stats::Summary::new(samples));
// Round 2: same buffer reused at 5x the iteration count, to check
// that the measurement is stable across scales.
1406 for p in samples.iter_mut() {
1407 self.bench_n(5 * n, |x| f(x));
1408 *p = self.ns_per_iter() as f64;
1411 stats::winsorize(samples, 5.0);
1412 summ5 = Some(stats::Summary::new(samples));
// Both rounds always run inside the span, so these cannot be None.
1414 let summ = summ.unwrap();
1415 let summ5 = summ5.unwrap();
1417 // If we've run for 100ms and seem to have converged to a
// ...stable result, accept it (convergence = low relative MAD and
// agreement between the n and 5n medians).
1419 if loop_run.num_milliseconds() > 100 &&
1420 summ.median_abs_dev_pct < 1.0 &&
1421 summ.median - summ5.median < summ5.median_abs_dev {
1425 total_run = total_run + loop_run;
1426 // Longest we ever run for is 3s.
1427 if total_run.num_seconds() > 3 {
1438 use std::time::Duration;
1439 use super::{Bencher, BenchSamples};
// Run `f` under the auto-benchmarker and package the result (summary
// plus derived MB/s throughput) as a BenchSamples.
// NOTE(review): the Bencher's remaining fields and the returned
// struct literal are in elided lines.
1441 pub fn benchmark<F>(f: F) -> BenchSamples where F: FnMut(&mut Bencher) {
1442 let mut bs = Bencher {
1444 dur: Duration::nanoseconds(0),
1448 let ns_iter_summ = bs.auto_bench(f);
// Clamp to >= 1ns per iteration so the divisions below are safe.
1450 let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
// Iterations per second, then bytes/s scaled to MB/s.
1451 let iter_s = 1_000_000_000 / ns_iter;
1452 let mb_s = (bs.bytes * iter_s) / 1_000_000;
1455 ns_iter_summ: ns_iter_summ,
1463 use test::{TrFailed, TrIgnored, TrOk, filter_tests, parse_opts,
1464 TestDesc, TestDescAndFn, TestOpts, run_test,
1465 Metric, MetricMap, MetricAdded, MetricRemoved,
1466 Improvement, Regression, LikelyNoise,
1467 StaticTestName, DynTestName, DynTestFn, ShouldFail};
1468 use std::io::TempDir;
1469 use std::thunk::Thunk;
1470 use std::sync::mpsc::channel;
// An ignored test must not be executed (and so must not report TrOk)
// when run with default options. (The `ignore: true` field is
// presumably in an elided line — TODO confirm.)
1473 pub fn do_not_run_ignored_tests() {
// The test body would panic if it ever actually ran.
1474 fn f() { panic!(); }
1475 let desc = TestDescAndFn {
1477 name: StaticTestName("whatever"),
1479 should_fail: ShouldFail::No,
1481 testfn: DynTestFn(Thunk::new(move|| f())),
1483 let (tx, rx) = channel();
1484 run_test(&TestOpts::new(), false, desc, tx);
// run_test reports its result over the channel.
1485 let (_, res, _) = rx.recv().unwrap();
1486 assert!(res != TrOk);
// An ignored test must be reported as TrIgnored, not merely skipped
// silently. (The `ignore: true` field and `fn f` body are in elided
// lines — TODO confirm.)
1490 pub fn ignored_tests_result_in_ignored() {
1492 let desc = TestDescAndFn {
1494 name: StaticTestName("whatever"),
1496 should_fail: ShouldFail::No,
1498 testfn: DynTestFn(Thunk::new(move|| f())),
1500 let (tx, rx) = channel();
1501 run_test(&TestOpts::new(), false, desc, tx);
1502 let (_, res, _) = rx.recv().unwrap();
1503 assert!(res == TrIgnored);
// A #[should_fail]-style test that panics (with no expected message)
// counts as a pass: the harness reports TrOk.
1507 fn test_should_fail() {
1508 fn f() { panic!(); }
1509 let desc = TestDescAndFn {
1511 name: StaticTestName("whatever"),
// No expected message: any panic satisfies the expectation.
1513 should_fail: ShouldFail::Yes(None)
1515 testfn: DynTestFn(Thunk::new(move|| f())),
1517 let (tx, rx) = channel();
1518 run_test(&TestOpts::new(), false, desc, tx);
1519 let (_, res, _) = rx.recv().unwrap();
1520 assert!(res == TrOk);
// When the expected message is a substring of the actual panic
// message, the should-fail test passes (TrOk).
1524 fn test_should_fail_good_message() {
1525 fn f() { panic!("an error message"); }
1526 let desc = TestDescAndFn {
1528 name: StaticTestName("whatever"),
// "error message" is contained in "an error message".
1530 should_fail: ShouldFail::Yes(Some("error message"))
1532 testfn: DynTestFn(Thunk::new(move|| f())),
1534 let (tx, rx) = channel();
1535 run_test(&TestOpts::new(), false, desc, tx);
1536 let (_, res, _) = rx.recv().unwrap();
1537 assert!(res == TrOk);
// When the actual panic message does not contain the expected text,
// the should-fail test is reported as TrFailed.
1541 fn test_should_fail_bad_message() {
1542 fn f() { panic!("an error message"); }
1543 let desc = TestDescAndFn {
1545 name: StaticTestName("whatever"),
// "foobar" does not occur in "an error message".
1547 should_fail: ShouldFail::Yes(Some("foobar"))
1549 testfn: DynTestFn(Thunk::new(move|| f())),
1551 let (tx, rx) = channel();
1552 run_test(&TestOpts::new(), false, desc, tx);
1553 let (_, res, _) = rx.recv().unwrap();
1554 assert!(res == TrFailed);
// A should-fail test whose body completes without panicking must be
// reported as TrFailed. (The non-panicking `fn f` body is in an
// elided line — TODO confirm.)
1558 fn test_should_fail_but_succeeds() {
1560 let desc = TestDescAndFn {
1562 name: StaticTestName("whatever"),
1564 should_fail: ShouldFail::Yes(None)
1566 testfn: DynTestFn(Thunk::new(move|| f())),
1568 let (tx, rx) = channel();
1569 run_test(&TestOpts::new(), false, desc, tx);
1570 let (_, res, _) = rx.recv().unwrap();
1571 assert!(res == TrFailed);
// The first positional CLI argument is interpreted as a regex test
// filter, and a matching test name passes that filter.
1575 fn first_free_arg_should_be_a_filter() {
1576 let args = vec!("progname".to_string(), "some_regex_filter".to_string());
1577 let opts = match parse_opts(args.as_slice()) {
// (elided: the success arm binding `opts`)
1579 _ => panic!("Malformed arg in first_free_arg_should_be_a_filter")
1581 assert!(opts.filter.expect("should've found filter").is_match("some_regex_filter"))
// `--ignored` on the command line must set `run_ignored` in the
// parsed options.
1585 fn parse_ignored_flag() {
1586 let args = vec!("progname".to_string(),
1587 "filter".to_string(),
1588 "--ignored".to_string());
1589 let opts = match parse_opts(args.as_slice()) {
// (elided: the success arm binding `opts`)
1591 _ => panic!("Malformed arg in parse_ignored_flag")
1593 assert!((opts.run_ignored));
1597 pub fn filter_for_ignored_option() {
1598 // When we run ignored tests the test filter should filter out all the
1599 // unignored tests and flip the ignore flag on the rest to false
1601 let mut opts = TestOpts::new();
1602 opts.run_tests = true;
1603 opts.run_ignored = true;
// Two test descriptors; their `ignore` fields are in elided lines —
// presumably one ignored and one not, so filtering keeps exactly one.
1608 name: StaticTestName("1"),
1610 should_fail: ShouldFail::No,
1612 testfn: DynTestFn(Thunk::new(move|| {})),
1616 name: StaticTestName("2"),
1618 should_fail: ShouldFail::No,
1620 testfn: DynTestFn(Thunk::new(move|| {})),
1622 let filtered = filter_tests(&opts, tests);
// Only the (formerly) ignored test survives, with ignore reset.
1624 assert_eq!(filtered.len(), 1);
1625 assert_eq!(filtered[0].desc.name.to_string(),
1627 assert!(filtered[0].desc.ignore == false);
// filter_tests must return tests sorted by name: the input list is
// deliberately unsorted and the expected list below is the same names
// in lexicographic order.
1631 pub fn sort_tests() {
1632 let mut opts = TestOpts::new();
1633 opts.run_tests = true;
// Input names, intentionally out of order.
1636 vec!("sha1::test".to_string(),
1637 "int::test_to_str".to_string(),
1638 "int::test_pow".to_string(),
1639 "test::do_not_run_ignored_tests".to_string(),
1640 "test::ignored_tests_result_in_ignored".to_string(),
1641 "test::first_free_arg_should_be_a_filter".to_string(),
1642 "test::parse_ignored_flag".to_string(),
1643 "test::filter_for_ignored_option".to_string(),
1644 "test::sort_tests".to_string());
// Build a dynamic no-op test for each name.
1648 let mut tests = Vec::new();
1649 for name in names.iter() {
1650 let test = TestDescAndFn {
1652 name: DynTestName((*name).clone()),
1654 should_fail: ShouldFail::No,
1656 testfn: DynTestFn(Thunk::new(testfn)),
1662 let filtered = filter_tests(&opts, tests);
// The same names, now in sorted order.
1665 vec!("int::test_pow".to_string(),
1666 "int::test_to_str".to_string(),
1667 "sha1::test".to_string(),
1668 "test::do_not_run_ignored_tests".to_string(),
1669 "test::filter_for_ignored_option".to_string(),
1670 "test::first_free_arg_should_be_a_filter".to_string(),
1671 "test::ignored_tests_result_in_ignored".to_string(),
1672 "test::parse_ignored_flag".to_string(),
1673 "test::sort_tests".to_string());
// Compare element-wise against the filtered output.
1675 for (a, b) in expected.iter().zip(filtered.iter()) {
1676 assert!(*a == b.desc.name.to_string());
// A regex filter keeps exactly the tests whose names match: the
// "yes::" names match "a.*b.+c", the "no::" names do not (note
// "no::abc" fails because `b.+c` requires a character between b and c).
1681 pub fn filter_tests_regex() {
1682 let mut opts = TestOpts::new();
1683 opts.filter = Some(::regex::Regex::new("a.*b.+c").unwrap());
1685 let mut names = ["yes::abXc", "yes::aXXXbXXXXc",
1686 "no::XYZ", "no::abc"];
// Build a dynamic test per name (fields partly elided).
1690 let tests = names.iter().map(|name| {
1693 name: DynTestName(name.to_string()),
1695 should_fail: ShouldFail::No,
1697 testfn: DynTestFn(Thunk::new(test_fn))
1700 let filtered = filter_tests(&opts, tests);
// Expected survivors: exactly the names starting with "yes".
1702 let expected: Vec<&str> =
1703 names.iter().map(|&s| s).filter(|name| name.starts_with("yes")).collect();
1705 assert_eq!(filtered.len(), expected.len());
1706 for (test, expected_name) in filtered.iter().zip(expected.iter()) {
1707 assert_eq!(test.desc.name.as_slice(), *expected_name);
// Exercises compare_to_old across every classification: noise,
// added/removed keys, and regressions/improvements in both the
// "want smaller" (positive noise) and "want larger" (negative noise)
// directions, with and without an explicit noise-percentage override.
1712 pub fn test_metricmap_compare() {
1713 let mut m1 = MetricMap::new();
1714 let mut m2 = MetricMap::new();
// Delta of 100 within a noise band of 200 -> LikelyNoise.
1715 m1.insert_metric("in-both-noise", 1000.0, 200.0);
1716 m2.insert_metric("in-both-noise", 1100.0, 200.0);
// Keys present in only one map -> MetricRemoved / MetricAdded.
1718 m1.insert_metric("in-first-noise", 1000.0, 2.0);
1719 m2.insert_metric("in-second-noise", 1000.0, 2.0);
// Positive noise = smaller is better; value doubled -> regression.
1721 m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
1722 m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);
// Positive noise; value halved -> improvement.
1724 m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
1725 m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);
// Negative noise = larger is better; value halved -> regression.
1727 m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
1728 m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);
// Negative noise; value doubled -> improvement.
1730 m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
1731 m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);
// Compare using each metric's own recorded noise (noise_pct = None).
1733 let diff1 = m2.compare_to_old(&m1, None);
1735 assert_eq!(*(diff1.get(&"in-both-noise".to_string()).unwrap()), LikelyNoise);
1736 assert_eq!(*(diff1.get(&"in-first-noise".to_string()).unwrap()), MetricRemoved);
1737 assert_eq!(*(diff1.get(&"in-second-noise".to_string()).unwrap()), MetricAdded);
1738 assert_eq!(*(diff1.get(&"in-both-want-downwards-but-regressed".to_string()).unwrap()),
1740 assert_eq!(*(diff1.get(&"in-both-want-downwards-and-improved".to_string()).unwrap()),
1742 assert_eq!(*(diff1.get(&"in-both-want-upwards-but-regressed".to_string()).unwrap()),
1744 assert_eq!(*(diff1.get(&"in-both-want-upwards-and-improved".to_string()).unwrap()),
1745 Improvement(100.0));
1746 assert_eq!(diff1.len(), 7);
// Compare again with a 200% noise override: large enough that the
// regressions/improvements above should change classification.
1748 let diff2 = m2.compare_to_old(&m1, Some(200.0));
1750 assert_eq!(*(diff2.get(&"in-both-noise".to_string()).unwrap()), LikelyNoise);
1751 assert_eq!(*(diff2.get(&"in-first-noise".to_string()).unwrap()), MetricRemoved);
1752 assert_eq!(*(diff2.get(&"in-second-noise".to_string()).unwrap()), MetricAdded);
1753 assert_eq!(*(diff2.get(&"in-both-want-downwards-but-regressed".to_string()).unwrap()),
1755 assert_eq!(*(diff2.get(&"in-both-want-downwards-and-improved".to_string()).unwrap()),
1757 assert_eq!(*(diff2.get(&"in-both-want-upwards-but-regressed".to_string()).unwrap()),
1759 assert_eq!(*(diff2.get(&"in-both-want-upwards-and-improved".to_string()).unwrap()),
1761 assert_eq!(diff2.len(), 7);
// End-to-end test of MetricMap::ratchet against a real temp file:
// a regressed runtime must block the ratchet (file unchanged), and a
// generous noise-percentage override must let it advance (file
// rewritten). NOTE(review): the end of this function is beyond this
// extract.
1765 pub fn ratchet_test() {
1767 let dpth = TempDir::new("test-ratchet").ok().expect("missing test for ratchet");
1768 let pth = dpth.path().join("ratchet.json");
// Baseline metrics...
1770 let mut m1 = MetricMap::new();
1771 m1.insert_metric("runtime", 1000.0, 2.0);
1772 m1.insert_metric("throughput", 50.0, 2.0);
// ...and a new set where runtime regressed by 10% (1000 -> 1100).
1774 let mut m2 = MetricMap::new();
1775 m2.insert_metric("runtime", 1100.0, 2.0);
1776 m2.insert_metric("throughput", 50.0, 2.0);
// Seed the ratchet file with the baseline.
1778 m1.save(&pth).unwrap();
1780 // Ask for a ratchet that should fail to advance.
1781 let (diff1, ok1) = m2.ratchet(&pth, None);
1782 assert_eq!(ok1, false);
1783 assert_eq!(diff1.len(), 2);
1784 assert_eq!(*(diff1.get(&"runtime".to_string()).unwrap()), Regression(10.0));
1785 assert_eq!(*(diff1.get(&"throughput".to_string()).unwrap()), LikelyNoise);
1787 // Check that it was not rewritten.
1788 let m3 = MetricMap::load(&pth);
1789 let MetricMap(m3) = m3;
1790 assert_eq!(m3.len(), 2);
1791 assert_eq!(*(m3.get(&"runtime".to_string()).unwrap()), Metric::new(1000.0, 2.0));
1792 assert_eq!(*(m3.get(&"throughput".to_string()).unwrap()), Metric::new(50.0, 2.0));
1794 // Ask for a ratchet with an explicit noise-percentage override,
1795 // that should advance.
1796 let (diff2, ok2) = m2.ratchet(&pth, Some(10.0));
1797 assert_eq!(ok2, true);
1798 assert_eq!(diff2.len(), 2);
1799 assert_eq!(*(diff2.get(&"runtime".to_string()).unwrap()), LikelyNoise);
1800 assert_eq!(*(diff2.get(&"throughput".to_string()).unwrap()), LikelyNoise);
1802 // Check that it was rewritten.
1803 let m4 = MetricMap::load(&pth);
1804 let MetricMap(m4) = m4;
1805 assert_eq!(m4.len(), 2);
1806 assert_eq!(*(m4.get(&"runtime".to_string()).unwrap()), Metric::new(1100.0, 2.0));
1807 assert_eq!(*(m4.get(&"throughput".to_string()).unwrap()), Metric::new(50.0, 2.0));