// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Support code for rustc's built-in unit-test and micro-benchmarking
//! framework.
//!
//! Almost all user code will only be interested in `BenchHarness` and
//! `black_box`. All other interactions (such as writing tests and
//! benchmarks themselves) should be done via the `#[test]` and
//! `#[bench]` attributes.
//!
//! See the [Testing Guide](../guide-testing.html) for more details.
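//!
//! As a rough sketch, a user crate compiled with `rustc --test` might contain
//! functions like the following (the function names are invented for
//! illustration only):
//!
//! ```rust
//! extern crate test;
//!
//! #[test]
//! fn arithmetic_still_works() {
//!     assert_eq!(2 + 2, 4);
//! }
//!
//! #[bench]
//! fn bench_addition(b: &mut test::BenchHarness) {
//!     b.iter(|| test::black_box(2u + 2u));
//! }
//! ```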
// Currently, not much of this is meant for users. It is intended to
// support the simplest interface possible for representing and
// running tests while providing a base that other test frameworks may
// build on.
#[crate_id = "test#0.10-pre"];
#[comment = "Rust internal test library only used by rustc"];
#[license = "MIT/ASL2"];
#[crate_type = "rlib"];
#[crate_type = "dylib"];
#[doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
      html_favicon_url = "http://www.rust-lang.org/favicon.ico",
      html_root_url = "http://static.rust-lang.org/doc/master")];

#[feature(asm, macro_rules)];
#[allow(deprecated_owned_vector)]; // NOTE: remove after stage0
extern crate collections;
extern crate serialize;

use collections::TreeMap;
use time::precise_time_ns;
use getopts::{OptGroup, optflag, optopt};
use serialize::{json, Decodable};
use serialize::json::ToJson;
use term::color::{Color, RED, YELLOW, GREEN, CYAN};
use std::from_str::FromStr;
use std::io::stdio::StdWriter;
use std::io::{File, ChanReader, ChanWriter};
// to be used by rustc to compile tests in libtest
pub use {BenchHarness, TestName, TestResult, TestDesc,
         TestDescAndFn, TestOpts, TrFailed, TrIgnored, TrOk,
         Metric, MetricMap, MetricAdded, MetricRemoved,
         MetricChange, Improvement, Regression, LikelyNoise,
         StaticTestFn, StaticTestName, DynTestName, DynTestFn,
         run_test, test_main, test_main_static, filter_tests,
         parse_opts, StaticBenchFn};
// The name of a test. By convention this follows the rules for rust
// paths; i.e. it should be a series of identifiers separated by double
// colons. This way if some test runner wants to arrange the tests
// hierarchically it may.
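// For example, the console runner below sorts and displays names such as
// `int::test_pow` or `sha1::test` (see the `sort_tests` test at the bottom
// of this file).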
    StaticTestName(&'static str),

impl fmt::Show for TestName {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
            StaticTestName(s) => f.buf.write_str(s),
            DynTestName(ref s) => f.buf.write_str(s.as_slice()),

enum NamePadding { PadNone, PadOnLeft, PadOnRight }

    fn padded_name(&self, column_count: uint, align: NamePadding) -> ~str {
        use std::num::Saturating;
        let name = self.name.to_str();
        let fill = column_count.saturating_sub(name.len());
        let pad = " ".repeat(fill);
            PadOnLeft => pad.append(name),
            PadOnRight => name.append(pad),
/// Represents a benchmark function.
pub trait TDynBenchFn {
    fn run(&self, harness: &mut BenchHarness);

// A function that runs a test. If the function returns successfully,
// the test succeeds; if the function fails then the test fails. We
// may need to come up with a more clever definition of test in order
// to support isolation of tests into tasks.
    StaticBenchFn(fn(&mut BenchHarness)),
    StaticMetricFn(proc(&mut MetricMap)),
    DynMetricFn(proc(&mut MetricMap)),
    DynBenchFn(~TDynBenchFn)

    fn padding(&self) -> NamePadding {
            &StaticTestFn(..) => PadNone,
            &StaticBenchFn(..) => PadOnRight,
            &StaticMetricFn(..) => PadOnRight,
            &DynTestFn(..) => PadNone,
            &DynMetricFn(..) => PadOnRight,
            &DynBenchFn(..) => PadOnRight,
/// Manager of the benchmarking runs.
///
/// This is fed into functions marked with `#[bench]` to allow for
/// set-up & tear-down before running a piece of code repeatedly via a
/// call to `iter`.
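///
/// A rough sketch of the usual pattern (names invented for illustration):
///
/// ```rust
/// #[bench]
/// fn bench_sum(b: &mut BenchHarness) {
///     let data = ~[1u, 2u, 3u];                        // set-up, not timed
///     b.iter(|| data.iter().fold(0u, |a, &x| a + x));  // only this closure is timed
/// }
/// ```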
pub struct BenchHarness {
    priv iterations: u64,

// The definition of a single test. A test runner will run a list of
// these.
pub struct TestDesc {

pub struct TestDescAndFn {

#[deriving(Clone, Encodable, Decodable, Eq, Show)]
    pub fn new(value: f64, noise: f64) -> Metric {
        Metric {value: value, noise: noise}

pub struct MetricMap(TreeMap<~str,Metric>);

impl Clone for MetricMap {
    fn clone(&self) -> MetricMap {
        let MetricMap(ref map) = *self;
        MetricMap(map.clone())

/// Analysis of a single change in metric
#[deriving(Eq, Show)]
pub enum MetricChange {

pub type MetricDiff = TreeMap<~str,MetricChange>;
// The default console test runner. It accepts the command line
// arguments and a vector of test_descs.
pub fn test_main(args: &[~str], tests: ~[TestDescAndFn]) {
    match parse_opts(args) {
        Some(Err(msg)) => fail!("{}", msg),
    match run_tests_console(&opts, tests) {
        Ok(false) => fail!("Some tests failed"),
        Err(e) => fail!("io error when running tests: {}", e),

// A variant optimized for invocation with a static test vector.
// This will fail (intentionally) when fed any dynamic tests, because
// it is copying the static values out into a dynamic vector and cannot
// copy dynamic values. It is doing this because from this point on
// a ~[TestDescAndFn] is used in order to effect ownership-transfer
// semantics into parallel test runners, which in turn requires a ~[]
// rather than a &[].
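//
// As a rough illustration, the harness generated by `rustc --test` ends up
// invoking something equivalent to the following (`TESTS` stands in for a
// generated static slice of `TestDescAndFn`; treat this as a sketch, not the
// exact generated code):
//
//     fn main() {
//         test_main_static(std::os::args(), TESTS);
//     }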
pub fn test_main_static(args: &[~str], tests: &[TestDescAndFn]) {
    let owned_tests = tests.map(|t| {
            TestDescAndFn { testfn: StaticTestFn(f), desc: t.desc.clone() },
            TestDescAndFn { testfn: StaticBenchFn(f), desc: t.desc.clone() },
            fail!("non-static tests passed to test::test_main_static");
    test_main(args, owned_tests)

pub struct TestOpts {
    filter: Option<~str>,
    run_benchmarks: bool,
    ratchet_metrics: Option<Path>,
    ratchet_noise_percent: Option<f64>,
    save_metrics: Option<Path>,
    test_shard: Option<(uint,uint)>,
    logfile: Option<Path>

/// Result of parsing the options.
pub type OptRes = Result<TestOpts, ~str>;
fn optgroups() -> ~[getopts::OptGroup] {
    ~[getopts::optflag("", "ignored", "Run ignored tests"),
      getopts::optflag("", "test", "Run tests and not benchmarks"),
      getopts::optflag("", "bench", "Run benchmarks instead of tests"),
      getopts::optflag("h", "help", "Display this message (longer with --help)"),
      getopts::optopt("", "save-metrics", "Location to save bench metrics",
      getopts::optopt("", "ratchet-metrics",
                      "Location to load and save metrics from. The metrics \
                       loaded cause benchmarks to fail if they run too \
      getopts::optopt("", "ratchet-noise-percent",
                      "Tests within N% of the recorded metrics will be \
                       considered as passing", "PERCENTAGE"),
      getopts::optopt("", "logfile", "Write logs to the specified file instead \
      getopts::optopt("", "test-shard", "run shard A, of B shards, worth of the testsuite",
fn usage(binary: &str, helpstr: &str) {
    let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
    println!("{}", getopts::usage(message, optgroups()));
    if helpstr == "help" {
The FILTER is matched against the name of all tests to run, and if any tests
have a substring match, only those tests are run.

By default, all tests are run in parallel. This can be altered with the
RUST_TEST_TASKS environment variable when running tests (set it to 1).

    #[test]        - Indicates a function is a test to be run. This function
                     takes no arguments.
    #[bench]       - Indicates a function is a benchmark to be run. This
                     function takes one argument (test::BenchHarness).
    #[should_fail] - This function (also labeled with #[test]) will only pass if
                     the code causes a failure (an assertion failure or fail!)
    #[ignore]      - When applied to a function which is already attributed as a
                     test, the test runner will ignore these tests during
                     normal test runs. Running with --ignored will run these
                     tests. This may also be written as #[ignore(cfg(...))] to
                     ignore the test on certain configurations.");
// Parses command line arguments into test options
pub fn parse_opts(args: &[~str]) -> Option<OptRes> {
    let args_ = args.tail();
    match getopts::getopts(args_, optgroups()) {
        Err(f) => return Some(Err(f.to_err_msg()))

    if matches.opt_present("h") { usage(args[0], "h"); return None; }
    if matches.opt_present("help") { usage(args[0], "help"); return None; }

        if matches.free.len() > 0 {
            Some(matches.free[0].clone())

    let run_ignored = matches.opt_present("ignored");

    let logfile = matches.opt_str("logfile");
    let logfile = logfile.map(|s| Path::new(s));

    let run_benchmarks = matches.opt_present("bench");
    let run_tests = ! run_benchmarks ||
        matches.opt_present("test");

    let ratchet_metrics = matches.opt_str("ratchet-metrics");
    let ratchet_metrics = ratchet_metrics.map(|s| Path::new(s));

    let ratchet_noise_percent = matches.opt_str("ratchet-noise-percent");
    let ratchet_noise_percent = ratchet_noise_percent.map(|s| from_str::<f64>(s).unwrap());

    let save_metrics = matches.opt_str("save-metrics");
    let save_metrics = save_metrics.map(|s| Path::new(s));

    let test_shard = matches.opt_str("test-shard");
    let test_shard = opt_shard(test_shard);

    let test_opts = TestOpts {
        run_ignored: run_ignored,
        run_tests: run_tests,
        run_benchmarks: run_benchmarks,
        ratchet_metrics: ratchet_metrics,
        ratchet_noise_percent: ratchet_noise_percent,
        save_metrics: save_metrics,
        test_shard: test_shard,
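// Parses a test-shard specification of the form `A.B`; for example,
// `--test-shard=1.3` selects every third test, starting at index 1 (see the
// `i % b == a` filter in `filter_tests` below).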
pub fn opt_shard(maybestr: Option<~str>) -> Option<(uint,uint)> {
            let vector = s.split('.').to_owned_vec();
            if vector.len() == 2 {
                match (from_str::<uint>(vector[0]),
                       from_str::<uint>(vector[1])) {
                    (Some(a), Some(b)) => Some((a, b)),

#[deriving(Clone, Eq)]
pub struct BenchSamples {
    priv ns_iter_summ: stats::Summary,

#[deriving(Clone, Eq)]
pub enum TestResult {
    TrMetrics(MetricMap),
    TrBench(BenchSamples),

enum OutputLocation<T> {
    Pretty(term::Terminal<T>),

struct ConsoleTestState<T> {
    log_out: Option<File>,
    out: OutputLocation<T>,
    failures: ~[(TestDesc, ~[u8])],
    max_name_len: uint, // number of columns to fill when aligning names
impl<T: Writer> ConsoleTestState<T> {
    pub fn new(opts: &TestOpts,
               _: Option<T>) -> io::IoResult<ConsoleTestState<StdWriter>> {
        let log_out = match opts.logfile {
            Some(ref path) => Some(try!(File::create(path))),
        let out = match term::Terminal::new(io::stdio::stdout_raw()) {
            Err(_) => Raw(io::stdio::stdout_raw()),
        Ok(ConsoleTestState {
            use_color: use_color(),
            metrics: MetricMap::new(),

    pub fn write_ok(&mut self) -> io::IoResult<()> {
        self.write_pretty("ok", term::color::GREEN)

    pub fn write_failed(&mut self) -> io::IoResult<()> {
        self.write_pretty("FAILED", term::color::RED)

    pub fn write_ignored(&mut self) -> io::IoResult<()> {
        self.write_pretty("ignored", term::color::YELLOW)

    pub fn write_metric(&mut self) -> io::IoResult<()> {
        self.write_pretty("metric", term::color::CYAN)

    pub fn write_bench(&mut self) -> io::IoResult<()> {
        self.write_pretty("bench", term::color::CYAN)

    pub fn write_added(&mut self) -> io::IoResult<()> {
        self.write_pretty("added", term::color::GREEN)

    pub fn write_improved(&mut self) -> io::IoResult<()> {
        self.write_pretty("improved", term::color::GREEN)

    pub fn write_removed(&mut self) -> io::IoResult<()> {
        self.write_pretty("removed", term::color::YELLOW)

    pub fn write_regressed(&mut self) -> io::IoResult<()> {
        self.write_pretty("regressed", term::color::RED)

    pub fn write_pretty(&mut self,
                        color: term::color::Color) -> io::IoResult<()> {
            Pretty(ref mut term) => {
                try!(term.fg(color));
                try!(term.write(word.as_bytes()));
            Raw(ref mut stdout) => stdout.write(word.as_bytes())

    pub fn write_plain(&mut self, s: &str) -> io::IoResult<()> {
            Pretty(ref mut term) => term.write(s.as_bytes()),
            Raw(ref mut stdout) => stdout.write(s.as_bytes())
    pub fn write_run_start(&mut self, len: uint) -> io::IoResult<()> {
        let noun = if len != 1 { &"tests" } else { &"test" };
        self.write_plain(format!("\nrunning {} {}\n", len, noun))

    pub fn write_test_start(&mut self, test: &TestDesc,
                            align: NamePadding) -> io::IoResult<()> {
        let name = test.padded_name(self.max_name_len, align);
        self.write_plain(format!("test {} ... ", name))

    pub fn write_result(&mut self, result: &TestResult) -> io::IoResult<()> {
            TrOk => self.write_ok(),
            TrFailed => self.write_failed(),
            TrIgnored => self.write_ignored(),
            TrMetrics(ref mm) => {
                try!(self.write_metric());
                self.write_plain(format!(": {}", fmt_metrics(mm)))
                try!(self.write_bench());
                self.write_plain(format!(": {}", fmt_bench_samples(bs)))
        self.write_plain("\n")

    pub fn write_log(&mut self, test: &TestDesc,
                     result: &TestResult) -> io::IoResult<()> {
                let s = format!("{} {}\n", match *result {
                    TrFailed => ~"failed",
                    TrIgnored => ~"ignored",
                    TrMetrics(ref mm) => fmt_metrics(mm),
                    TrBench(ref bs) => fmt_bench_samples(bs)
                }, test.name.to_str());
                o.write(s.as_bytes())
    pub fn write_failures(&mut self) -> io::IoResult<()> {
        try!(self.write_plain("\nfailures:\n"));
        let mut failures = ~[];
        let mut fail_out = ~"";
        for &(ref f, ref stdout) in self.failures.iter() {
            failures.push(f.name.to_str());
            if stdout.len() > 0 {
                fail_out.push_str(format!("---- {} stdout ----\n\t",
                let output = str::from_utf8_lossy(*stdout);
                fail_out.push_str(output.as_slice().replace("\n", "\n\t"));
                fail_out.push_str("\n");

        if fail_out.len() > 0 {
            try!(self.write_plain("\n"));
            try!(self.write_plain(fail_out));

        try!(self.write_plain("\nfailures:\n"));
        for name in failures.iter() {
            try!(self.write_plain(format!("    {}\n", name.to_str())));
    pub fn write_metric_diff(&mut self, diff: &MetricDiff) -> io::IoResult<()> {
        let mut improved = 0;
        let mut regressed = 0;
        for (k, v) in diff.iter() {
                LikelyNoise => noise += 1,
                    try!(self.write_added());
                    try!(self.write_plain(format!(": {}\n", *k)));
                    try!(self.write_removed());
                    try!(self.write_plain(format!(": {}\n", *k)));
                Improvement(pct) => {
                    try!(self.write_plain(format!(": {}", *k)));
                    try!(self.write_improved());
                    try!(self.write_plain(format!(" by {:.2f}%\n", pct as f64)));
                    try!(self.write_plain(format!(": {}", *k)));
                    try!(self.write_regressed());
                    try!(self.write_plain(format!(" by {:.2f}%\n", pct as f64)));
        try!(self.write_plain(format!("result of ratchet: {} metrics added, \
                                       {} removed, {} improved, {} regressed, \
                                      added, removed, improved, regressed,
            try!(self.write_plain("updated ratchet file\n"));
            try!(self.write_plain("left ratchet file untouched\n"));
    pub fn write_run_finish(&mut self,
                            ratchet_metrics: &Option<Path>,
                            ratchet_pct: Option<f64>) -> io::IoResult<bool> {
        assert!(self.passed + self.failed + self.ignored + self.measured == self.total);

        let ratchet_success = match *ratchet_metrics {
                try!(self.write_plain(format!("\nusing metrics ratchet: {}\n",
                    try!(self.write_plain(format!("with noise-tolerance \
                let (diff, ok) = self.metrics.ratchet(pth, ratchet_pct);
                try!(self.write_metric_diff(&diff));

        let test_success = self.failed == 0u;
            try!(self.write_failures());

        let success = ratchet_success && test_success;

        try!(self.write_plain("\ntest result: "));
            // There's no parallelism at this point so it's safe to use color
            try!(self.write_ok());
            try!(self.write_failed());
        let s = format!(". {} passed; {} failed; {} ignored; {} measured\n\n",
                        self.passed, self.failed, self.ignored, self.measured);
        try!(self.write_plain(s));
pub fn fmt_metrics(mm: &MetricMap) -> ~str {
    let MetricMap(ref mm) = *mm;
    let v : ~[~str] = mm.iter()
        .map(|(k,v)| format!("{}: {} (+/- {})",

pub fn fmt_bench_samples(bs: &BenchSamples) -> ~str {
        format!("{:>9} ns/iter (+/- {}) = {} MB/s",
                bs.ns_iter_summ.median as uint,
                (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint,
        format!("{:>9} ns/iter (+/- {})",
                bs.ns_iter_summ.median as uint,
                (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint)
// A simple console test runner
pub fn run_tests_console(opts: &TestOpts,
                         tests: ~[TestDescAndFn]) -> io::IoResult<bool> {
    fn callback<T: Writer>(event: &TestEvent,
                           st: &mut ConsoleTestState<T>) -> io::IoResult<()> {
        match (*event).clone() {
            TeFiltered(ref filtered_tests) => st.write_run_start(filtered_tests.len()),
            TeWait(ref test, padding) => st.write_test_start(test, padding),
            TeResult(test, result, stdout) => {
                try!(st.write_log(&test, &result));
                try!(st.write_result(&result));
                    TrOk => st.passed += 1,
                    TrIgnored => st.ignored += 1,
                        let tname = test.name.to_str();
                        let MetricMap(mm) = mm;
                        for (k,v) in mm.iter() {
                            st.metrics.insert_metric(tname + "." + *k,
                        st.metrics.insert_metric(test.name.to_str(),
                                                 bs.ns_iter_summ.median,
                                                 bs.ns_iter_summ.max - bs.ns_iter_summ.min);
                        st.failures.push((test, stdout));

    let mut st = try!(ConsoleTestState::new(opts, None::<StdWriter>));
    fn len_if_padded(t: &TestDescAndFn) -> uint {
        match t.testfn.padding() {
            PadOnLeft | PadOnRight => t.desc.name.to_str().len(),
    match tests.iter().max_by(|t| len_if_padded(*t)) {
            let n = t.desc.name.to_str();
            st.max_name_len = n.len();
    try!(run_tests(opts, tests, |x| callback(&x, &mut st)));
    match opts.save_metrics {
            try!(st.metrics.save(pth));
            try!(st.write_plain(format!("\nmetrics saved to: {}",
    return st.write_run_finish(&opts.ratchet_metrics, opts.ratchet_noise_percent);
fn should_sort_failures_before_printing_them() {
    use std::io::MemWriter;

    let test_a = TestDesc {
        name: StaticTestName("a"),

    let test_b = TestDesc {
        name: StaticTestName("b"),

    let mut st = ConsoleTestState {
        out: Raw(MemWriter::new()),
        metrics: MetricMap::new(),
        failures: ~[(test_b, ~[]), (test_a, ~[])]

    st.write_failures().unwrap();
    let s = match st.out {
        Raw(ref m) => str::from_utf8_lossy(m.get_ref()),
        Pretty(_) => unreachable!()

    let apos = s.as_slice().find_str("a").unwrap();
    let bpos = s.as_slice().find_str("b").unwrap();
    assert!(apos < bpos);

fn use_color() -> bool { return get_concurrency() == 1; }

    TeFiltered(~[TestDesc]),
    TeWait(TestDesc, NamePadding),
    TeResult(TestDesc, TestResult, ~[u8] /* stdout */),

pub type MonitorMsg = (TestDesc, TestResult, ~[u8] /* stdout */);
fn run_tests(opts: &TestOpts,
             tests: ~[TestDescAndFn],
             callback: |e: TestEvent| -> io::IoResult<()>) -> io::IoResult<()> {
    let filtered_tests = filter_tests(opts, tests);
    let filtered_descs = filtered_tests.map(|t| t.desc.clone());

    try!(callback(TeFiltered(filtered_descs)));

    let (filtered_tests, filtered_benchs_and_metrics) =
        filtered_tests.partition(|e| {
                StaticTestFn(_) | DynTestFn(_) => true,

    // It's tempting to just spawn all the tests at once, but since we have
    // many tests that run in other processes we would be making a big mess.
    let concurrency = get_concurrency();

    let mut remaining = filtered_tests;
    let (tx, rx) = channel::<MonitorMsg>();

    while pending > 0 || !remaining.is_empty() {
        while pending < concurrency && !remaining.is_empty() {
            let test = remaining.pop().unwrap();
            if concurrency == 1 {
                // We are doing one test at a time so we can print the name
                // of the test before we run it. Useful for debugging tests
                // that hang forever.
                try!(callback(TeWait(test.desc.clone(), test.testfn.padding())));
            run_test(!opts.run_tests, test, tx.clone());

        let (desc, result, stdout) = rx.recv();
        if concurrency != 1 {
            try!(callback(TeWait(desc.clone(), PadNone)));
        try!(callback(TeResult(desc, result, stdout)));

    // All benchmarks run at the end, in serial.
    // (this includes metric fns)
    for b in filtered_benchs_and_metrics.move_iter() {
        try!(callback(TeWait(b.desc.clone(), b.testfn.padding())));
        run_test(!opts.run_benchmarks, b, tx.clone());
        let (test, result, stdout) = rx.recv();
        try!(callback(TeResult(test, result, stdout)));
fn get_concurrency() -> uint {
    match os::getenv("RUST_TEST_TASKS") {
            let opt_n: Option<uint> = FromStr::from_str(s);
                Some(n) if n > 0 => n,
                _ => fail!("RUST_TEST_TASKS is `{}`, should be a positive integer.", s)
            rt::default_sched_threads()
pub fn filter_tests(opts: &TestOpts,
                    tests: ~[TestDescAndFn]) -> ~[TestDescAndFn]
    let mut filtered = tests;

    // Remove tests that don't match the test filter
    filtered = if opts.filter.is_none() {
        let filter_str = match opts.filter {
            Some(ref f) => (*f).clone(),

        fn filter_fn(test: TestDescAndFn, filter_str: &str) ->
            Option<TestDescAndFn> {
            if test.desc.name.to_str().contains(filter_str) {

        filtered.move_iter().filter_map(|x| filter_fn(x, filter_str)).collect()

    // Maybe pull out the ignored tests and unignore them
    filtered = if !opts.run_ignored {
        fn filter(test: TestDescAndFn) -> Option<TestDescAndFn> {
            if test.desc.ignore {
                let TestDescAndFn {desc, testfn} = test;
                    desc: TestDesc {ignore: false, ..desc},

        filtered.move_iter().filter_map(|x| filter(x)).collect()

    // Sort the tests alphabetically
    filtered.sort_by(|t1, t2| t1.desc.name.to_str().cmp(&t2.desc.name.to_str()));

    // Shard the remaining tests, if sharding requested.
    match opts.test_shard {
            filtered.move_iter().enumerate()
                .filter(|&(i,_)| i % b == a)
pub fn run_test(force_ignore: bool,
                monitor_ch: Sender<MonitorMsg>) {
    let TestDescAndFn {desc, testfn} = test;

    if force_ignore || desc.ignore {
        monitor_ch.send((desc, TrIgnored, ~[]));

    fn run_test_inner(desc: TestDesc,
                      monitor_ch: Sender<MonitorMsg>,
            let (tx, rx) = channel();
            let mut reader = ChanReader::new(rx);
            let stdout = ChanWriter::new(tx.clone());
            let stderr = ChanWriter::new(tx);
            let mut task = task::task().named(match desc.name {
                DynTestName(ref name) => name.clone().into_maybe_owned(),
                StaticTestName(name) => name.into_maybe_owned(),
            task.opts.stdout = Some(~stdout as ~Writer);
            task.opts.stderr = Some(~stderr as ~Writer);
            let result_future = task.future_result();

            let stdout = reader.read_to_end().unwrap();
            let task_result = result_future.recv();
            let test_result = calc_result(&desc, task_result.is_ok());
            monitor_ch.send((desc.clone(), test_result, stdout));

        DynBenchFn(bencher) => {
            let bs = ::bench::benchmark(|harness| bencher.run(harness));
            monitor_ch.send((desc, TrBench(bs), ~[]));
        StaticBenchFn(benchfn) => {
            let bs = ::bench::benchmark(|harness| benchfn(harness));
            monitor_ch.send((desc, TrBench(bs), ~[]));
            let mut mm = MetricMap::new();
            monitor_ch.send((desc, TrMetrics(mm), ~[]));
        StaticMetricFn(f) => {
            let mut mm = MetricMap::new();
            monitor_ch.send((desc, TrMetrics(mm), ~[]));
        DynTestFn(f) => run_test_inner(desc, monitor_ch, f),
        StaticTestFn(f) => run_test_inner(desc, monitor_ch, proc() f())

fn calc_result(desc: &TestDesc, task_succeeded: bool) -> TestResult {
        if desc.should_fail { TrFailed }
        if desc.should_fail { TrOk }
impl ToJson for Metric {
    fn to_json(&self) -> json::Json {
        let mut map = ~TreeMap::new();
        map.insert(~"value", json::Number(self.value));
        map.insert(~"noise", json::Number(self.noise));

    pub fn new() -> MetricMap {
        MetricMap(TreeMap::new())

    /// Load a MetricMap from a file.
    ///
    /// This function will fail if the path does not exist or the path does not
    /// contain a valid metric map.
    pub fn load(p: &Path) -> MetricMap {
        assert!(p.exists());
        let mut f = File::open(p).unwrap();
        let value = json::from_reader(&mut f as &mut io::Reader).unwrap();
        let mut decoder = json::Decoder::new(value);
        MetricMap(Decodable::decode(&mut decoder))
    /// Write a MetricMap to a file.
    pub fn save(&self, p: &Path) -> io::IoResult<()> {
        let mut file = try!(File::create(p));
        let MetricMap(ref map) = *self;
        map.to_json().to_pretty_writer(&mut file)
    /// Compare against another MetricMap. Optionally compare all
    /// measurements in the maps using the provided `noise_pct` as a
    /// percentage of each value to consider noise. If `None`, each
    /// measurement's noise threshold is independently chosen as the
    /// maximum of that measurement's recorded noise quantity in either
    /// map.
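    ///
    /// A small sketch of how this is typically driven (the metric name is
    /// invented for illustration):
    ///
    /// ```rust
    /// let mut old = MetricMap::new();
    /// let mut new = MetricMap::new();
    /// old.insert_metric("runtime", 1000.0, 10.0);
    /// new.insert_metric("runtime", 1500.0, 10.0);
    /// let diff = new.compare_to_old(&old, None);
    /// // `diff` now maps "runtime" to `Regression(50.0)`.
    /// ```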
    pub fn compare_to_old(&self, old: &MetricMap,
                          noise_pct: Option<f64>) -> MetricDiff {
        let mut diff : MetricDiff = TreeMap::new();
        let MetricMap(ref selfmap) = *self;
        let MetricMap(ref old) = *old;
        for (k, vold) in old.iter() {
            let r = match selfmap.find(k) {
                None => MetricRemoved,
                    let delta = v.value - vold.value;
                    let noise = match noise_pct {
                        None => vold.noise.abs().max(v.noise.abs()),
                        Some(pct) => vold.value * pct / 100.0
                    if delta.abs() <= noise {
                        let pct = delta.abs() / vold.value.max(f64::EPSILON) * 100.0;
                        if vold.noise < 0.0 {
                            // When 'noise' is negative, it means we want
                            // to see deltas that go up over time, and can
                            // only tolerate slight negative movement.
                            // When 'noise' is positive, it means we want
                            // to see deltas that go down over time, and
                            // can only tolerate slight positive movements.
            diff.insert((*k).clone(), r);
        let MetricMap(ref map) = *self;
        for (k, _) in map.iter() {
            if !diff.contains_key(k) {
                diff.insert((*k).clone(), MetricAdded);
    /// Insert a named `value` (+/- `noise`) metric into the map. The value
    /// must be non-negative. The `noise` indicates the uncertainty of the
    /// metric, which doubles as the "noise range" of acceptable
    /// pairwise-regressions on this named value, when comparing from one
    /// metric to the next using `compare_to_old`.
    ///
    /// If `noise` is positive, then it means this metric is of a value
    /// you want to see grow smaller, so a change larger than `noise` in the
    /// positive direction represents a regression.
    ///
    /// If `noise` is negative, then it means this metric is of a value
    /// you want to see grow larger, so a change larger than `noise` in the
    /// negative direction represents a regression.
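    ///
    /// For example (the metric names are invented for illustration):
    ///
    /// ```rust
    /// let mut mm = MetricMap::new();
    /// mm.insert_metric("runtime-ns", 1500.0, 20.0);   // smaller is better
    /// mm.insert_metric("throughput", 250.0, -10.0);   // larger is better
    /// ```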
    pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
        let MetricMap(ref mut map) = *self;
        map.insert(name.to_owned(), m);
    /// Attempt to "ratchet" an external metric file. This involves loading
    /// metrics from a metric file (if it exists), comparing against
    /// the metrics in `self` using `compare_to_old`, and rewriting the
    /// file to contain the metrics in `self` if none of the
    /// `MetricChange`s are `Regression`. Returns the diff as well
    /// as a boolean indicating whether the ratchet succeeded.
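    ///
    /// A sketch of typical use, assuming `metrics` is the `MetricMap` under
    /// test and the file name is made up (the `ratchet_test` at the bottom of
    /// this file is a complete example):
    ///
    /// ```rust
    /// let (diff, ok) = metrics.ratchet(&Path::new("ratchet.json"), Some(10.0));
    /// if !ok {
    ///     fail!("one or more metrics regressed");
    /// }
    /// ```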
    pub fn ratchet(&self, p: &Path, pct: Option<f64>) -> (MetricDiff, bool) {
        let old = if p.exists() {

        let diff : MetricDiff = self.compare_to_old(&old, pct);
        let ok = diff.iter().all(|(_, v)| {
                Regression(_) => false,
            self.save(p).unwrap();
/// A function that is opaque to the optimizer, to allow benchmarks to
/// pretend to use outputs to assist in avoiding dead-code
/// elimination.
///
/// This function is a no-op, and does not even read from `dummy`.
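///
/// A minimal sketch of its use inside a benchmark (the computation is
/// arbitrary and chosen only for illustration):
///
/// ```rust
/// #[bench]
/// fn bench_noop(b: &mut test::BenchHarness) {
///     b.iter(|| test::black_box(42u));
/// }
/// ```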
pub fn black_box<T>(dummy: T) {
    // we need to "use" the argument in some way LLVM can't introspect.
    unsafe {asm!("" : : "r"(&dummy))}
    /// Callback for benchmark functions to run in their body.
    pub fn iter<T>(&mut self, inner: || -> T) {
        self.ns_start = precise_time_ns();
        let k = self.iterations;
        for _ in range(0u64, k) {
        self.ns_end = precise_time_ns();

    pub fn ns_elapsed(&mut self) -> u64 {
        if self.ns_start == 0 || self.ns_end == 0 {
            self.ns_end - self.ns_start

    pub fn ns_per_iter(&mut self) -> u64 {
        if self.iterations == 0 {
            self.ns_elapsed() / cmp::max(self.iterations, 1)

    pub fn bench_n(&mut self, n: u64, f: |&mut BenchHarness|) {
        self.iterations = n;
    // This is a more statistics-driven benchmark algorithm
    pub fn auto_bench(&mut self, f: |&mut BenchHarness|) -> stats::Summary {
        // Initial bench run to get ballpark figure.
        self.bench_n(n, |x| f(x));

        // Try to estimate iter count for 1ms falling back to 1m
        // iterations if first run took < 1ns.
        if self.ns_per_iter() == 0 {
            n = 1_000_000 / cmp::max(self.ns_per_iter(), 1);

        // if the first run took more than 1ms we don't want to just
        // be left doing 0 iterations on every loop. The unfortunate
        // side effect of not being able to do as many runs is
        // automatically handled by the statistical analysis below
        // (i.e. larger error bars).
        if n == 0 { n = 1; }

        let mut total_run = 0;
        let samples : &mut [f64] = [0.0_f64, ..50];
            let loop_start = precise_time_ns();

            for p in samples.mut_iter() {
                self.bench_n(n, |x| f(x));
                *p = self.ns_per_iter() as f64;

            stats::winsorize(samples, 5.0);
            let summ = stats::Summary::new(samples);

            for p in samples.mut_iter() {
                self.bench_n(5 * n, |x| f(x));
                *p = self.ns_per_iter() as f64;

            stats::winsorize(samples, 5.0);
            let summ5 = stats::Summary::new(samples);

            let now = precise_time_ns();
            let loop_run = now - loop_start;

            // If we've run for 100ms and seem to have converged to a
            // stable median.
            if loop_run > 100_000_000 &&
                summ.median_abs_dev_pct < 1.0 &&
                summ.median - summ5.median < summ5.median_abs_dev {

            total_run += loop_run;
            // Longest we ever run for is 3s.
            if total_run > 3_000_000_000 {
    use super::{BenchHarness, BenchSamples};

    pub fn benchmark(f: |&mut BenchHarness|) -> BenchSamples {
        let mut bs = BenchHarness {

        let ns_iter_summ = bs.auto_bench(f);

        let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
        let iter_s = 1_000_000_000 / ns_iter;
        let mb_s = (bs.bytes * iter_s) / 1_000_000;

            ns_iter_summ: ns_iter_summ,
    use test::{TrFailed, TrIgnored, TrOk, filter_tests, parse_opts,
               TestDesc, TestDescAndFn, TestOpts, run_test,
               Metric, MetricMap, MetricAdded, MetricRemoved,
               Improvement, Regression, LikelyNoise,
               StaticTestName, DynTestName, DynTestFn};
    use std::io::TempDir;

    pub fn do_not_run_ignored_tests() {
        let desc = TestDescAndFn {
                name: StaticTestName("whatever"),
            testfn: DynTestFn(proc() f()),
        let (tx, rx) = channel();
        run_test(false, desc, tx);
        let (_, res, _) = rx.recv();
        assert!(res != TrOk);

    pub fn ignored_tests_result_in_ignored() {
        let desc = TestDescAndFn {
                name: StaticTestName("whatever"),
            testfn: DynTestFn(proc() f()),
        let (tx, rx) = channel();
        run_test(false, desc, tx);
        let (_, res, _) = rx.recv();
        assert!(res == TrIgnored);

    fn test_should_fail() {
        let desc = TestDescAndFn {
                name: StaticTestName("whatever"),
            testfn: DynTestFn(proc() f()),
        let (tx, rx) = channel();
        run_test(false, desc, tx);
        let (_, res, _) = rx.recv();
        assert!(res == TrOk);

    fn test_should_fail_but_succeeds() {
        let desc = TestDescAndFn {
                name: StaticTestName("whatever"),
            testfn: DynTestFn(proc() f()),
        let (tx, rx) = channel();
        run_test(false, desc, tx);
        let (_, res, _) = rx.recv();
        assert!(res == TrFailed);
    fn first_free_arg_should_be_a_filter() {
        let args = ~[~"progname", ~"filter"];
        let opts = match parse_opts(args) {
            _ => fail!("Malformed arg in first_free_arg_should_be_a_filter")
        assert!("filter" == opts.filter.clone().unwrap());

    fn parse_ignored_flag() {
        let args = ~[~"progname", ~"filter", ~"--ignored"];
        let opts = match parse_opts(args) {
            _ => fail!("Malformed arg in parse_ignored_flag")
        assert!((opts.run_ignored));

    pub fn filter_for_ignored_option() {
        // When we run ignored tests the test filter should filter out all the
        // unignored tests and flip the ignore flag on the rest to false

        let opts = TestOpts {
            run_benchmarks: false,
            ratchet_noise_percent: None,
            ratchet_metrics: None,
                name: StaticTestName("1"),
            testfn: DynTestFn(proc() {}),
                name: StaticTestName("2"),
            testfn: DynTestFn(proc() {}),
        let filtered = filter_tests(&opts, tests);

        assert_eq!(filtered.len(), 1);
        assert_eq!(filtered[0].desc.name.to_str(), ~"1");
        assert!(filtered[0].desc.ignore == false);
    pub fn sort_tests() {
        let opts = TestOpts {
            run_benchmarks: false,
            ratchet_noise_percent: None,
            ratchet_metrics: None,

        let names =
            ~[~"sha1::test", ~"int::test_to_str", ~"int::test_pow",
              ~"test::do_not_run_ignored_tests",
              ~"test::ignored_tests_result_in_ignored",
              ~"test::first_free_arg_should_be_a_filter",
              ~"test::parse_ignored_flag", ~"test::filter_for_ignored_option",
              ~"test::sort_tests"];

        let mut tests = ~[];
        for name in names.iter() {
            let test = TestDescAndFn {
                    name: DynTestName((*name).clone()),
                testfn: DynTestFn(testfn),

        let filtered = filter_tests(&opts, tests);

        let expected =
            ~[~"int::test_pow", ~"int::test_to_str", ~"sha1::test",
              ~"test::do_not_run_ignored_tests",
              ~"test::filter_for_ignored_option",
              ~"test::first_free_arg_should_be_a_filter",
              ~"test::ignored_tests_result_in_ignored",
              ~"test::parse_ignored_flag",
              ~"test::sort_tests"];

        for (a, b) in expected.iter().zip(filtered.iter()) {
            assert!(*a == b.desc.name.to_str());
    pub fn test_metricmap_compare() {
        let mut m1 = MetricMap::new();
        let mut m2 = MetricMap::new();
        m1.insert_metric("in-both-noise", 1000.0, 200.0);
        m2.insert_metric("in-both-noise", 1100.0, 200.0);

        m1.insert_metric("in-first-noise", 1000.0, 2.0);
        m2.insert_metric("in-second-noise", 1000.0, 2.0);

        m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
        m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);

        m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
        m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);

        m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
        m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);

        m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
        m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);

        let diff1 = m2.compare_to_old(&m1, None);

        assert_eq!(*(diff1.find(&~"in-both-noise").unwrap()), LikelyNoise);
        assert_eq!(*(diff1.find(&~"in-first-noise").unwrap()), MetricRemoved);
        assert_eq!(*(diff1.find(&~"in-second-noise").unwrap()), MetricAdded);
        assert_eq!(*(diff1.find(&~"in-both-want-downwards-but-regressed").unwrap()),
        assert_eq!(*(diff1.find(&~"in-both-want-downwards-and-improved").unwrap()),
        assert_eq!(*(diff1.find(&~"in-both-want-upwards-but-regressed").unwrap()),
        assert_eq!(*(diff1.find(&~"in-both-want-upwards-and-improved").unwrap()),
                   Improvement(100.0));
        assert_eq!(diff1.len(), 7);

        let diff2 = m2.compare_to_old(&m1, Some(200.0));

        assert_eq!(*(diff2.find(&~"in-both-noise").unwrap()), LikelyNoise);
        assert_eq!(*(diff2.find(&~"in-first-noise").unwrap()), MetricRemoved);
        assert_eq!(*(diff2.find(&~"in-second-noise").unwrap()), MetricAdded);
        assert_eq!(*(diff2.find(&~"in-both-want-downwards-but-regressed").unwrap()), LikelyNoise);
        assert_eq!(*(diff2.find(&~"in-both-want-downwards-and-improved").unwrap()), LikelyNoise);
        assert_eq!(*(diff2.find(&~"in-both-want-upwards-but-regressed").unwrap()), LikelyNoise);
        assert_eq!(*(diff2.find(&~"in-both-want-upwards-and-improved").unwrap()), LikelyNoise);
        assert_eq!(diff2.len(), 7);
    pub fn ratchet_test() {

        let dpth = TempDir::new("test-ratchet").expect("missing test for ratchet");
        let pth = dpth.path().join("ratchet.json");

        let mut m1 = MetricMap::new();
        m1.insert_metric("runtime", 1000.0, 2.0);
        m1.insert_metric("throughput", 50.0, 2.0);

        let mut m2 = MetricMap::new();
        m2.insert_metric("runtime", 1100.0, 2.0);
        m2.insert_metric("throughput", 50.0, 2.0);

        m1.save(&pth).unwrap();

        // Ask for a ratchet that should fail to advance.
        let (diff1, ok1) = m2.ratchet(&pth, None);
        assert_eq!(ok1, false);
        assert_eq!(diff1.len(), 2);
        assert_eq!(*(diff1.find(&~"runtime").unwrap()), Regression(10.0));
        assert_eq!(*(diff1.find(&~"throughput").unwrap()), LikelyNoise);

        // Check that it was not rewritten.
        let m3 = MetricMap::load(&pth);
        let MetricMap(m3) = m3;
        assert_eq!(m3.len(), 2);
        assert_eq!(*(m3.find(&~"runtime").unwrap()), Metric::new(1000.0, 2.0));
        assert_eq!(*(m3.find(&~"throughput").unwrap()), Metric::new(50.0, 2.0));

        // Ask for a ratchet with an explicit noise-percentage override,
        // that should advance.
        let (diff2, ok2) = m2.ratchet(&pth, Some(10.0));
        assert_eq!(ok2, true);
        assert_eq!(diff2.len(), 2);
        assert_eq!(*(diff2.find(&~"runtime").unwrap()), LikelyNoise);
        assert_eq!(*(diff2.find(&~"throughput").unwrap()), LikelyNoise);

        // Check that it was rewritten.
        let m4 = MetricMap::load(&pth);
        let MetricMap(m4) = m4;
        assert_eq!(m4.len(), 2);
        assert_eq!(*(m4.find(&~"runtime").unwrap()), Metric::new(1100.0, 2.0));
        assert_eq!(*(m4.find(&~"throughput").unwrap()), Metric::new(50.0, 2.0));