1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 //! Support code for rustc's built-in unit-test and micro-benchmarking framework.
14 //! Almost all user code will only be interested in `Bencher` and
15 //! `black_box`. All other interactions (such as writing tests and
16 //! benchmarks themselves) should be done via the `#[test]` and
17 //! `#[bench]` attributes.
19 //! See the [Testing Guide](../guide-testing.html) for more details.
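//!
//! A rough sketch of typical user code (illustrative only, not compiled here):
//!
//! ```ignore
//! extern crate test;
//!
//! #[test]
//! fn arithmetic_works() {
//!     assert_eq!(2u + 2, 4);
//! }
//!
//! #[bench]
//! fn bench_addition(b: &mut test::Bencher) {
//!     b.iter(|| test::black_box(2u + 2));
//! }
//! ```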
21 // Currently, not much of this is meant for users. It is intended to
22 // support the simplest interface possible for representing and
23 // running tests while providing a base that other test frameworks may build on.
26 #![crate_id = "test#0.11.0-pre"]
27 #![comment = "Rust internal test library only used by rustc"]
28 #![license = "MIT/ASL2"]
29 #![crate_type = "rlib"]
30 #![crate_type = "dylib"]
31 #![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
32 html_favicon_url = "http://www.rust-lang.org/favicon.ico",
33 html_root_url = "http://doc.rust-lang.org/")]
35 #![feature(asm, macro_rules, phase)]
36 #![deny(deprecated_owned_vector)]
38 extern crate collections;
41 extern crate serialize;
45 use collections::TreeMap;
47 use time::precise_time_ns;
48 use getopts::{OptGroup, optflag, optopt};
50 use serialize::{json, Decodable};
51 use serialize::json::{Json, ToJson};
53 use term::color::{Color, RED, YELLOW, GREEN, CYAN};
59 use std::from_str::FromStr;
60 use std::io::stdio::StdWriter;
61 use std::io::{File, ChanReader, ChanWriter};
65 use std::string::String;
66 use std::task::TaskBuilder;
68 // to be used by rustc to compile tests in libtest
70 pub use {Bencher, TestName, TestResult, TestDesc,
71 TestDescAndFn, TestOpts, TrFailed, TrIgnored, TrOk,
72 Metric, MetricMap, MetricAdded, MetricRemoved,
73 MetricChange, Improvement, Regression, LikelyNoise,
74 StaticTestFn, StaticTestName, DynTestName, DynTestFn,
75 run_test, test_main, test_main_static, filter_tests,
76 parse_opts, StaticBenchFn, test_main_static_x};
81 // The name of a test. By convention this follows the rules for rust
82 // paths; i.e. it should be a series of identifiers separated by double
83 // colons. This way if some test runner wants to arrange the tests
84 // hierarchically it may.
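// For example, a runner could see a name like "collections::treemap::test_insert"
// (illustrative) and use the `::`-separated segments to group tests by module.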
86 #[deriving(Clone, Eq, TotalEq, Hash)]
88 StaticTestName(&'static str),
92 fn as_slice<'a>(&'a self) -> &'a str {
94 StaticTestName(s) => s,
95 DynTestName(ref s) => s.as_slice()
99 impl Show for TestName {
100 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
101 self.as_slice().fmt(f)
106 enum NamePadding { PadNone, PadOnLeft, PadOnRight }
109 fn padded_name(&self, column_count: uint, align: NamePadding) -> String {
110 use std::num::Saturating;
111 let mut name = String::from_str(self.name.as_slice());
112 let fill = column_count.saturating_sub(name.len());
113 let mut pad = " ".repeat(fill);
117 pad.push_str(name.as_slice());
121 name.push_str(pad.as_slice());
128 /// Represents a benchmark function.
129 pub trait TDynBenchFn {
130 fn run(&self, harness: &mut Bencher);
133 // A function that runs a test. If the function returns successfully,
134 // the test succeeds; if the function fails then the test fails. We
135 // may need to come up with a more clever definition of test in order
136 // to support isolation of tests into tasks.
139 StaticBenchFn(fn(&mut Bencher)),
140 StaticMetricFn(proc(&mut MetricMap)),
141 DynTestFn(proc():Send),
142 DynMetricFn(proc(&mut MetricMap)),
143 DynBenchFn(Box<TDynBenchFn>)
147 fn padding(&self) -> NamePadding {
149 &StaticTestFn(..) => PadNone,
150 &StaticBenchFn(..) => PadOnRight,
151 &StaticMetricFn(..) => PadOnRight,
152 &DynTestFn(..) => PadNone,
153 &DynMetricFn(..) => PadOnRight,
154 &DynBenchFn(..) => PadOnRight,
159 impl fmt::Show for TestFn {
160 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
161 f.write(match *self {
162 StaticTestFn(..) => "StaticTestFn(..)",
163 StaticBenchFn(..) => "StaticBenchFn(..)",
164 StaticMetricFn(..) => "StaticMetricFn(..)",
165 DynTestFn(..) => "DynTestFn(..)",
166 DynMetricFn(..) => "DynMetricFn(..)",
167 DynBenchFn(..) => "DynBenchFn(..)"
172 /// Manager of the benchmarking runs.
174 /// This is fed into functions marked with `#[bench]` to allow for
175 /// set-up & tear-down before running a piece of code repeatedly via a call to `iter`.
184 // The definition of a single test. A test runner will run a list of these.
186 #[deriving(Clone, Show, Eq, TotalEq, Hash)]
187 pub struct TestDesc {
190 pub should_fail: bool,
194 pub struct TestDescAndFn {
199 #[deriving(Clone, Encodable, Decodable, Eq, Show)]
206 pub fn new(value: f64, noise: f64) -> Metric {
207 Metric {value: value, noise: noise}
212 pub struct MetricMap(TreeMap<String,Metric>);
214 impl Clone for MetricMap {
215 fn clone(&self) -> MetricMap {
216 let MetricMap(ref map) = *self;
217 MetricMap(map.clone())
221 /// Analysis of a single change in metric
222 #[deriving(Eq, Show)]
223 pub enum MetricChange {
231 pub type MetricDiff = TreeMap<String,MetricChange>;
233 // The default console test runner. It accepts the command line
234 // arguments and a vector of test_descs.
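// A sketch of how a driver might invoke it (construction of the `tests` vector
// is elided; `std::os::args()` returning Vec<String> is assumed here):
//
//     pub fn main() {
//         let args = std::os::args();
//         test_main(args.as_slice(), tests);
//     }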
235 pub fn test_main(args: &[String], tests: Vec<TestDescAndFn> ) {
237 match parse_opts(args) {
239 Some(Err(msg)) => fail!("{}", msg),
242 match run_tests_console(&opts, tests) {
244 Ok(false) => fail!("Some tests failed"),
245 Err(e) => fail!("io error when running tests: {}", e),
249 // A variant optimized for invocation with a static test vector.
250 // This will fail (intentionally) when fed any dynamic tests, because
251 // it is copying the static values out into a dynamic vector and cannot
252 // copy dynamic values. It is doing this because from this point on
253 // a Vec<TestDescAndFn> is used in order to effect ownership-transfer
254 // semantics into parallel test runners, which in turn requires a Vec
255 // rather than a &[].
256 pub fn test_main_static(args: &[String], tests: &[TestDescAndFn]) {
257 let owned_tests = tests.iter().map(|t| {
259 StaticTestFn(f) => TestDescAndFn { testfn: StaticTestFn(f), desc: t.desc.clone() },
260 StaticBenchFn(f) => TestDescAndFn { testfn: StaticBenchFn(f), desc: t.desc.clone() },
261 _ => fail!("non-static tests passed to test::test_main_static")
264 test_main(args, owned_tests)
267 pub fn test_main_static_x(args: &[~str], tests: &[TestDescAndFn]) {
268 test_main_static(args.iter()
269 .map(|x| x.to_string())
275 pub struct TestOpts {
276 pub filter: Option<Regex>,
277 pub run_ignored: bool,
279 pub run_benchmarks: bool,
280 pub ratchet_metrics: Option<Path>,
281 pub ratchet_noise_percent: Option<f64>,
282 pub save_metrics: Option<Path>,
283 pub test_shard: Option<(uint,uint)>,
284 pub logfile: Option<Path>,
290 fn new() -> TestOpts {
295 run_benchmarks: false,
296 ratchet_metrics: None,
297 ratchet_noise_percent: None,
306 /// Result of parsing the options.
307 pub type OptRes = Result<TestOpts, String>;
309 fn optgroups() -> Vec<getopts::OptGroup> {
310 vec!(getopts::optflag("", "ignored", "Run ignored tests"),
311 getopts::optflag("", "test", "Run tests and not benchmarks"),
312 getopts::optflag("", "bench", "Run benchmarks instead of tests"),
313 getopts::optflag("h", "help", "Display this message (longer with --help)"),
314 getopts::optopt("", "save-metrics", "Location to save bench metrics",
316 getopts::optopt("", "ratchet-metrics",
317 "Location to load and save metrics from. The metrics \
318 loaded cause benchmarks to fail if they run too \
320 getopts::optopt("", "ratchet-noise-percent",
321 "Tests within N% of the recorded metrics will be \
322 considered as passing", "PERCENTAGE"),
323 getopts::optopt("", "logfile", "Write logs to the specified file instead \
325 getopts::optopt("", "test-shard", "Run shard A of B shards of the test suite",
327 getopts::optflag("", "nocapture", "don't capture stdout/stderr of each \
328 task, allow printing directly"))
331 fn usage(binary: &str) {
332 let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
335 The FILTER regex is tested against the name of all tests to run, and
336 only those tests that match are run.
338 By default, all tests are run in parallel. This can be altered with the
339 RUST_TEST_TASKS environment variable when running tests (set it to 1).
341 All tests have their standard output and standard error captured by default.
342 This can be overridden with the --nocapture flag or the RUST_TEST_NOCAPTURE=1
343 environment variable. Logging is not captured by default.
347 \#[test] - Indicates a function is a test to be run. This function
349 \#[bench] - Indicates a function is a benchmark to be run. This
350 function takes one argument (test::Bencher).
351 \#[should_fail] - This function (also labeled with \#[test]) will only pass if
352 the code causes a failure (an assertion failure or fail!)
353 \#[ignore] - When applied to a function which is already attributed as a
354 test, then the test runner will ignore these tests during
355 normal test runs. Running with --ignored will run these
356 tests. This may also be written as \#[ignore(cfg(...))] to
357 ignore the test on certain configurations.",
358 usage = getopts::usage(message.as_slice(),
359 optgroups().as_slice()));
362 // Parses command line arguments into test options
363 pub fn parse_opts(args: &[String]) -> Option<OptRes> {
364 let args_ = args.tail();
366 match getopts::getopts(args_.as_slice(), optgroups().as_slice()) {
368 Err(f) => return Some(Err(f.to_err_msg().to_string()))
371 if matches.opt_present("h") { usage(args[0].as_slice()); return None; }
373 let filter = if matches.free.len() > 0 {
374 let s = matches.free.get(0).as_slice();
375 match Regex::new(s) {
377 Err(e) => return Some(Err(format!("could not parse /{}/: {}", s, e)))
383 let run_ignored = matches.opt_present("ignored");
385 let logfile = matches.opt_str("logfile");
386 let logfile = logfile.map(|s| Path::new(s));
388 let run_benchmarks = matches.opt_present("bench");
389 let run_tests = ! run_benchmarks ||
390 matches.opt_present("test");
392 let ratchet_metrics = matches.opt_str("ratchet-metrics");
393 let ratchet_metrics = ratchet_metrics.map(|s| Path::new(s));
395 let ratchet_noise_percent = matches.opt_str("ratchet-noise-percent");
396 let ratchet_noise_percent =
397 ratchet_noise_percent.map(|s| from_str::<f64>(s.as_slice()).unwrap());
399 let save_metrics = matches.opt_str("save-metrics");
400 let save_metrics = save_metrics.map(|s| Path::new(s));
402 let test_shard = matches.opt_str("test-shard");
403 let test_shard = opt_shard(test_shard.map(|x| x.to_string()));
405 let mut nocapture = matches.opt_present("nocapture");
407 nocapture = os::getenv("RUST_TEST_NOCAPTURE").is_some();
410 let test_opts = TestOpts {
412 run_ignored: run_ignored,
413 run_tests: run_tests,
414 run_benchmarks: run_benchmarks,
415 ratchet_metrics: ratchet_metrics,
416 ratchet_noise_percent: ratchet_noise_percent,
417 save_metrics: save_metrics,
418 test_shard: test_shard,
420 nocapture: nocapture,
426 pub fn opt_shard(maybestr: Option<String>) -> Option<(uint,uint)> {
430 let mut it = s.as_slice().split('.');
431 match (it.next().and_then(from_str::<uint>), it.next().and_then(from_str::<uint>),
433 (Some(a), Some(b), None) => {
435 fail!("tried to run shard {a}.{b}, but {a} is out of bounds \
436 (should be between 1 and {b})", a=a, b=b)
447 #[deriving(Clone, Eq)]
448 pub struct BenchSamples {
449 ns_iter_summ: stats::Summary<f64>,
453 #[deriving(Clone, Eq)]
454 pub enum TestResult {
458 TrMetrics(MetricMap),
459 TrBench(BenchSamples),
462 enum OutputLocation<T> {
463 Pretty(Box<term::Terminal<Box<Writer:Send>>:Send>),
467 struct ConsoleTestState<T> {
468 log_out: Option<File>,
469 out: OutputLocation<T>,
477 failures: Vec<(TestDesc, Vec<u8> )> ,
478 max_name_len: uint, // number of columns to fill when aligning names
481 impl<T: Writer> ConsoleTestState<T> {
482 pub fn new(opts: &TestOpts,
483 _: Option<T>) -> io::IoResult<ConsoleTestState<StdWriter>> {
484 let log_out = match opts.logfile {
485 Some(ref path) => Some(try!(File::create(path))),
488 let out = match term::stdout() {
489 None => Raw(io::stdio::stdout_raw()),
493 Ok(ConsoleTestState {
496 use_color: use_color(),
502 metrics: MetricMap::new(),
503 failures: Vec::new(),
508 pub fn write_ok(&mut self) -> io::IoResult<()> {
509 self.write_pretty("ok", term::color::GREEN)
512 pub fn write_failed(&mut self) -> io::IoResult<()> {
513 self.write_pretty("FAILED", term::color::RED)
516 pub fn write_ignored(&mut self) -> io::IoResult<()> {
517 self.write_pretty("ignored", term::color::YELLOW)
520 pub fn write_metric(&mut self) -> io::IoResult<()> {
521 self.write_pretty("metric", term::color::CYAN)
524 pub fn write_bench(&mut self) -> io::IoResult<()> {
525 self.write_pretty("bench", term::color::CYAN)
528 pub fn write_added(&mut self) -> io::IoResult<()> {
529 self.write_pretty("added", term::color::GREEN)
532 pub fn write_improved(&mut self) -> io::IoResult<()> {
533 self.write_pretty("improved", term::color::GREEN)
536 pub fn write_removed(&mut self) -> io::IoResult<()> {
537 self.write_pretty("removed", term::color::YELLOW)
540 pub fn write_regressed(&mut self) -> io::IoResult<()> {
541 self.write_pretty("regressed", term::color::RED)
544 pub fn write_pretty(&mut self,
546 color: term::color::Color) -> io::IoResult<()> {
548 Pretty(ref mut term) => {
550 try!(term.fg(color));
552 try!(term.write(word.as_bytes()));
558 Raw(ref mut stdout) => stdout.write(word.as_bytes())
562 pub fn write_plain(&mut self, s: &str) -> io::IoResult<()> {
564 Pretty(ref mut term) => term.write(s.as_bytes()),
565 Raw(ref mut stdout) => stdout.write(s.as_bytes())
569 pub fn write_run_start(&mut self, len: uint) -> io::IoResult<()> {
571 let noun = if len != 1 { "tests" } else { "test" };
572 self.write_plain(format!("\nrunning {} {}\n", len, noun).as_slice())
575 pub fn write_test_start(&mut self, test: &TestDesc,
576 align: NamePadding) -> io::IoResult<()> {
577 let name = test.padded_name(self.max_name_len, align);
578 self.write_plain(format!("test {} ... ", name).as_slice())
581 pub fn write_result(&mut self, result: &TestResult) -> io::IoResult<()> {
583 TrOk => self.write_ok(),
584 TrFailed => self.write_failed(),
585 TrIgnored => self.write_ignored(),
586 TrMetrics(ref mm) => {
587 try!(self.write_metric());
588 self.write_plain(format!(": {}", fmt_metrics(mm)).as_slice())
591 try!(self.write_bench());
592 self.write_plain(format!(": {}",
593 fmt_bench_samples(bs)).as_slice())
596 self.write_plain("\n")
599 pub fn write_log(&mut self, test: &TestDesc,
600 result: &TestResult) -> io::IoResult<()> {
604 let s = format!("{} {}\n", match *result {
605 TrOk => "ok".to_string(),
606 TrFailed => "failed".to_string(),
607 TrIgnored => "ignored".to_string(),
608 TrMetrics(ref mm) => fmt_metrics(mm),
609 TrBench(ref bs) => fmt_bench_samples(bs)
610 }, test.name.as_slice());
611 o.write(s.as_bytes())
616 pub fn write_failures(&mut self) -> io::IoResult<()> {
617 try!(self.write_plain("\nfailures:\n"));
618 let mut failures = Vec::new();
619 let mut fail_out = String::new();
620 for &(ref f, ref stdout) in self.failures.iter() {
621 failures.push(f.name.to_str());
622 if stdout.len() > 0 {
623 fail_out.push_str(format!("---- {} stdout ----\n\t",
624 f.name.as_slice()).as_slice());
625 let output = str::from_utf8_lossy(stdout.as_slice());
626 fail_out.push_str(output.as_slice()
627 .replace("\n", "\n\t")
629 fail_out.push_str("\n");
632 if fail_out.len() > 0 {
633 try!(self.write_plain("\n"));
634 try!(self.write_plain(fail_out.as_slice()));
637 try!(self.write_plain("\nfailures:\n"));
638 failures.as_mut_slice().sort();
639 for name in failures.iter() {
640 try!(self.write_plain(format!(" {}\n",
641 name.as_slice()).as_slice()));
646 pub fn write_metric_diff(&mut self, diff: &MetricDiff) -> io::IoResult<()> {
648 let mut improved = 0;
649 let mut regressed = 0;
653 for (k, v) in diff.iter() {
655 LikelyNoise => noise += 1,
658 try!(self.write_added());
659 try!(self.write_plain(format!(": {}\n", *k).as_slice()));
663 try!(self.write_removed());
664 try!(self.write_plain(format!(": {}\n", *k).as_slice()));
666 Improvement(pct) => {
668 try!(self.write_plain(format!(": {}", *k).as_slice()));
669 try!(self.write_improved());
670 try!(self.write_plain(format!(" by {:.2f}%\n",
671 pct as f64).as_slice()));
675 try!(self.write_plain(format!(": {}", *k).as_slice()));
676 try!(self.write_regressed());
677 try!(self.write_plain(format!(" by {:.2f}%\n",
678 pct as f64).as_slice()));
682 try!(self.write_plain(format!("result of ratchet: {} metrics added, \
683 {} removed, {} improved, {} regressed, \
685 added, removed, improved, regressed,
688 try!(self.write_plain("updated ratchet file\n"));
690 try!(self.write_plain("left ratchet file untouched\n"));
695 pub fn write_run_finish(&mut self,
696 ratchet_metrics: &Option<Path>,
697 ratchet_pct: Option<f64>) -> io::IoResult<bool> {
698 assert!(self.passed + self.failed + self.ignored + self.measured == self.total);
700 let ratchet_success = match *ratchet_metrics {
703 try!(self.write_plain(format!("\nusing metrics ratchet: {}\n",
704 pth.display()).as_slice()));
708 try!(self.write_plain(format!("with noise-tolerance \
712 let (diff, ok) = self.metrics.ratchet(pth, ratchet_pct);
713 try!(self.write_metric_diff(&diff));
718 let test_success = self.failed == 0u;
720 try!(self.write_failures());
723 let success = ratchet_success && test_success;
725 try!(self.write_plain("\ntest result: "));
727 // There's no parallelism at this point so it's safe to use color
728 try!(self.write_ok());
730 try!(self.write_failed());
732 let s = format!(". {} passed; {} failed; {} ignored; {} measured\n\n",
733 self.passed, self.failed, self.ignored, self.measured);
734 try!(self.write_plain(s.as_slice()));
739 pub fn fmt_metrics(mm: &MetricMap) -> String {
740 let MetricMap(ref mm) = *mm;
741 let v : Vec<String> = mm.iter()
742 .map(|(k,v)| format!("{}: {} (+/- {})", *k,
743 v.value as f64, v.noise as f64))
745 v.connect(", ").to_string()
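// Illustrative output of `fmt_bench_samples` (numbers made up): with a byte
// count set it reads `     1200 ns/iter (+/- 50) = 833 MB/s`; without one the
// trailing `= ... MB/s` part is omitted.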
748 pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
750 format!("{:>9} ns/iter (+/- {}) = {} MB/s",
751 bs.ns_iter_summ.median as uint,
752 (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint,
755 format!("{:>9} ns/iter (+/- {})",
756 bs.ns_iter_summ.median as uint,
757 (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint)
761 // A simple console test runner
762 pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn> ) -> io::IoResult<bool> {
764 fn callback<T: Writer>(event: &TestEvent, st: &mut ConsoleTestState<T>) -> io::IoResult<()> {
765 match (*event).clone() {
766 TeFiltered(ref filtered_tests) => st.write_run_start(filtered_tests.len()),
767 TeWait(ref test, padding) => st.write_test_start(test, padding),
768 TeResult(test, result, stdout) => {
769 try!(st.write_log(&test, &result));
770 try!(st.write_result(&result));
772 TrOk => st.passed += 1,
773 TrIgnored => st.ignored += 1,
775 let tname = test.name.as_slice();
776 let MetricMap(mm) = mm;
777 for (k,v) in mm.iter() {
779 .insert_metric(format!("{}.{}",
788 st.metrics.insert_metric(test.name.as_slice(),
789 bs.ns_iter_summ.median,
790 bs.ns_iter_summ.max - bs.ns_iter_summ.min);
795 st.failures.push((test, stdout));
803 let mut st = try!(ConsoleTestState::new(opts, None::<StdWriter>));
804 fn len_if_padded(t: &TestDescAndFn) -> uint {
805 match t.testfn.padding() {
807 PadOnLeft | PadOnRight => t.desc.name.as_slice().len(),
810 match tests.iter().max_by(|t|len_if_padded(*t)) {
812 let n = t.desc.name.as_slice();
813 st.max_name_len = n.len();
817 try!(run_tests(opts, tests, |x| callback(&x, &mut st)));
818 match opts.save_metrics {
821 try!(st.metrics.save(pth));
822 try!(st.write_plain(format!("\nmetrics saved to: {}",
823 pth.display()).as_slice()));
826 return st.write_run_finish(&opts.ratchet_metrics, opts.ratchet_noise_percent);
830 fn should_sort_failures_before_printing_them() {
831 use std::io::MemWriter;
834 let test_a = TestDesc {
835 name: StaticTestName("a"),
840 let test_b = TestDesc {
841 name: StaticTestName("b"),
846 let mut st = ConsoleTestState {
848 out: Raw(MemWriter::new()),
856 metrics: MetricMap::new(),
857 failures: vec!((test_b, Vec::new()), (test_a, Vec::new()))
860 st.write_failures().unwrap();
861 let s = match st.out {
862 Raw(ref m) => str::from_utf8_lossy(m.get_ref()),
863 Pretty(_) => unreachable!()
866 let apos = s.as_slice().find_str("a").unwrap();
867 let bpos = s.as_slice().find_str("b").unwrap();
868 assert!(apos < bpos);
871 fn use_color() -> bool { return get_concurrency() == 1; }
875 TeFiltered(Vec<TestDesc> ),
876 TeWait(TestDesc, NamePadding),
877 TeResult(TestDesc, TestResult, Vec<u8> ),
880 pub type MonitorMsg = (TestDesc, TestResult, Vec<u8> );
882 fn run_tests(opts: &TestOpts,
883 tests: Vec<TestDescAndFn> ,
884 callback: |e: TestEvent| -> io::IoResult<()>) -> io::IoResult<()> {
885 let filtered_tests = filter_tests(opts, tests);
886 let filtered_descs = filtered_tests.iter()
887 .map(|t| t.desc.clone())
890 try!(callback(TeFiltered(filtered_descs)));
892 let (filtered_tests, filtered_benchs_and_metrics) =
893 filtered_tests.partition(|e| {
895 StaticTestFn(_) | DynTestFn(_) => true,
900 // It's tempting to just spawn all the tests at once, but since we have
901 // many tests that run in other processes we would be making a big mess.
902 let concurrency = get_concurrency();
904 let mut remaining = filtered_tests;
908 let (tx, rx) = channel::<MonitorMsg>();
910 while pending > 0 || !remaining.is_empty() {
911 while pending < concurrency && !remaining.is_empty() {
912 let test = remaining.pop().unwrap();
913 if concurrency == 1 {
914 // We are doing one test at a time so we can print the name
915 // of the test before we run it. Useful for debugging tests
916 // that hang forever.
917 try!(callback(TeWait(test.desc.clone(), test.testfn.padding())));
919 run_test(opts, !opts.run_tests, test, tx.clone());
923 let (desc, result, stdout) = rx.recv();
924 if concurrency != 1 {
925 try!(callback(TeWait(desc.clone(), PadNone)));
927 try!(callback(TeResult(desc, result, stdout)));
931 // All benchmarks run at the end, in serial.
932 // (this includes metric fns)
933 for b in filtered_benchs_and_metrics.move_iter() {
934 try!(callback(TeWait(b.desc.clone(), b.testfn.padding())));
935 run_test(opts, !opts.run_benchmarks, b, tx.clone());
936 let (test, result, stdout) = rx.recv();
937 try!(callback(TeResult(test, result, stdout)));
942 fn get_concurrency() -> uint {
944 match os::getenv("RUST_TEST_TASKS") {
946 let opt_n: Option<uint> = FromStr::from_str(s.as_slice());
948 Some(n) if n > 0 => n,
949 _ => fail!("RUST_TEST_TASKS is `{}`, should be a positive integer.", s)
953 rt::default_sched_threads()
958 pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
959 let mut filtered = tests;
961 // Remove tests that don't match the test filter
962 filtered = match opts.filter {
966 .filter(|test| re.is_match(test.desc.name.as_slice())).collect()
970 // Maybe pull out the ignored tests and unignore them
971 filtered = if !opts.run_ignored {
974 fn filter(test: TestDescAndFn) -> Option<TestDescAndFn> {
975 if test.desc.ignore {
976 let TestDescAndFn {desc, testfn} = test;
978 desc: TestDesc {ignore: false, ..desc},
985 filtered.move_iter().filter_map(|x| filter(x)).collect()
988 // Sort the tests alphabetically
989 filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(&t2.desc.name.as_slice()));
991 // Shard the remaining tests, if sharding requested.
992 match opts.test_shard {
995 filtered.move_iter().enumerate()
996 // note: using a - 1 so that the valid shards, for example, are
997 // 1.2 and 2.2 instead of 0.2 and 1.2
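// e.g. (illustrative) `--test-shard=1.2` keeps the tests at indices
// 0, 2, 4, ... while `--test-shard=2.2` keeps those at 1, 3, 5, ...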
998 .filter(|&(i,_)| i % b == (a - 1))
1005 pub fn run_test(opts: &TestOpts,
1007 test: TestDescAndFn,
1008 monitor_ch: Sender<MonitorMsg>) {
1010 let TestDescAndFn {desc, testfn} = test;
1012 if force_ignore || desc.ignore {
1013 monitor_ch.send((desc, TrIgnored, Vec::new()));
1017 fn run_test_inner(desc: TestDesc,
1018 monitor_ch: Sender<MonitorMsg>,
1020 testfn: proc():Send) {
1022 let (tx, rx) = channel();
1023 let mut reader = ChanReader::new(rx);
1024 let stdout = ChanWriter::new(tx.clone());
1025 let stderr = ChanWriter::new(tx);
1026 let mut task = TaskBuilder::new().named(match desc.name {
1027 DynTestName(ref name) => name.clone().to_string(),
1028 StaticTestName(name) => name.to_string(),
1031 drop((stdout, stderr));
1033 task.opts.stdout = Some(box stdout as Box<Writer:Send>);
1034 task.opts.stderr = Some(box stderr as Box<Writer:Send>);
1036 let result_future = task.future_result();
1039 let stdout = reader.read_to_end().unwrap().move_iter().collect();
1040 let task_result = result_future.recv();
1041 let test_result = calc_result(&desc, task_result.is_ok());
1042 monitor_ch.send((desc.clone(), test_result, stdout));
1047 DynBenchFn(bencher) => {
1048 let bs = ::bench::benchmark(|harness| bencher.run(harness));
1049 monitor_ch.send((desc, TrBench(bs), Vec::new()));
1052 StaticBenchFn(benchfn) => {
1053 let bs = ::bench::benchmark(|harness| benchfn(harness));
1054 monitor_ch.send((desc, TrBench(bs), Vec::new()));
1058 let mut mm = MetricMap::new();
1060 monitor_ch.send((desc, TrMetrics(mm), Vec::new()));
1063 StaticMetricFn(f) => {
1064 let mut mm = MetricMap::new();
1066 monitor_ch.send((desc, TrMetrics(mm), Vec::new()));
1069 DynTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture, f),
1070 StaticTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture,
1075 fn calc_result(desc: &TestDesc, task_succeeded: bool) -> TestResult {
1077 if desc.should_fail { TrFailed }
1080 if desc.should_fail { TrOk }
1086 impl ToJson for Metric {
1087 fn to_json(&self) -> json::Json {
1088 let mut map = box TreeMap::new();
1089 map.insert("value".to_string(), json::Number(self.value));
1090 map.insert("noise".to_string(), json::Number(self.noise));
1098 pub fn new() -> MetricMap {
1099 MetricMap(TreeMap::new())
1102 /// Load a MetricMap from a file.
1106 /// This function will fail if the path does not exist or the path does not
1107 /// contain a valid metric map.
1108 pub fn load(p: &Path) -> MetricMap {
1109 assert!(p.exists());
1110 let mut f = File::open(p).unwrap();
1111 let value = json::from_reader(&mut f as &mut io::Reader).unwrap();
1112 let mut decoder = json::Decoder::new(value);
1113 MetricMap(match Decodable::decode(&mut decoder) {
1115 Err(e) => fail!("failure decoding JSON: {}", e)
1119 /// Write a MetricMap to a file.
1120 pub fn save(&self, p: &Path) -> io::IoResult<()> {
1121 let mut file = try!(File::create(p));
1122 let MetricMap(ref map) = *self;
1124 // FIXME(pcwalton): Yuck.
1125 let mut new_map = TreeMap::new();
1126 for (ref key, ref value) in map.iter() {
1127 new_map.insert(key.to_string(), (*value).clone());
1130 new_map.to_json().to_pretty_writer(&mut file)
1133 /// Compare against another MetricMap. Optionally compare all
1134 /// measurements in the maps using the provided `noise_pct` as a
1135 /// percentage of each value to consider noise. If `None`, each
1136 /// measurement's noise threshold is independently chosen as the
1137 /// maximum of that measurement's recorded noise quantity in either map.
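///
/// As a worked example (same numbers as `ratchet_test` below): if the old map
/// records `runtime: 1000.0 (+/- 2.0)` and `self` records
/// `runtime: 1100.0 (+/- 2.0)`, the delta of 100 exceeds the noise of 2, a
/// positive noise means smaller values are better, so the entry comes back as
/// `Regression(10.0)` (100 / 1000 * 100%).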
1139 pub fn compare_to_old(&self, old: &MetricMap,
1140 noise_pct: Option<f64>) -> MetricDiff {
1141 let mut diff : MetricDiff = TreeMap::new();
1142 let MetricMap(ref selfmap) = *self;
1143 let MetricMap(ref old) = *old;
1144 for (k, vold) in old.iter() {
1145 let r = match selfmap.find(k) {
1146 None => MetricRemoved,
1148 let delta = v.value - vold.value;
1149 let noise = match noise_pct {
1150 None => vold.noise.abs().max(v.noise.abs()),
1151 Some(pct) => vold.value * pct / 100.0
1153 if delta.abs() <= noise {
1156 let pct = delta.abs() / vold.value.max(f64::EPSILON) * 100.0;
1157 if vold.noise < 0.0 {
1158 // When 'noise' is negative, it means we want
1159 // to see deltas that go up over time, and can
1160 // only tolerate slight negative movement.
1167 // When 'noise' is positive, it means we want
1168 // to see deltas that go down over time, and
1169 // can only tolerate slight positive movements.
1179 diff.insert((*k).clone(), r);
1181 let MetricMap(ref map) = *self;
1182 for (k, _) in map.iter() {
1183 if !diff.contains_key(k) {
1184 diff.insert((*k).clone(), MetricAdded);
1190 /// Insert a named `value` (+/- `noise`) metric into the map. The value
1191 /// must be non-negative. The `noise` indicates the uncertainty of the
1192 /// metric, which doubles as the "noise range" of acceptable
1193 /// pairwise-regressions on this named value, when comparing from one
1194 /// metric to the next using `compare_to_old`.
1196 /// If `noise` is positive, then it means this metric is of a value
1197 /// you want to see grow smaller, so a change larger than `noise` in the
1198 /// positive direction represents a regression.
1200 /// If `noise` is negative, then it means this metric is of a value
1201 /// you want to see grow larger, so a change larger than `noise` in the
1202 /// negative direction represents a regression.
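///
/// For example (values borrowed from the tests below),
/// `mm.insert_metric("runtime", 1000.0, 2.0)` as in `ratchet_test` records a
/// metric where smaller is better, while
/// `mm.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0)` as in
/// `test_metricmap_compare` records one where larger is better.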
1203 pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
1208 let MetricMap(ref mut map) = *self;
1209 map.insert(name.to_string(), m);
1212 /// Attempt to "ratchet" an external metric file. This involves loading
1213 /// metrics from a metric file (if it exists), comparing against
1214 /// the metrics in `self` using `compare_to_old`, and rewriting the
1215 /// file to contain the metrics in `self` if none of the
1216 /// `MetricChange`s are `Regression`. Returns the diff as well
1217 /// as a boolean indicating whether the ratchet succeeded.
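///
/// Sketch of expected use (the path is illustrative): after a run,
/// `metrics.ratchet(&Path::new("ratchet.json"), None)` yields the diff plus
/// whether the file was rewritten; `ratchet_test` below exercises both the
/// failing and the advancing case.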
1218 pub fn ratchet(&self, p: &Path, pct: Option<f64>) -> (MetricDiff, bool) {
1219 let old = if p.exists() {
1225 let diff : MetricDiff = self.compare_to_old(&old, pct);
1226 let ok = diff.iter().all(|(_, v)| {
1228 Regression(_) => false,
1234 self.save(p).unwrap();
1243 /// A function that is opaque to the optimizer, to allow benchmarks to
1244 /// pretend to use outputs to assist in avoiding dead-code elimination.
1247 /// This function is a no-op, and does not even read from `dummy`.
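///
/// A typical (illustrative) use inside a benchmark, keeping a computed value
/// alive without printing it:
///
/// ```ignore
/// b.iter(|| {
///     let mut sum = 0u;
///     for i in range(0u, 1000) { sum += i; }
///     test::black_box(sum);
/// });
/// ```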
1248 pub fn black_box<T>(dummy: T) {
1249 // we need to "use" the argument in some way LLVM can't
1251 unsafe {asm!("" : : "r"(&dummy))}
1256 /// Callback for benchmark functions to run in their body.
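///
/// A minimal sketch of a benchmark body (names illustrative); the closure
/// passed to `iter` is the code that gets timed:
///
/// ```ignore
/// #[bench]
/// fn bench_vec_push(b: &mut test::Bencher) {
///     b.iter(|| {
///         let mut v: Vec<uint> = Vec::new();
///         for i in range(0u, 100) { v.push(i); }
///         v // returning the value helps keep the work from being optimized out
///     });
/// }
/// ```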
1257 pub fn iter<T>(&mut self, inner: || -> T) {
1258 self.ns_start = precise_time_ns();
1259 let k = self.iterations;
1260 for _ in range(0u64, k) {
1263 self.ns_end = precise_time_ns();
1266 pub fn ns_elapsed(&mut self) -> u64 {
1267 if self.ns_start == 0 || self.ns_end == 0 {
1270 self.ns_end - self.ns_start
1274 pub fn ns_per_iter(&mut self) -> u64 {
1275 if self.iterations == 0 {
1278 self.ns_elapsed() / cmp::max(self.iterations, 1)
1282 pub fn bench_n(&mut self, n: u64, f: |&mut Bencher|) {
1283 self.iterations = n;
1287 // This is a more statistics-driven benchmark algorithm
1288 pub fn auto_bench(&mut self, f: |&mut Bencher|) -> stats::Summary<f64> {
1290 // Initial bench run to get ballpark figure.
1292 self.bench_n(n, |x| f(x));
1294 // Try to estimate iter count for 1ms falling back to 1m
1295 // iterations if first run took < 1ns.
1296 if self.ns_per_iter() == 0 {
1299 n = 1_000_000 / cmp::max(self.ns_per_iter(), 1);
1301 // if the first run took more than 1ms we don't want to just
1302 // be left doing 0 iterations on every loop. The unfortunate
1303 // side effect of not being able to do as many runs is
1304 // automatically handled by the statistical analysis below
1305 // (i.e. larger error bars).
1306 if n == 0 { n = 1; }
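// Illustrative arithmetic (numbers made up): if the ballpark run above showed
// roughly 250 ns/iter, then n = 1_000_000 / 250 = 4_000, so each sample below
// should take on the order of 1ms.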
1308 let mut total_run = 0;
1309 let samples : &mut [f64] = [0.0_f64, ..50];
1311 let loop_start = precise_time_ns();
1313 for p in samples.mut_iter() {
1314 self.bench_n(n, |x| f(x));
1315 *p = self.ns_per_iter() as f64;
1318 stats::winsorize(samples, 5.0);
1319 let summ = stats::Summary::new(samples);
1321 for p in samples.mut_iter() {
1322 self.bench_n(5 * n, |x| f(x));
1323 *p = self.ns_per_iter() as f64;
1326 stats::winsorize(samples, 5.0);
1327 let summ5 = stats::Summary::new(samples);
1329 let now = precise_time_ns();
1330 let loop_run = now - loop_start;
1332 // If we've run for 100ms and seem to have converged to a stable median.
1334 if loop_run > 100_000_000 &&
1335 summ.median_abs_dev_pct < 1.0 &&
1336 summ.median - summ5.median < summ5.median_abs_dev {
1340 total_run += loop_run;
1341 // Longest we ever run for is 3s.
1342 if total_run > 3_000_000_000 {
1353 use super::{Bencher, BenchSamples};
1355 pub fn benchmark(f: |&mut Bencher|) -> BenchSamples {
1356 let mut bs = Bencher {
1363 let ns_iter_summ = bs.auto_bench(f);
1365 let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
1366 let iter_s = 1_000_000_000 / ns_iter;
1367 let mb_s = (bs.bytes * iter_s) / 1_000_000;
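// Illustrative arithmetic (numbers made up): 1_000 bytes per iteration at a
// median of 200 ns/iter gives iter_s = 5_000_000 and mb_s = 5_000.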
1370 ns_iter_summ: ns_iter_summ,
1378 use test::{TrFailed, TrIgnored, TrOk, filter_tests, parse_opts,
1379 TestDesc, TestDescAndFn, TestOpts, run_test,
1380 Metric, MetricMap, MetricAdded, MetricRemoved,
1381 Improvement, Regression, LikelyNoise,
1382 StaticTestName, DynTestName, DynTestFn};
1383 use std::io::TempDir;
1386 pub fn do_not_run_ignored_tests() {
1388 let desc = TestDescAndFn {
1390 name: StaticTestName("whatever"),
1394 testfn: DynTestFn(proc() f()),
1396 let (tx, rx) = channel();
1397 run_test(&TestOpts::new(), false, desc, tx);
1398 let (_, res, _) = rx.recv();
1399 assert!(res != TrOk);
1403 pub fn ignored_tests_result_in_ignored() {
1405 let desc = TestDescAndFn {
1407 name: StaticTestName("whatever"),
1411 testfn: DynTestFn(proc() f()),
1413 let (tx, rx) = channel();
1414 run_test(&TestOpts::new(), false, desc, tx);
1415 let (_, res, _) = rx.recv();
1416 assert!(res == TrIgnored);
1420 fn test_should_fail() {
1422 let desc = TestDescAndFn {
1424 name: StaticTestName("whatever"),
1428 testfn: DynTestFn(proc() f()),
1430 let (tx, rx) = channel();
1431 run_test(&TestOpts::new(), false, desc, tx);
1432 let (_, res, _) = rx.recv();
1433 assert!(res == TrOk);
1437 fn test_should_fail_but_succeeds() {
1439 let desc = TestDescAndFn {
1441 name: StaticTestName("whatever"),
1445 testfn: DynTestFn(proc() f()),
1447 let (tx, rx) = channel();
1448 run_test(&TestOpts::new(), false, desc, tx);
1449 let (_, res, _) = rx.recv();
1450 assert!(res == TrFailed);
1454 fn first_free_arg_should_be_a_filter() {
1455 let args = vec!("progname".to_string(), "some_regex_filter".to_string());
1456 let opts = match parse_opts(args.as_slice()) {
1458 _ => fail!("Malformed arg in first_free_arg_should_be_a_filter")
1460 assert!(opts.filter.expect("should've found filter").is_match("some_regex_filter"))
1464 fn parse_ignored_flag() {
1465 let args = vec!("progname".to_string(),
1466 "filter".to_string(),
1467 "--ignored".to_string());
1468 let opts = match parse_opts(args.as_slice()) {
1470 _ => fail!("Malformed arg in parse_ignored_flag")
1472 assert!((opts.run_ignored));
1476 pub fn filter_for_ignored_option() {
1477 // When we run ignored tests the test filter should filter out all the
1478 // unignored tests and flip the ignore flag on the rest to false
1480 let mut opts = TestOpts::new();
1481 opts.run_tests = true;
1482 opts.run_ignored = true;
1487 name: StaticTestName("1"),
1491 testfn: DynTestFn(proc() {}),
1495 name: StaticTestName("2"),
1499 testfn: DynTestFn(proc() {}),
1501 let filtered = filter_tests(&opts, tests);
1503 assert_eq!(filtered.len(), 1);
1504 assert_eq!(filtered.get(0).desc.name.to_str().to_string(),
1506 assert!(filtered.get(0).desc.ignore == false);
1510 pub fn sort_tests() {
1511 let mut opts = TestOpts::new();
1512 opts.run_tests = true;
1515 vec!("sha1::test".to_string(),
1516 "int::test_to_str".to_string(),
1517 "int::test_pow".to_string(),
1518 "test::do_not_run_ignored_tests".to_string(),
1519 "test::ignored_tests_result_in_ignored".to_string(),
1520 "test::first_free_arg_should_be_a_filter".to_string(),
1521 "test::parse_ignored_flag".to_string(),
1522 "test::filter_for_ignored_option".to_string(),
1523 "test::sort_tests".to_string());
1527 let mut tests = Vec::new();
1528 for name in names.iter() {
1529 let test = TestDescAndFn {
1531 name: DynTestName((*name).clone()),
1535 testfn: DynTestFn(testfn),
1541 let filtered = filter_tests(&opts, tests);
1544 vec!("int::test_pow".to_string(),
1545 "int::test_to_str".to_string(),
1546 "sha1::test".to_string(),
1547 "test::do_not_run_ignored_tests".to_string(),
1548 "test::filter_for_ignored_option".to_string(),
1549 "test::first_free_arg_should_be_a_filter".to_string(),
1550 "test::ignored_tests_result_in_ignored".to_string(),
1551 "test::parse_ignored_flag".to_string(),
1552 "test::sort_tests".to_string());
1554 for (a, b) in expected.iter().zip(filtered.iter()) {
1555 assert!(*a == b.desc.name.to_str().to_string());
1560 pub fn filter_tests_regex() {
1561 let mut opts = TestOpts::new();
1562 opts.filter = Some(::regex::Regex::new("a.*b.+c").unwrap());
1564 let mut names = ["yes::abXc", "yes::aXXXbXXXXc",
1565 "no::XYZ", "no::abc"];
1569 let tests = names.iter().map(|name| {
1572 name: DynTestName(name.to_string()),
1576 testfn: DynTestFn(test_fn)
1579 let filtered = filter_tests(&opts, tests);
1581 let expected: Vec<&str> =
1582 names.iter().map(|&s| s).filter(|name| name.starts_with("yes")).collect();
1584 assert_eq!(filtered.len(), expected.len());
1585 for (test, expected_name) in filtered.iter().zip(expected.iter()) {
1586 assert_eq!(test.desc.name.as_slice(), *expected_name);
1591 pub fn test_metricmap_compare() {
1592 let mut m1 = MetricMap::new();
1593 let mut m2 = MetricMap::new();
1594 m1.insert_metric("in-both-noise", 1000.0, 200.0);
1595 m2.insert_metric("in-both-noise", 1100.0, 200.0);
1597 m1.insert_metric("in-first-noise", 1000.0, 2.0);
1598 m2.insert_metric("in-second-noise", 1000.0, 2.0);
1600 m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
1601 m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);
1603 m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
1604 m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);
1606 m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
1607 m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);
1609 m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
1610 m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);
1612 let diff1 = m2.compare_to_old(&m1, None);
1614 assert_eq!(*(diff1.find(&"in-both-noise".to_string()).unwrap()), LikelyNoise);
1615 assert_eq!(*(diff1.find(&"in-first-noise".to_string()).unwrap()), MetricRemoved);
1616 assert_eq!(*(diff1.find(&"in-second-noise".to_string()).unwrap()), MetricAdded);
1617 assert_eq!(*(diff1.find(&"in-both-want-downwards-but-regressed".to_string()).unwrap()),
1619 assert_eq!(*(diff1.find(&"in-both-want-downwards-and-improved".to_string()).unwrap()),
1621 assert_eq!(*(diff1.find(&"in-both-want-upwards-but-regressed".to_string()).unwrap()),
1623 assert_eq!(*(diff1.find(&"in-both-want-upwards-and-improved".to_string()).unwrap()),
1624 Improvement(100.0));
1625 assert_eq!(diff1.len(), 7);
1627 let diff2 = m2.compare_to_old(&m1, Some(200.0));
1629 assert_eq!(*(diff2.find(&"in-both-noise".to_string()).unwrap()), LikelyNoise);
1630 assert_eq!(*(diff2.find(&"in-first-noise".to_string()).unwrap()), MetricRemoved);
1631 assert_eq!(*(diff2.find(&"in-second-noise".to_string()).unwrap()), MetricAdded);
1632 assert_eq!(*(diff2.find(&"in-both-want-downwards-but-regressed".to_string()).unwrap()),
1634 assert_eq!(*(diff2.find(&"in-both-want-downwards-and-improved".to_string()).unwrap()),
1636 assert_eq!(*(diff2.find(&"in-both-want-upwards-but-regressed".to_string()).unwrap()),
1638 assert_eq!(*(diff2.find(&"in-both-want-upwards-and-improved".to_string()).unwrap()),
1640 assert_eq!(diff2.len(), 7);
1644 pub fn ratchet_test() {
1646 let dpth = TempDir::new("test-ratchet").expect("missing test for ratchet");
1647 let pth = dpth.path().join("ratchet.json");
1649 let mut m1 = MetricMap::new();
1650 m1.insert_metric("runtime", 1000.0, 2.0);
1651 m1.insert_metric("throughput", 50.0, 2.0);
1653 let mut m2 = MetricMap::new();
1654 m2.insert_metric("runtime", 1100.0, 2.0);
1655 m2.insert_metric("throughput", 50.0, 2.0);
1657 m1.save(&pth).unwrap();
1659 // Ask for a ratchet that should fail to advance.
1660 let (diff1, ok1) = m2.ratchet(&pth, None);
1661 assert_eq!(ok1, false);
1662 assert_eq!(diff1.len(), 2);
1663 assert_eq!(*(diff1.find(&"runtime".to_string()).unwrap()), Regression(10.0));
1664 assert_eq!(*(diff1.find(&"throughput".to_string()).unwrap()), LikelyNoise);
1666 // Check that it was not rewritten.
1667 let m3 = MetricMap::load(&pth);
1668 let MetricMap(m3) = m3;
1669 assert_eq!(m3.len(), 2);
1670 assert_eq!(*(m3.find(&"runtime".to_string()).unwrap()), Metric::new(1000.0, 2.0));
1671 assert_eq!(*(m3.find(&"throughput".to_string()).unwrap()), Metric::new(50.0, 2.0));
1673 // Ask for a ratchet with an explicit noise-percentage override,
1674 // that should advance.
1675 let (diff2, ok2) = m2.ratchet(&pth, Some(10.0));
1676 assert_eq!(ok2, true);
1677 assert_eq!(diff2.len(), 2);
1678 assert_eq!(*(diff2.find(&"runtime".to_string()).unwrap()), LikelyNoise);
1679 assert_eq!(*(diff2.find(&"throughput".to_string()).unwrap()), LikelyNoise);
1681 // Check that it was rewritten.
1682 let m4 = MetricMap::load(&pth);
1683 let MetricMap(m4) = m4;
1684 assert_eq!(m4.len(), 2);
1685 assert_eq!(*(m4.find(&"runtime".to_string()).unwrap()), Metric::new(1100.0, 2.0));
1686 assert_eq!(*(m4.find(&"throughput".to_string()).unwrap()), Metric::new(50.0, 2.0));