// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! Support code for rustc's built-in unit-test and micro-benchmarking
//! framework.
//!
//! Almost all user code will only be interested in `Bencher` and
//! `black_box`. All other interactions (such as writing tests and
//! benchmarks themselves) should be done via the `#[test]` and
//! `#[bench]` attributes.
//!
//! See the [Testing Guide](../guide-testing.html) for more details.
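//!
//! # Example
//!
//! A representative benchmark, as a sketch (the function name and body are
//! illustrative, not part of this library):
//!
//! ```rust
//! extern crate test;
//!
//! use test::Bencher;
//!
//! #[bench]
//! fn bench_xor_1000_ints(b: &mut Bencher) {
//!     b.iter(|| {
//!         // `black_box` keeps the computation from being optimized away
//!         test::black_box(range(0u, 1000).fold(0, |old, new| old ^ new));
//!     });
//! }
//! ```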

// Currently, not much of this is meant for users. It is intended to
// support the simplest interface possible for representing and
// running tests while providing a base that other test frameworks may
// build on.

#![crate_id = "test#0.11.0-pre"]
#![comment = "Rust internal test library only used by rustc"]
#![license = "MIT/ASL2"]
#![crate_type = "rlib"]
#![crate_type = "dylib"]
#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
       html_favicon_url = "http://www.rust-lang.org/favicon.ico",
       html_root_url = "http://doc.rust-lang.org/")]

#![feature(asm, macro_rules, phase)]

extern crate getopts;
extern crate regex;
extern crate serialize;
extern crate term;
extern crate time;

use std::collections::TreeMap;

use time::precise_time_ns;
use getopts::{OptGroup, optflag, optopt};
use regex::Regex;
use serialize::{json, Decodable};
use serialize::json::{Json, ToJson};
use term::color::{Color, RED, YELLOW, GREEN, CYAN};

use std::cmp;
use std::f64;
use std::fmt;
use std::fmt::Show;
use std::from_str::FromStr;
use std::io::stdio::StdWriter;
use std::io::{File, ChanReader, ChanWriter};
use std::io;
use std::os;
use std::str;
use std::string::String;
use std::task::TaskBuilder;

// to be used by rustc to compile tests in libtest
pub mod test {
    pub use {Bencher, TestName, TestResult, TestDesc,
             TestDescAndFn, TestOpts, TrFailed, TrIgnored, TrOk,
             Metric, MetricMap, MetricAdded, MetricRemoved,
             MetricChange, Improvement, Regression, LikelyNoise,
             StaticTestFn, StaticTestName, DynTestName, DynTestFn,
             run_test, test_main, test_main_static, filter_tests,
             parse_opts, StaticBenchFn};
}

pub mod stats;

// The name of a test. By convention this follows the rules for rust
// paths; i.e. it should be a series of identifiers separated by double
// colons. This way if some test runner wants to arrange the tests
// hierarchically it may.
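// For example, a runner could group a test named
// "collections::treemap::test_insert" (an illustrative name) under
// "collections" and then "treemap".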
#[deriving(Clone, PartialEq, Eq, Hash)]
pub enum TestName {
    StaticTestName(&'static str),
    DynTestName(String)
}

impl TestName {
    fn as_slice<'a>(&'a self) -> &'a str {
        match *self {
            StaticTestName(s) => s,
            DynTestName(ref s) => s.as_slice()
        }
    }
}

impl Show for TestName {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        self.as_slice().fmt(f)
    }
}

#[deriving(Clone)]
enum NamePadding { PadNone, PadOnLeft, PadOnRight }

impl TestDesc {
    fn padded_name(&self, column_count: uint, align: NamePadding) -> String {
        use std::num::Saturating;
        let mut name = String::from_str(self.name.as_slice());
        let fill = column_count.saturating_sub(name.len());
        let mut pad = " ".repeat(fill);
        match align {
            PadNone => name,
            PadOnLeft => { pad.push_str(name.as_slice()); pad }
            PadOnRight => { name.push_str(pad.as_slice()); name }
        }
    }
}

/// Represents a benchmark function.
pub trait TDynBenchFn {
    fn run(&self, harness: &mut Bencher);
}

// A function that runs a test. If the function returns successfully,
// the test succeeds; if the function fails then the test fails. We
// may need to come up with a more clever definition of test in order
// to support isolation of tests into tasks.
pub enum TestFn {
    StaticTestFn(fn()),
    StaticBenchFn(fn(&mut Bencher)),
    StaticMetricFn(proc(&mut MetricMap)),
    DynTestFn(proc():Send),
    DynMetricFn(proc(&mut MetricMap)),
    DynBenchFn(Box<TDynBenchFn>)
}

impl TestFn {
    fn padding(&self) -> NamePadding {
        match self {
            &StaticTestFn(..) => PadNone,
            &StaticBenchFn(..) => PadOnRight,
            &StaticMetricFn(..) => PadOnRight,
            &DynTestFn(..) => PadNone,
            &DynMetricFn(..) => PadOnRight,
            &DynBenchFn(..) => PadOnRight,
        }
    }
}

impl fmt::Show for TestFn {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write(match *self {
            StaticTestFn(..) => "StaticTestFn(..)",
            StaticBenchFn(..) => "StaticBenchFn(..)",
            StaticMetricFn(..) => "StaticMetricFn(..)",
            DynTestFn(..) => "DynTestFn(..)",
            DynMetricFn(..) => "DynMetricFn(..)",
            DynBenchFn(..) => "DynBenchFn(..)"
        }.as_bytes())
    }
}

/// Manager of the benchmarking runs.
///
/// This is fed into functions marked with `#[bench]` to allow for
/// set-up & tear-down before running a piece of code repeatedly via a
/// call to `iter`.
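///
/// A minimal sketch of use inside a benchmark function (the function name
/// is illustrative):
///
/// ```rust
/// use test::Bencher;
///
/// fn bench_addition(b: &mut Bencher) {
///     // the closure's return value is passed to `black_box` by `iter`
///     b.iter(|| 2u + 2);
/// }
/// ```
pub struct Bencher {
    // Fields inferred from their uses in `iter`, `ns_elapsed` and
    // `bench::benchmark` below.
    iterations: u64,
    ns_start: u64,
    ns_end: u64,
    /// Bytes processed per iteration; used to report throughput in MB/s.
    pub bytes: u64,
}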

// The definition of a single test. A test runner will run a list of
// these.
#[deriving(Clone, Show, PartialEq, Eq, Hash)]
pub struct TestDesc {
    pub name: TestName,
    pub ignore: bool,
    pub should_fail: bool,
}

pub struct TestDescAndFn {
    pub desc: TestDesc,
    pub testfn: TestFn,
}

#[deriving(Clone, Encodable, Decodable, PartialEq, Show)]
pub struct Metric {
    value: f64,
    noise: f64,
}

impl Metric {
    pub fn new(value: f64, noise: f64) -> Metric {
        Metric {value: value, noise: noise}
    }
}

#[deriving(PartialEq)]
pub struct MetricMap(TreeMap<String,Metric>);

impl Clone for MetricMap {
    fn clone(&self) -> MetricMap {
        let MetricMap(ref map) = *self;
        MetricMap(map.clone())
    }
}

/// Analysis of a single change in metric
#[deriving(PartialEq, Show)]
pub enum MetricChange {
    LikelyNoise,
    MetricAdded,
    MetricRemoved,
    Improvement(f64),
    Regression(f64)
}

pub type MetricDiff = TreeMap<String,MetricChange>;

// The default console test runner. It accepts the command line
// arguments and a vector of test_descs.
pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>) {
    let opts =
        match parse_opts(args) {
            Some(Ok(o)) => o,
            Some(Err(msg)) => fail!("{}", msg),
            None => return
        };
    match run_tests_console(&opts, tests) {
        Ok(true) => {}
        Ok(false) => fail!("Some tests failed"),
        Err(e) => fail!("io error when running tests: {}", e),
    }
}

// A variant optimized for invocation with a static test vector.
// This will fail (intentionally) when fed any dynamic tests, because
// it is copying the static values out into a dynamic vector and cannot
// copy dynamic values. It is doing this because from this point on
// a ~[TestDescAndFn] is used in order to effect ownership-transfer
// semantics into parallel test runners, which in turn requires a ~[]
// rather than a &[].
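//
// As a sketch, a harness `main` like the one rustc generates would invoke
// this roughly as follows (`TESTS` and the argument handling here are
// illustrative, not the exact generated code):
//
//     fn main() {
//         let args = std::os::args();
//         test_main_static(args.as_slice(), TESTS);
//     }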
pub fn test_main_static(args: &[String], tests: &[TestDescAndFn]) {
    let owned_tests = tests.iter().map(|t| {
        match t.testfn {
            StaticTestFn(f) => TestDescAndFn { testfn: StaticTestFn(f), desc: t.desc.clone() },
            StaticBenchFn(f) => TestDescAndFn { testfn: StaticBenchFn(f), desc: t.desc.clone() },
            _ => fail!("non-static tests passed to test::test_main_static")
        }
    }).collect();
    test_main(args, owned_tests)
}

pub enum ColorConfig {
    AutoColor,
    AlwaysColor,
    NeverColor,
}

pub struct TestOpts {
    pub filter: Option<Regex>,
    pub run_ignored: bool,
    pub run_tests: bool,
    pub run_benchmarks: bool,
    pub ratchet_metrics: Option<Path>,
    pub ratchet_noise_percent: Option<f64>,
    pub save_metrics: Option<Path>,
    pub test_shard: Option<(uint,uint)>,
    pub logfile: Option<Path>,
    pub nocapture: bool,
    pub color: ColorConfig,
}

impl TestOpts {
    #[cfg(test)]
    fn new() -> TestOpts {
        TestOpts {
            filter: None,
            run_ignored: false,
            run_tests: false,
            run_benchmarks: false,
            ratchet_metrics: None,
            ratchet_noise_percent: None,
            save_metrics: None,
            test_shard: None,
            logfile: None,
            nocapture: false,
            color: AutoColor,
        }
    }
}

/// Result of parsing the options.
pub type OptRes = Result<TestOpts, String>;

fn optgroups() -> Vec<getopts::OptGroup> {
    vec!(getopts::optflag("", "ignored", "Run ignored tests"),
         getopts::optflag("", "test", "Run tests and not benchmarks"),
         getopts::optflag("", "bench", "Run benchmarks instead of tests"),
         getopts::optflag("h", "help", "Display this message (longer with --help)"),
         getopts::optopt("", "save-metrics", "Location to save bench metrics",
                         "PATH"),
         getopts::optopt("", "ratchet-metrics",
                         "Location to load and save metrics from. The metrics \
                          loaded cause benchmarks to fail if they run too \
                          slowly", "PATH"),
         getopts::optopt("", "ratchet-noise-percent",
                         "Tests within N% of the recorded metrics will be \
                          considered as passing", "PERCENTAGE"),
         getopts::optopt("", "logfile", "Write logs to the specified file instead \
                          of stdout", "PATH"),
         getopts::optopt("", "test-shard", "run shard A, of B shards, worth of the testsuite",
                         "A.B"),
         getopts::optflag("", "nocapture", "don't capture stdout/stderr of each \
                          task, allow printing directly"),
         getopts::optopt("", "color", "Configure coloring of output:
            auto   = colorize if stdout is a tty and tests are run serially (default);
            always = always colorize output;
            never  = never colorize output;", "auto|always|never"))
}

fn usage(binary: &str) {
    let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
    println!(r"{usage}

The FILTER regex is tested against the name of all tests to run, and
only those tests that match are run.

By default, all tests are run in parallel. This can be altered with the
RUST_TEST_TASKS environment variable when running tests (set it to 1).

All tests have their standard output and standard error captured by default.
This can be overridden with the --nocapture flag or the RUST_TEST_NOCAPTURE=1
environment variable. Logging is not captured by default.

Test Attributes:

    #[test]        - Indicates a function is a test to be run. This function
                     takes no arguments.
    #[bench]       - Indicates a function is a benchmark to be run. This
                     function takes one argument (test::Bencher).
    #[should_fail] - This function (also labeled with #[test]) will only pass if
                     the code causes a failure (an assertion failure or fail!)
    #[ignore]      - When applied to a function which is already attributed as a
                     test, then the test runner will ignore these tests during
                     normal test runs. Running with --ignored will run these
                     tests. This may also be written as #[ignore(cfg(...))] to
                     ignore the test on certain configurations.",
             usage = getopts::usage(message.as_slice(),
                                    optgroups().as_slice()));
}

// Parses command line arguments into test options
pub fn parse_opts(args: &[String]) -> Option<OptRes> {
    let args_ = args.tail();
    let matches =
        match getopts::getopts(args_.as_slice(), optgroups().as_slice()) {
            Ok(m) => m,
            Err(f) => return Some(Err(f.to_str()))
        };

    if matches.opt_present("h") { usage(args[0].as_slice()); return None; }

    let filter = if matches.free.len() > 0 {
        let s = matches.free.get(0).as_slice();
        match Regex::new(s) {
            Ok(re) => Some(re),
            Err(e) => return Some(Err(format!("could not parse /{}/: {}", s, e)))
        }
    } else {
        None
    };

    let run_ignored = matches.opt_present("ignored");

    let logfile = matches.opt_str("logfile");
    let logfile = logfile.map(|s| Path::new(s));

    let run_benchmarks = matches.opt_present("bench");
    let run_tests = ! run_benchmarks ||
        matches.opt_present("test");

    let ratchet_metrics = matches.opt_str("ratchet-metrics");
    let ratchet_metrics = ratchet_metrics.map(|s| Path::new(s));

    let ratchet_noise_percent = matches.opt_str("ratchet-noise-percent");
    let ratchet_noise_percent =
        ratchet_noise_percent.map(|s| from_str::<f64>(s.as_slice()).unwrap());

    let save_metrics = matches.opt_str("save-metrics");
    let save_metrics = save_metrics.map(|s| Path::new(s));

    let test_shard = matches.opt_str("test-shard");
    let test_shard = opt_shard(test_shard);

    let mut nocapture = matches.opt_present("nocapture");
    if !nocapture {
        nocapture = os::getenv("RUST_TEST_NOCAPTURE").is_some();
    }

    let color = match matches.opt_str("color").as_ref().map(|s| s.as_slice()) {
        Some("auto") | None => AutoColor,
        Some("always") => AlwaysColor,
        Some("never") => NeverColor,

        Some(v) => return Some(Err(format!("argument for --color must be \
                                            auto, always, or never (was {})",
                                           v)))
    };

    let test_opts = TestOpts {
        filter: filter,
        run_ignored: run_ignored,
        run_tests: run_tests,
        run_benchmarks: run_benchmarks,
        ratchet_metrics: ratchet_metrics,
        ratchet_noise_percent: ratchet_noise_percent,
        save_metrics: save_metrics,
        test_shard: test_shard,
        logfile: logfile,
        nocapture: nocapture,
        color: color,
    };

    Some(Ok(test_opts))
}

pub fn opt_shard(maybestr: Option<String>) -> Option<(uint,uint)> {
    match maybestr {
        None => None,
        Some(s) => {
            let mut it = s.as_slice().split('.');
            match (it.next().and_then(from_str::<uint>), it.next().and_then(from_str::<uint>),
                   it.next()) {
                (Some(a), Some(b), None) => {
                    if (a <= 0) || (a > b) {
                        fail!("tried to run shard {a}.{b}, but {a} is out of bounds \
                               (should be between 1 and {b})", a=a, b=b)
                    }
                    Some((a, b))
                }
                _ => None
            }
        }
    }
}
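
// For example, `--test-shard 2.4` parses to `Some((2, 4))`: run the second
// of four interleaved slices of the sorted test list (see the sharding
// filter in `filter_tests`, which keeps indices where `i % b == a - 1`).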

#[deriving(Clone, PartialEq)]
pub struct BenchSamples {
    ns_iter_summ: stats::Summary<f64>,
    mb_s: uint,
}

#[deriving(Clone, PartialEq)]
pub enum TestResult {
    TrOk,
    TrFailed,
    TrIgnored,
    TrMetrics(MetricMap),
    TrBench(BenchSamples),
}

enum OutputLocation<T> {
    Pretty(Box<term::Terminal<term::WriterWrapper> + Send>),
    Raw(T),
}

struct ConsoleTestState<T> {
    log_out: Option<File>,
    out: OutputLocation<T>,
    use_color: bool,
    total: uint,
    passed: uint,
    failed: uint,
    ignored: uint,
    measured: uint,
    metrics: MetricMap,
    failures: Vec<(TestDesc, Vec<u8>)>,
    max_name_len: uint, // number of columns to fill when aligning names
}

impl<T: Writer> ConsoleTestState<T> {
    pub fn new(opts: &TestOpts,
               _: Option<T>) -> io::IoResult<ConsoleTestState<StdWriter>> {
        let log_out = match opts.logfile {
            Some(ref path) => Some(try!(File::create(path))),
            None => None
        };
        let out = match term::stdout() {
            None => Raw(io::stdio::stdout_raw()),
            Some(t) => Pretty(t)
        };
        Ok(ConsoleTestState {
            out: out,
            log_out: log_out,
            use_color: use_color(opts),
            total: 0u,
            passed: 0u,
            failed: 0u,
            ignored: 0u,
            measured: 0u,
            metrics: MetricMap::new(),
            failures: Vec::new(),
            max_name_len: 0u,
        })
    }

    pub fn write_ok(&mut self) -> io::IoResult<()> {
        self.write_pretty("ok", term::color::GREEN)
    }

    pub fn write_failed(&mut self) -> io::IoResult<()> {
        self.write_pretty("FAILED", term::color::RED)
    }

    pub fn write_ignored(&mut self) -> io::IoResult<()> {
        self.write_pretty("ignored", term::color::YELLOW)
    }

    pub fn write_metric(&mut self) -> io::IoResult<()> {
        self.write_pretty("metric", term::color::CYAN)
    }

    pub fn write_bench(&mut self) -> io::IoResult<()> {
        self.write_pretty("bench", term::color::CYAN)
    }

    pub fn write_added(&mut self) -> io::IoResult<()> {
        self.write_pretty("added", term::color::GREEN)
    }

    pub fn write_improved(&mut self) -> io::IoResult<()> {
        self.write_pretty("improved", term::color::GREEN)
    }

    pub fn write_removed(&mut self) -> io::IoResult<()> {
        self.write_pretty("removed", term::color::YELLOW)
    }

    pub fn write_regressed(&mut self) -> io::IoResult<()> {
        self.write_pretty("regressed", term::color::RED)
    }

    pub fn write_pretty(&mut self,
                        word: &str,
                        color: term::color::Color) -> io::IoResult<()> {
        match self.out {
            Pretty(ref mut term) => {
                if self.use_color {
                    try!(term.fg(color));
                }
                try!(term.write(word.as_bytes()));
                if self.use_color {
                    try!(term.reset());
                }
                Ok(())
            }
            Raw(ref mut stdout) => stdout.write(word.as_bytes())
        }
    }

    pub fn write_plain(&mut self, s: &str) -> io::IoResult<()> {
        match self.out {
            Pretty(ref mut term) => term.write(s.as_bytes()),
            Raw(ref mut stdout) => stdout.write(s.as_bytes())
        }
    }

    pub fn write_run_start(&mut self, len: uint) -> io::IoResult<()> {
        self.total = len;
        let noun = if len != 1 { "tests" } else { "test" };
        self.write_plain(format!("\nrunning {} {}\n", len, noun).as_slice())
    }

    pub fn write_test_start(&mut self, test: &TestDesc,
                            align: NamePadding) -> io::IoResult<()> {
        let name = test.padded_name(self.max_name_len, align);
        self.write_plain(format!("test {} ... ", name).as_slice())
    }

    pub fn write_result(&mut self, result: &TestResult) -> io::IoResult<()> {
        try!(match *result {
            TrOk => self.write_ok(),
            TrFailed => self.write_failed(),
            TrIgnored => self.write_ignored(),
            TrMetrics(ref mm) => {
                try!(self.write_metric());
                self.write_plain(format!(": {}", fmt_metrics(mm)).as_slice())
            }
            TrBench(ref bs) => {
                try!(self.write_bench());
                self.write_plain(format!(": {}",
                                         fmt_bench_samples(bs)).as_slice())
            }
        });
        self.write_plain("\n")
    }

    pub fn write_log(&mut self, test: &TestDesc,
                     result: &TestResult) -> io::IoResult<()> {
        match self.log_out {
            None => Ok(()),
            Some(ref mut o) => {
                let s = format!("{} {}\n", match *result {
                    TrOk => "ok".to_string(),
                    TrFailed => "failed".to_string(),
                    TrIgnored => "ignored".to_string(),
                    TrMetrics(ref mm) => fmt_metrics(mm),
                    TrBench(ref bs) => fmt_bench_samples(bs)
                }, test.name.as_slice());
                o.write(s.as_bytes())
            }
        }
    }

    pub fn write_failures(&mut self) -> io::IoResult<()> {
        try!(self.write_plain("\nfailures:\n"));
        let mut failures = Vec::new();
        let mut fail_out = String::new();
        for &(ref f, ref stdout) in self.failures.iter() {
            failures.push(f.name.to_str());
            if stdout.len() > 0 {
                fail_out.push_str(format!("---- {} stdout ----\n\t",
                                          f.name.as_slice()).as_slice());
                let output = str::from_utf8_lossy(stdout.as_slice());
                fail_out.push_str(output.as_slice()
                                        .replace("\n", "\n\t")
                                        .as_slice());
                fail_out.push_str("\n");
            }
        }
        if fail_out.len() > 0 {
            try!(self.write_plain("\n"));
            try!(self.write_plain(fail_out.as_slice()));
        }

        try!(self.write_plain("\nfailures:\n"));
        failures.as_mut_slice().sort();
        for name in failures.iter() {
            try!(self.write_plain(format!("    {}\n",
                                          name.as_slice()).as_slice()));
        }
        Ok(())
    }

    pub fn write_metric_diff(&mut self, diff: &MetricDiff) -> io::IoResult<()> {
        let mut noise = 0u;
        let mut improved = 0u;
        let mut regressed = 0u;
        let mut added = 0u;
        let mut removed = 0u;

        for (k, v) in diff.iter() {
            match *v {
                LikelyNoise => noise += 1,
                MetricAdded => {
                    added += 1;
                    try!(self.write_added());
                    try!(self.write_plain(format!(": {}\n", *k).as_slice()));
                }
                MetricRemoved => {
                    removed += 1;
                    try!(self.write_removed());
                    try!(self.write_plain(format!(": {}\n", *k).as_slice()));
                }
                Improvement(pct) => {
                    improved += 1;
                    try!(self.write_plain(format!(": {}", *k).as_slice()));
                    try!(self.write_improved());
                    try!(self.write_plain(format!(" by {:.2f}%\n",
                                                  pct as f64).as_slice()));
                }
                Regression(pct) => {
                    regressed += 1;
                    try!(self.write_plain(format!(": {}", *k).as_slice()));
                    try!(self.write_regressed());
                    try!(self.write_plain(format!(" by {:.2f}%\n",
                                                  pct as f64).as_slice()));
                }
            }
        }
        try!(self.write_plain(format!("result of ratchet: {} metrics added, \
                                       {} removed, {} improved, {} regressed, \
                                       {} noise\n",
                                      added, removed, improved, regressed,
                                      noise).as_slice()));
        if regressed == 0 {
            try!(self.write_plain("updated ratchet file\n"));
        } else {
            try!(self.write_plain("left ratchet file untouched\n"));
        }
        Ok(())
    }

    pub fn write_run_finish(&mut self,
                            ratchet_metrics: &Option<Path>,
                            ratchet_pct: Option<f64>) -> io::IoResult<bool> {
        assert!(self.passed + self.failed + self.ignored + self.measured == self.total);

        let ratchet_success = match *ratchet_metrics {
            None => true,
            Some(ref pth) => {
                try!(self.write_plain(format!("\nusing metrics ratchet: {}\n",
                                              pth.display()).as_slice()));
                match ratchet_pct {
                    None => (),
                    Some(pct) =>
                        try!(self.write_plain(format!("with noise-tolerance \
                                                       forced to: {}%\n",
                                                      pct).as_slice()))
                }
                let (diff, ok) = self.metrics.ratchet(pth, ratchet_pct);
                try!(self.write_metric_diff(&diff));
                ok
            }
        };

        let test_success = self.failed == 0u;
        if !test_success {
            try!(self.write_failures());
        }

        let success = ratchet_success && test_success;

        try!(self.write_plain("\ntest result: "));
        if success {
            // There's no parallelism at this point so it's safe to use color
            try!(self.write_ok());
        } else {
            try!(self.write_failed());
        }
        let s = format!(". {} passed; {} failed; {} ignored; {} measured\n\n",
                        self.passed, self.failed, self.ignored, self.measured);
        try!(self.write_plain(s.as_slice()));
        return Ok(success);
    }
}

pub fn fmt_metrics(mm: &MetricMap) -> String {
    let MetricMap(ref mm) = *mm;
    let v : Vec<String> = mm.iter()
        .map(|(k,v)| format!("{}: {} (+/- {})", *k,
                             v.value as f64, v.noise as f64))
        .collect();
    v.connect(", ")
}

pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
    if bs.mb_s != 0 {
        format!("{:>9} ns/iter (+/- {}) = {} MB/s",
                bs.ns_iter_summ.median as uint,
                (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint,
                bs.mb_s)
    } else {
        format!("{:>9} ns/iter (+/- {})",
                bs.ns_iter_summ.median as uint,
                (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint)
    }
}
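
// For example, a median of 50 ns with a 5 ns max-min spread and a non-zero
// `bytes` setting renders roughly as (illustrative numbers):
//
//            50 ns/iter (+/- 5) = 128 MB/s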

// A simple console test runner
pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::IoResult<bool> {

    fn callback<T: Writer>(event: &TestEvent, st: &mut ConsoleTestState<T>) -> io::IoResult<()> {
        match (*event).clone() {
            TeFiltered(ref filtered_tests) => st.write_run_start(filtered_tests.len()),
            TeWait(ref test, padding) => st.write_test_start(test, padding),
            TeResult(test, result, stdout) => {
                try!(st.write_log(&test, &result));
                try!(st.write_result(&result));
                match result {
                    TrOk => st.passed += 1,
                    TrIgnored => st.ignored += 1,
                    TrMetrics(mm) => {
                        let tname = test.name.as_slice();
                        let MetricMap(mm) = mm;
                        for (k,v) in mm.iter() {
                            st.metrics
                              .insert_metric(format!("{}.{}",
                                                     tname,
                                                     k).as_slice(),
                                             v.value,
                                             v.noise);
                        }
                        st.measured += 1
                    }
                    TrBench(bs) => {
                        st.metrics.insert_metric(test.name.as_slice(),
                                                 bs.ns_iter_summ.median,
                                                 bs.ns_iter_summ.max - bs.ns_iter_summ.min);
                        st.measured += 1
                    }
                    TrFailed => {
                        st.failed += 1;
                        st.failures.push((test, stdout));
                    }
                }
                Ok(())
            }
        }
    }

    let mut st = try!(ConsoleTestState::new(opts, None::<StdWriter>));
    fn len_if_padded(t: &TestDescAndFn) -> uint {
        match t.testfn.padding() {
            PadNone => 0u,
            PadOnLeft | PadOnRight => t.desc.name.as_slice().len(),
        }
    }
    match tests.iter().max_by(|t| len_if_padded(*t)) {
        Some(t) => {
            let n = t.desc.name.as_slice();
            st.max_name_len = n.len();
        }
        None => {}
    }
    try!(run_tests(opts, tests, |x| callback(&x, &mut st)));
    match opts.save_metrics {
        None => (),
        Some(ref pth) => {
            try!(st.metrics.save(pth));
            try!(st.write_plain(format!("\nmetrics saved to: {}",
                                        pth.display()).as_slice()));
        }
    }
    return st.write_run_finish(&opts.ratchet_metrics, opts.ratchet_noise_percent);
}

#[test]
fn should_sort_failures_before_printing_them() {
    use std::io::MemWriter;

    let test_a = TestDesc {
        name: StaticTestName("a"),
        ignore: false,
        should_fail: false
    };

    let test_b = TestDesc {
        name: StaticTestName("b"),
        ignore: false,
        should_fail: false
    };

    let mut st = ConsoleTestState {
        log_out: None,
        out: Raw(MemWriter::new()),
        use_color: false,
        total: 0u, passed: 0u, failed: 0u, ignored: 0u, measured: 0u,
        max_name_len: 10u,
        metrics: MetricMap::new(),
        failures: vec!((test_b, Vec::new()), (test_a, Vec::new()))
    };

    st.write_failures().unwrap();
    let s = match st.out {
        Raw(ref m) => str::from_utf8_lossy(m.get_ref()),
        Pretty(_) => unreachable!()
    };

    let apos = s.as_slice().find_str("a").unwrap();
    let bpos = s.as_slice().find_str("b").unwrap();
    assert!(apos < bpos);
}

fn use_color(opts: &TestOpts) -> bool {
    match opts.color {
        AutoColor => get_concurrency() == 1 && io::stdout().get_ref().isatty(),
        AlwaysColor => true,
        NeverColor => false,
    }
}

#[deriving(Clone)]
enum TestEvent {
    TeFiltered(Vec<TestDesc>),
    TeWait(TestDesc, NamePadding),
    TeResult(TestDesc, TestResult, Vec<u8>),
}

pub type MonitorMsg = (TestDesc, TestResult, Vec<u8>);

fn run_tests(opts: &TestOpts,
             tests: Vec<TestDescAndFn>,
             callback: |e: TestEvent| -> io::IoResult<()>) -> io::IoResult<()> {
    let filtered_tests = filter_tests(opts, tests);
    let filtered_descs = filtered_tests.iter()
                                       .map(|t| t.desc.clone())
                                       .collect();

    try!(callback(TeFiltered(filtered_descs)));

    let (filtered_tests, filtered_benchs_and_metrics) =
        filtered_tests.partition(|e| {
            match e.testfn {
                StaticTestFn(_) | DynTestFn(_) => true,
                _ => false
            }
        });

    // It's tempting to just spawn all the tests at once, but since we have
    // many tests that run in other processes we would be making a big mess.
    let concurrency = get_concurrency();

    let mut remaining = filtered_tests;
    remaining.reverse();
    let mut pending = 0;

    let (tx, rx) = channel::<MonitorMsg>();

    while pending > 0 || !remaining.is_empty() {
        while pending < concurrency && !remaining.is_empty() {
            let test = remaining.pop().unwrap();
            if concurrency == 1 {
                // We are doing one test at a time so we can print the name
                // of the test before we run it. Useful for debugging tests
                // that hang forever.
                try!(callback(TeWait(test.desc.clone(), test.testfn.padding())));
            }
            run_test(opts, !opts.run_tests, test, tx.clone());
            pending += 1;
        }

        let (desc, result, stdout) = rx.recv();
        if concurrency != 1 {
            try!(callback(TeWait(desc.clone(), PadNone)));
        }
        try!(callback(TeResult(desc, result, stdout)));
        pending -= 1;
    }

    // All benchmarks run at the end, in serial.
    // (this includes metric fns)
    for b in filtered_benchs_and_metrics.move_iter() {
        try!(callback(TeWait(b.desc.clone(), b.testfn.padding())));
        run_test(opts, !opts.run_benchmarks, b, tx.clone());
        let (test, result, stdout) = rx.recv();
        try!(callback(TeResult(test, result, stdout)));
    }
    Ok(())
}

fn get_concurrency() -> uint {
    use std::rt;
    match os::getenv("RUST_TEST_TASKS") {
        Some(s) => {
            let opt_n: Option<uint> = FromStr::from_str(s.as_slice());
            match opt_n {
                Some(n) if n > 0 => n,
                _ => fail!("RUST_TEST_TASKS is `{}`, should be a positive integer.", s)
            }
        }
        None => {
            rt::default_sched_threads()
        }
    }
}
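
// For example, `RUST_TEST_TASKS=1 ./my-tests` (an illustrative binary name)
// forces the tests to run serially, which also allows `AutoColor` to
// colorize output (see `use_color` above).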

pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
    let mut filtered = tests;

    // Remove tests that don't match the test filter
    filtered = match opts.filter {
        None => filtered,
        Some(ref re) => {
            filtered.move_iter()
                    .filter(|test| re.is_match(test.desc.name.as_slice())).collect()
        }
    };

    // Maybe pull out the ignored tests and unignore them
    filtered = if !opts.run_ignored {
        filtered
    } else {
        fn filter(test: TestDescAndFn) -> Option<TestDescAndFn> {
            if test.desc.ignore {
                let TestDescAndFn {desc, testfn} = test;
                Some(TestDescAndFn {
                    desc: TestDesc {ignore: false, ..desc},
                    testfn: testfn
                })
            } else {
                None
            }
        }
        filtered.move_iter().filter_map(|x| filter(x)).collect()
    };

    // Sort the tests alphabetically
    filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(&t2.desc.name.as_slice()));

    // Shard the remaining tests, if sharding requested.
    match opts.test_shard {
        None => filtered,
        Some((a, b)) => {
            filtered.move_iter().enumerate()
                    // note: using a - 1 so that the valid shards, for example, are
                    // 1.2 and 2.2 instead of 0.2 and 1.2
                    .filter(|&(i,_)| i % b == (a - 1))
                    .map(|(_,t)| t)
                    .collect()
        }
    }
}

pub fn run_test(opts: &TestOpts,
                force_ignore: bool,
                test: TestDescAndFn,
                monitor_ch: Sender<MonitorMsg>) {

    let TestDescAndFn {desc, testfn} = test;

    if force_ignore || desc.ignore {
        monitor_ch.send((desc, TrIgnored, Vec::new()));
        return;
    }

    fn run_test_inner(desc: TestDesc,
                      monitor_ch: Sender<MonitorMsg>,
                      nocapture: bool,
                      testfn: proc():Send) {
        spawn(proc() {
            let (tx, rx) = channel();
            let mut reader = ChanReader::new(rx);
            let stdout = ChanWriter::new(tx.clone());
            let stderr = ChanWriter::new(tx);
            let mut task = TaskBuilder::new().named(match desc.name {
                DynTestName(ref name) => name.clone().to_string(),
                StaticTestName(name) => name.to_string(),
            });
            if nocapture {
                drop((stdout, stderr));
            } else {
                task = task.stdout(box stdout as Box<Writer + Send>);
                task = task.stderr(box stderr as Box<Writer + Send>);
            }
            let result_future = task.try_future(testfn);

            let stdout = reader.read_to_end().unwrap().move_iter().collect();
            let task_result = result_future.unwrap();
            let test_result = calc_result(&desc, task_result.is_ok());
            monitor_ch.send((desc.clone(), test_result, stdout));
        })
    }

    match testfn {
        DynBenchFn(bencher) => {
            let bs = ::bench::benchmark(|harness| bencher.run(harness));
            monitor_ch.send((desc, TrBench(bs), Vec::new()));
            return;
        }
        StaticBenchFn(benchfn) => {
            let bs = ::bench::benchmark(|harness| benchfn(harness));
            monitor_ch.send((desc, TrBench(bs), Vec::new()));
            return;
        }
        DynMetricFn(f) => {
            let mut mm = MetricMap::new();
            f(&mut mm);
            monitor_ch.send((desc, TrMetrics(mm), Vec::new()));
            return;
        }
        StaticMetricFn(f) => {
            let mut mm = MetricMap::new();
            f(&mut mm);
            monitor_ch.send((desc, TrMetrics(mm), Vec::new()));
            return;
        }
        DynTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture, f),
        StaticTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture,
                                          proc() f())
    }
}

fn calc_result(desc: &TestDesc, task_succeeded: bool) -> TestResult {
    if task_succeeded {
        if desc.should_fail { TrFailed }
        else { TrOk }
    } else {
        if desc.should_fail { TrOk }
        else { TrFailed }
    }
}

impl ToJson for Metric {
    fn to_json(&self) -> json::Json {
        let mut map = box TreeMap::new();
        map.insert("value".to_string(), json::Number(self.value));
        map.insert("noise".to_string(), json::Number(self.noise));
        json::Object(map)
    }
}

impl MetricMap {

    pub fn new() -> MetricMap {
        MetricMap(TreeMap::new())
    }

    /// Load a `MetricMap` from a file.
    ///
    /// # Failure
    ///
    /// This function will fail if the path does not exist or the path does not
    /// contain a valid metric map.
    pub fn load(p: &Path) -> MetricMap {
        assert!(p.exists());
        let mut f = File::open(p).unwrap();
        let value = json::from_reader(&mut f as &mut io::Reader).unwrap();
        let mut decoder = json::Decoder::new(value);
        MetricMap(match Decodable::decode(&mut decoder) {
            Ok(t) => t,
            Err(e) => fail!("failure decoding JSON: {}", e)
        })
    }

    /// Write a `MetricMap` to a file.
    pub fn save(&self, p: &Path) -> io::IoResult<()> {
        let mut file = try!(File::create(p));
        let MetricMap(ref map) = *self;

        // FIXME(pcwalton): Yuck.
        let mut new_map = TreeMap::new();
        for (ref key, ref value) in map.iter() {
            new_map.insert(key.to_string(), (*value).clone());
        }

        new_map.to_json().to_pretty_writer(&mut file)
    }

    /// Compare against another MetricMap. Optionally compare all
    /// measurements in the maps using the provided `noise_pct` as a
    /// percentage of each value to consider noise. If `None`, each
    /// measurement's noise threshold is independently chosen as the
    /// maximum of that measurement's recorded noise quantity in either
    /// map.
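    ///
    /// A usage sketch (the metric name and values are illustrative):
    ///
    /// ```rust
    /// use test::MetricMap;
    ///
    /// let mut old = MetricMap::new();
    /// old.insert_metric("runtime-ns", 1000.0, 2.0);
    ///
    /// let mut new = MetricMap::new();
    /// new.insert_metric("runtime-ns", 1100.0, 2.0);
    ///
    /// // A 20% noise tolerance makes this 10% slowdown LikelyNoise.
    /// let diff = new.compare_to_old(&old, Some(20.0));
    /// assert_eq!(diff.len(), 1);
    /// ```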
    pub fn compare_to_old(&self, old: &MetricMap,
                          noise_pct: Option<f64>) -> MetricDiff {
        let mut diff : MetricDiff = TreeMap::new();
        let MetricMap(ref selfmap) = *self;
        let MetricMap(ref old) = *old;
        for (k, vold) in old.iter() {
            let r = match selfmap.find(k) {
                None => MetricRemoved,
                Some(v) => {
                    let delta = v.value - vold.value;
                    let noise = match noise_pct {
                        None => vold.noise.abs().max(v.noise.abs()),
                        Some(pct) => vold.value * pct / 100.0
                    };
                    if delta.abs() <= noise {
                        LikelyNoise
                    } else {
                        let pct = delta.abs() / vold.value.max(f64::EPSILON) * 100.0;
                        if vold.noise < 0.0 {
                            // When 'noise' is negative, it means we want
                            // to see deltas that go up over time, and can
                            // only tolerate slight negative movement.
                            if delta < 0.0 {
                                Regression(pct)
                            } else {
                                Improvement(pct)
                            }
                        } else {
                            // When 'noise' is positive, it means we want
                            // to see deltas that go down over time, and
                            // can only tolerate slight positive movements.
                            if delta < 0.0 {
                                Improvement(pct)
                            } else {
                                Regression(pct)
                            }
                        }
                    }
                }
            };
            diff.insert((*k).clone(), r);
        }
        let MetricMap(ref map) = *self;
        for (k, _) in map.iter() {
            if !diff.contains_key(k) {
                diff.insert((*k).clone(), MetricAdded);
            }
        }
        diff
    }

    /// Insert a named `value` (+/- `noise`) metric into the map. The value
    /// must be non-negative. The `noise` indicates the uncertainty of the
    /// metric, which doubles as the "noise range" of acceptable
    /// pairwise-regressions on this named value, when comparing from one
    /// metric to the next using `compare_to_old`.
    ///
    /// If `noise` is positive, then it means this metric is of a value
    /// you want to see grow smaller, so a change larger than `noise` in the
    /// positive direction represents a regression.
    ///
    /// If `noise` is negative, then it means this metric is of a value
    /// you want to see grow larger, so a change larger than `noise` in the
    /// negative direction represents a regression.
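    ///
    /// For example (a sketch; the metric names are illustrative):
    ///
    /// ```rust
    /// use test::MetricMap;
    ///
    /// let mut mm = MetricMap::new();
    /// // Lower is better: positive noise, so growth beyond it is a regression.
    /// mm.insert_metric("runtime-ns", 1000.0, 10.0);
    /// // Higher is better: negative noise, so shrinkage beyond it is a regression.
    /// mm.insert_metric("throughput-mb-s", 50.0, -2.0);
    /// ```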
    pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
        let m = Metric {
            value: value,
            noise: noise
        };
        let MetricMap(ref mut map) = *self;
        map.insert(name.to_string(), m);
    }

    /// Attempt to "ratchet" an external metric file. This involves loading
    /// metrics from a metric file (if it exists), comparing against
    /// the metrics in `self` using `compare_to_old`, and rewriting the
    /// file to contain the metrics in `self` if none of the
    /// `MetricChange`s are `Regression`. Returns the diff as well
    /// as a boolean indicating whether the ratchet succeeded.
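    ///
    /// A usage sketch (the file name is illustrative):
    ///
    /// ```ignore
    /// use test::MetricMap;
    ///
    /// let mut mm = MetricMap::new();
    /// mm.insert_metric("runtime-ns", 950.0, 10.0);
    ///
    /// // Compare to the saved metrics; the file is rewritten only when no
    /// // metric regressed.
    /// let (_diff, ok) = mm.ratchet(&Path::new("metrics.json"), None);
    /// assert!(ok);
    /// ```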
    pub fn ratchet(&self, p: &Path, pct: Option<f64>) -> (MetricDiff, bool) {
        let old = if p.exists() {
            MetricMap::load(p)
        } else {
            MetricMap::new()
        };

        let diff : MetricDiff = self.compare_to_old(&old, pct);
        let ok = diff.iter().all(|(_, v)| {
            match *v {
                Regression(_) => false,
                _ => true
            }
        });

        if ok {
            self.save(p).unwrap();
        }
        return (diff, ok);
    }
}

/// A function that is opaque to the optimizer, to allow benchmarks to
/// pretend to use outputs to assist in avoiding dead-code
/// elimination.
///
/// This function is a no-op, and does not even read from `dummy`.
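///
/// For example, keeping a computed value alive in a benchmark (a sketch):
///
/// ```rust
/// use test::black_box;
///
/// let sum = range(0u, 100).fold(0, |a, b| a + b);
/// // Without this, LLVM could treat `sum` as dead code.
/// black_box(sum);
/// ```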
pub fn black_box<T>(dummy: T) {
    // we need to "use" the argument in some way LLVM can't
    // introspect.
    unsafe {asm!("" : : "r"(&dummy))}
}

impl Bencher {
    /// Callback for benchmark functions to run in their body.
    pub fn iter<T>(&mut self, inner: || -> T) {
        self.ns_start = precise_time_ns();
        let k = self.iterations;
        for _ in range(0u64, k) {
            black_box(inner());
        }
        self.ns_end = precise_time_ns();
    }

    pub fn ns_elapsed(&mut self) -> u64 {
        if self.ns_start == 0 || self.ns_end == 0 {
            0
        } else {
            self.ns_end - self.ns_start
        }
    }

    pub fn ns_per_iter(&mut self) -> u64 {
        if self.iterations == 0 {
            0
        } else {
            self.ns_elapsed() / cmp::max(self.iterations, 1)
        }
    }

    pub fn bench_n(&mut self, n: u64, f: |&mut Bencher|) {
        self.iterations = n;
        f(self);
    }

    // This is a more statistics-driven benchmark algorithm
    pub fn auto_bench(&mut self, f: |&mut Bencher|) -> stats::Summary<f64> {

        // Initial bench run to get ballpark figure.
        let mut n = 1_u64;
        self.bench_n(n, |x| f(x));

        // Try to estimate iter count for 1ms falling back to 1m
        // iterations if first run took < 1ns.
        if self.ns_per_iter() == 0 {
            n = 1_000_000;
        } else {
            n = 1_000_000 / cmp::max(self.ns_per_iter(), 1);
        }

        // if the first run took more than 1ms we don't want to just
        // be left doing 0 iterations on every loop. The unfortunate
        // side effect of not being able to do as many runs is
        // automatically handled by the statistical analysis below
        // (i.e. larger error bars).
        if n == 0 { n = 1; }

        let mut total_run = 0;
        let samples : &mut [f64] = [0.0_f64, ..50];
        loop {
            let loop_start = precise_time_ns();

            for p in samples.mut_iter() {
                self.bench_n(n, |x| f(x));
                *p = self.ns_per_iter() as f64;
            }

            stats::winsorize(samples, 5.0);
            let summ = stats::Summary::new(samples);

            for p in samples.mut_iter() {
                self.bench_n(5 * n, |x| f(x));
                *p = self.ns_per_iter() as f64;
            }

            stats::winsorize(samples, 5.0);
            let summ5 = stats::Summary::new(samples);

            let now = precise_time_ns();
            let loop_run = now - loop_start;

            // If we've run for 100ms and seem to have converged to a
            // stable median.
            if loop_run > 100_000_000 &&
                summ.median_abs_dev_pct < 1.0 &&
                summ.median - summ5.median < summ5.median_abs_dev {
                return summ5;
            }

            total_run += loop_run;
            // Longest we ever run for is 3s.
            if total_run > 3_000_000_000 {
                return summ5;
            }

            n *= 2;
        }
    }
}

pub mod bench {
    use std::cmp;
    use super::{Bencher, BenchSamples};

    pub fn benchmark(f: |&mut Bencher|) -> BenchSamples {
        let mut bs = Bencher {
            iterations: 0,
            ns_start: 0,
            ns_end: 0,
            bytes: 0
        };

        let ns_iter_summ = bs.auto_bench(f);

        let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
        let iter_s = 1_000_000_000 / ns_iter;
        let mb_s = (bs.bytes * iter_s) / 1_000_000;

        BenchSamples {
            ns_iter_summ: ns_iter_summ,
            mb_s: mb_s as uint
        }
    }
}

#[cfg(test)]
mod tests {
    use test::{TrFailed, TrIgnored, TrOk, filter_tests, parse_opts,
               TestDesc, TestDescAndFn, TestOpts, run_test,
               Metric, MetricMap, MetricAdded, MetricRemoved,
               Improvement, Regression, LikelyNoise,
               StaticTestName, DynTestName, DynTestFn};
    use std::io::TempDir;

    #[test]
    pub fn do_not_run_ignored_tests() {
        fn f() { fail!(); }
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: true,
                should_fail: false
            },
            testfn: DynTestFn(proc() f()),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv();
        assert!(res != TrOk);
    }

    #[test]
    pub fn ignored_tests_result_in_ignored() {
        fn f() { }
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: true,
                should_fail: false
            },
            testfn: DynTestFn(proc() f()),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv();
        assert!(res == TrIgnored);
    }

    #[test]
    fn test_should_fail() {
        fn f() { fail!(); }
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: false,
                should_fail: true
            },
            testfn: DynTestFn(proc() f()),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv();
        assert!(res == TrOk);
    }

    #[test]
    fn test_should_fail_but_succeeds() {
        fn f() { }
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: false,
                should_fail: true
            },
            testfn: DynTestFn(proc() f()),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv();
        assert!(res == TrFailed);
    }

    #[test]
    fn first_free_arg_should_be_a_filter() {
        let args = vec!("progname".to_string(), "some_regex_filter".to_string());
        let opts = match parse_opts(args.as_slice()) {
            Some(Ok(o)) => o,
            _ => fail!("Malformed arg in first_free_arg_should_be_a_filter")
        };
        assert!(opts.filter.expect("should've found filter").is_match("some_regex_filter"))
    }

    #[test]
    fn parse_ignored_flag() {
        let args = vec!("progname".to_string(),
                        "filter".to_string(),
                        "--ignored".to_string());
        let opts = match parse_opts(args.as_slice()) {
            Some(Ok(o)) => o,
            _ => fail!("Malformed arg in parse_ignored_flag")
        };
        assert!((opts.run_ignored));
    }

    #[test]
    pub fn filter_for_ignored_option() {
        // When we run ignored tests the test filter should filter out all the
        // unignored tests and flip the ignore flag on the rest to false

        let mut opts = TestOpts::new();
        opts.run_tests = true;
        opts.run_ignored = true;

        let tests = vec!(
            TestDescAndFn {
                desc: TestDesc {
                    name: StaticTestName("1"),
                    ignore: true,
                    should_fail: false,
                },
                testfn: DynTestFn(proc() {}),
            },
            TestDescAndFn {
                desc: TestDesc {
                    name: StaticTestName("2"),
                    ignore: false,
                    should_fail: false,
                },
                testfn: DynTestFn(proc() {}),
            });
        let filtered = filter_tests(&opts, tests);

        assert_eq!(filtered.len(), 1);
        assert_eq!(filtered.get(0).desc.name.to_str(),
                   "1".to_string());
        assert!(filtered.get(0).desc.ignore == false);
    }

    #[test]
    pub fn sort_tests() {
        let mut opts = TestOpts::new();
        opts.run_tests = true;

        let names =
            vec!("sha1::test".to_string(),
                 "int::test_to_str".to_string(),
                 "int::test_pow".to_string(),
                 "test::do_not_run_ignored_tests".to_string(),
                 "test::ignored_tests_result_in_ignored".to_string(),
                 "test::first_free_arg_should_be_a_filter".to_string(),
                 "test::parse_ignored_flag".to_string(),
                 "test::filter_for_ignored_option".to_string(),
                 "test::sort_tests".to_string());
        let tests =
        {
            fn testfn() { }
            let mut tests = Vec::new();
            for name in names.iter() {
                let test = TestDescAndFn {
                    desc: TestDesc {
                        name: DynTestName((*name).clone()),
                        ignore: false,
                        should_fail: false
                    },
                    testfn: DynTestFn(testfn),
                };
                tests.push(test);
            }
            tests
        };
        let filtered = filter_tests(&opts, tests);

        let expected =
            vec!("int::test_pow".to_string(),
                 "int::test_to_str".to_string(),
                 "sha1::test".to_string(),
                 "test::do_not_run_ignored_tests".to_string(),
                 "test::filter_for_ignored_option".to_string(),
                 "test::first_free_arg_should_be_a_filter".to_string(),
                 "test::ignored_tests_result_in_ignored".to_string(),
                 "test::parse_ignored_flag".to_string(),
                 "test::sort_tests".to_string());

        for (a, b) in expected.iter().zip(filtered.iter()) {
            assert!(*a == b.desc.name.to_str());
        }
    }

    #[test]
    pub fn filter_tests_regex() {
        let mut opts = TestOpts::new();
        opts.filter = Some(::regex::Regex::new("a.*b.+c").unwrap());

        let mut names = ["yes::abXc", "yes::aXXXbXXXXc",
                         "no::XYZ", "no::abc"];
        names.sort();

        fn test_fn() {}
        let tests = names.iter().map(|name| {
            TestDescAndFn {
                desc: TestDesc {
                    name: DynTestName(name.to_string()),
                    ignore: false,
                    should_fail: false
                },
                testfn: DynTestFn(test_fn)
            }
        }).collect();
        let filtered = filter_tests(&opts, tests);

        let expected: Vec<&str> =
            names.iter().map(|&s| s).filter(|name| name.starts_with("yes")).collect();

        assert_eq!(filtered.len(), expected.len());
        for (test, expected_name) in filtered.iter().zip(expected.iter()) {
            assert_eq!(test.desc.name.as_slice(), *expected_name);
        }
    }

    #[test]
    pub fn test_metricmap_compare() {
        let mut m1 = MetricMap::new();
        let mut m2 = MetricMap::new();
        m1.insert_metric("in-both-noise", 1000.0, 200.0);
        m2.insert_metric("in-both-noise", 1100.0, 200.0);

        m1.insert_metric("in-first-noise", 1000.0, 2.0);
        m2.insert_metric("in-second-noise", 1000.0, 2.0);

        m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
        m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);

        m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
        m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);

        m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
        m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);

        m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
        m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);

        let diff1 = m2.compare_to_old(&m1, None);

        assert_eq!(*(diff1.find(&"in-both-noise".to_string()).unwrap()), LikelyNoise);
        assert_eq!(*(diff1.find(&"in-first-noise".to_string()).unwrap()), MetricRemoved);
        assert_eq!(*(diff1.find(&"in-second-noise".to_string()).unwrap()), MetricAdded);
        assert_eq!(*(diff1.find(&"in-both-want-downwards-but-regressed".to_string()).unwrap()),
                   Regression(100.0));
        assert_eq!(*(diff1.find(&"in-both-want-downwards-and-improved".to_string()).unwrap()),
                   Improvement(50.0));
        assert_eq!(*(diff1.find(&"in-both-want-upwards-but-regressed".to_string()).unwrap()),
                   Regression(50.0));
        assert_eq!(*(diff1.find(&"in-both-want-upwards-and-improved".to_string()).unwrap()),
                   Improvement(100.0));
        assert_eq!(diff1.len(), 7);

        let diff2 = m2.compare_to_old(&m1, Some(200.0));

        assert_eq!(*(diff2.find(&"in-both-noise".to_string()).unwrap()), LikelyNoise);
        assert_eq!(*(diff2.find(&"in-first-noise".to_string()).unwrap()), MetricRemoved);
        assert_eq!(*(diff2.find(&"in-second-noise".to_string()).unwrap()), MetricAdded);
        assert_eq!(*(diff2.find(&"in-both-want-downwards-but-regressed".to_string()).unwrap()),
                   LikelyNoise);
        assert_eq!(*(diff2.find(&"in-both-want-downwards-and-improved".to_string()).unwrap()),
                   LikelyNoise);
        assert_eq!(*(diff2.find(&"in-both-want-upwards-but-regressed".to_string()).unwrap()),
                   LikelyNoise);
        assert_eq!(*(diff2.find(&"in-both-want-upwards-and-improved".to_string()).unwrap()),
                   LikelyNoise);
        assert_eq!(diff2.len(), 7);
    }

    #[test]
    pub fn ratchet_test() {

        let dpth = TempDir::new("test-ratchet").expect("missing test for ratchet");
        let pth = dpth.path().join("ratchet.json");

        let mut m1 = MetricMap::new();
        m1.insert_metric("runtime", 1000.0, 2.0);
        m1.insert_metric("throughput", 50.0, 2.0);

        let mut m2 = MetricMap::new();
        m2.insert_metric("runtime", 1100.0, 2.0);
        m2.insert_metric("throughput", 50.0, 2.0);

        m1.save(&pth).unwrap();

        // Ask for a ratchet that should fail to advance.
        let (diff1, ok1) = m2.ratchet(&pth, None);
        assert_eq!(ok1, false);
        assert_eq!(diff1.len(), 2);
        assert_eq!(*(diff1.find(&"runtime".to_string()).unwrap()), Regression(10.0));
        assert_eq!(*(diff1.find(&"throughput".to_string()).unwrap()), LikelyNoise);

        // Check that it was not rewritten.
        let m3 = MetricMap::load(&pth);
        let MetricMap(m3) = m3;
        assert_eq!(m3.len(), 2);
        assert_eq!(*(m3.find(&"runtime".to_string()).unwrap()), Metric::new(1000.0, 2.0));
        assert_eq!(*(m3.find(&"throughput".to_string()).unwrap()), Metric::new(50.0, 2.0));

        // Ask for a ratchet with an explicit noise-percentage override,
        // that should advance.
        let (diff2, ok2) = m2.ratchet(&pth, Some(10.0));
        assert_eq!(ok2, true);
        assert_eq!(diff2.len(), 2);
        assert_eq!(*(diff2.find(&"runtime".to_string()).unwrap()), LikelyNoise);
        assert_eq!(*(diff2.find(&"throughput".to_string()).unwrap()), LikelyNoise);

        // Check that it was rewritten.
        let m4 = MetricMap::load(&pth);
        let MetricMap(m4) = m4;
        assert_eq!(m4.len(), 2);
        assert_eq!(*(m4.find(&"runtime".to_string()).unwrap()), Metric::new(1100.0, 2.0));
        assert_eq!(*(m4.find(&"throughput".to_string()).unwrap()), Metric::new(50.0, 2.0));
    }
}