// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! Support code for rustc's built-in unit-test and micro-benchmarking
//! framework.
//!
//! Almost all user code will only be interested in `Bencher` and
//! `black_box`. All other interactions (such as writing tests and
//! benchmarks themselves) should be done via the `#[test]` and
//! `#[bench]` attributes.
//!
//! See the [Testing Guide](../guide-testing.html) for more details.
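//!
//! A minimal sketch of the intended usage (the `add` function here is
//! hypothetical, used only for illustration):
//!
//! ```ignore
//! #[test]
//! fn test_add() {
//!     assert_eq!(add(2u, 2u), 4u);
//! }
//!
//! #[bench]
//! fn bench_add(b: &mut Bencher) {
//!     b.iter(|| black_box(add(2u, 2u)));
//! }
//! ```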

// Currently, not much of this is meant for users. It is intended to
// support the simplest interface possible for representing and
// running tests while providing a base that other test frameworks may
// build on.

#![crate_id = "test#0.11.0-pre"]
#![comment = "Rust internal test library only used by rustc"]
#![license = "MIT/ASL2"]
#![crate_type = "rlib"]
#![crate_type = "dylib"]
#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
       html_favicon_url = "http://www.rust-lang.org/favicon.ico",
       html_root_url = "http://doc.rust-lang.org/")]

#![feature(asm, macro_rules, phase)]

extern crate getopts;
extern crate regex;
extern crate serialize;
extern crate term;
extern crate time;

use std::collections::TreeMap;

use time::precise_time_ns;
use getopts::{OptGroup, optflag, optopt};
use regex::Regex;
use serialize::{json, Decodable};
use serialize::json::{Json, ToJson};
use term::color::{Color, RED, YELLOW, GREEN, CYAN};

use std::cmp;
use std::f64;
use std::fmt::Show;
use std::fmt;
use std::from_str::FromStr;
use std::io::stdio::StdWriter;
use std::io::{File, ChanReader, ChanWriter};
use std::io;
use std::os;
use std::str;
use std::string::String;
use std::task::TaskBuilder;

// to be used by rustc to compile tests in libtest
pub mod test {
    pub use {Bencher, TestName, TestResult, TestDesc,
             TestDescAndFn, TestOpts, TrFailed, TrIgnored, TrOk,
             Metric, MetricMap, MetricAdded, MetricRemoved,
             MetricChange, Improvement, Regression, LikelyNoise,
             StaticTestFn, StaticTestName, DynTestName, DynTestFn,
             run_test, test_main, test_main_static, filter_tests,
             parse_opts, StaticBenchFn};
}

pub mod stats;
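
// For context (a sketch, not the exact expansion): a crate compiled
// with `rustc --test` has its `main` replaced by a shim that collects
// every `#[test]` and `#[bench]` item into a static slice and calls
// into this crate, roughly:
//
//     fn main() {
//         test::test_main_static(::std::os::args().as_slice(), TESTS)
//     }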

// The name of a test. By convention this follows the rules for Rust
// paths; i.e. it should be a series of identifiers separated by double
// colons. This way if some test runner wants to arrange the tests
// hierarchically it may.
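// For example: "int::test_pow" or "test::sort_tests".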
#[deriving(Clone, PartialEq, Eq, Hash)]
pub enum TestName {
    StaticTestName(&'static str),
    DynTestName(String)
}
impl TestName {
    fn as_slice<'a>(&'a self) -> &'a str {
        match *self {
            StaticTestName(s) => s,
            DynTestName(ref s) => s.as_slice()
        }
    }
}

impl Show for TestName {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        self.as_slice().fmt(f)
    }
}

#[deriving(Clone)]
enum NamePadding { PadNone, PadOnLeft, PadOnRight }

impl TestDesc {
    fn padded_name(&self, column_count: uint, align: NamePadding) -> String {
        use std::num::Saturating;
        let mut name = String::from_str(self.name.as_slice());
        let fill = column_count.saturating_sub(name.len());
        let mut pad = " ".repeat(fill);
        match align {
            PadNone => name,
            PadOnLeft => {
                pad.push_str(name.as_slice());
                pad
            }
            PadOnRight => {
                name.push_str(pad.as_slice());
                name
            }
        }
    }
}

/// Represents a benchmark function.
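///
/// A minimal sketch of an implementor (the `SizedBench` type is
/// hypothetical, shown only to illustrate how a boxed benchmark can
/// carry state into `run`):
///
/// ```ignore
/// struct SizedBench { n: uint }
/// impl TDynBenchFn for SizedBench {
///     fn run(&self, harness: &mut Bencher) {
///         let n = self.n;
///         harness.iter(|| Vec::from_elem(n, 0u8));
///     }
/// }
/// ```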
pub trait TDynBenchFn {
    fn run(&self, harness: &mut Bencher);
}

// A function that runs a test. If the function returns successfully,
// the test succeeds; if the function fails then the test fails. We
// may need to come up with a more clever definition of test in order
// to support isolation of tests into tasks.
pub enum TestFn {
    StaticTestFn(fn()),
    StaticBenchFn(fn(&mut Bencher)),
    StaticMetricFn(proc(&mut MetricMap)),
    DynTestFn(proc():Send),
    DynMetricFn(proc(&mut MetricMap)),
    DynBenchFn(Box<TDynBenchFn>)
}

impl TestFn {
    fn padding(&self) -> NamePadding {
        match self {
            &StaticTestFn(..) => PadNone,
            &StaticBenchFn(..) => PadOnRight,
            &StaticMetricFn(..) => PadOnRight,
            &DynTestFn(..) => PadNone,
            &DynMetricFn(..) => PadOnRight,
            &DynBenchFn(..) => PadOnRight,
        }
    }
}

impl fmt::Show for TestFn {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write(match *self {
            StaticTestFn(..) => "StaticTestFn(..)",
            StaticBenchFn(..) => "StaticBenchFn(..)",
            StaticMetricFn(..) => "StaticMetricFn(..)",
            DynTestFn(..) => "DynTestFn(..)",
            DynMetricFn(..) => "DynMetricFn(..)",
            DynBenchFn(..) => "DynBenchFn(..)"
        }.as_bytes())
    }
}

/// Manager of the benchmarking runs.
///
/// This is fed into functions marked with `#[bench]` to allow for
/// set-up & tear-down before running a piece of code repeatedly via a
/// call to `iter`.
pub struct Bencher {
    iterations: u64,
    ns_start: u64,
    ns_end: u64,
    pub bytes: u64,
}
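
// A sketch of typical use (the `process` function is hypothetical).
// Setting `bytes` to the number of bytes consumed per iteration makes
// the harness report throughput in MB/s alongside ns/iter:
//
//     #[bench]
//     fn bench_process(b: &mut Bencher) {
//         let data = Vec::from_elem(1024u, 0u8);
//         b.bytes = data.len() as u64;
//         b.iter(|| process(data.as_slice()));
//     }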

// The definition of a single test. A test runner will run a list of
// these.
#[deriving(Clone, Show, PartialEq, Eq, Hash)]
pub struct TestDesc {
    pub name: TestName,
    pub ignore: bool,
    pub should_fail: bool,
}

#[deriving(Show)]
pub struct TestDescAndFn {
    pub desc: TestDesc,
    pub testfn: TestFn,
}

#[deriving(Clone, Encodable, Decodable, PartialEq, Show)]
pub struct Metric {
    value: f64,
    noise: f64
}

impl Metric {
    pub fn new(value: f64, noise: f64) -> Metric {
        Metric {value: value, noise: noise}
    }
}

#[deriving(PartialEq)]
pub struct MetricMap(TreeMap<String,Metric>);

impl Clone for MetricMap {
    fn clone(&self) -> MetricMap {
        let MetricMap(ref map) = *self;
        MetricMap(map.clone())
    }
}

/// Analysis of a single change in metric
#[deriving(PartialEq, Show)]
pub enum MetricChange {
    LikelyNoise,
    MetricAdded,
    MetricRemoved,
    Improvement(f64),
    Regression(f64)
}

pub type MetricDiff = TreeMap<String,MetricChange>;

// The default console test runner. It accepts the command line
// arguments and a vector of test_descs.
pub fn test_main(args: &[String], tests: Vec<TestDescAndFn> ) {
    let opts =
        match parse_opts(args) {
            Some(Ok(o)) => o,
            Some(Err(msg)) => fail!("{}", msg),
            None => return
        };
    match run_tests_console(&opts, tests) {
        Ok(true) => {}
        Ok(false) => fail!("Some tests failed"),
        Err(e) => fail!("io error when running tests: {}", e),
    }
}

// A variant optimized for invocation with a static test vector.
// This will fail (intentionally) when fed any dynamic tests, because
// it is copying the static values out into a dynamic vector and cannot
// copy dynamic values. It is doing this because from this point on
// a Vec<TestDescAndFn> is used in order to effect ownership-transfer
// semantics into parallel test runners, which in turn requires a Vec<>
// rather than a &[].
pub fn test_main_static(args: &[String], tests: &[TestDescAndFn]) {
    let owned_tests = tests.iter().map(|t| {
        match t.testfn {
            StaticTestFn(f) => TestDescAndFn { testfn: StaticTestFn(f), desc: t.desc.clone() },
            StaticBenchFn(f) => TestDescAndFn { testfn: StaticBenchFn(f), desc: t.desc.clone() },
            _ => fail!("non-static tests passed to test::test_main_static")
        }
    }).collect();
    test_main(args, owned_tests)
}

pub enum ColorConfig {
    AutoColor,
    AlwaysColor,
    NeverColor,
}

pub struct TestOpts {
    pub filter: Option<Regex>,
    pub run_ignored: bool,
    pub run_tests: bool,
    pub run_benchmarks: bool,
    pub ratchet_metrics: Option<Path>,
    pub ratchet_noise_percent: Option<f64>,
    pub save_metrics: Option<Path>,
    pub test_shard: Option<(uint,uint)>,
    pub logfile: Option<Path>,
    pub nocapture: bool,
    pub color: ColorConfig,
}

#[cfg(test)]
impl TestOpts {
    fn new() -> TestOpts {
        TestOpts {
            filter: None,
            run_ignored: false,
            run_tests: false,
            run_benchmarks: false,
            ratchet_metrics: None,
            ratchet_noise_percent: None,
            save_metrics: None,
            test_shard: None,
            logfile: None,
            nocapture: false,
            color: AutoColor,
        }
    }
}

/// Result of parsing the options.
pub type OptRes = Result<TestOpts, String>;

fn optgroups() -> Vec<getopts::OptGroup> {
    vec!(getopts::optflag("", "ignored", "Run ignored tests"),
         getopts::optflag("", "test", "Run tests and not benchmarks"),
         getopts::optflag("", "bench", "Run benchmarks instead of tests"),
         getopts::optflag("h", "help", "Display this message (longer with --help)"),
         getopts::optopt("", "save-metrics", "Location to save bench metrics",
                         "PATH"),
         getopts::optopt("", "ratchet-metrics",
                         "Location to load and save metrics from. The metrics \
                          loaded can cause benchmarks to fail if they run too \
                          slowly", "PATH"),
         getopts::optopt("", "ratchet-noise-percent",
                         "Tests within N% of the recorded metrics will be \
                          considered as passing", "PERCENTAGE"),
         getopts::optopt("", "logfile", "Write logs to the specified file instead \
                          of stdout", "PATH"),
         getopts::optopt("", "test-shard", "Run shard A, of B shards, worth of the testsuite",
                         "A.B"),
         getopts::optflag("", "nocapture", "Don't capture stdout/stderr of each \
                          task, allow printing directly"),
         getopts::optopt("", "color", "Configure coloring of output:
            auto   = colorize if stdout is a tty and tests are run serially (default);
            always = always colorize output;
            never  = never colorize output;", "auto|always|never"))
}
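
// Together these options yield invocations such as the following
// (illustrative; `mytests` stands for any binary built with --test):
//
//     mytests 'str::' --ignored
//     mytests --bench --save-metrics=metrics.json --logfile=log.txt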

fn usage(binary: &str) {
    let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
    println!(r"{usage}

The FILTER regex is tested against the name of all tests to run, and
only those tests that match are run.

By default, all tests are run in parallel. This can be altered with the
RUST_TEST_TASKS environment variable when running tests (set it to 1).

All tests have their standard output and standard error captured by default.
This can be overridden with the --nocapture flag or the RUST_TEST_NOCAPTURE=1
environment variable. Logging is not captured by default.

Test Attributes:

    #[test]        - Indicates a function is a test to be run. This function
                     takes no arguments.
    #[bench]       - Indicates a function is a benchmark to be run. This
                     function takes one argument (test::Bencher).
    #[should_fail] - This function (also labeled with #[test]) will only pass if
                     the code causes a failure (an assertion failure or fail!)
    #[ignore]      - When applied to a function which is already attributed as a
                     test, then the test runner will ignore these tests during
                     normal test runs. Running with --ignored will run these
                     tests. This may also be written as #[ignore(cfg(...))] to
                     ignore the test on certain configurations.",
             usage = getopts::usage(message.as_slice(),
                                    optgroups().as_slice()));
}

// Parses command line arguments into test options
pub fn parse_opts(args: &[String]) -> Option<OptRes> {
    let args_ = args.tail();
    let matches =
        match getopts::getopts(args_.as_slice(), optgroups().as_slice()) {
            Ok(m) => m,
            Err(f) => return Some(Err(f.to_str()))
        };

    if matches.opt_present("h") { usage(args[0].as_slice()); return None; }

    let filter = if matches.free.len() > 0 {
        let s = matches.free.get(0).as_slice();
        match Regex::new(s) {
            Ok(re) => Some(re),
            Err(e) => return Some(Err(format!("could not parse /{}/: {}", s, e)))
        }
    } else {
        None
    };

    let run_ignored = matches.opt_present("ignored");

    let logfile = matches.opt_str("logfile");
    let logfile = logfile.map(|s| Path::new(s));

    let run_benchmarks = matches.opt_present("bench");
    let run_tests = ! run_benchmarks ||
        matches.opt_present("test");

    let ratchet_metrics = matches.opt_str("ratchet-metrics");
    let ratchet_metrics = ratchet_metrics.map(|s| Path::new(s));

    let ratchet_noise_percent = matches.opt_str("ratchet-noise-percent");
    let ratchet_noise_percent =
        ratchet_noise_percent.map(|s| from_str::<f64>(s.as_slice()).unwrap());

    let save_metrics = matches.opt_str("save-metrics");
    let save_metrics = save_metrics.map(|s| Path::new(s));

    let test_shard = matches.opt_str("test-shard");
    let test_shard = opt_shard(test_shard.map(|x| x.to_string()));

    let mut nocapture = matches.opt_present("nocapture");
    if !nocapture {
        nocapture = os::getenv("RUST_TEST_NOCAPTURE").is_some();
    }

    let color = match matches.opt_str("color").as_ref().map(|s| s.as_slice()) {
        Some("auto") | None => AutoColor,
        Some("always") => AlwaysColor,
        Some("never") => NeverColor,

        Some(v) => return Some(Err(format!("argument for --color must be \
                                            auto, always, or never (was {})",
                                           v)))
    };

    let test_opts = TestOpts {
        filter: filter,
        run_ignored: run_ignored,
        run_tests: run_tests,
        run_benchmarks: run_benchmarks,
        ratchet_metrics: ratchet_metrics,
        ratchet_noise_percent: ratchet_noise_percent,
        save_metrics: save_metrics,
        test_shard: test_shard,
        logfile: logfile,
        nocapture: nocapture,
        color: color,
    };

    Some(Ok(test_opts))
}
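
// A behavioral sketch of opt_shard below: opt_shard(Some("2.4".to_string()))
// yields Some((2, 4)); inputs that are not exactly two '.'-separated
// integers, such as "2", "2.4.6" or "x.4", yield None; and the range
// check rejects shards like "0.4" or "5.4" with fail!.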
pub fn opt_shard(maybestr: Option<String>) -> Option<(uint,uint)> {
    match maybestr {
        None => None,
        Some(s) => {
            let mut it = s.as_slice().split('.');
            match (it.next().and_then(from_str::<uint>), it.next().and_then(from_str::<uint>),
                   it.next()) {
                (Some(a), Some(b), None) => {
                    if a <= 0 || a > b {
                        fail!("tried to run shard {a}.{b}, but {a} is out of bounds \
                               (should be between 1 and {b})", a=a, b=b)
                    }
                    Some((a, b))
                }
                _ => None,
            }
        }
    }
}

#[deriving(Clone, PartialEq)]
pub struct BenchSamples {
    ns_iter_summ: stats::Summary<f64>,
    mb_s: uint,
}

#[deriving(Clone, PartialEq)]
pub enum TestResult {
    TrOk,
    TrFailed,
    TrIgnored,
    TrMetrics(MetricMap),
    TrBench(BenchSamples),
}

enum OutputLocation<T> {
    Pretty(Box<term::Terminal<Box<Writer + Send>> + Send>),
    Raw(T),
}

struct ConsoleTestState<T> {
    log_out: Option<File>,
    out: OutputLocation<T>,
    use_color: bool,
    total: uint,
    passed: uint,
    failed: uint,
    ignored: uint,
    measured: uint,
    metrics: MetricMap,
    failures: Vec<(TestDesc, Vec<u8> )> ,
    max_name_len: uint, // number of columns to fill when aligning names
}

impl<T: Writer> ConsoleTestState<T> {
    pub fn new(opts: &TestOpts,
               _: Option<T>) -> io::IoResult<ConsoleTestState<StdWriter>> {
        let log_out = match opts.logfile {
            Some(ref path) => Some(try!(File::create(path))),
            None => None
        };
        let out = match term::stdout() {
            None => Raw(io::stdio::stdout_raw()),
            Some(t) => Pretty(t)
        };
        Ok(ConsoleTestState {
            out: out,
            log_out: log_out,
            use_color: use_color(opts),
            total: 0u,
            passed: 0u,
            failed: 0u,
            ignored: 0u,
            measured: 0u,
            metrics: MetricMap::new(),
            failures: Vec::new(),
            max_name_len: 0u,
        })
    }

    pub fn write_ok(&mut self) -> io::IoResult<()> {
        self.write_pretty("ok", term::color::GREEN)
    }

    pub fn write_failed(&mut self) -> io::IoResult<()> {
        self.write_pretty("FAILED", term::color::RED)
    }

    pub fn write_ignored(&mut self) -> io::IoResult<()> {
        self.write_pretty("ignored", term::color::YELLOW)
    }

    pub fn write_metric(&mut self) -> io::IoResult<()> {
        self.write_pretty("metric", term::color::CYAN)
    }

    pub fn write_bench(&mut self) -> io::IoResult<()> {
        self.write_pretty("bench", term::color::CYAN)
    }

    pub fn write_added(&mut self) -> io::IoResult<()> {
        self.write_pretty("added", term::color::GREEN)
    }

    pub fn write_improved(&mut self) -> io::IoResult<()> {
        self.write_pretty("improved", term::color::GREEN)
    }

    pub fn write_removed(&mut self) -> io::IoResult<()> {
        self.write_pretty("removed", term::color::YELLOW)
    }

    pub fn write_regressed(&mut self) -> io::IoResult<()> {
        self.write_pretty("regressed", term::color::RED)
    }

    pub fn write_pretty(&mut self,
                        word: &str,
                        color: term::color::Color) -> io::IoResult<()> {
        match self.out {
            Pretty(ref mut term) => {
                if self.use_color {
                    try!(term.fg(color));
                }
                try!(term.write(word.as_bytes()));
                if self.use_color {
                    try!(term.reset());
                }
                Ok(())
            }
            Raw(ref mut stdout) => stdout.write(word.as_bytes())
        }
    }

    pub fn write_plain(&mut self, s: &str) -> io::IoResult<()> {
        match self.out {
            Pretty(ref mut term) => term.write(s.as_bytes()),
            Raw(ref mut stdout) => stdout.write(s.as_bytes())
        }
    }

    pub fn write_run_start(&mut self, len: uint) -> io::IoResult<()> {
        self.total = len;
        let noun = if len != 1 { "tests" } else { "test" };
        self.write_plain(format!("\nrunning {} {}\n", len, noun).as_slice())
    }

    pub fn write_test_start(&mut self, test: &TestDesc,
                            align: NamePadding) -> io::IoResult<()> {
        let name = test.padded_name(self.max_name_len, align);
        self.write_plain(format!("test {} ... ", name).as_slice())
    }

    pub fn write_result(&mut self, result: &TestResult) -> io::IoResult<()> {
        try!(match *result {
            TrOk => self.write_ok(),
            TrFailed => self.write_failed(),
            TrIgnored => self.write_ignored(),
            TrMetrics(ref mm) => {
                try!(self.write_metric());
                self.write_plain(format!(": {}", fmt_metrics(mm)).as_slice())
            }
            TrBench(ref bs) => {
                try!(self.write_bench());
                self.write_plain(format!(": {}",
                                         fmt_bench_samples(bs)).as_slice())
            }
        });
        self.write_plain("\n")
    }

    pub fn write_log(&mut self, test: &TestDesc,
                     result: &TestResult) -> io::IoResult<()> {
        match self.log_out {
            None => Ok(()),
            Some(ref mut o) => {
                let s = format!("{} {}\n", match *result {
                        TrOk => "ok".to_string(),
                        TrFailed => "failed".to_string(),
                        TrIgnored => "ignored".to_string(),
                        TrMetrics(ref mm) => fmt_metrics(mm),
                        TrBench(ref bs) => fmt_bench_samples(bs)
                    }, test.name.as_slice());
                o.write(s.as_bytes())
            }
        }
    }

    pub fn write_failures(&mut self) -> io::IoResult<()> {
        try!(self.write_plain("\nfailures:\n"));
        let mut failures = Vec::new();
        let mut fail_out = String::new();
        for &(ref f, ref stdout) in self.failures.iter() {
            failures.push(f.name.to_str());
            if stdout.len() > 0 {
                fail_out.push_str(format!("---- {} stdout ----\n\t",
                                          f.name.as_slice()).as_slice());
                let output = str::from_utf8_lossy(stdout.as_slice());
                fail_out.push_str(output.as_slice()
                                        .replace("\n", "\n\t")
                                        .as_slice());
                fail_out.push_str("\n");
            }
        }
        if fail_out.len() > 0 {
            try!(self.write_plain("\n"));
            try!(self.write_plain(fail_out.as_slice()));
        }

        try!(self.write_plain("\nfailures:\n"));
        failures.as_mut_slice().sort();
        for name in failures.iter() {
            try!(self.write_plain(format!("    {}\n",
                                          name.as_slice()).as_slice()));
        }
        Ok(())
    }

    pub fn write_metric_diff(&mut self, diff: &MetricDiff) -> io::IoResult<()> {
        let mut noise = 0;
        let mut improved = 0;
        let mut regressed = 0;
        let mut added = 0;
        let mut removed = 0;

        for (k, v) in diff.iter() {
            match *v {
                LikelyNoise => noise += 1,
                MetricAdded => {
                    added += 1;
                    try!(self.write_added());
                    try!(self.write_plain(format!(": {}\n", *k).as_slice()));
                }
                MetricRemoved => {
                    removed += 1;
                    try!(self.write_removed());
                    try!(self.write_plain(format!(": {}\n", *k).as_slice()));
                }
                Improvement(pct) => {
                    improved += 1;
                    try!(self.write_plain(format!(": {}", *k).as_slice()));
                    try!(self.write_improved());
                    try!(self.write_plain(format!(" by {:.2f}%\n",
                                                  pct as f64).as_slice()));
                }
                Regression(pct) => {
                    regressed += 1;
                    try!(self.write_plain(format!(": {}", *k).as_slice()));
                    try!(self.write_regressed());
                    try!(self.write_plain(format!(" by {:.2f}%\n",
                                                  pct as f64).as_slice()));
                }
            }
        }
        try!(self.write_plain(format!("result of ratchet: {} metrics added, \
                                       {} removed, {} improved, {} regressed, \
                                       {} noise\n",
                                      added, removed, improved, regressed,
                                      noise).as_slice()));
        if regressed == 0 {
            try!(self.write_plain("updated ratchet file\n"));
        } else {
            try!(self.write_plain("left ratchet file untouched\n"));
        }
        Ok(())
    }

    pub fn write_run_finish(&mut self,
                            ratchet_metrics: &Option<Path>,
                            ratchet_pct: Option<f64>) -> io::IoResult<bool> {
        assert!(self.passed + self.failed + self.ignored + self.measured == self.total);

        let ratchet_success = match *ratchet_metrics {
            None => true,
            Some(ref pth) => {
                try!(self.write_plain(format!("\nusing metrics ratchet: {}\n",
                                              pth.display()).as_slice()));
                match ratchet_pct {
                    None => (),
                    Some(pct) =>
                        try!(self.write_plain(format!("with noise-tolerance \
                                                       forced to: {}%\n",
                                                      pct).as_slice()))
                }
                let (diff, ok) = self.metrics.ratchet(pth, ratchet_pct);
                try!(self.write_metric_diff(&diff));
                ok
            }
        };

        let test_success = self.failed == 0u;
        if !test_success {
            try!(self.write_failures());
        }

        let success = ratchet_success && test_success;

        try!(self.write_plain("\ntest result: "));
        if success {
            // There's no parallelism at this point so it's safe to use color
            try!(self.write_ok());
        } else {
            try!(self.write_failed());
        }
        let s = format!(". {} passed; {} failed; {} ignored; {} measured\n\n",
                        self.passed, self.failed, self.ignored, self.measured);
        try!(self.write_plain(s.as_slice()));
        return Ok(success);
    }
}

pub fn fmt_metrics(mm: &MetricMap) -> String {
    let MetricMap(ref mm) = *mm;
    let v : Vec<String> = mm.iter()
        .map(|(k,v)| format!("{}: {} (+/- {})", *k,
                             v.value as f64, v.noise as f64))
        .collect();
    v.connect(", ").to_string()
}
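
// fmt_bench_samples renders lines such as (illustrative):
//
//       123 ns/iter (+/- 7) = 2048 MB/s
//
// where the MB/s tail is omitted when `mb_s` is zero.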
pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
    if bs.mb_s != 0 {
        format!("{:>9} ns/iter (+/- {}) = {} MB/s",
                bs.ns_iter_summ.median as uint,
                (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint,
                bs.mb_s)
    } else {
        format!("{:>9} ns/iter (+/- {})",
                bs.ns_iter_summ.median as uint,
                (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint)
    }
}

// A simple console test runner
pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn> ) -> io::IoResult<bool> {

    fn callback<T: Writer>(event: &TestEvent, st: &mut ConsoleTestState<T>) -> io::IoResult<()> {
        match (*event).clone() {
            TeFiltered(ref filtered_tests) => st.write_run_start(filtered_tests.len()),
            TeWait(ref test, padding) => st.write_test_start(test, padding),
            TeResult(test, result, stdout) => {
                try!(st.write_log(&test, &result));
                try!(st.write_result(&result));
                match result {
                    TrOk => st.passed += 1,
                    TrIgnored => st.ignored += 1,
                    TrMetrics(mm) => {
                        let tname = test.name.as_slice();
                        let MetricMap(mm) = mm;
                        for (k,v) in mm.iter() {
                            st.metrics
                              .insert_metric(format!("{}.{}",
                                                     tname,
                                                     k).as_slice(),
                                             v.value,
                                             v.noise);
                        }
                        st.measured += 1
                    }
                    TrBench(bs) => {
                        st.metrics.insert_metric(test.name.as_slice(),
                                                 bs.ns_iter_summ.median,
                                                 bs.ns_iter_summ.max - bs.ns_iter_summ.min);
                        st.measured += 1
                    }
                    TrFailed => {
                        st.failed += 1;
                        st.failures.push((test, stdout));
                    }
                }
                Ok(())
            }
        }
    }

    let mut st = try!(ConsoleTestState::new(opts, None::<StdWriter>));
    fn len_if_padded(t: &TestDescAndFn) -> uint {
        match t.testfn.padding() {
            PadNone => 0u,
            PadOnLeft | PadOnRight => t.desc.name.as_slice().len(),
        }
    }
    match tests.iter().max_by(|t|len_if_padded(*t)) {
        Some(t) => {
            let n = t.desc.name.as_slice();
            st.max_name_len = n.len();
        },
        None => {}
    }
    try!(run_tests(opts, tests, |x| callback(&x, &mut st)));
    match opts.save_metrics {
        None => (),
        Some(ref pth) => {
            try!(st.metrics.save(pth));
            try!(st.write_plain(format!("\nmetrics saved to: {}",
                                        pth.display()).as_slice()));
        }
    }
    return st.write_run_finish(&opts.ratchet_metrics, opts.ratchet_noise_percent);
}

#[test]
fn should_sort_failures_before_printing_them() {
    use std::io::MemWriter;
    use std::str;

    let test_a = TestDesc {
        name: StaticTestName("a"),
        ignore: false,
        should_fail: false
    };

    let test_b = TestDesc {
        name: StaticTestName("b"),
        ignore: false,
        should_fail: false
    };

    let mut st = ConsoleTestState {
        log_out: None,
        out: Raw(MemWriter::new()),
        use_color: false,
        total: 0u,
        passed: 0u,
        failed: 0u,
        ignored: 0u,
        measured: 0u,
        max_name_len: 10u,
        metrics: MetricMap::new(),
        failures: vec!((test_b, Vec::new()), (test_a, Vec::new()))
    };

    st.write_failures().unwrap();
    let s = match st.out {
        Raw(ref m) => str::from_utf8_lossy(m.get_ref()),
        Pretty(_) => unreachable!()
    };

    let apos = s.as_slice().find_str("a").unwrap();
    let bpos = s.as_slice().find_str("b").unwrap();
    assert!(apos < bpos);
}

fn use_color(opts: &TestOpts) -> bool {
    match opts.color {
        AutoColor => get_concurrency() == 1 && io::stdout().get_ref().isatty(),
        AlwaysColor => true,
        NeverColor => false,
    }
}

#[deriving(Clone)]
enum TestEvent {
    TeFiltered(Vec<TestDesc> ),
    TeWait(TestDesc, NamePadding),
    TeResult(TestDesc, TestResult, Vec<u8> ),
}

pub type MonitorMsg = (TestDesc, TestResult, Vec<u8> );

fn run_tests(opts: &TestOpts,
             tests: Vec<TestDescAndFn> ,
             callback: |e: TestEvent| -> io::IoResult<()>) -> io::IoResult<()> {
    let filtered_tests = filter_tests(opts, tests);
    let filtered_descs = filtered_tests.iter()
                                       .map(|t| t.desc.clone())
                                       .collect();

    try!(callback(TeFiltered(filtered_descs)));

    let (filtered_tests, filtered_benchs_and_metrics) =
        filtered_tests.partition(|e| {
            match e.testfn {
                StaticTestFn(_) | DynTestFn(_) => true,
                _ => false
            }
        });

    // It's tempting to just spawn all the tests at once, but since we have
    // many tests that run in other processes we would be making a big mess.
    let concurrency = get_concurrency();

    let mut remaining = filtered_tests;
    remaining.reverse();
    let mut pending = 0;

    let (tx, rx) = channel::<MonitorMsg>();

    while pending > 0 || !remaining.is_empty() {
        while pending < concurrency && !remaining.is_empty() {
            let test = remaining.pop().unwrap();
            if concurrency == 1 {
                // We are doing one test at a time so we can print the name
                // of the test before we run it. Useful for debugging tests
                // that hang forever.
                try!(callback(TeWait(test.desc.clone(), test.testfn.padding())));
            }
            run_test(opts, !opts.run_tests, test, tx.clone());
            pending += 1;
        }

        let (desc, result, stdout) = rx.recv();
        if concurrency != 1 {
            try!(callback(TeWait(desc.clone(), PadNone)));
        }
        try!(callback(TeResult(desc, result, stdout)));
        pending -= 1;
    }

    // All benchmarks run at the end, in serial.
    // (this includes metric fns)
    for b in filtered_benchs_and_metrics.move_iter() {
        try!(callback(TeWait(b.desc.clone(), b.testfn.padding())));
        run_test(opts, !opts.run_benchmarks, b, tx.clone());
        let (test, result, stdout) = rx.recv();
        try!(callback(TeResult(test, result, stdout)));
    }
    Ok(())
}
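
// Note on the serial path (a sketch of the interplay above): running
// with RUST_TEST_TASKS=1 makes get_concurrency() return 1, which both
// prints each test name before it runs and, with --color=auto, enables
// colorized output via use_color().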
fn get_concurrency() -> uint {
    use std::rt;
    match os::getenv("RUST_TEST_TASKS") {
        Some(s) => {
            let opt_n: Option<uint> = FromStr::from_str(s.as_slice());
            match opt_n {
                Some(n) if n > 0 => n,
                _ => fail!("RUST_TEST_TASKS is `{}`, should be a positive integer.", s)
            }
        }
        None => {
            rt::default_sched_threads()
        }
    }
}

pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
    let mut filtered = tests;

    // Remove tests that don't match the test filter
    filtered = match opts.filter {
        None => filtered,
        Some(ref re) => {
            filtered.move_iter()
                .filter(|test| re.is_match(test.desc.name.as_slice())).collect()
        }
    };

    // Maybe pull out the ignored tests and unignore them
    filtered = if !opts.run_ignored {
        filtered
    } else {
        fn filter(test: TestDescAndFn) -> Option<TestDescAndFn> {
            if test.desc.ignore {
                let TestDescAndFn {desc, testfn} = test;
                Some(TestDescAndFn {
                    desc: TestDesc {ignore: false, ..desc},
                    testfn: testfn
                })
            } else {
                None
            }
        }
        filtered.move_iter().filter_map(|x| filter(x)).collect()
    };

    // Sort the tests alphabetically
    filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(&t2.desc.name.as_slice()));

    // Shard the remaining tests, if sharding requested.
    match opts.test_shard {
        None => filtered,
        Some((a,b)) => {
            filtered.move_iter().enumerate()
            // note: using a - 1 so that the valid shards, for example, are
            // 1.2 and 2.2 instead of 0.2 and 1.2
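            // e.g. --test-shard=2.3 keeps the tests at sorted indices
            // 1, 4, 7, ... (every third test, starting from the second)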
            .filter(|&(i,_)| i % b == (a - 1))
            .map(|(_,t)| t)
            .collect()
        }
    }
}

pub fn run_test(opts: &TestOpts,
                force_ignore: bool,
                test: TestDescAndFn,
                monitor_ch: Sender<MonitorMsg>) {

    let TestDescAndFn {desc, testfn} = test;

    if force_ignore || desc.ignore {
        monitor_ch.send((desc, TrIgnored, Vec::new()));
        return;
    }

    fn run_test_inner(desc: TestDesc,
                      monitor_ch: Sender<MonitorMsg>,
                      nocapture: bool,
                      testfn: proc():Send) {
        spawn(proc() {
            let (tx, rx) = channel();
            let mut reader = ChanReader::new(rx);
            let stdout = ChanWriter::new(tx.clone());
            let stderr = ChanWriter::new(tx);
            let mut task = TaskBuilder::new().named(match desc.name {
                DynTestName(ref name) => name.clone().to_string(),
                StaticTestName(name) => name.to_string(),
            });
            if nocapture {
                drop((stdout, stderr));
            } else {
                task.opts.stdout = Some(box stdout as Box<Writer + Send>);
                task.opts.stderr = Some(box stderr as Box<Writer + Send>);
            }
            let result_future = task.future_result();
            task.spawn(testfn);

            let stdout = reader.read_to_end().unwrap().move_iter().collect();
            let task_result = result_future.recv();
            let test_result = calc_result(&desc, task_result.is_ok());
            monitor_ch.send((desc.clone(), test_result, stdout));
        })
    }

    match testfn {
        DynBenchFn(bencher) => {
            let bs = ::bench::benchmark(|harness| bencher.run(harness));
            monitor_ch.send((desc, TrBench(bs), Vec::new()));
            return;
        }
        StaticBenchFn(benchfn) => {
            let bs = ::bench::benchmark(|harness| benchfn(harness));
            monitor_ch.send((desc, TrBench(bs), Vec::new()));
            return;
        }
        DynMetricFn(f) => {
            let mut mm = MetricMap::new();
            f(&mut mm);
            monitor_ch.send((desc, TrMetrics(mm), Vec::new()));
            return;
        }
        StaticMetricFn(f) => {
            let mut mm = MetricMap::new();
            f(&mut mm);
            monitor_ch.send((desc, TrMetrics(mm), Vec::new()));
            return;
        }
        DynTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture, f),
        StaticTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture,
                                          proc() f())
    }
}
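
// The outcome table implemented by calc_result below:
//
//     task_succeeded | should_fail | result
//     ---------------+-------------+----------
//     true           | false       | TrOk
//     true           | true        | TrFailed
//     false          | false       | TrFailed
//     false          | true        | TrOk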
fn calc_result(desc: &TestDesc, task_succeeded: bool) -> TestResult {
    if task_succeeded {
        if desc.should_fail { TrFailed }
        else { TrOk }
    } else {
        if desc.should_fail { TrOk }
        else { TrFailed }
    }
}

impl ToJson for Metric {
    fn to_json(&self) -> json::Json {
        let mut map = box TreeMap::new();
        map.insert("value".to_string(), json::Number(self.value));
        map.insert("noise".to_string(), json::Number(self.noise));
        json::Object(map)
    }
}

impl MetricMap {

    pub fn new() -> MetricMap {
        MetricMap(TreeMap::new())
    }

    /// Load a MetricMap from a file.
    ///
    /// # Failure
    ///
    /// This function will fail if the path does not exist or the path does not
    /// contain a valid metric map.
    pub fn load(p: &Path) -> MetricMap {
        assert!(p.exists());
        let mut f = File::open(p).unwrap();
        let value = json::from_reader(&mut f as &mut io::Reader).unwrap();
        let mut decoder = json::Decoder::new(value);
        MetricMap(match Decodable::decode(&mut decoder) {
            Ok(t) => t,
            Err(e) => fail!("failure decoding JSON: {}", e)
        })
    }

    /// Write a MetricMap to a file.
    pub fn save(&self, p: &Path) -> io::IoResult<()> {
        let mut file = try!(File::create(p));
        let MetricMap(ref map) = *self;

        // FIXME(pcwalton): Yuck.
        let mut new_map = TreeMap::new();
        for (ref key, ref value) in map.iter() {
            new_map.insert(key.to_string(), (*value).clone());
        }

        new_map.to_json().to_pretty_writer(&mut file)
    }

    /// Compare against another MetricMap. Optionally compare all
    /// measurements in the maps using the provided `noise_pct` as a
    /// percentage of each value to consider noise. If `None`, each
    /// measurement's noise threshold is independently chosen as the
    /// maximum of that measurement's recorded noise quantity in either
    /// map.
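    ///
    /// For example (values taken from the tests below): an old value of
    /// 1000.0 against a new value of 1100.0 with noise 200.0 is
    /// `LikelyNoise`, while 1000.0 against 2000.0 with noise 10.0 is
    /// `Regression(100.0)`.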
    pub fn compare_to_old(&self, old: &MetricMap,
                          noise_pct: Option<f64>) -> MetricDiff {
        let mut diff : MetricDiff = TreeMap::new();
        let MetricMap(ref selfmap) = *self;
        let MetricMap(ref old) = *old;
        for (k, vold) in old.iter() {
            let r = match selfmap.find(k) {
                None => MetricRemoved,
                Some(v) => {
                    let delta = v.value - vold.value;
                    let noise = match noise_pct {
                        None => vold.noise.abs().max(v.noise.abs()),
                        Some(pct) => vold.value * pct / 100.0
                    };
                    if delta.abs() <= noise {
                        LikelyNoise
                    } else {
                        let pct = delta.abs() / vold.value.max(f64::EPSILON) * 100.0;
                        if vold.noise < 0.0 {
                            // When 'noise' is negative, it means we want
                            // to see deltas that go up over time, and can
                            // only tolerate slight negative movement.
                            if delta < 0.0 {
                                Regression(pct)
                            } else {
                                Improvement(pct)
                            }
                        } else {
                            // When 'noise' is positive, it means we want
                            // to see deltas that go down over time, and
                            // can only tolerate slight positive movements.
                            if delta < 0.0 {
                                Improvement(pct)
                            } else {
                                Regression(pct)
                            }
                        }
                    }
                }
            };
            diff.insert((*k).clone(), r);
        }
        let MetricMap(ref map) = *self;
        for (k, _) in map.iter() {
            if !diff.contains_key(k) {
                diff.insert((*k).clone(), MetricAdded);
            }
        }
        diff
    }

    /// Insert a named `value` (+/- `noise`) metric into the map. The value
    /// must be non-negative. The `noise` indicates the uncertainty of the
    /// metric, which doubles as the "noise range" of acceptable
    /// pairwise-regressions on this named value, when comparing from one
    /// metric to the next using `compare_to_old`.
    ///
    /// If `noise` is positive, then it means this metric is of a value
    /// you want to see grow smaller, so a change larger than `noise` in the
    /// positive direction represents a regression.
    ///
    /// If `noise` is negative, then it means this metric is of a value
    /// you want to see grow larger, so a change larger than `noise` in the
    /// negative direction represents a regression.
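    ///
    /// For example (a sketch, mirroring the conventions above): a
    /// time-like metric you want to shrink, and a throughput-like
    /// metric you want to grow:
    ///
    /// ```ignore
    /// mm.insert_metric("runtime", 1000.0, 2.0);     // lower is better
    /// mm.insert_metric("throughput", 50.0, -2.0);   // higher is better
    /// ```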
    pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
        let m = Metric {
            value: value,
            noise: noise
        };
        let MetricMap(ref mut map) = *self;
        map.insert(name.to_string(), m);
    }

    /// Attempt to "ratchet" an external metric file. This involves loading
    /// metrics from a metric file (if it exists), comparing against
    /// the metrics in `self` using `compare_to_old`, and rewriting the
    /// file to contain the metrics in `self` if none of the
    /// `MetricChange`s are `Regression`. Returns the diff as well
    /// as a boolean indicating whether the ratchet succeeded.
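    ///
    /// See `ratchet_test` in the tests module below for a worked example.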
    pub fn ratchet(&self, p: &Path, pct: Option<f64>) -> (MetricDiff, bool) {
        let old = if p.exists() {
            MetricMap::load(p)
        } else {
            MetricMap::new()
        };

        let diff : MetricDiff = self.compare_to_old(&old, pct);
        let ok = diff.iter().all(|(_, v)| {
            match *v {
                Regression(_) => false,
                _ => true
            }
        });

        if ok {
            self.save(p).unwrap();
        }
        return (diff, ok)
    }
}

/// A function that is opaque to the optimizer, to allow benchmarks to
/// pretend to use outputs to assist in avoiding dead-code
/// elimination.
///
/// This function is a no-op, and does not even read from `dummy`.
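///
/// A minimal sketch of why this matters (the `pow` function is
/// hypothetical): without `black_box`, LLVM may see that the result is
/// unused and delete the computation being measured.
///
/// ```ignore
/// b.iter(|| black_box(pow(2u, 10)));
/// ```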
pub fn black_box<T>(dummy: T) {
    // we need to "use" the argument in some way LLVM can't
    // introspect.
    unsafe {asm!("" : : "r"(&dummy))}
}

impl Bencher {
    /// Callback for benchmark functions to run in their body.
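    ///
    /// Each call times `self.iterations` executions of the closure
    /// between two `precise_time_ns()` readings; the closure's result
    /// is passed to `black_box`, so benchmark bodies need not consume
    /// their own outputs.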
    pub fn iter<T>(&mut self, inner: || -> T) {
        self.ns_start = precise_time_ns();
        let k = self.iterations;
        for _ in range(0u64, k) {
            black_box(inner());
        }
        self.ns_end = precise_time_ns();
    }

    pub fn ns_elapsed(&mut self) -> u64 {
        if self.ns_start == 0 || self.ns_end == 0 {
            0
        } else {
            self.ns_end - self.ns_start
        }
    }

    pub fn ns_per_iter(&mut self) -> u64 {
        if self.iterations == 0 {
            0
        } else {
            self.ns_elapsed() / cmp::max(self.iterations, 1)
        }
    }

    pub fn bench_n(&mut self, n: u64, f: |&mut Bencher|) {
        self.iterations = n;
        f(self);
    }

    // This is a more statistics-driven benchmark algorithm
    pub fn auto_bench(&mut self, f: |&mut Bencher|) -> stats::Summary<f64> {

        // Initial bench run to get ballpark figure.
        let mut n = 1_u64;
        self.bench_n(n, |x| f(x));

        // Try to estimate iter count for 1ms falling back to 1m
        // iterations if first run took < 1ns.
        if self.ns_per_iter() == 0 {
            n = 1_000_000;
        } else {
            n = 1_000_000 / cmp::max(self.ns_per_iter(), 1);
        }
        // if the first run took more than 1ms we don't want to just
        // be left doing 0 iterations on every loop. The unfortunate
        // side effect of not being able to do as many runs is
        // automatically handled by the statistical analysis below
        // (i.e. larger error bars).
        if n == 0 { n = 1; }

        let mut total_run = 0;
        let samples : &mut [f64] = [0.0_f64, ..50];
        loop {
            let loop_start = precise_time_ns();

            for p in samples.mut_iter() {
                self.bench_n(n, |x| f(x));
                *p = self.ns_per_iter() as f64;
            }

            stats::winsorize(samples, 5.0);
            let summ = stats::Summary::new(samples);

            for p in samples.mut_iter() {
                self.bench_n(5 * n, |x| f(x));
                *p = self.ns_per_iter() as f64;
            }

            stats::winsorize(samples, 5.0);
            let summ5 = stats::Summary::new(samples);

            let now = precise_time_ns();
            let loop_run = now - loop_start;

            // If we've run for 100ms and seem to have converged to a
            // stable median.
            if loop_run > 100_000_000 &&
                summ.median_abs_dev_pct < 1.0 &&
                summ.median - summ5.median < summ5.median_abs_dev {
                return summ5;
            }

            total_run += loop_run;
            // Longest we ever run for is 3s.
            if total_run > 3_000_000_000 {
                return summ5;
            }

            n *= 2;
        }
    }
}

pub mod bench {
    use std::cmp;
    use super::{Bencher, BenchSamples};

    pub fn benchmark(f: |&mut Bencher|) -> BenchSamples {
        let mut bs = Bencher {
            iterations: 0,
            ns_start: 0,
            ns_end: 0,
            bytes: 0
        };

        let ns_iter_summ = bs.auto_bench(f);

        let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
        let iter_s = 1_000_000_000 / ns_iter;
        let mb_s = (bs.bytes * iter_s) / 1_000_000;
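
        // Worked example (illustrative): a median of 500 ns/iter gives
        // iter_s = 1_000_000_000 / 500 = 2_000_000 iterations per second;
        // with bytes = 1024 that is (1024 * 2_000_000) / 1_000_000
        // = 2048 MB/s.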
        BenchSamples {
            ns_iter_summ: ns_iter_summ,
            mb_s: mb_s as uint
        }
    }
}

#[cfg(test)]
mod tests {
    use test::{TrFailed, TrIgnored, TrOk, filter_tests, parse_opts,
               TestDesc, TestDescAndFn, TestOpts, run_test,
               Metric, MetricMap, MetricAdded, MetricRemoved,
               Improvement, Regression, LikelyNoise,
               StaticTestName, DynTestName, DynTestFn};
    use std::io::TempDir;

    #[test]
    pub fn do_not_run_ignored_tests() {
        fn f() { fail!(); }
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: true,
                should_fail: false
            },
            testfn: DynTestFn(proc() f()),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv();
        assert!(res != TrOk);
    }

    #[test]
    pub fn ignored_tests_result_in_ignored() {
        fn f() { }
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: true,
                should_fail: false
            },
            testfn: DynTestFn(proc() f()),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv();
        assert!(res == TrIgnored);
    }

    #[test]
    fn test_should_fail() {
        fn f() { fail!(); }
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: false,
                should_fail: true
            },
            testfn: DynTestFn(proc() f()),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv();
        assert!(res == TrOk);
    }

    #[test]
    fn test_should_fail_but_succeeds() {
        fn f() { }
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: false,
                should_fail: true
            },
            testfn: DynTestFn(proc() f()),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv();
        assert!(res == TrFailed);
    }

    #[test]
    fn first_free_arg_should_be_a_filter() {
        let args = vec!("progname".to_string(), "some_regex_filter".to_string());
        let opts = match parse_opts(args.as_slice()) {
            Some(Ok(o)) => o,
            _ => fail!("Malformed arg in first_free_arg_should_be_a_filter")
        };
        assert!(opts.filter.expect("should've found filter").is_match("some_regex_filter"))
    }

    #[test]
    fn parse_ignored_flag() {
        let args = vec!("progname".to_string(),
                        "filter".to_string(),
                        "--ignored".to_string());
        let opts = match parse_opts(args.as_slice()) {
            Some(Ok(o)) => o,
            _ => fail!("Malformed arg in parse_ignored_flag")
        };
        assert!((opts.run_ignored));
    }

    #[test]
    pub fn filter_for_ignored_option() {
        // When we run ignored tests the test filter should filter out all the
        // unignored tests and flip the ignore flag on the rest to false

        let mut opts = TestOpts::new();
        opts.run_tests = true;
        opts.run_ignored = true;

        let tests = vec!(
            TestDescAndFn {
                desc: TestDesc {
                    name: StaticTestName("1"),
                    ignore: true,
                    should_fail: false,
                },
                testfn: DynTestFn(proc() {}),
            },
            TestDescAndFn {
                desc: TestDesc {
                    name: StaticTestName("2"),
                    ignore: false,
                    should_fail: false
                },
                testfn: DynTestFn(proc() {}),
            });
        let filtered = filter_tests(&opts, tests);

        assert_eq!(filtered.len(), 1);
        assert_eq!(filtered.get(0).desc.name.to_str(),
                   "1".to_string());
        assert!(filtered.get(0).desc.ignore == false);
    }

    #[test]
    pub fn sort_tests() {
        let mut opts = TestOpts::new();
        opts.run_tests = true;

        let names =
            vec!("sha1::test".to_string(),
                 "int::test_to_str".to_string(),
                 "int::test_pow".to_string(),
                 "test::do_not_run_ignored_tests".to_string(),
                 "test::ignored_tests_result_in_ignored".to_string(),
                 "test::first_free_arg_should_be_a_filter".to_string(),
                 "test::parse_ignored_flag".to_string(),
                 "test::filter_for_ignored_option".to_string(),
                 "test::sort_tests".to_string());
        let tests =
        {
            fn testfn() { }
            let mut tests = Vec::new();
            for name in names.iter() {
                let test = TestDescAndFn {
                    desc: TestDesc {
                        name: DynTestName((*name).clone()),
                        ignore: false,
                        should_fail: false
                    },
                    testfn: DynTestFn(testfn),
                };
                tests.push(test);
            }
            tests
        };
        let filtered = filter_tests(&opts, tests);

        let expected =
            vec!("int::test_pow".to_string(),
                 "int::test_to_str".to_string(),
                 "sha1::test".to_string(),
                 "test::do_not_run_ignored_tests".to_string(),
                 "test::filter_for_ignored_option".to_string(),
                 "test::first_free_arg_should_be_a_filter".to_string(),
                 "test::ignored_tests_result_in_ignored".to_string(),
                 "test::parse_ignored_flag".to_string(),
                 "test::sort_tests".to_string());

        for (a, b) in expected.iter().zip(filtered.iter()) {
            assert!(*a == b.desc.name.to_str());
        }
    }

    #[test]
    pub fn filter_tests_regex() {
        let mut opts = TestOpts::new();
        opts.filter = Some(::regex::Regex::new("a.*b.+c").unwrap());

        let mut names = ["yes::abXc", "yes::aXXXbXXXXc",
                         "no::XYZ", "no::abc"];
        names.sort();

        fn test_fn() {}
        let tests = names.iter().map(|name| {
            TestDescAndFn {
                desc: TestDesc {
                    name: DynTestName(name.to_string()),
                    ignore: false,
                    should_fail: false
                },
                testfn: DynTestFn(test_fn)
            }
        }).collect();
        let filtered = filter_tests(&opts, tests);

        let expected: Vec<&str> =
            names.iter().map(|&s| s).filter(|name| name.starts_with("yes")).collect();

        assert_eq!(filtered.len(), expected.len());
        for (test, expected_name) in filtered.iter().zip(expected.iter()) {
            assert_eq!(test.desc.name.as_slice(), *expected_name);
        }
    }

    #[test]
    pub fn test_metricmap_compare() {
        let mut m1 = MetricMap::new();
        let mut m2 = MetricMap::new();
        m1.insert_metric("in-both-noise", 1000.0, 200.0);
        m2.insert_metric("in-both-noise", 1100.0, 200.0);

        m1.insert_metric("in-first-noise", 1000.0, 2.0);
        m2.insert_metric("in-second-noise", 1000.0, 2.0);

        m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
        m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);

        m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
        m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);

        m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
        m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);

        m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
        m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);

        let diff1 = m2.compare_to_old(&m1, None);

        assert_eq!(*(diff1.find(&"in-both-noise".to_string()).unwrap()), LikelyNoise);
        assert_eq!(*(diff1.find(&"in-first-noise".to_string()).unwrap()), MetricRemoved);
        assert_eq!(*(diff1.find(&"in-second-noise".to_string()).unwrap()), MetricAdded);
        assert_eq!(*(diff1.find(&"in-both-want-downwards-but-regressed".to_string()).unwrap()),
                   Regression(100.0));
        assert_eq!(*(diff1.find(&"in-both-want-downwards-and-improved".to_string()).unwrap()),
                   Improvement(50.0));
        assert_eq!(*(diff1.find(&"in-both-want-upwards-but-regressed".to_string()).unwrap()),
                   Regression(50.0));
        assert_eq!(*(diff1.find(&"in-both-want-upwards-and-improved".to_string()).unwrap()),
                   Improvement(100.0));
        assert_eq!(diff1.len(), 7);

        let diff2 = m2.compare_to_old(&m1, Some(200.0));

        assert_eq!(*(diff2.find(&"in-both-noise".to_string()).unwrap()), LikelyNoise);
        assert_eq!(*(diff2.find(&"in-first-noise".to_string()).unwrap()), MetricRemoved);
        assert_eq!(*(diff2.find(&"in-second-noise".to_string()).unwrap()), MetricAdded);
        assert_eq!(*(diff2.find(&"in-both-want-downwards-but-regressed".to_string()).unwrap()),
                   LikelyNoise);
        assert_eq!(*(diff2.find(&"in-both-want-downwards-and-improved".to_string()).unwrap()),
                   LikelyNoise);
        assert_eq!(*(diff2.find(&"in-both-want-upwards-but-regressed".to_string()).unwrap()),
                   LikelyNoise);
        assert_eq!(*(diff2.find(&"in-both-want-upwards-and-improved".to_string()).unwrap()),
                   LikelyNoise);
        assert_eq!(diff2.len(), 7);
    }

    #[test]
    pub fn ratchet_test() {

        let dpth = TempDir::new("test-ratchet").expect("missing test for ratchet");
        let pth = dpth.path().join("ratchet.json");

        let mut m1 = MetricMap::new();
        m1.insert_metric("runtime", 1000.0, 2.0);
        m1.insert_metric("throughput", 50.0, 2.0);

        let mut m2 = MetricMap::new();
        m2.insert_metric("runtime", 1100.0, 2.0);
        m2.insert_metric("throughput", 50.0, 2.0);

        m1.save(&pth).unwrap();

        // Ask for a ratchet that should fail to advance.
        let (diff1, ok1) = m2.ratchet(&pth, None);
        assert_eq!(ok1, false);
        assert_eq!(diff1.len(), 2);
        assert_eq!(*(diff1.find(&"runtime".to_string()).unwrap()), Regression(10.0));
        assert_eq!(*(diff1.find(&"throughput".to_string()).unwrap()), LikelyNoise);

        // Check that it was not rewritten.
        let m3 = MetricMap::load(&pth);
        let MetricMap(m3) = m3;
        assert_eq!(m3.len(), 2);
        assert_eq!(*(m3.find(&"runtime".to_string()).unwrap()), Metric::new(1000.0, 2.0));
        assert_eq!(*(m3.find(&"throughput".to_string()).unwrap()), Metric::new(50.0, 2.0));

        // Ask for a ratchet with an explicit noise-percentage override,
        // that should advance.
        let (diff2, ok2) = m2.ratchet(&pth, Some(10.0));
        assert_eq!(ok2, true);
        assert_eq!(diff2.len(), 2);
        assert_eq!(*(diff2.find(&"runtime".to_string()).unwrap()), LikelyNoise);
        assert_eq!(*(diff2.find(&"throughput".to_string()).unwrap()), LikelyNoise);

        // Check that it was rewritten.
        let m4 = MetricMap::load(&pth);
        let MetricMap(m4) = m4;
        assert_eq!(m4.len(), 2);
        assert_eq!(*(m4.find(&"runtime".to_string()).unwrap()), Metric::new(1100.0, 2.0));
        assert_eq!(*(m4.find(&"throughput".to_string()).unwrap()), Metric::new(50.0, 2.0));
    }
}