// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

//! Support code for rustc's built in unit-test and micro-benchmarking
//! framework.
//!
//! Almost all user code will only be interested in `Bencher` and
//! `black_box`. All other interactions (such as writing tests and
//! benchmarks themselves) should be done via the `#[test]` and
//! `#[bench]` attributes.
//!
//! See the [Testing Guide](../guide-testing.html) for more details.
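//!
//! For example, a test and a benchmark might look like this (an illustrative
//! sketch; the function names are made up):
//!
//! ```ignore
//! #[test]
//! fn arithmetic_works() {
//!     assert_eq!(2u + 2u, 4u);
//! }
//!
//! #[bench]
//! fn bench_add(b: &mut test::Bencher) {
//!     b.iter(|| test::black_box(2u + 2u));
//! }
//! ```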

// Currently, not much of this is meant for users. It is intended to
// support the simplest interface possible for representing and
// running tests while providing a base that other test frameworks may
// build off of.

#![crate_id = "test#0.11-pre"]
#![comment = "Rust internal test library only used by rustc"]
#![license = "MIT/ASL2"]
#![crate_type = "rlib"]
#![crate_type = "dylib"]
#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
       html_favicon_url = "http://www.rust-lang.org/favicon.ico",
       html_root_url = "http://static.rust-lang.org/doc/master")]

#![feature(asm, macro_rules)]
#![deny(deprecated_owned_vector)]

extern crate collections;
extern crate getopts;
extern crate serialize;
extern crate term;
extern crate time;

use collections::TreeMap;
use time::precise_time_ns;
use getopts::{OptGroup, optflag, optopt};
use serialize::{json, Decodable};
use serialize::json::{Json, ToJson};
use term::color::{Color, RED, YELLOW, GREEN, CYAN};

use std::cmp;
use std::f64;
use std::fmt;
use std::from_str::FromStr;
use std::io::stdio::StdWriter;
use std::io::{File, ChanReader, ChanWriter};
use std::io;
use std::os;
use std::str;
use std::strbuf::StrBuf;
use std::task;

// to be used by rustc to compile tests in libtest
pub mod test {
    pub use {Bencher, TestName, TestResult, TestDesc,
             TestDescAndFn, TestOpts, TrFailed, TrIgnored, TrOk,
             Metric, MetricMap, MetricAdded, MetricRemoved,
             MetricChange, Improvement, Regression, LikelyNoise,
             StaticTestFn, StaticTestName, DynTestName, DynTestFn,
             run_test, test_main, test_main_static, filter_tests,
             parse_opts, StaticBenchFn};
}

pub mod stats; // summary statistics and winsorization (stats.rs)

// The name of a test. By convention this follows the rules for rust
// paths; i.e. it should be a series of identifiers separated by double
// colons. This way if some test runner wants to arrange the tests
// hierarchically it may.
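//
// For example, a runner could group a test named `int::test_pow` under an
// `int` node (the name is illustrative; compare `sort_tests` in the test
// module below).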

#[deriving(Clone)]
pub enum TestName {
    StaticTestName(&'static str),
    DynTestName(~str)
}

impl fmt::Show for TestName {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match *self {
            StaticTestName(s) => f.buf.write_str(s),
            DynTestName(ref s) => f.buf.write_str(s.as_slice()),
        }
    }
}

#[deriving(Clone)]
enum NamePadding { PadNone, PadOnLeft, PadOnRight }

impl TestDesc {
    fn padded_name(&self, column_count: uint, align: NamePadding) -> ~str {
        use std::num::Saturating;
        let mut name = StrBuf::from_str(self.name.to_str());
        let fill = column_count.saturating_sub(name.len());
        let mut pad = StrBuf::from_owned_str(" ".repeat(fill));
        match align {
            PadNone => name.into_owned(),
            PadOnLeft => {
                pad.push_str(name.as_slice());
                pad.into_owned()
            }
            PadOnRight => {
                name.push_str(pad.as_slice());
                name.into_owned()
            }
        }
    }
}

/// Represents a benchmark function.
pub trait TDynBenchFn {
    fn run(&self, harness: &mut Bencher);
}

// A function that runs a test. If the function returns successfully,
// the test succeeds; if the function fails then the test fails. We
// may need to come up with a more clever definition of test in order
// to support isolation of tests into tasks.
pub enum TestFn {
    StaticTestFn(fn()),
    StaticBenchFn(fn(&mut Bencher)),
    StaticMetricFn(proc(&mut MetricMap)),
    DynTestFn(proc():Send),
    DynMetricFn(proc(&mut MetricMap)),
    DynBenchFn(~TDynBenchFn)
}

impl TestFn {
    fn padding(&self) -> NamePadding {
        match self {
            &StaticTestFn(..) => PadNone,
            &StaticBenchFn(..) => PadOnRight,
            &StaticMetricFn(..) => PadOnRight,
            &DynTestFn(..) => PadNone,
            &DynMetricFn(..) => PadOnRight,
            &DynBenchFn(..) => PadOnRight,
        }
    }
}

/// Manager of the benchmarking runs.
///
/// This is fed into functions marked with `#[bench]` to allow for
/// set-up & tear-down before running a piece of code repeatedly via a
/// call to `iter`.
pub struct Bencher {
    iterations: u64,
    ns_start: u64,
    ns_end: u64,
    pub bytes: u64,
}
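
// A sketch of a typical `#[bench]` body: set-up outside `iter` runs once per
// sampling pass, the closure handed to `iter` is what gets timed, and setting
// `bytes` makes the harness report throughput in MB/s. (Illustrative only;
// the workload here is made up.)
//
//     #[bench]
//     fn bench_sum(b: &mut Bencher) {
//         let v = Vec::from_elem(1024u, 1u8);   // set-up, not timed per-iter
//         b.bytes = v.len() as u64;             // enables the MB/s figure
//         b.iter(|| black_box(v.iter().fold(0u, |a, &x| a + x as uint)));
//     }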

// The definition of a single test. A test runner will run a list of
// these.
#[deriving(Clone)]
pub struct TestDesc {
    pub name: TestName,
    pub ignore: bool,
    pub should_fail: bool,
}

pub struct TestDescAndFn {
    pub desc: TestDesc,
    pub testfn: TestFn,
}

#[deriving(Clone, Encodable, Decodable, Eq, Show)]
pub struct Metric {
    value: f64,
    noise: f64
}

impl Metric {
    pub fn new(value: f64, noise: f64) -> Metric {
        Metric {value: value, noise: noise}
    }
}

#[deriving(Eq)]
pub struct MetricMap(TreeMap<~str,Metric>);

impl Clone for MetricMap {
    fn clone(&self) -> MetricMap {
        let MetricMap(ref map) = *self;
        MetricMap(map.clone())
    }
}

/// Analysis of a single change in metric
#[deriving(Eq, Show)]
pub enum MetricChange {
    LikelyNoise,
    MetricAdded,
    MetricRemoved,
    Improvement(f64),
    Regression(f64)
}

pub type MetricDiff = TreeMap<~str,MetricChange>;

// The default console test runner. It accepts the command line
// arguments and a vector of test_descs.
pub fn test_main(args: &[~str], tests: Vec<TestDescAndFn>) {
    let opts =
        match parse_opts(args) {
            Some(Ok(o)) => o,
            Some(Err(msg)) => fail!("{}", msg),
            None => return
        };
    match run_tests_console(&opts, tests) {
        Ok(true) => {}
        Ok(false) => fail!("Some tests failed"),
        Err(e) => fail!("io error when running tests: {}", e),
    }
}

// A variant optimized for invocation with a static test vector.
// This will fail (intentionally) when fed any dynamic tests, because
// it is copying the static values out into a dynamic vector and cannot
// copy dynamic values. It is doing this because from this point on
// a ~[TestDescAndFn] is used in order to effect ownership-transfer
// semantics into parallel test runners, which in turn requires a ~[]
// rather than a &[].
pub fn test_main_static(args: &[~str], tests: &[TestDescAndFn]) {
    let owned_tests = tests.iter().map(|t| {
        match t.testfn {
            StaticTestFn(f) =>
                TestDescAndFn { testfn: StaticTestFn(f), desc: t.desc.clone() },
            StaticBenchFn(f) =>
                TestDescAndFn { testfn: StaticBenchFn(f), desc: t.desc.clone() },
            _ => {
                fail!("non-static tests passed to test::test_main_static");
            }
        }
    }).collect();
    test_main(args, owned_tests)
}
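
// A sketch of how a generated harness might drive this entry point (an
// assumption about what `rustc --test` emits, shown for orientation only):
//
//     fn main() {
//         let args = std::os::args();
//         test::test_main_static(args.as_slice(), TESTS);
//     }
//
// where `TESTS` is a static slice of `TestDescAndFn` records.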

pub struct TestOpts {
    pub filter: Option<~str>,
    pub run_ignored: bool,
    pub run_tests: bool,
    pub run_benchmarks: bool,
    pub ratchet_metrics: Option<Path>,
    pub ratchet_noise_percent: Option<f64>,
    pub save_metrics: Option<Path>,
    pub test_shard: Option<(uint,uint)>,
    pub logfile: Option<Path>
}

/// Result of parsing the options.
pub type OptRes = Result<TestOpts, ~str>;

fn optgroups() -> Vec<getopts::OptGroup> {
    vec!(getopts::optflag("", "ignored", "Run ignored tests"),
         getopts::optflag("", "test", "Run tests and not benchmarks"),
         getopts::optflag("", "bench", "Run benchmarks instead of tests"),
         getopts::optflag("h", "help", "Display this message (longer with --help)"),
         getopts::optopt("", "save-metrics", "Location to save bench metrics",
                         "PATH"),
         getopts::optopt("", "ratchet-metrics",
                         "Location to load and save metrics from. The metrics \
                          loaded are used to cause benchmarks to fail if they \
                          run too slowly", "PATH"),
         getopts::optopt("", "ratchet-noise-percent",
                         "Tests within N% of the recorded metrics will be \
                          considered as passing", "PERCENTAGE"),
         getopts::optopt("", "logfile", "Write logs to the specified file instead \
                          of stdout", "PATH"),
         getopts::optopt("", "test-shard", "Run shard A of B shards of the testsuite",
                         "A.B"))
}

fn usage(binary: &str, helpstr: &str) {
    let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
    println!("{}", getopts::usage(message, optgroups().as_slice()));
    println!("");
    if helpstr == "help" {
        println!("{}", "\
The FILTER is matched against the name of all tests to run, and if any tests
have a substring match, only those tests are run.

By default, all tests are run in parallel. This can be altered with the
RUST_TEST_TASKS environment variable when running tests (set it to 1).

Test Attributes:

    #[test]        - Indicates a function is a test to be run. This function
                     takes no arguments.
    #[bench]       - Indicates a function is a benchmark to be run. This
                     function takes one argument (test::Bencher).
    #[should_fail] - This function (also labeled with #[test]) will only pass if
                     the code causes a failure (an assertion failure or fail!)
    #[ignore]      - When applied to a function which is already attributed as a
                     test, then the test runner will ignore these tests during
                     normal test runs. Running with --ignored will run these
                     tests. This may also be written as #[ignore(cfg(...))] to
                     ignore the test on certain configurations.");
    }
}

// Parses command line arguments into test options
pub fn parse_opts(args: &[~str]) -> Option<OptRes> {
    let args_ = args.tail();
    let matches =
        match getopts::getopts(args_, optgroups().as_slice()) {
            Ok(m) => m,
            Err(f) => return Some(Err(f.to_err_msg()))
        };

    if matches.opt_present("h") { usage(args[0], "h"); return None; }
    if matches.opt_present("help") { usage(args[0], "help"); return None; }

    let filter =
        if matches.free.len() > 0 {
            Some((*matches.free.get(0)).clone())
        } else {
            None
        };

    let run_ignored = matches.opt_present("ignored");

    let logfile = matches.opt_str("logfile");
    let logfile = logfile.map(|s| Path::new(s));

    let run_benchmarks = matches.opt_present("bench");
    let run_tests = ! run_benchmarks ||
        matches.opt_present("test");

    let ratchet_metrics = matches.opt_str("ratchet-metrics");
    let ratchet_metrics = ratchet_metrics.map(|s| Path::new(s));

    let ratchet_noise_percent = matches.opt_str("ratchet-noise-percent");
    let ratchet_noise_percent = ratchet_noise_percent.map(|s| from_str::<f64>(s).unwrap());

    let save_metrics = matches.opt_str("save-metrics");
    let save_metrics = save_metrics.map(|s| Path::new(s));

    let test_shard = matches.opt_str("test-shard");
    let test_shard = opt_shard(test_shard);

    let test_opts = TestOpts {
        filter: filter,
        run_ignored: run_ignored,
        run_tests: run_tests,
        run_benchmarks: run_benchmarks,
        ratchet_metrics: ratchet_metrics,
        ratchet_noise_percent: ratchet_noise_percent,
        save_metrics: save_metrics,
        test_shard: test_shard,
        logfile: logfile
    };

    Some(Ok(test_opts))
}

pub fn opt_shard(maybestr: Option<~str>) -> Option<(uint,uint)> {
    match maybestr {
        None => None,
        Some(s) => {
            let mut it = s.split('.');
            match (it.next().and_then(from_str), it.next().and_then(from_str), it.next()) {
                (Some(a), Some(b), None) => Some((a, b)),
                _ => None
            }
        }
    }
}
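
// For example, `opt_shard(Some(~"2.4"))` yields `Some((2, 4))`, i.e. run
// shard 2 of 4: `filter_tests` below keeps every test whose index is
// congruent to 2 modulo 4.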

#[deriving(Clone, Eq)]
pub struct BenchSamples {
    ns_iter_summ: stats::Summary,
    mb_s: uint
}

#[deriving(Clone, Eq)]
pub enum TestResult {
    TrOk,
    TrFailed,
    TrIgnored,
    TrMetrics(MetricMap),
    TrBench(BenchSamples),
}

enum OutputLocation<T> {
    Pretty(term::Terminal<T>),
    Raw(T),
}

struct ConsoleTestState<T> {
    log_out: Option<File>,
    out: OutputLocation<T>,
    use_color: bool,
    total: uint,
    passed: uint,
    failed: uint,
    ignored: uint,
    measured: uint,
    metrics: MetricMap,
    failures: Vec<(TestDesc, Vec<u8>)>,
    max_name_len: uint, // number of columns to fill when aligning names
}

impl<T: Writer> ConsoleTestState<T> {
    pub fn new(opts: &TestOpts,
               _: Option<T>) -> io::IoResult<ConsoleTestState<StdWriter>> {
        let log_out = match opts.logfile {
            Some(ref path) => Some(try!(File::create(path))),
            None => None
        };
        let out = match term::Terminal::new(io::stdio::stdout_raw()) {
            Err(_) => Raw(io::stdio::stdout_raw()),
            Ok(t) => Pretty(t)
        };
        Ok(ConsoleTestState {
            out: out,
            log_out: log_out,
            use_color: use_color(),
            total: 0u,
            passed: 0u,
            failed: 0u,
            ignored: 0u,
            measured: 0u,
            metrics: MetricMap::new(),
            failures: Vec::new(),
            max_name_len: 0u,
        })
    }

    pub fn write_ok(&mut self) -> io::IoResult<()> {
        self.write_pretty("ok", term::color::GREEN)
    }

    pub fn write_failed(&mut self) -> io::IoResult<()> {
        self.write_pretty("FAILED", term::color::RED)
    }

    pub fn write_ignored(&mut self) -> io::IoResult<()> {
        self.write_pretty("ignored", term::color::YELLOW)
    }

    pub fn write_metric(&mut self) -> io::IoResult<()> {
        self.write_pretty("metric", term::color::CYAN)
    }

    pub fn write_bench(&mut self) -> io::IoResult<()> {
        self.write_pretty("bench", term::color::CYAN)
    }

    pub fn write_added(&mut self) -> io::IoResult<()> {
        self.write_pretty("added", term::color::GREEN)
    }

    pub fn write_improved(&mut self) -> io::IoResult<()> {
        self.write_pretty("improved", term::color::GREEN)
    }

    pub fn write_removed(&mut self) -> io::IoResult<()> {
        self.write_pretty("removed", term::color::YELLOW)
    }

    pub fn write_regressed(&mut self) -> io::IoResult<()> {
        self.write_pretty("regressed", term::color::RED)
    }

    pub fn write_pretty(&mut self,
                        word: &str,
                        color: term::color::Color) -> io::IoResult<()> {
        match self.out {
            Pretty(ref mut term) => {
                if self.use_color {
                    try!(term.fg(color));
                }
                try!(term.write(word.as_bytes()));
                if self.use_color {
                    try!(term.reset());
                }
                Ok(())
            }
            Raw(ref mut stdout) => stdout.write(word.as_bytes())
        }
    }

    pub fn write_plain(&mut self, s: &str) -> io::IoResult<()> {
        match self.out {
            Pretty(ref mut term) => term.write(s.as_bytes()),
            Raw(ref mut stdout) => stdout.write(s.as_bytes())
        }
    }

    pub fn write_run_start(&mut self, len: uint) -> io::IoResult<()> {
        self.total = len;
        let noun = if len != 1 { &"tests" } else { &"test" };
        self.write_plain(format!("\nrunning {} {}\n", len, noun))
    }

    pub fn write_test_start(&mut self, test: &TestDesc,
                            align: NamePadding) -> io::IoResult<()> {
        let name = test.padded_name(self.max_name_len, align);
        self.write_plain(format!("test {} ... ", name))
    }

    pub fn write_result(&mut self, result: &TestResult) -> io::IoResult<()> {
        try!(match *result {
            TrOk => self.write_ok(),
            TrFailed => self.write_failed(),
            TrIgnored => self.write_ignored(),
            TrMetrics(ref mm) => {
                try!(self.write_metric());
                self.write_plain(format!(": {}", fmt_metrics(mm)))
            }
            TrBench(ref bs) => {
                try!(self.write_bench());
                self.write_plain(format!(": {}", fmt_bench_samples(bs)))
            }
        });
        self.write_plain("\n")
    }

    pub fn write_log(&mut self, test: &TestDesc,
                     result: &TestResult) -> io::IoResult<()> {
        match self.log_out {
            None => Ok(()),
            Some(ref mut o) => {
                let s = format!("{} {}\n", match *result {
                    TrOk => ~"ok",
                    TrFailed => ~"failed",
                    TrIgnored => ~"ignored",
                    TrMetrics(ref mm) => fmt_metrics(mm),
                    TrBench(ref bs) => fmt_bench_samples(bs)
                }, test.name.to_str());
                o.write(s.as_bytes())
            }
        }
    }

    pub fn write_failures(&mut self) -> io::IoResult<()> {
        try!(self.write_plain("\nfailures:\n"));
        let mut failures = Vec::new();
        let mut fail_out = StrBuf::new();
        for &(ref f, ref stdout) in self.failures.iter() {
            failures.push(f.name.to_str());
            if stdout.len() > 0 {
                fail_out.push_str(format!("---- {} stdout ----\n\t",
                                          f.name.to_str()));
                let output = str::from_utf8_lossy(stdout.as_slice());
                fail_out.push_str(output.as_slice().replace("\n", "\n\t"));
                fail_out.push_str("\n");
            }
        }
        if fail_out.len() > 0 {
            try!(self.write_plain("\n"));
            try!(self.write_plain(fail_out.as_slice()));
        }

        try!(self.write_plain("\nfailures:\n"));
        failures.as_mut_slice().sort();
        for name in failures.iter() {
            try!(self.write_plain(format!("    {}\n", name.to_str())));
        }
        Ok(())
    }

    pub fn write_metric_diff(&mut self, diff: &MetricDiff) -> io::IoResult<()> {
        let mut noise = 0;
        let mut improved = 0;
        let mut regressed = 0;
        let mut added = 0;
        let mut removed = 0;

        for (k, v) in diff.iter() {
            match *v {
                LikelyNoise => noise += 1,
                MetricAdded => {
                    added += 1;
                    try!(self.write_added());
                    try!(self.write_plain(format!(": {}\n", *k)));
                }
                MetricRemoved => {
                    removed += 1;
                    try!(self.write_removed());
                    try!(self.write_plain(format!(": {}\n", *k)));
                }
                Improvement(pct) => {
                    improved += 1;
                    try!(self.write_plain(format!(": {}", *k)));
                    try!(self.write_improved());
                    try!(self.write_plain(format!(" by {:.2f}%\n", pct as f64)));
                }
                Regression(pct) => {
                    regressed += 1;
                    try!(self.write_plain(format!(": {}", *k)));
                    try!(self.write_regressed());
                    try!(self.write_plain(format!(" by {:.2f}%\n", pct as f64)));
                }
            }
        }
        try!(self.write_plain(format!("result of ratchet: {} metrics added, \
                                       {} removed, {} improved, {} regressed, \
                                       {} noise\n",
                                      added, removed, improved, regressed,
                                      noise)));
        if regressed == 0 {
            try!(self.write_plain("updated ratchet file\n"));
        } else {
            try!(self.write_plain("left ratchet file untouched\n"));
        }
        Ok(())
    }

    pub fn write_run_finish(&mut self,
                            ratchet_metrics: &Option<Path>,
                            ratchet_pct: Option<f64>) -> io::IoResult<bool> {
        assert!(self.passed + self.failed + self.ignored + self.measured == self.total);

        let ratchet_success = match *ratchet_metrics {
            None => true,
            Some(ref pth) => {
                try!(self.write_plain(format!("\nusing metrics ratchet: {}\n",
                                              pth.display())));
                match ratchet_pct {
                    None => (),
                    Some(pct) =>
                        try!(self.write_plain(format!("with noise-tolerance \
                                                       forced to: {}%\n",
                                                      pct)))
                }
                let (diff, ok) = self.metrics.ratchet(pth, ratchet_pct);
                try!(self.write_metric_diff(&diff));
                ok
            }
        };

        let test_success = self.failed == 0u;
        if !test_success {
            try!(self.write_failures());
        }

        let success = ratchet_success && test_success;

        try!(self.write_plain("\ntest result: "));
        if success {
            // There's no parallelism at this point so it's safe to use color
            try!(self.write_ok());
        } else {
            try!(self.write_failed());
        }
        let s = format!(". {} passed; {} failed; {} ignored; {} measured\n\n",
                        self.passed, self.failed, self.ignored, self.measured);
        try!(self.write_plain(s));
        return Ok(success);
    }
}

pub fn fmt_metrics(mm: &MetricMap) -> ~str {
    let MetricMap(ref mm) = *mm;
    let v : Vec<~str> = mm.iter()
        .map(|(k,v)| format!("{}: {} (+/- {})",
                             *k, v.value as f64, v.noise as f64))
        .collect();
    v.connect(", ")
}

pub fn fmt_bench_samples(bs: &BenchSamples) -> ~str {
    if bs.mb_s != 0 {
        format!("{:>9} ns/iter (+/- {}) = {} MB/s",
                bs.ns_iter_summ.median as uint,
                (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint,
                bs.mb_s)
    } else {
        format!("{:>9} ns/iter (+/- {})",
                bs.ns_iter_summ.median as uint,
                (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint)
    }
}
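
// e.g. `      520 ns/iter (+/- 40) = 1969 MB/s` when `mb_s` is non-zero, or
// just `      520 ns/iter (+/- 40)` otherwise (numbers illustrative).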

// A simple console test runner
pub fn run_tests_console(opts: &TestOpts,
                         tests: Vec<TestDescAndFn>) -> io::IoResult<bool> {
    fn callback<T: Writer>(event: &TestEvent,
                           st: &mut ConsoleTestState<T>) -> io::IoResult<()> {
        match (*event).clone() {
            TeFiltered(ref filtered_tests) => st.write_run_start(filtered_tests.len()),
            TeWait(ref test, padding) => st.write_test_start(test, padding),
            TeResult(test, result, stdout) => {
                try!(st.write_log(&test, &result));
                try!(st.write_result(&result));
                match result {
                    TrOk => st.passed += 1,
                    TrIgnored => st.ignored += 1,
                    TrMetrics(mm) => {
                        let tname = test.name.to_str();
                        let MetricMap(mm) = mm;
                        for (k,v) in mm.iter() {
                            st.metrics.insert_metric(tname + "." + *k,
                                                     v.value, v.noise);
                        }
                        st.measured += 1
                    }
                    TrBench(bs) => {
                        st.metrics.insert_metric(test.name.to_str(),
                                                 bs.ns_iter_summ.median,
                                                 bs.ns_iter_summ.max - bs.ns_iter_summ.min);
                        st.measured += 1
                    }
                    TrFailed => {
                        st.failed += 1;
                        st.failures.push((test, stdout));
                    }
                }
                Ok(())
            }
        }
    }
    let mut st = try!(ConsoleTestState::new(opts, None::<StdWriter>));
    fn len_if_padded(t: &TestDescAndFn) -> uint {
        match t.testfn.padding() {
            PadNone => 0u,
            PadOnLeft | PadOnRight => t.desc.name.to_str().len(),
        }
    }
    match tests.iter().max_by(|t| len_if_padded(*t)) {
        Some(t) => {
            let n = t.desc.name.to_str();
            st.max_name_len = n.len();
        },
        None => {}
    }
    try!(run_tests(opts, tests, |x| callback(&x, &mut st)));
    match opts.save_metrics {
        None => (),
        Some(ref pth) => {
            try!(st.metrics.save(pth));
            try!(st.write_plain(format!("\nmetrics saved to: {}",
                                        pth.display())));
        }
    }
    return st.write_run_finish(&opts.ratchet_metrics, opts.ratchet_noise_percent);
}

#[test]
fn should_sort_failures_before_printing_them() {
    use std::io::MemWriter;
    use std::str;

    let test_a = TestDesc {
        name: StaticTestName("a"),
        ignore: false,
        should_fail: false
    };

    let test_b = TestDesc {
        name: StaticTestName("b"),
        ignore: false,
        should_fail: false
    };

    let mut st = ConsoleTestState {
        log_out: None,
        out: Raw(MemWriter::new()),
        use_color: false,
        total: 0u,
        passed: 0u,
        failed: 0u,
        ignored: 0u,
        measured: 0u,
        max_name_len: 10u,
        metrics: MetricMap::new(),
        failures: vec!((test_b, Vec::new()), (test_a, Vec::new()))
    };

    st.write_failures().unwrap();
    let s = match st.out {
        Raw(ref m) => str::from_utf8_lossy(m.get_ref()),
        Pretty(_) => unreachable!()
    };

    let apos = s.as_slice().find_str("a").unwrap();
    let bpos = s.as_slice().find_str("b").unwrap();
    assert!(apos < bpos);
}

fn use_color() -> bool { return get_concurrency() == 1; }

#[deriving(Clone)]
enum TestEvent {
    TeFiltered(Vec<TestDesc>),
    TeWait(TestDesc, NamePadding),
    TeResult(TestDesc, TestResult, Vec<u8>),
}

pub type MonitorMsg = (TestDesc, TestResult, Vec<u8>);

fn run_tests(opts: &TestOpts,
             tests: Vec<TestDescAndFn>,
             callback: |e: TestEvent| -> io::IoResult<()>) -> io::IoResult<()> {
    let filtered_tests = filter_tests(opts, tests);
    let filtered_descs = filtered_tests.iter()
                                       .map(|t| t.desc.clone())
                                       .collect();

    try!(callback(TeFiltered(filtered_descs)));

    let (filtered_tests, filtered_benchs_and_metrics) =
        filtered_tests.partition(|e| {
            match e.testfn {
                StaticTestFn(_) | DynTestFn(_) => true,
                _ => false
            }
        });

    // It's tempting to just spawn all the tests at once, but since we have
    // many tests that run in other processes we would be making a big mess.
    let concurrency = get_concurrency();

    let mut remaining = filtered_tests;
    remaining.reverse();
    let mut pending = 0;

    let (tx, rx) = channel::<MonitorMsg>();

    while pending > 0 || !remaining.is_empty() {
        while pending < concurrency && !remaining.is_empty() {
            let test = remaining.pop().unwrap();
            if concurrency == 1 {
                // We are doing one test at a time so we can print the name
                // of the test before we run it. Useful for debugging tests
                // that hang forever.
                try!(callback(TeWait(test.desc.clone(), test.testfn.padding())));
            }
            run_test(!opts.run_tests, test, tx.clone());
            pending += 1;
        }

        let (desc, result, stdout) = rx.recv();
        if concurrency != 1 {
            try!(callback(TeWait(desc.clone(), PadNone)));
        }
        try!(callback(TeResult(desc, result, stdout)));
        pending -= 1;
    }

    // All benchmarks run at the end, in serial.
    // (this includes metric fns)
    for b in filtered_benchs_and_metrics.move_iter() {
        try!(callback(TeWait(b.desc.clone(), b.testfn.padding())));
        run_test(!opts.run_benchmarks, b, tx.clone());
        let (test, result, stdout) = rx.recv();
        try!(callback(TeResult(test, result, stdout)));
    }
    Ok(())
}

fn get_concurrency() -> uint {
    use std::rt;
    match os::getenv("RUST_TEST_TASKS") {
        Some(s) => {
            let opt_n: Option<uint> = FromStr::from_str(s);
            match opt_n {
                Some(n) if n > 0 => n,
                _ => fail!("RUST_TEST_TASKS is `{}`, should be a positive integer.", s)
            }
        }
        None => {
            rt::default_sched_threads()
        }
    }
}
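
// For example, `RUST_TEST_TASKS=1 ./mytests` forces tests to run one at a
// time (which also enables colored output; see `use_color` above).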

pub fn filter_tests(opts: &TestOpts,
                    tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
    let mut filtered = tests;

    // Remove tests that don't match the test filter
    filtered = if opts.filter.is_none() {
        filtered
    } else {
        let filter_str = match opts.filter {
            Some(ref f) => (*f).clone(),
            None => ~""
        };

        fn filter_fn(test: TestDescAndFn, filter_str: &str) ->
            Option<TestDescAndFn> {
            if test.desc.name.to_str().contains(filter_str) {
                return Some(test);
            } else {
                return None;
            }
        }

        filtered.move_iter().filter_map(|x| filter_fn(x, filter_str)).collect()
    };

    // Maybe pull out the ignored tests and unignore them
    filtered = if !opts.run_ignored {
        filtered
    } else {
        fn filter(test: TestDescAndFn) -> Option<TestDescAndFn> {
            if test.desc.ignore {
                let TestDescAndFn {desc, testfn} = test;
                Some(TestDescAndFn {
                    desc: TestDesc {ignore: false, ..desc},
                    testfn: testfn
                })
            } else {
                None
            }
        }
        filtered.move_iter().filter_map(|x| filter(x)).collect()
    };

    // Sort the tests alphabetically
    filtered.sort_by(|t1, t2| t1.desc.name.to_str().cmp(&t2.desc.name.to_str()));

    // Shard the remaining tests, if sharding requested.
    match opts.test_shard {
        None => filtered,
        Some((a, b)) => {
            filtered.move_iter().enumerate()
                    .filter(|&(i,_)| i % b == a)
                    .map(|(_,t)| t)
                    .collect()
        }
    }
}

pub fn run_test(force_ignore: bool,
                test: TestDescAndFn,
                monitor_ch: Sender<MonitorMsg>) {

    let TestDescAndFn {desc, testfn} = test;

    if force_ignore || desc.ignore {
        monitor_ch.send((desc, TrIgnored, Vec::new()));
        return;
    }

    #[allow(deprecated_owned_vector)]
    fn run_test_inner(desc: TestDesc,
                      monitor_ch: Sender<MonitorMsg>,
                      testfn: proc():Send) {
        spawn(proc() {
            let (tx, rx) = channel();
            let mut reader = ChanReader::new(rx);
            let stdout = ChanWriter::new(tx.clone());
            let stderr = ChanWriter::new(tx);
            let mut task = task::task().named(match desc.name {
                DynTestName(ref name) => name.clone().into_maybe_owned(),
                StaticTestName(name) => name.into_maybe_owned(),
            });
            task.opts.stdout = Some(~stdout as ~Writer:Send);
            task.opts.stderr = Some(~stderr as ~Writer:Send);
            let result_future = task.future_result();
            task.spawn(testfn);

            let stdout = reader.read_to_end().unwrap().move_iter().collect();
            let task_result = result_future.recv();
            let test_result = calc_result(&desc, task_result.is_ok());
            monitor_ch.send((desc.clone(), test_result, stdout));
        })
    }

    match testfn {
        DynBenchFn(bencher) => {
            let bs = ::bench::benchmark(|harness| bencher.run(harness));
            monitor_ch.send((desc, TrBench(bs), Vec::new()));
            return;
        }
        StaticBenchFn(benchfn) => {
            let bs = ::bench::benchmark(|harness| benchfn(harness));
            monitor_ch.send((desc, TrBench(bs), Vec::new()));
            return;
        }
        DynMetricFn(f) => {
            let mut mm = MetricMap::new();
            f(&mut mm);
            monitor_ch.send((desc, TrMetrics(mm), Vec::new()));
            return;
        }
        StaticMetricFn(f) => {
            let mut mm = MetricMap::new();
            f(&mut mm);
            monitor_ch.send((desc, TrMetrics(mm), Vec::new()));
            return;
        }
        DynTestFn(f) => run_test_inner(desc, monitor_ch, f),
        StaticTestFn(f) => run_test_inner(desc, monitor_ch, proc() f())
    }
}

fn calc_result(desc: &TestDesc, task_succeeded: bool) -> TestResult {
    if task_succeeded {
        if desc.should_fail { TrFailed }
        else { TrOk }
    } else {
        if desc.should_fail { TrOk }
        else { TrFailed }
    }
}

impl ToJson for Metric {
    fn to_json(&self) -> json::Json {
        let mut map = ~TreeMap::new();
        map.insert(~"value", json::Number(self.value));
        map.insert(~"noise", json::Number(self.noise));
        json::Object(map)
    }
}
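
// The resulting JSON for a metric is an object of the form (illustrative):
//
//     { "value": 1000.0, "noise": 2.0 }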

impl MetricMap {

    pub fn new() -> MetricMap {
        MetricMap(TreeMap::new())
    }

    /// Load MetricDiff from a file.
    ///
    /// # Failure
    ///
    /// This function will fail if the path does not exist or the path does not
    /// contain a valid metric map.
    pub fn load(p: &Path) -> MetricMap {
        assert!(p.exists());
        let mut f = File::open(p).unwrap();
        let value = json::from_reader(&mut f as &mut io::Reader).unwrap();
        let mut decoder = json::Decoder::new(value);
        MetricMap(match Decodable::decode(&mut decoder) {
            Ok(t) => t,
            Err(e) => fail!("failure decoding JSON: {}", e)
        })
    }

    /// Write MetricDiff to a file.
    pub fn save(&self, p: &Path) -> io::IoResult<()> {
        let mut file = try!(File::create(p));
        let MetricMap(ref map) = *self;
        map.to_json().to_pretty_writer(&mut file)
    }

    /// Compare against another MetricMap. Optionally compare all
    /// measurements in the maps using the provided `noise_pct` as a
    /// percentage of each value to consider noise. If `None`, each
    /// measurement's noise threshold is independently chosen as the
    /// maximum of that measurement's recorded noise quantity in either
    /// map.
    pub fn compare_to_old(&self, old: &MetricMap,
                          noise_pct: Option<f64>) -> MetricDiff {
        let mut diff : MetricDiff = TreeMap::new();
        let MetricMap(ref selfmap) = *self;
        let MetricMap(ref old) = *old;
        for (k, vold) in old.iter() {
            let r = match selfmap.find(k) {
                None => MetricRemoved,
                Some(v) => {
                    let delta = v.value - vold.value;
                    let noise = match noise_pct {
                        None => vold.noise.abs().max(v.noise.abs()),
                        Some(pct) => vold.value * pct / 100.0
                    };
                    if delta.abs() <= noise {
                        LikelyNoise
                    } else {
                        let pct = delta.abs() / vold.value.max(f64::EPSILON) * 100.0;
                        if vold.noise < 0.0 {
                            // When 'noise' is negative, it means we want
                            // to see deltas that go up over time, and can
                            // only tolerate slight negative movement.
                            if delta < 0.0 {
                                Regression(pct)
                            } else {
                                Improvement(pct)
                            }
                        } else {
                            // When 'noise' is positive, it means we want
                            // to see deltas that go down over time, and
                            // can only tolerate slight positive movements.
                            if delta < 0.0 {
                                Improvement(pct)
                            } else {
                                Regression(pct)
                            }
                        }
                    }
                }
            };
            diff.insert((*k).clone(), r);
        }
        let MetricMap(ref map) = *self;
        for (k, _) in map.iter() {
            if !diff.contains_key(k) {
                diff.insert((*k).clone(), MetricAdded);
            }
        }
        diff
    }

    /// Insert a named `value` (+/- `noise`) metric into the map. The value
    /// must be non-negative. The `noise` indicates the uncertainty of the
    /// metric, which doubles as the "noise range" of acceptable
    /// pairwise-regressions on this named value, when comparing from one
    /// metric to the next using `compare_to_old`.
    ///
    /// If `noise` is positive, then it means this metric is of a value
    /// you want to see grow smaller, so a change larger than `noise` in the
    /// positive direction represents a regression.
    ///
    /// If `noise` is negative, then it means this metric is of a value
    /// you want to see grow larger, so a change larger than `noise` in the
    /// negative direction represents a regression.
    pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
        let m = Metric {
            value: value,
            noise: noise
        };
        let MetricMap(ref mut map) = *self;
        map.insert(name.to_owned(), m);
    }

    /// Attempt to "ratchet" an external metric file. This involves loading
    /// metrics from a metric file (if it exists), comparing against
    /// the metrics in `self` using `compare_to_old`, and rewriting the
    /// file to contain the metrics in `self` if none of the
    /// `MetricChange`s are `Regression`. Returns the diff as well
    /// as a boolean indicating whether the ratchet succeeded.
    pub fn ratchet(&self, p: &Path, pct: Option<f64>) -> (MetricDiff, bool) {
        let old = if p.exists() {
            MetricMap::load(p)
        } else {
            MetricMap::new()
        };

        let diff : MetricDiff = self.compare_to_old(&old, pct);
        let ok = diff.iter().all(|(_, v)| {
            match *v {
                Regression(_) => false,
                _ => true
            }
        });

        if ok {
            self.save(p).unwrap();
        }
        return (diff, ok)
    }
}
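
// A minimal sketch of driving the metric machinery by hand (illustrative;
// the names, values and path are made up):
//
//     let mut m = MetricMap::new();
//     m.insert_metric("decode-ns", 1500.0, 20.0);  // want this to shrink
//     m.insert_metric("throughput", 80.0, -5.0);   // want this to grow
//     let (diff, ok) = m.ratchet(&Path::new("metrics.json"), None);
//     // `ok` is false if any metric regressed beyond its noise range.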

/// A function that is opaque to the optimizer, to allow benchmarks to
/// pretend to use outputs to assist in avoiding dead-code
/// elimination.
///
/// This function is a no-op, and does not even read from `dummy`.
pub fn black_box<T>(dummy: T) {
    // we need to "use" the argument in some way LLVM can't
    // introspect.
    unsafe { asm!("" : : "r"(&dummy)) }
}

impl Bencher {
    /// Callback for benchmark functions to run in their body.
    pub fn iter<T>(&mut self, inner: || -> T) {
        self.ns_start = precise_time_ns();
        let k = self.iterations;
        for _ in range(0u64, k) {
            black_box(inner());
        }
        self.ns_end = precise_time_ns();
    }

    pub fn ns_elapsed(&mut self) -> u64 {
        if self.ns_start == 0 || self.ns_end == 0 {
            0
        } else {
            self.ns_end - self.ns_start
        }
    }

    pub fn ns_per_iter(&mut self) -> u64 {
        if self.iterations == 0 {
            0
        } else {
            self.ns_elapsed() / cmp::max(self.iterations, 1)
        }
    }

    pub fn bench_n(&mut self, n: u64, f: |&mut Bencher|) {
        self.iterations = n;
        f(self);
    }

    // This is a more statistics-driven benchmark algorithm
    pub fn auto_bench(&mut self, f: |&mut Bencher|) -> stats::Summary {
        // Initial bench run to get ballpark figure.
        let mut n = 1_u64;
        self.bench_n(n, |x| f(x));

        // Try to estimate iter count for 1ms falling back to 1m
        // iterations if first run took < 1ns.
        if self.ns_per_iter() == 0 {
            n = 1_000_000;
        } else {
            n = 1_000_000 / cmp::max(self.ns_per_iter(), 1);
        }

        // if the first run took more than 1ms we don't want to just
        // be left doing 0 iterations on every loop. The unfortunate
        // side effect of not being able to do as many runs is
        // automatically handled by the statistical analysis below
        // (i.e. larger error bars).
        if n == 0 { n = 1; }

        let mut total_run = 0;
        let samples : &mut [f64] = [0.0_f64, ..50];
        loop {
            let loop_start = precise_time_ns();

            for p in samples.mut_iter() {
                self.bench_n(n, |x| f(x));
                *p = self.ns_per_iter() as f64;
            }

            stats::winsorize(samples, 5.0);
            let summ = stats::Summary::new(samples);

            for p in samples.mut_iter() {
                self.bench_n(5 * n, |x| f(x));
                *p = self.ns_per_iter() as f64;
            }

            stats::winsorize(samples, 5.0);
            let summ5 = stats::Summary::new(samples);

            let now = precise_time_ns();
            let loop_run = now - loop_start;

            // If we've run for 100ms and seem to have converged to a
            // stable median.
            if loop_run > 100_000_000 &&
               summ.median_abs_dev_pct < 1.0 &&
               summ.median - summ5.median < summ5.median_abs_dev {
                return summ5;
            }

            total_run += loop_run;
            // Longest we ever run for is 3s.
            if total_run > 3_000_000_000 {
                return summ5;
            }

            n *= 2;
        }
    }
}

pub mod bench {
    use std::cmp;
    use super::{Bencher, BenchSamples};

    pub fn benchmark(f: |&mut Bencher|) -> BenchSamples {
        let mut bs = Bencher {
            iterations: 0,
            ns_start: 0,
            ns_end: 0,
            bytes: 0
        };

        let ns_iter_summ = bs.auto_bench(f);

        let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
        let iter_s = 1_000_000_000 / ns_iter;
        let mb_s = (bs.bytes * iter_s) / 1_000_000;
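
        // Worked example (illustrative numbers): with a median of 500 ns/iter
        // and bytes = 1024, iter_s = 1_000_000_000 / 500 = 2_000_000, so
        // mb_s = (1024 * 2_000_000) / 1_000_000 = 2048 MB/s.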

        BenchSamples {
            ns_iter_summ: ns_iter_summ,
            mb_s: mb_s as uint
        }
    }
}

#[cfg(test)]
mod tests {
    use test::{TrFailed, TrIgnored, TrOk, filter_tests, parse_opts,
               TestDesc, TestDescAndFn, TestOpts, run_test,
               Metric, MetricMap, MetricAdded, MetricRemoved,
               Improvement, Regression, LikelyNoise,
               StaticTestName, DynTestName, DynTestFn};
    use std::io::TempDir;

    #[test]
    pub fn do_not_run_ignored_tests() {
        fn f() { fail!(); }
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: true,
                should_fail: false
            },
            testfn: DynTestFn(proc() f()),
        };
        let (tx, rx) = channel();
        run_test(false, desc, tx);
        let (_, res, _) = rx.recv();
        assert!(res != TrOk);
    }

    #[test]
    pub fn ignored_tests_result_in_ignored() {
        fn f() { }
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: true,
                should_fail: false
            },
            testfn: DynTestFn(proc() f()),
        };
        let (tx, rx) = channel();
        run_test(false, desc, tx);
        let (_, res, _) = rx.recv();
        assert!(res == TrIgnored);
    }

    #[test]
    fn test_should_fail() {
        fn f() { fail!(); }
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: false,
                should_fail: true
            },
            testfn: DynTestFn(proc() f()),
        };
        let (tx, rx) = channel();
        run_test(false, desc, tx);
        let (_, res, _) = rx.recv();
        assert!(res == TrOk);
    }

    #[test]
    fn test_should_fail_but_succeeds() {
        fn f() { }
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: false,
                should_fail: true
            },
            testfn: DynTestFn(proc() f()),
        };
        let (tx, rx) = channel();
        run_test(false, desc, tx);
        let (_, res, _) = rx.recv();
        assert!(res == TrFailed);
    }

    #[test]
    fn first_free_arg_should_be_a_filter() {
        let args = vec!(~"progname", ~"filter");
        let opts = match parse_opts(args.as_slice()) {
            Some(Ok(o)) => o,
            _ => fail!("Malformed arg in first_free_arg_should_be_a_filter")
        };
        assert!("filter" == opts.filter.clone().unwrap());
    }

    #[test]
    fn parse_ignored_flag() {
        let args = vec!(~"progname", ~"filter", ~"--ignored");
        let opts = match parse_opts(args.as_slice()) {
            Some(Ok(o)) => o,
            _ => fail!("Malformed arg in parse_ignored_flag")
        };
        assert!(opts.run_ignored);
    }

    #[test]
    pub fn filter_for_ignored_option() {
        // When we run ignored tests the test filter should filter out all the
        // unignored tests and flip the ignore flag on the rest to false

        let opts = TestOpts {
            filter: None,
            run_ignored: true,
            logfile: None,
            run_tests: true,
            run_benchmarks: false,
            ratchet_noise_percent: None,
            ratchet_metrics: None,
            save_metrics: None,
            test_shard: None
        };

        let tests = vec!(
            TestDescAndFn {
                desc: TestDesc {
                    name: StaticTestName("1"),
                    ignore: true,
                    should_fail: false,
                },
                testfn: DynTestFn(proc() {}),
            },
            TestDescAndFn {
                desc: TestDesc {
                    name: StaticTestName("2"),
                    ignore: false,
                    should_fail: false
                },
                testfn: DynTestFn(proc() {}),
            });
        let filtered = filter_tests(&opts, tests);

        assert_eq!(filtered.len(), 1);
        assert_eq!(filtered.get(0).desc.name.to_str(), ~"1");
        assert!(filtered.get(0).desc.ignore == false);
    }

    #[test]
    pub fn sort_tests() {
        let opts = TestOpts {
            filter: None,
            run_ignored: false,
            logfile: None,
            run_tests: true,
            run_benchmarks: false,
            ratchet_noise_percent: None,
            ratchet_metrics: None,
            save_metrics: None,
            test_shard: None
        };

        let names =
            vec!(~"sha1::test", ~"int::test_to_str", ~"int::test_pow",
                 ~"test::do_not_run_ignored_tests",
                 ~"test::ignored_tests_result_in_ignored",
                 ~"test::first_free_arg_should_be_a_filter",
                 ~"test::parse_ignored_flag", ~"test::filter_for_ignored_option",
                 ~"test::sort_tests");
        let tests =
        {
            fn testfn() { }
            let mut tests = Vec::new();
            for name in names.iter() {
                let test = TestDescAndFn {
                    desc: TestDesc {
                        name: DynTestName((*name).clone()),
                        ignore: false,
                        should_fail: false
                    },
                    testfn: DynTestFn(testfn),
                };
                tests.push(test);
            }
            tests
        };
        let filtered = filter_tests(&opts, tests);

        let expected =
            vec!(~"int::test_pow", ~"int::test_to_str", ~"sha1::test",
                 ~"test::do_not_run_ignored_tests",
                 ~"test::filter_for_ignored_option",
                 ~"test::first_free_arg_should_be_a_filter",
                 ~"test::ignored_tests_result_in_ignored",
                 ~"test::parse_ignored_flag",
                 ~"test::sort_tests");

        for (a, b) in expected.iter().zip(filtered.iter()) {
            assert!(*a == b.desc.name.to_str());
        }
    }

    #[test]
    pub fn test_metricmap_compare() {
        let mut m1 = MetricMap::new();
        let mut m2 = MetricMap::new();
        m1.insert_metric("in-both-noise", 1000.0, 200.0);
        m2.insert_metric("in-both-noise", 1100.0, 200.0);

        m1.insert_metric("in-first-noise", 1000.0, 2.0);
        m2.insert_metric("in-second-noise", 1000.0, 2.0);

        m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
        m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);

        m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
        m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);

        m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
        m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);

        m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
        m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);

        let diff1 = m2.compare_to_old(&m1, None);

        assert_eq!(*(diff1.find(&~"in-both-noise").unwrap()), LikelyNoise);
        assert_eq!(*(diff1.find(&~"in-first-noise").unwrap()), MetricRemoved);
        assert_eq!(*(diff1.find(&~"in-second-noise").unwrap()), MetricAdded);
        assert_eq!(*(diff1.find(&~"in-both-want-downwards-but-regressed").unwrap()),
                   Regression(100.0));
        assert_eq!(*(diff1.find(&~"in-both-want-downwards-and-improved").unwrap()),
                   Improvement(50.0));
        assert_eq!(*(diff1.find(&~"in-both-want-upwards-but-regressed").unwrap()),
                   Regression(50.0));
        assert_eq!(*(diff1.find(&~"in-both-want-upwards-and-improved").unwrap()),
                   Improvement(100.0));
        assert_eq!(diff1.len(), 7);

        let diff2 = m2.compare_to_old(&m1, Some(200.0));

        assert_eq!(*(diff2.find(&~"in-both-noise").unwrap()), LikelyNoise);
        assert_eq!(*(diff2.find(&~"in-first-noise").unwrap()), MetricRemoved);
        assert_eq!(*(diff2.find(&~"in-second-noise").unwrap()), MetricAdded);
        assert_eq!(*(diff2.find(&~"in-both-want-downwards-but-regressed").unwrap()), LikelyNoise);
        assert_eq!(*(diff2.find(&~"in-both-want-downwards-and-improved").unwrap()), LikelyNoise);
        assert_eq!(*(diff2.find(&~"in-both-want-upwards-but-regressed").unwrap()), LikelyNoise);
        assert_eq!(*(diff2.find(&~"in-both-want-upwards-and-improved").unwrap()), LikelyNoise);
        assert_eq!(diff2.len(), 7);
    }

    #[test]
    pub fn ratchet_test() {
        let dpth = TempDir::new("test-ratchet").expect("missing test for ratchet");
        let pth = dpth.path().join("ratchet.json");

        let mut m1 = MetricMap::new();
        m1.insert_metric("runtime", 1000.0, 2.0);
        m1.insert_metric("throughput", 50.0, 2.0);

        let mut m2 = MetricMap::new();
        m2.insert_metric("runtime", 1100.0, 2.0);
        m2.insert_metric("throughput", 50.0, 2.0);

        m1.save(&pth).unwrap();

        // Ask for a ratchet that should fail to advance.
        let (diff1, ok1) = m2.ratchet(&pth, None);
        assert_eq!(ok1, false);
        assert_eq!(diff1.len(), 2);
        assert_eq!(*(diff1.find(&~"runtime").unwrap()), Regression(10.0));
        assert_eq!(*(diff1.find(&~"throughput").unwrap()), LikelyNoise);

        // Check that it was not rewritten.
        let m3 = MetricMap::load(&pth);
        let MetricMap(m3) = m3;
        assert_eq!(m3.len(), 2);
        assert_eq!(*(m3.find(&~"runtime").unwrap()), Metric::new(1000.0, 2.0));
        assert_eq!(*(m3.find(&~"throughput").unwrap()), Metric::new(50.0, 2.0));

        // Ask for a ratchet with an explicit noise-percentage override,
        // that should advance.
        let (diff2, ok2) = m2.ratchet(&pth, Some(10.0));
        assert_eq!(ok2, true);
        assert_eq!(diff2.len(), 2);
        assert_eq!(*(diff2.find(&~"runtime").unwrap()), LikelyNoise);
        assert_eq!(*(diff2.find(&~"throughput").unwrap()), LikelyNoise);

        // Check that it was rewritten.
        let m4 = MetricMap::load(&pth);
        let MetricMap(m4) = m4;
        assert_eq!(m4.len(), 2);
        assert_eq!(*(m4.find(&~"runtime").unwrap()), Metric::new(1100.0, 2.0));
        assert_eq!(*(m4.find(&~"throughput").unwrap()), Metric::new(50.0, 2.0));
    }
}