// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Support code for rustc's built-in unit-test and micro-benchmarking
//! framework.
//!
//! Almost all user code will only be interested in `Bencher` and
//! `black_box`. All other interactions (such as writing tests and
//! benchmarks themselves) should be done via the `#[test]` and
//! `#[bench]` attributes.
//!
//! See the [Testing Guide](../guide-testing.html) for more details.
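//!
//! A minimal sketch of typical usage (function names here are
//! illustrative, not part of this crate):
//!
//! ```ignore
//! extern crate test;
//!
//! #[test]
//! fn arithmetic() {
//!     assert_eq!(2u + 2, 4);
//! }
//!
//! #[bench]
//! fn bench_arithmetic(b: &mut test::Bencher) {
//!     b.iter(|| test::black_box(2u + 2));
//! }
//! ```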
// Currently, not much of this is meant for users. It is intended to
// support the simplest interface possible for representing and
// running tests while providing a base that other test frameworks may
// build on.
#![crate_name = "test"]
#![crate_type = "rlib"]
#![crate_type = "dylib"]
#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
       html_favicon_url = "http://www.rust-lang.org/favicon.ico",
       html_root_url = "http://doc.rust-lang.org/nightly/")]
#![allow(unknown_features)]
#![feature(asm, slicing_syntax)]
#![feature(box_syntax)]
#![feature(int_uint)]

extern crate getopts;
extern crate regex;
extern crate serialize;
extern crate "serialize" as rustc_serialize;
extern crate term;
pub use self::TestFn::*;
pub use self::ColorConfig::*;
pub use self::TestResult::*;
pub use self::TestName::*;
use self::TestEvent::*;
use self::NamePadding::*;
use self::OutputLocation::*;

use getopts::{OptGroup, optflag, optopt};
use regex::Regex;
use serialize::{json, Decodable, Encodable};
use term::color::{Color, RED, YELLOW, GREEN, CYAN};

use std::any::Any;
use std::cmp;
use std::collections::BTreeMap;
use std::fmt;
use std::io::fs::PathExtensions;
use std::io::stdio::StdWriter;
use std::io::{File, ChanReader, ChanWriter};
use std::io;
use std::iter::repeat;
use std::num::{Float, Int};
use std::os;
use std::str::FromStr;
use std::sync::mpsc::{channel, Sender};
use std::thread::{self, Thread};
use std::thunk::{Thunk, Invoke};
use std::time::Duration;
// to be used by rustc to compile tests in libtest
pub mod test {
    pub use {Bencher, TestName, TestResult, TestDesc,
             TestDescAndFn, TestOpts, TrFailed, TrIgnored, TrOk,
             Metric, MetricMap,
             StaticTestFn, StaticTestName, DynTestName, DynTestFn,
             run_test, test_main, test_main_static, filter_tests,
             parse_opts, StaticBenchFn, ShouldFail};
}

pub mod stats;
// The name of a test. By convention this follows the rules for rust
// paths; i.e. it should be a series of identifiers separated by double
// colons. This way if some test runner wants to arrange the tests
// hierarchically it may.
#[derive(Clone, PartialEq, Eq, Hash, Show)]
pub enum TestName {
    StaticTestName(&'static str),
    DynTestName(String)
}
impl TestName {
    fn as_slice<'a>(&'a self) -> &'a str {
        match *self {
            StaticTestName(s) => s,
            DynTestName(ref s) => s.as_slice()
        }
    }
}
impl fmt::Display for TestName {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Display::fmt(self.as_slice(), f)
    }
}
#[derive(Clone, Copy)]
enum NamePadding {
    PadNone,
    PadOnLeft,
    PadOnRight,
}

impl TestDesc {
    fn padded_name(&self, column_count: uint, align: NamePadding) -> String {
        let mut name = String::from_str(self.name.as_slice());
        let fill = column_count.saturating_sub(name.len());
        let mut pad = repeat(" ").take(fill).collect::<String>();
        match align {
            PadNone => name,
            PadOnLeft => {
                pad.push_str(name.as_slice());
                pad
            }
            PadOnRight => {
                name.push_str(pad.as_slice());
                name
            }
        }
    }
}
/// Represents a benchmark function.
pub trait TDynBenchFn {
    fn run(&self, harness: &mut Bencher);
}

// A function that runs a test. If the function returns successfully,
// the test succeeds; if the function panics then the test fails. We
// may need to come up with a more clever definition of test in order
// to support isolation of tests into tasks.
pub enum TestFn {
    StaticTestFn(fn()),
    StaticBenchFn(fn(&mut Bencher)),
    StaticMetricFn(fn(&mut MetricMap)),
    DynTestFn(Thunk),
    DynMetricFn(Box<for<'a> Invoke<&'a mut MetricMap>+'static>),
    DynBenchFn(Box<TDynBenchFn+'static>)
}
impl TestFn {
    fn padding(&self) -> NamePadding {
        match self {
            &StaticTestFn(..) => PadNone,
            &StaticBenchFn(..) => PadOnRight,
            &StaticMetricFn(..) => PadOnRight,
            &DynTestFn(..) => PadNone,
            &DynMetricFn(..) => PadOnRight,
            &DynBenchFn(..) => PadOnRight,
        }
    }
}

impl fmt::Debug for TestFn {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str(match *self {
            StaticTestFn(..) => "StaticTestFn(..)",
            StaticBenchFn(..) => "StaticBenchFn(..)",
            StaticMetricFn(..) => "StaticMetricFn(..)",
            DynTestFn(..) => "DynTestFn(..)",
            DynMetricFn(..) => "DynMetricFn(..)",
            DynBenchFn(..) => "DynBenchFn(..)"
        })
    }
}
/// Manager of the benchmarking runs.
///
/// This is fed into functions marked with `#[bench]` to allow for
/// set-up & tear-down before running a piece of code repeatedly via a
/// call to `iter`.
#[derive(Copy)]
pub struct Bencher {
    iterations: u64,
    dur: Duration,
    pub bytes: u64,
}
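// A typical `#[bench]` body (sketch; `data` and the function name are
// illustrative): set-up outside `iter` runs once, while the closure handed
// to `iter` is what gets timed repeatedly.
//
//     #[bench]
//     fn bench_xor(b: &mut Bencher) {
//         let data: Vec<u64> = repeat(1u64).take(1000).collect(); // set-up, not timed
//         b.iter(|| data.iter().fold(0u64, |a, &x| a ^ x));       // timed body
//     }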
#[derive(Copy, Clone, Show, PartialEq, Eq, Hash)]
pub enum ShouldFail {
    No,
    Yes(Option<&'static str>)
}

// The definition of a single test. A test runner will run a list of
// these.
#[derive(Clone, Show, PartialEq, Eq, Hash)]
pub struct TestDesc {
    pub name: TestName,
    pub ignore: bool,
    pub should_fail: ShouldFail,
}

unsafe impl Send for TestDesc {}

pub struct TestDescAndFn {
    pub desc: TestDesc,
    pub testfn: TestFn,
}

#[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Show, Copy)]
pub struct Metric {
    value: f64,
    noise: f64
}

impl Metric {
    pub fn new(value: f64, noise: f64) -> Metric {
        Metric {value: value, noise: noise}
    }
}

#[derive(PartialEq)]
pub struct MetricMap(BTreeMap<String,Metric>);

impl Clone for MetricMap {
    fn clone(&self) -> MetricMap {
        let MetricMap(ref map) = *self;
        MetricMap(map.clone())
    }
}
// The default console test runner. It accepts the command line
// arguments and a vector of test_descs.
pub fn test_main(args: &[String], tests: Vec<TestDescAndFn> ) {
    let opts =
        match parse_opts(args) {
            Some(Ok(o)) => o,
            Some(Err(msg)) => panic!("{:?}", msg),
            None => return
        };
    match run_tests_console(&opts, tests) {
        Ok(true) => {}
        Ok(false) => panic!("Some tests failed"),
        Err(e) => panic!("io error when running tests: {:?}", e),
    }
}
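// Sketch of driving the runner by hand with a hand-built test (names are
// illustrative; the `#[test]` attribute normally generates this boilerplate):
//
//     fn my_check() { assert!(1 + 1 == 2); }
//
//     let tests = vec!(TestDescAndFn {
//         desc: TestDesc {
//             name: StaticTestName("my_check"),
//             ignore: false,
//             should_fail: ShouldFail::No,
//         },
//         testfn: StaticTestFn(my_check),
//     });
//     test_main(::std::os::args().as_slice(), tests);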
// A variant optimized for invocation with a static test vector.
// This will panic (intentionally) when fed any dynamic tests, because
// it is copying the static values out into a dynamic vector and cannot
// copy dynamic values. It is doing this because from this point on
// a ~[TestDescAndFn] is used in order to effect ownership-transfer
// semantics into parallel test runners, which in turn requires a ~[]
// rather than a &[].
pub fn test_main_static(args: &[String], tests: &[TestDescAndFn]) {
    let owned_tests = tests.iter().map(|t| {
        match t.testfn {
            StaticTestFn(f) => TestDescAndFn { testfn: StaticTestFn(f), desc: t.desc.clone() },
            StaticBenchFn(f) => TestDescAndFn { testfn: StaticBenchFn(f), desc: t.desc.clone() },
            _ => panic!("non-static tests passed to test::test_main_static")
        }
    }).collect();
    test_main(args, owned_tests)
}
#[derive(Copy)]
pub enum ColorConfig {
    AutoColor,
    AlwaysColor,
    NeverColor,
}

pub struct TestOpts {
    pub filter: Option<Regex>,
    pub run_ignored: bool,
    pub run_tests: bool,
    pub run_benchmarks: bool,
    pub logfile: Option<Path>,
    pub nocapture: bool,
    pub color: ColorConfig,
}

impl TestOpts {
    #[cfg(test)]
    fn new() -> TestOpts {
        TestOpts {
            filter: None,
            run_ignored: false,
            run_tests: false,
            run_benchmarks: false,
            logfile: None,
            nocapture: false,
            color: AutoColor,
        }
    }
}
/// Result of parsing the options.
pub type OptRes = Result<TestOpts, String>;

fn optgroups() -> Vec<getopts::OptGroup> {
    vec!(getopts::optflag("", "ignored", "Run ignored tests"),
         getopts::optflag("", "test", "Run tests and not benchmarks"),
         getopts::optflag("", "bench", "Run benchmarks instead of tests"),
         getopts::optflag("h", "help", "Display this message (longer with --help)"),
         getopts::optopt("", "logfile", "Write logs to the specified file instead \
                                         of stdout", "PATH"),
         getopts::optflag("", "nocapture", "don't capture stdout/stderr of each \
                                            task, allow printing directly"),
         getopts::optopt("", "color", "Configure coloring of output:
            auto   = colorize if stdout is a tty and tests are run serially (default);
            always = always colorize output;
            never  = never colorize output;", "auto|always|never"))
}
fn usage(binary: &str) {
    let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
    println!(r#"{usage}

The FILTER regex is tested against the name of all tests to run, and
only those tests that match are run.

By default, all tests are run in parallel. This can be altered with the
RUST_TEST_TASKS environment variable when running tests (set it to 1).

All tests have their standard output and standard error captured by default.
This can be overridden with the --nocapture flag or the RUST_TEST_NOCAPTURE=1
environment variable. Logging is not captured by default.

Test Attributes:

    #[test]        - Indicates a function is a test to be run. This function
                     takes no arguments.
    #[bench]       - Indicates a function is a benchmark to be run. This
                     function takes one argument (test::Bencher).
    #[should_fail] - This function (also labeled with #[test]) will only pass if
                     the code causes a failure (an assertion failure or panic!)
                     A message may be provided, which the failure string must
                     contain: #[should_fail(expected = "foo")].
    #[ignore]      - When applied to a function which is already attributed as a
                     test, then the test runner will ignore these tests during
                     normal test runs. Running with --ignored will run these
                     tests."#,
             usage = getopts::usage(message.as_slice(),
                                    optgroups().as_slice()));
}
// Parses command line arguments into test options
pub fn parse_opts(args: &[String]) -> Option<OptRes> {
    let args_ = args.tail();
    let matches =
        match getopts::getopts(args_.as_slice(), optgroups().as_slice()) {
            Ok(m) => m,
            Err(f) => return Some(Err(f.to_string()))
        };

    if matches.opt_present("h") { usage(args[0].as_slice()); return None; }

    let filter = if matches.free.len() > 0 {
        let s = matches.free[0].as_slice();
        match Regex::new(s) {
            Ok(re) => Some(re),
            Err(e) => return Some(Err(format!("could not parse /{}/: {:?}", s, e)))
        }
    } else {
        None
    };

    let run_ignored = matches.opt_present("ignored");

    let logfile = matches.opt_str("logfile");
    let logfile = logfile.map(|s| Path::new(s));

    let run_benchmarks = matches.opt_present("bench");
    let run_tests = ! run_benchmarks ||
        matches.opt_present("test");

    let mut nocapture = matches.opt_present("nocapture");
    if !nocapture {
        nocapture = os::getenv("RUST_TEST_NOCAPTURE").is_some();
    }

    let color = match matches.opt_str("color").as_ref().map(|s| s.as_slice()) {
        Some("auto") | None => AutoColor,
        Some("always") => AlwaysColor,
        Some("never") => NeverColor,

        Some(v) => return Some(Err(format!("argument for --color must be \
                                            auto, always, or never (was {})",
                                           v)))
    };

    let test_opts = TestOpts {
        filter: filter,
        run_ignored: run_ignored,
        run_tests: run_tests,
        run_benchmarks: run_benchmarks,
        logfile: logfile,
        nocapture: nocapture,
        color: color,
    };

    Some(Ok(test_opts))
}
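// Example invocations accepted by the parser above (illustrative binary
// name; see `optgroups` for the full flag list):
//
//     ./mytests some_regex_filter        # run only tests whose names match
//     ./mytests --bench                  # run benchmarks instead of tests
//     ./mytests --ignored                # run only #[ignore]d tests
//     RUST_TEST_NOCAPTURE=1 ./mytests    # same effect as --nocapture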
#[derive(Clone, PartialEq)]
pub struct BenchSamples {
    ns_iter_summ: stats::Summary<f64>,
    mb_s: uint,
}

#[derive(Clone, PartialEq)]
pub enum TestResult {
    TrOk,
    TrFailed,
    TrIgnored,
    TrMetrics(MetricMap),
    TrBench(BenchSamples),
}

unsafe impl Send for TestResult {}

enum OutputLocation<T> {
    Pretty(Box<term::Terminal<term::WriterWrapper> + Send>),
    Raw(T),
}

struct ConsoleTestState<T> {
    log_out: Option<File>,
    out: OutputLocation<T>,
    use_color: bool,
    show_boxplot: bool,
    boxplot_width: uint,
    show_all_stats: bool,
    total: uint,
    passed: uint,
    failed: uint,
    ignored: uint,
    measured: uint,
    metrics: MetricMap,
    failures: Vec<(TestDesc, Vec<u8> )> ,
    max_name_len: uint, // number of columns to fill when aligning names
}
impl<T: Writer> ConsoleTestState<T> {
    pub fn new(opts: &TestOpts,
               _: Option<T>) -> io::IoResult<ConsoleTestState<StdWriter>> {
        let log_out = match opts.logfile {
            Some(ref path) => Some(try!(File::create(path))),
            None => None
        };
        let out = match term::stdout() {
            None => Raw(io::stdio::stdout_raw()),
            Some(t) => Pretty(t)
        };

        Ok(ConsoleTestState {
            out: out,
            log_out: log_out,
            use_color: use_color(opts),
            show_boxplot: false,
            boxplot_width: 50,
            show_all_stats: false,
            total: 0u,
            passed: 0u,
            failed: 0u,
            ignored: 0u,
            measured: 0u,
            metrics: MetricMap::new(),
            failures: Vec::new(),
            max_name_len: 0u,
        })
    }

    pub fn write_ok(&mut self) -> io::IoResult<()> {
        self.write_pretty("ok", term::color::GREEN)
    }

    pub fn write_failed(&mut self) -> io::IoResult<()> {
        self.write_pretty("FAILED", term::color::RED)
    }

    pub fn write_ignored(&mut self) -> io::IoResult<()> {
        self.write_pretty("ignored", term::color::YELLOW)
    }

    pub fn write_metric(&mut self) -> io::IoResult<()> {
        self.write_pretty("metric", term::color::CYAN)
    }

    pub fn write_bench(&mut self) -> io::IoResult<()> {
        self.write_pretty("bench", term::color::CYAN)
    }

    pub fn write_pretty(&mut self,
                        word: &str,
                        color: term::color::Color) -> io::IoResult<()> {
        match self.out {
            Pretty(ref mut term) => {
                if self.use_color {
                    try!(term.fg(color));
                }
                try!(term.write(word.as_bytes()));
                if self.use_color {
                    try!(term.reset());
                }
                Ok(())
            }
            Raw(ref mut stdout) => stdout.write(word.as_bytes())
        }
    }

    pub fn write_plain(&mut self, s: &str) -> io::IoResult<()> {
        match self.out {
            Pretty(ref mut term) => term.write(s.as_bytes()),
            Raw(ref mut stdout) => stdout.write(s.as_bytes())
        }
    }
    pub fn write_run_start(&mut self, len: uint) -> io::IoResult<()> {
        self.total = len;
        let noun = if len != 1 { "tests" } else { "test" };
        self.write_plain(format!("\nrunning {} {}\n", len, noun).as_slice())
    }

    pub fn write_test_start(&mut self, test: &TestDesc,
                            align: NamePadding) -> io::IoResult<()> {
        let name = test.padded_name(self.max_name_len, align);
        self.write_plain(format!("test {} ... ", name).as_slice())
    }
    pub fn write_result(&mut self, result: &TestResult) -> io::IoResult<()> {
        try!(match *result {
            TrOk => self.write_ok(),
            TrFailed => self.write_failed(),
            TrIgnored => self.write_ignored(),
            TrMetrics(ref mm) => {
                try!(self.write_metric());
                self.write_plain(format!(": {}", fmt_metrics(mm)).as_slice())
            }
            TrBench(ref bs) => {
                try!(self.write_bench());

                if self.show_boxplot {
                    let mut wr = Vec::new();
                    try!(stats::write_boxplot(&mut wr, &bs.ns_iter_summ, self.boxplot_width));
                    let s = String::from_utf8(wr).unwrap();
                    try!(self.write_plain(format!(": {}", s).as_slice()));
                }

                if self.show_all_stats {
                    let mut wr = Vec::new();
                    try!(stats::write_5_number_summary(&mut wr, &bs.ns_iter_summ));
                    let s = String::from_utf8(wr).unwrap();
                    try!(self.write_plain(format!(": {}", s).as_slice()));
                } else {
                    try!(self.write_plain(format!(": {}",
                                                  fmt_bench_samples(bs)).as_slice()));
                }

                Ok(())
            }
        });
        self.write_plain("\n")
    }
    pub fn write_log(&mut self, test: &TestDesc,
                     result: &TestResult) -> io::IoResult<()> {
        match self.log_out {
            None => Ok(()),
            Some(ref mut o) => {
                let s = format!("{} {}\n", match *result {
                        TrOk => "ok".to_string(),
                        TrFailed => "failed".to_string(),
                        TrIgnored => "ignored".to_string(),
                        TrMetrics(ref mm) => fmt_metrics(mm),
                        TrBench(ref bs) => fmt_bench_samples(bs)
                    }, test.name.as_slice());
                o.write(s.as_bytes())
            }
        }
    }
    pub fn write_failures(&mut self) -> io::IoResult<()> {
        try!(self.write_plain("\nfailures:\n"));
        let mut failures = Vec::new();
        let mut fail_out = String::new();
        for &(ref f, ref stdout) in self.failures.iter() {
            failures.push(f.name.to_string());
            if stdout.len() > 0 {
                fail_out.push_str(format!("---- {} stdout ----\n\t",
                                          f.name.as_slice()).as_slice());
                let output = String::from_utf8_lossy(stdout.as_slice());
                fail_out.push_str(output.as_slice());
                fail_out.push_str("\n");
            }
        }
        if fail_out.len() > 0 {
            try!(self.write_plain("\n"));
            try!(self.write_plain(fail_out.as_slice()));
        }

        try!(self.write_plain("\nfailures:\n"));
        failures.sort();
        for name in failures.iter() {
            try!(self.write_plain(format!("    {}\n",
                                          name.as_slice()).as_slice()));
        }
        Ok(())
    }
    pub fn write_run_finish(&mut self,
                            ratchet_metrics: &Option<Path>,
                            ratchet_pct: Option<f64>) -> io::IoResult<bool> {
        assert!(self.passed + self.failed + self.ignored + self.measured == self.total);

        let ratchet_success = match *ratchet_metrics {
            None => true,
            Some(ref pth) => {
                try!(self.write_plain(format!("\nusing metrics ratchet: {:?}\n",
                                              pth.display()).as_slice()));
                match ratchet_pct {
                    None => (),
                    Some(pct) =>
                        try!(self.write_plain(format!("with noise-tolerance \
                                                       forced to: {}%\n",
                                                      pct).as_slice()))
                }
                true
            }
        };

        let test_success = self.failed == 0u;
        if !test_success {
            try!(self.write_failures());
        }

        let success = ratchet_success && test_success;

        try!(self.write_plain("\ntest result: "));
        if success {
            // There's no parallelism at this point so it's safe to use color
            try!(self.write_ok());
        } else {
            try!(self.write_failed());
        }
        let s = format!(". {} passed; {} failed; {} ignored; {} measured\n\n",
                        self.passed, self.failed, self.ignored, self.measured);
        try!(self.write_plain(s.as_slice()));
        return Ok(success);
    }
}
pub fn fmt_metrics(mm: &MetricMap) -> String {
    let MetricMap(ref mm) = *mm;
    let v : Vec<String> = mm.iter()
        .map(|(k,v)| format!("{}: {} (+/- {})", *k,
                             v.value as f64, v.noise as f64))
        .collect();
    v.connect(", ")
}
pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
    if bs.mb_s != 0 {
        format!("{:>9} ns/iter (+/- {}) = {} MB/s",
                bs.ns_iter_summ.median as uint,
                (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint,
                bs.mb_s)
    } else {
        format!("{:>9} ns/iter (+/- {})",
                bs.ns_iter_summ.median as uint,
                (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint)
    }
}
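// Illustrative output from the two branches above (numbers invented):
//
//          9137 ns/iter (+/- 214) = 448 MB/s
//          9137 ns/iter (+/- 214)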
// A simple console test runner
pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn> ) -> io::IoResult<bool> {

    fn callback<T: Writer>(event: &TestEvent, st: &mut ConsoleTestState<T>) -> io::IoResult<()> {
        match (*event).clone() {
            TeFiltered(ref filtered_tests) => st.write_run_start(filtered_tests.len()),
            TeWait(ref test, padding) => st.write_test_start(test, padding),
            TeResult(test, result, stdout) => {
                try!(st.write_log(&test, &result));
                try!(st.write_result(&result));
                match result {
                    TrOk => st.passed += 1,
                    TrIgnored => st.ignored += 1,
                    TrMetrics(mm) => {
                        let tname = test.name.as_slice();
                        let MetricMap(mm) = mm;
                        for (k,v) in mm.iter() {
                            st.metrics
                              .insert_metric(format!("{}.{}",
                                                     tname,
                                                     k).as_slice(),
                                             v.value,
                                             v.noise);
                        }
                        st.measured += 1
                    }
                    TrBench(bs) => {
                        st.metrics.insert_metric(test.name.as_slice(),
                                                 bs.ns_iter_summ.median,
                                                 bs.ns_iter_summ.max - bs.ns_iter_summ.min);
                        st.measured += 1
                    }
                    TrFailed => {
                        st.failed += 1;
                        st.failures.push((test, stdout));
                    }
                }
                Ok(())
            }
        }
    }

    let mut st = try!(ConsoleTestState::new(opts, None::<StdWriter>));
    fn len_if_padded(t: &TestDescAndFn) -> uint {
        match t.testfn.padding() {
            PadNone => 0u,
            PadOnLeft | PadOnRight => t.desc.name.as_slice().len(),
        }
    }
    match tests.iter().max_by(|t|len_if_padded(*t)) {
        Some(t) => {
            let n = t.desc.name.as_slice();
            st.max_name_len = n.len();
        },
        None => {}
    }
    try!(run_tests(opts, tests, |x| callback(&x, &mut st)));
    return st.write_run_finish(&None, None);
}
#[test]
fn should_sort_failures_before_printing_them() {
    let test_a = TestDesc {
        name: StaticTestName("a"),
        ignore: false,
        should_fail: ShouldFail::No
    };

    let test_b = TestDesc {
        name: StaticTestName("b"),
        ignore: false,
        should_fail: ShouldFail::No
    };

    let mut st = ConsoleTestState {
        log_out: None,
        out: Raw(Vec::new()),
        use_color: false,
        show_boxplot: false,
        boxplot_width: 0,
        show_all_stats: false,
        total: 0u,
        passed: 0u,
        failed: 0u,
        ignored: 0u,
        measured: 0u,
        max_name_len: 10u,
        metrics: MetricMap::new(),
        failures: vec!((test_b, Vec::new()), (test_a, Vec::new()))
    };

    st.write_failures().unwrap();
    let s = match st.out {
        Raw(ref m) => String::from_utf8_lossy(&m[]),
        Pretty(_) => unreachable!()
    };

    let apos = s.find_str("a").unwrap();
    let bpos = s.find_str("b").unwrap();
    assert!(apos < bpos);
}
fn use_color(opts: &TestOpts) -> bool {
    match opts.color {
        AutoColor => get_concurrency() == 1 && io::stdout().get_ref().isatty(),
        AlwaysColor => true,
        NeverColor => false,
    }
}

#[derive(Clone)]
enum TestEvent {
    TeFiltered(Vec<TestDesc> ),
    TeWait(TestDesc, NamePadding),
    TeResult(TestDesc, TestResult, Vec<u8> ),
}

pub type MonitorMsg = (TestDesc, TestResult, Vec<u8> );
fn run_tests<F>(opts: &TestOpts,
                tests: Vec<TestDescAndFn> ,
                mut callback: F) -> io::IoResult<()> where
    F: FnMut(TestEvent) -> io::IoResult<()>,
{
    let filtered_tests = filter_tests(opts, tests);
    let filtered_descs = filtered_tests.iter()
                                       .map(|t| t.desc.clone())
                                       .collect();

    try!(callback(TeFiltered(filtered_descs)));

    let (filtered_tests, filtered_benchs_and_metrics): (Vec<_>, _) =
        filtered_tests.into_iter().partition(|e| {
            match e.testfn {
                StaticTestFn(_) | DynTestFn(_) => true,
                _ => false
            }
        });

    // It's tempting to just spawn all the tests at once, but since we have
    // many tests that run in other processes we would be making a big mess.
    let concurrency = get_concurrency();

    let mut remaining = filtered_tests;
    remaining.reverse();
    let mut pending = 0;

    let (tx, rx) = channel::<MonitorMsg>();

    while pending > 0 || !remaining.is_empty() {
        while pending < concurrency && !remaining.is_empty() {
            let test = remaining.pop().unwrap();
            if concurrency == 1 {
                // We are doing one test at a time so we can print the name
                // of the test before we run it. Useful for debugging tests
                // that hang forever.
                try!(callback(TeWait(test.desc.clone(), test.testfn.padding())));
            }
            run_test(opts, !opts.run_tests, test, tx.clone());
            pending += 1;
        }

        let (desc, result, stdout) = rx.recv().unwrap();
        if concurrency != 1 {
            try!(callback(TeWait(desc.clone(), PadNone)));
        }
        try!(callback(TeResult(desc, result, stdout)));
        pending -= 1;
    }

    // All benchmarks run at the end, in serial.
    // (this includes metric fns)
    for b in filtered_benchs_and_metrics.into_iter() {
        try!(callback(TeWait(b.desc.clone(), b.testfn.padding())));
        run_test(opts, !opts.run_benchmarks, b, tx.clone());
        let (test, result, stdout) = rx.recv().unwrap();
        try!(callback(TeResult(test, result, stdout)));
    }
    Ok(())
}
fn get_concurrency() -> uint {
    use std::rt;
    match os::getenv("RUST_TEST_TASKS") {
        Some(s) => {
            let opt_n: Option<uint> = FromStr::from_str(s.as_slice());
            match opt_n {
                Some(n) if n > 0 => n,
                _ => panic!("RUST_TEST_TASKS is `{}`, should be a positive integer.", s)
            }
        }
        None => {
            rt::default_sched_threads()
        }
    }
}
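// For example, `RUST_TEST_TASKS=1 ./mytests` forces the serial path (which
// is also what allows colored output on a tty; see `use_color` above),
// while `RUST_TEST_TASKS=8 ./mytests` keeps up to eight tests in flight.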
pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
    let mut filtered = tests;

    // Remove tests that don't match the test filter
    filtered = match opts.filter {
        None => filtered,
        Some(ref re) => {
            filtered.into_iter()
                    .filter(|test| re.is_match(test.desc.name.as_slice())).collect()
        }
    };

    // Maybe pull out the ignored tests and unignore them
    filtered = if !opts.run_ignored {
        filtered
    } else {
        fn filter(test: TestDescAndFn) -> Option<TestDescAndFn> {
            if test.desc.ignore {
                let TestDescAndFn {desc, testfn} = test;
                Some(TestDescAndFn {
                    desc: TestDesc {ignore: false, ..desc},
                    testfn: testfn
                })
            } else {
                None
            }
        }
        filtered.into_iter().filter_map(|x| filter(x)).collect()
    };

    // Sort the tests alphabetically
    filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));

    filtered
}
pub fn run_test(opts: &TestOpts,
                force_ignore: bool,
                test: TestDescAndFn,
                monitor_ch: Sender<MonitorMsg>) {

    let TestDescAndFn {desc, testfn} = test;

    if force_ignore || desc.ignore {
        monitor_ch.send((desc, TrIgnored, Vec::new())).unwrap();
        return;
    }

    fn run_test_inner(desc: TestDesc,
                      monitor_ch: Sender<MonitorMsg>,
                      nocapture: bool,
                      testfn: Thunk) {
        Thread::spawn(move || {
            let (tx, rx) = channel();
            let mut reader = ChanReader::new(rx);
            let stdout = ChanWriter::new(tx.clone());
            let stderr = ChanWriter::new(tx);
            let mut cfg = thread::Builder::new().name(match desc.name {
                DynTestName(ref name) => name.clone().to_string(),
                StaticTestName(name) => name.to_string(),
            });
            if nocapture {
                drop((stdout, stderr));
            } else {
                cfg = cfg.stdout(box stdout as Box<Writer + Send>);
                cfg = cfg.stderr(box stderr as Box<Writer + Send>);
            }

            let result_guard = cfg.scoped(move || { testfn.invoke(()) });
            let stdout = reader.read_to_end().unwrap().into_iter().collect();
            let test_result = calc_result(&desc, result_guard.join());
            monitor_ch.send((desc.clone(), test_result, stdout)).unwrap();
        });
    }

    match testfn {
        DynBenchFn(bencher) => {
            let bs = ::bench::benchmark(|harness| bencher.run(harness));
            monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
            return;
        }
        StaticBenchFn(benchfn) => {
            let bs = ::bench::benchmark(|harness| (benchfn.clone())(harness));
            monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
            return;
        }
        DynMetricFn(f) => {
            let mut mm = MetricMap::new();
            f.invoke(&mut mm);
            monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
            return;
        }
        StaticMetricFn(f) => {
            let mut mm = MetricMap::new();
            f(&mut mm);
            monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
            return;
        }
        DynTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture, f),
        StaticTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture,
                                          Thunk::new(move|| f()))
    }
}
fn calc_result(desc: &TestDesc, task_result: Result<(), Box<Any+Send>>) -> TestResult {
    match (&desc.should_fail, task_result) {
        (&ShouldFail::No, Ok(())) |
        (&ShouldFail::Yes(None), Err(_)) => TrOk,
        (&ShouldFail::Yes(Some(msg)), Err(ref err))
            if err.downcast_ref::<String>()
                  .map(|e| e.as_slice())
                  .or_else(|| err.downcast_ref::<&'static str>().map(|e| *e))
                  .map(|e| e.contains(msg))
                  .unwrap_or(false) => TrOk,
        _ => TrFailed,
    }
}
impl MetricMap {
    pub fn new() -> MetricMap {
        MetricMap(BTreeMap::new())
    }

    /// Load a MetricMap from a file.
    ///
    /// # Panics
    ///
    /// This function will panic if the path does not exist or the path does not
    /// contain a valid metric map.
    pub fn load(p: &Path) -> MetricMap {
        assert!(p.exists());
        let mut f = File::open(p).unwrap();
        let value = json::from_reader(&mut f as &mut io::Reader).unwrap();
        let mut decoder = json::Decoder::new(value);
        MetricMap(match Decodable::decode(&mut decoder) {
            Ok(t) => t,
            Err(e) => panic!("failure decoding JSON: {:?}", e)
        })
    }

    /// Write a MetricMap to a file.
    pub fn save(&self, p: &Path) -> io::IoResult<()> {
        let mut file = try!(File::create(p));
        let MetricMap(ref map) = *self;
        write!(&mut file, "{}", json::as_json(map))
    }
    /// Insert a named `value` (+/- `noise`) metric into the map. The value
    /// must be non-negative. The `noise` indicates the uncertainty of the
    /// metric, which doubles as the "noise range" of acceptable
    /// pairwise-regressions on this named value, when comparing from one
    /// metric to the next using `compare_to_old`.
    ///
    /// If `noise` is positive, then it means this metric is of a value
    /// you want to see grow smaller, so a change larger than `noise` in the
    /// positive direction represents a regression.
    ///
    /// If `noise` is negative, then it means this metric is of a value
    /// you want to see grow larger, so a change larger than `noise` in the
    /// negative direction represents a regression.
    pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
        let m = Metric {
            value: value,
            noise: noise
        };
        let MetricMap(ref mut map) = *self;
        map.insert(name.to_string(), m);
    }
}
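// A short sketch of the polarity rule documented above (metric names are
// illustrative): time-like metrics should shrink, so their noise is
// positive; throughput-like metrics should grow, so their noise is
// negative.
//
//     let mut mm = MetricMap::new();
//     mm.insert_metric("run-time-ns", 1500.0, 20.0);    // smaller is better
//     mm.insert_metric("throughput-mb", 250.0, -5.0);   // larger is better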
/// A function that is opaque to the optimizer, to allow benchmarks to
/// pretend to use outputs to assist in avoiding dead-code
/// elimination.
///
/// This function is a no-op, and does not even read from `dummy`.
pub fn black_box<T>(dummy: T) -> T {
    // we need to "use" the argument in some way LLVM can't
    // introspect.
    unsafe {asm!("" : : "r"(&dummy))}
    dummy
}
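// Typical use inside a benchmark (sketch; `expensive_computation` is a
// hypothetical function): wrapping the result in `black_box` keeps LLVM
// from discarding the loop body as dead code.
//
//     b.iter(|| black_box(expensive_computation()));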
impl Bencher {
    /// Callback for benchmark functions to run in their body.
    pub fn iter<T, F>(&mut self, mut inner: F) where F: FnMut() -> T {
        self.dur = Duration::span(|| {
            let k = self.iterations;
            for _ in range(0u64, k) {
                black_box(inner());
            }
        });
    }

    pub fn ns_elapsed(&mut self) -> u64 {
        self.dur.num_nanoseconds().unwrap() as u64
    }

    pub fn ns_per_iter(&mut self) -> u64 {
        if self.iterations == 0 {
            0
        } else {
            self.ns_elapsed() / cmp::max(self.iterations, 1)
        }
    }

    pub fn bench_n<F>(&mut self, n: u64, f: F) where F: FnOnce(&mut Bencher) {
        self.iterations = n;
        f(self);
    }
    // This is a more statistics-driven benchmark algorithm
    pub fn auto_bench<F>(&mut self, mut f: F) -> stats::Summary<f64> where F: FnMut(&mut Bencher) {
        // Initial bench run to get ballpark figure.
        let mut n = 1_u64;
        self.bench_n(n, |x| f(x));

        // Try to estimate iter count for 1ms falling back to 1m
        // iterations if first run took < 1ns.
        if self.ns_per_iter() == 0 {
            n = 1_000_000;
        } else {
            n = 1_000_000 / cmp::max(self.ns_per_iter(), 1);
        }
        // if the first run took more than 1ms we don't want to just
        // be left doing 0 iterations on every loop. The unfortunate
        // side effect of not being able to do as many runs is
        // automatically handled by the statistical analysis below
        // (i.e. larger error bars).
        if n == 0 { n = 1; }

        let mut total_run = Duration::nanoseconds(0);
        let samples : &mut [f64] = &mut [0.0_f64; 50];
        loop {
            let mut summ = None;
            let mut summ5 = None;

            let loop_run = Duration::span(|| {

                for p in samples.iter_mut() {
                    self.bench_n(n, |x| f(x));
                    *p = self.ns_per_iter() as f64;
                }

                stats::winsorize(samples, 5.0);
                summ = Some(stats::Summary::new(samples));

                for p in samples.iter_mut() {
                    self.bench_n(5 * n, |x| f(x));
                    *p = self.ns_per_iter() as f64;
                }

                stats::winsorize(samples, 5.0);
                summ5 = Some(stats::Summary::new(samples));
            });
            let summ = summ.unwrap();
            let summ5 = summ5.unwrap();

            // If we've run for 100ms and seem to have converged to a
            // stable median.
            if loop_run.num_milliseconds() > 100 &&
                summ.median_abs_dev_pct < 1.0 &&
                summ.median - summ5.median < summ5.median_abs_dev {
                return summ5;
            }

            total_run = total_run + loop_run;
            // Longest we ever run for is 3s.
            if total_run.num_seconds() > 3 {
                return summ5;
            }

            n *= 2;
        }
    }
}
pub mod bench {
    use std::cmp;
    use std::time::Duration;
    use super::{Bencher, BenchSamples};

    pub fn benchmark<F>(f: F) -> BenchSamples where F: FnMut(&mut Bencher) {
        let mut bs = Bencher {
            iterations: 0,
            dur: Duration::nanoseconds(0),
            bytes: 0
        };

        let ns_iter_summ = bs.auto_bench(f);

        let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
        let iter_s = 1_000_000_000 / ns_iter;
        let mb_s = (bs.bytes * iter_s) / 1_000_000;

        BenchSamples {
            ns_iter_summ: ns_iter_summ,
            mb_s: mb_s as uint
        }
    }
}
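// Worked example of the MB/s computation above (invented numbers): with a
// median of 2_000 ns/iter and `bytes = 4096`, iter_s = 1_000_000_000 /
// 2_000 = 500_000 iterations/s, so mb_s = (4096 * 500_000) / 1_000_000
// = 2048 MB/s.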
#[cfg(test)]
mod tests {
    use test::{TrFailed, TrIgnored, TrOk, filter_tests, parse_opts,
               TestDesc, TestDescAndFn, TestOpts, run_test,
               Metric, MetricMap,
               StaticTestName, DynTestName, DynTestFn, ShouldFail};
    use std::io::TempDir;
    use std::thunk::Thunk;
    use std::sync::mpsc::channel;
    #[test]
    pub fn do_not_run_ignored_tests() {
        fn f() { panic!(); }
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: true,
                should_fail: ShouldFail::No,
            },
            testfn: DynTestFn(Thunk::new(move|| f())),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res != TrOk);
    }

    #[test]
    pub fn ignored_tests_result_in_ignored() {
        fn f() { }
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: true,
                should_fail: ShouldFail::No,
            },
            testfn: DynTestFn(Thunk::new(move|| f())),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res == TrIgnored);
    }

    #[test]
    fn test_should_fail() {
        fn f() { panic!(); }
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: false,
                should_fail: ShouldFail::Yes(None)
            },
            testfn: DynTestFn(Thunk::new(move|| f())),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res == TrOk);
    }

    #[test]
    fn test_should_fail_good_message() {
        fn f() { panic!("an error message"); }
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: false,
                should_fail: ShouldFail::Yes(Some("error message"))
            },
            testfn: DynTestFn(Thunk::new(move|| f())),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res == TrOk);
    }

    #[test]
    fn test_should_fail_bad_message() {
        fn f() { panic!("an error message"); }
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: false,
                should_fail: ShouldFail::Yes(Some("foobar"))
            },
            testfn: DynTestFn(Thunk::new(move|| f())),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res == TrFailed);
    }

    #[test]
    fn test_should_fail_but_succeeds() {
        fn f() { }
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: false,
                should_fail: ShouldFail::Yes(None)
            },
            testfn: DynTestFn(Thunk::new(move|| f())),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res == TrFailed);
    }
    #[test]
    fn first_free_arg_should_be_a_filter() {
        let args = vec!("progname".to_string(), "some_regex_filter".to_string());
        let opts = match parse_opts(args.as_slice()) {
            Some(Ok(o)) => o,
            _ => panic!("Malformed arg in first_free_arg_should_be_a_filter")
        };
        assert!(opts.filter.expect("should've found filter").is_match("some_regex_filter"))
    }

    #[test]
    fn parse_ignored_flag() {
        let args = vec!("progname".to_string(),
                        "filter".to_string(),
                        "--ignored".to_string());
        let opts = match parse_opts(args.as_slice()) {
            Some(Ok(o)) => o,
            _ => panic!("Malformed arg in parse_ignored_flag")
        };
        assert!((opts.run_ignored));
    }
    #[test]
    pub fn filter_for_ignored_option() {
        // When we run ignored tests the test filter should filter out all the
        // unignored tests and flip the ignore flag on the rest to false

        let mut opts = TestOpts::new();
        opts.run_tests = true;
        opts.run_ignored = true;

        let tests = vec!(
            TestDescAndFn {
                desc: TestDesc {
                    name: StaticTestName("1"),
                    ignore: true,
                    should_fail: ShouldFail::No,
                },
                testfn: DynTestFn(Thunk::new(move|| {})),
            },
            TestDescAndFn {
                desc: TestDesc {
                    name: StaticTestName("2"),
                    ignore: false,
                    should_fail: ShouldFail::No,
                },
                testfn: DynTestFn(Thunk::new(move|| {})),
            });
        let filtered = filter_tests(&opts, tests);

        assert_eq!(filtered.len(), 1);
        assert_eq!(filtered[0].desc.name.to_string(),
                   "1");
        assert!(filtered[0].desc.ignore == false);
    }
    #[test]
    pub fn sort_tests() {
        let mut opts = TestOpts::new();
        opts.run_tests = true;

        let names =
            vec!("sha1::test".to_string(),
                 "int::test_to_str".to_string(),
                 "int::test_pow".to_string(),
                 "test::do_not_run_ignored_tests".to_string(),
                 "test::ignored_tests_result_in_ignored".to_string(),
                 "test::first_free_arg_should_be_a_filter".to_string(),
                 "test::parse_ignored_flag".to_string(),
                 "test::filter_for_ignored_option".to_string(),
                 "test::sort_tests".to_string());
        let tests =
        {
            fn testfn() { }
            let mut tests = Vec::new();
            for name in names.iter() {
                let test = TestDescAndFn {
                    desc: TestDesc {
                        name: DynTestName((*name).clone()),
                        ignore: false,
                        should_fail: ShouldFail::No,
                    },
                    testfn: DynTestFn(Thunk::new(testfn)),
                };
                tests.push(test);
            }
            tests
        };
        let filtered = filter_tests(&opts, tests);

        let expected =
            vec!("int::test_pow".to_string(),
                 "int::test_to_str".to_string(),
                 "sha1::test".to_string(),
                 "test::do_not_run_ignored_tests".to_string(),
                 "test::filter_for_ignored_option".to_string(),
                 "test::first_free_arg_should_be_a_filter".to_string(),
                 "test::ignored_tests_result_in_ignored".to_string(),
                 "test::parse_ignored_flag".to_string(),
                 "test::sort_tests".to_string());

        for (a, b) in expected.iter().zip(filtered.iter()) {
            assert!(*a == b.desc.name.to_string());
        }
    }
    #[test]
    pub fn filter_tests_regex() {
        let mut opts = TestOpts::new();
        opts.filter = Some(::regex::Regex::new("a.*b.+c").unwrap());

        let mut names = ["yes::abXc", "yes::aXXXbXXXXc",
                         "no::XYZ", "no::abc"];
        names.sort();

        fn test_fn() {}
        let tests = names.iter().map(|name| {
            TestDescAndFn {
                desc: TestDesc {
                    name: DynTestName(name.to_string()),
                    ignore: false,
                    should_fail: ShouldFail::No,
                },
                testfn: DynTestFn(Thunk::new(test_fn))
            }
        }).collect();
        let filtered = filter_tests(&opts, tests);

        let expected: Vec<&str> =
            names.iter().map(|&s| s).filter(|name| name.starts_with("yes")).collect();

        assert_eq!(filtered.len(), expected.len());
        for (test, expected_name) in filtered.iter().zip(expected.iter()) {
            assert_eq!(test.desc.name.as_slice(), *expected_name);
        }
    }
    #[test]
    pub fn test_metricmap_compare() {
        let mut m1 = MetricMap::new();
        let mut m2 = MetricMap::new();
        m1.insert_metric("in-both-noise", 1000.0, 200.0);
        m2.insert_metric("in-both-noise", 1100.0, 200.0);

        m1.insert_metric("in-first-noise", 1000.0, 2.0);
        m2.insert_metric("in-second-noise", 1000.0, 2.0);

        m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
        m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);

        m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
        m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);

        m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
        m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);

        m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
        m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);
    }
}