1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 //! Support code for rustc's built in unit-test and micro-benchmarking
14 //! Almost all user code will only be interested in `Bencher` and
15 //! `black_box`. All other interactions (such as writing tests and
16 //! benchmarks themselves) should be done via the `#[test]` and
17 //! `#[bench]` attributes.
19 //! See the [Testing Guide](../guide-testing.html) for more details.
21 // Currently, not much of this is meant for users. It is intended to
22 // support the simplest interface possible for representing and
23 // running tests while providing a base that other test frameworks may
26 #![crate_name = "test"]
29 #![crate_type = "rlib"]
30 #![crate_type = "dylib"]
31 #![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
32 html_favicon_url = "http://www.rust-lang.org/favicon.ico",
33 html_root_url = "http://doc.rust-lang.org/nightly/")]
34 #![allow(unknown_features)]
35 #![feature(asm, slicing_syntax)]
36 #![feature(box_syntax)]
37 #![allow(unknown_features)] #![feature(int_uint)]
42 extern crate serialize;
43 extern crate "serialize" as rustc_serialize;
46 pub use self::TestFn::*;
47 pub use self::ColorConfig::*;
48 pub use self::TestResult::*;
49 pub use self::TestName::*;
50 use self::TestEvent::*;
51 use self::NamePadding::*;
52 use self::OutputLocation::*;
55 use getopts::{OptGroup, optflag, optopt};
57 use serialize::Encodable;
59 use term::color::{Color, RED, YELLOW, GREEN, CYAN};
63 use std::collections::BTreeMap;
65 use std::io::stdio::StdWriter;
66 use std::io::{File, ChanReader, ChanWriter};
68 use std::iter::repeat;
69 use std::num::{Float, Int};
71 use std::str::FromStr;
72 use std::sync::mpsc::{channel, Sender};
73 use std::thread::{self, Thread};
74 use std::thunk::{Thunk, Invoke};
75 use std::time::Duration;
77 // to be used by rustc to compile tests in libtest
79 pub use {Bencher, TestName, TestResult, TestDesc,
80 TestDescAndFn, TestOpts, TrFailed, TrIgnored, TrOk,
82 StaticTestFn, StaticTestName, DynTestName, DynTestFn,
83 run_test, test_main, test_main_static, filter_tests,
84 parse_opts, StaticBenchFn, ShouldFail};
89 // The name of a test. By convention this follows the rules for rust
90 // paths; i.e. it should be a series of identifiers separated by double
91 // colons. This way if some test runner wants to arrange the tests
92 // hierarchically it may.
94 #[derive(Clone, PartialEq, Eq, Hash, Show)]
96 StaticTestName(&'static str),
100 fn as_slice<'a>(&'a self) -> &'a str {
102 StaticTestName(s) => s,
103 DynTestName(ref s) => s.as_slice()
107 impl fmt::Display for TestName {
108 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
109 fmt::Display::fmt(self.as_slice(), f)
113 #[derive(Clone, Copy)]
121 fn padded_name(&self, column_count: uint, align: NamePadding) -> String {
122 let mut name = String::from_str(self.name.as_slice());
123 let fill = column_count.saturating_sub(name.len());
124 let mut pad = repeat(" ").take(fill).collect::<String>();
128 pad.push_str(name.as_slice());
132 name.push_str(pad.as_slice());
139 /// Represents a benchmark function.
140 pub trait TDynBenchFn {
141 fn run(&self, harness: &mut Bencher);
144 // A function that runs a test. If the function returns successfully,
145 // the test succeeds; if the function panics then the test fails. We
146 // may need to come up with a more clever definition of test in order
147 // to support isolation of tests into tasks.
150 StaticBenchFn(fn(&mut Bencher)),
151 StaticMetricFn(fn(&mut MetricMap)),
153 DynMetricFn(Box<for<'a> Invoke<&'a mut MetricMap>+'static>),
154 DynBenchFn(Box<TDynBenchFn+'static>)
158 fn padding(&self) -> NamePadding {
160 &StaticTestFn(..) => PadNone,
161 &StaticBenchFn(..) => PadOnRight,
162 &StaticMetricFn(..) => PadOnRight,
163 &DynTestFn(..) => PadNone,
164 &DynMetricFn(..) => PadOnRight,
165 &DynBenchFn(..) => PadOnRight,
170 impl fmt::Debug for TestFn {
171 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
172 f.write_str(match *self {
173 StaticTestFn(..) => "StaticTestFn(..)",
174 StaticBenchFn(..) => "StaticBenchFn(..)",
175 StaticMetricFn(..) => "StaticMetricFn(..)",
176 DynTestFn(..) => "DynTestFn(..)",
177 DynMetricFn(..) => "DynMetricFn(..)",
178 DynBenchFn(..) => "DynBenchFn(..)"
183 /// Manager of the benchmarking runs.
185 /// This is fed into functions marked with `#[bench]` to allow for
186 /// set-up & tear-down before running a piece of code repeatedly via a
195 #[derive(Copy, Clone, Show, PartialEq, Eq, Hash)]
196 pub enum ShouldFail {
198 Yes(Option<&'static str>)
201 // The definition of a single test. A test runner will run a list of
203 #[derive(Clone, Show, PartialEq, Eq, Hash)]
204 pub struct TestDesc {
207 pub should_fail: ShouldFail,
210 unsafe impl Send for TestDesc {}
213 pub struct TestDescAndFn {
218 #[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Show, Copy)]
225 pub fn new(value: f64, noise: f64) -> Metric {
226 Metric {value: value, noise: noise}
231 pub struct MetricMap(BTreeMap<String,Metric>);
233 impl Clone for MetricMap {
234 fn clone(&self) -> MetricMap {
235 let MetricMap(ref map) = *self;
236 MetricMap(map.clone())
240 // The default console test runner. It accepts the command line
241 // arguments and a vector of test_descs.
// NOTE(review): this listing is decimated — lines 243, 245, 247–248, 250 and
// the closing braces are missing between the numbered lines below.
242 pub fn test_main(args: &[String], tests: Vec<TestDescAndFn> ) {
// Parse CLI options first; a malformed argument aborts with the parse error.
244 match parse_opts(args) {
246 Some(Err(msg)) => panic!("{:?}", msg),
// Run on the console runner. A run that completes but had failing tests
// panics (so the process exits non-zero), as does any I/O error.
249 match run_tests_console(&opts, tests) {
251 Ok(false) => panic!("Some tests failed"),
252 Err(e) => panic!("io error when running tests: {:?}", e),
256 // A variant optimized for invocation with a static test vector.
257 // This will panic (intentionally) when fed any dynamic tests, because
258 // it is copying the static values out into a dynamic vector and cannot
259 // copy dynamic values. It is doing this because from this point on
260 // a ~[TestDescAndFn] is used in order to effect ownership-transfer
261 // semantics into parallel test runners, which in turn requires a ~[]
262 // rather than a &[].
263 pub fn test_main_static(args: &[String], tests: &[TestDescAndFn]) {
// Clone each static test/bench fn pointer into an owned vector; fn pointers
// are Copy, so only the desc needs an actual clone.
264 let owned_tests = tests.iter().map(|t| {
266 StaticTestFn(f) => TestDescAndFn { testfn: StaticTestFn(f), desc: t.desc.clone() },
267 StaticBenchFn(f) => TestDescAndFn { testfn: StaticBenchFn(f), desc: t.desc.clone() },
// Dynamic variants own boxed closures that cannot be copied out of a slice.
268 _ => panic!("non-static tests passed to test::test_main_static")
// Delegate to the general runner with the now-owned vector.
271 test_main(args, owned_tests)
275 pub enum ColorConfig {
281 pub struct TestOpts {
282 pub filter: Option<Regex>,
283 pub run_ignored: bool,
285 pub run_benchmarks: bool,
286 pub logfile: Option<Path>,
288 pub color: ColorConfig,
293 fn new() -> TestOpts {
298 run_benchmarks: false,
306 /// Result of parsing the options.
307 pub type OptRes = Result<TestOpts, String>;
309 fn optgroups() -> Vec<getopts::OptGroup> {
310 vec!(getopts::optflag("", "ignored", "Run ignored tests"),
311 getopts::optflag("", "test", "Run tests and not benchmarks"),
312 getopts::optflag("", "bench", "Run benchmarks instead of tests"),
313 getopts::optflag("h", "help", "Display this message (longer with --help)"),
314 getopts::optopt("", "logfile", "Write logs to the specified file instead \
316 getopts::optflag("", "nocapture", "don't capture stdout/stderr of each \
317 task, allow printing directly"),
318 getopts::optopt("", "color", "Configure coloring of output:
319 auto = colorize if stdout is a tty and tests are run on serially (default);
320 always = always colorize output;
321 never = never colorize output;", "auto|always|never"))
324 fn usage(binary: &str) {
325 let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
328 The FILTER regex is tested against the name of all tests to run, and
329 only those tests that match are run.
331 By default, all tests are run in parallel. This can be altered with the
332 RUST_TEST_TASKS environment variable when running tests (set it to 1).
334 All tests have their standard output and standard error captured by default.
335 This can be overridden with the --nocapture flag or the RUST_TEST_NOCAPTURE=1
336 environment variable. Logging is not captured by default.
340 #[test] - Indicates a function is a test to be run. This function
342 #[bench] - Indicates a function is a benchmark to be run. This
343 function takes one argument (test::Bencher).
344 #[should_fail] - This function (also labeled with #[test]) will only pass if
345 the code causes a failure (an assertion failure or panic!)
346 A message may be provided, which the failure string must
347 contain: #[should_fail(expected = "foo")].
348 #[ignore] - When applied to a function which is already attributed as a
349 test, then the test runner will ignore these tests during
350 normal test runs. Running with --ignored will run these
352 usage = getopts::usage(message.as_slice(),
353 optgroups().as_slice()));
356 // Parses command line arguments into test options
357 pub fn parse_opts(args: &[String]) -> Option<OptRes> {
358 let args_ = args.tail();
360 match getopts::getopts(args_.as_slice(), optgroups().as_slice()) {
362 Err(f) => return Some(Err(f.to_string()))
365 if matches.opt_present("h") { usage(args[0].as_slice()); return None; }
367 let filter = if matches.free.len() > 0 {
368 let s = matches.free[0].as_slice();
369 match Regex::new(s) {
371 Err(e) => return Some(Err(format!("could not parse /{}/: {:?}", s, e)))
377 let run_ignored = matches.opt_present("ignored");
379 let logfile = matches.opt_str("logfile");
380 let logfile = logfile.map(|s| Path::new(s));
382 let run_benchmarks = matches.opt_present("bench");
383 let run_tests = ! run_benchmarks ||
384 matches.opt_present("test");
386 let mut nocapture = matches.opt_present("nocapture");
388 nocapture = os::getenv("RUST_TEST_NOCAPTURE").is_some();
391 let color = match matches.opt_str("color").as_ref().map(|s| s.as_slice()) {
392 Some("auto") | None => AutoColor,
393 Some("always") => AlwaysColor,
394 Some("never") => NeverColor,
396 Some(v) => return Some(Err(format!("argument for --color must be \
397 auto, always, or never (was {})",
401 let test_opts = TestOpts {
403 run_ignored: run_ignored,
404 run_tests: run_tests,
405 run_benchmarks: run_benchmarks,
407 nocapture: nocapture,
414 #[derive(Clone, PartialEq)]
415 pub struct BenchSamples {
416 ns_iter_summ: stats::Summary<f64>,
420 #[derive(Clone, PartialEq)]
421 pub enum TestResult {
425 TrMetrics(MetricMap),
426 TrBench(BenchSamples),
429 unsafe impl Send for TestResult {}
431 enum OutputLocation<T> {
432 Pretty(Box<term::Terminal<term::WriterWrapper> + Send>),
436 struct ConsoleTestState<T> {
437 log_out: Option<File>,
438 out: OutputLocation<T>,
442 show_all_stats: bool,
449 failures: Vec<(TestDesc, Vec<u8> )> ,
450 max_name_len: uint, // number of columns to fill when aligning names
453 impl<T: Writer> ConsoleTestState<T> {
454 pub fn new(opts: &TestOpts,
455 _: Option<T>) -> io::IoResult<ConsoleTestState<StdWriter>> {
456 let log_out = match opts.logfile {
457 Some(ref path) => Some(try!(File::create(path))),
460 let out = match term::stdout() {
461 None => Raw(io::stdio::stdout_raw()),
465 Ok(ConsoleTestState {
468 use_color: use_color(opts),
471 show_all_stats: false,
477 metrics: MetricMap::new(),
478 failures: Vec::new(),
483 pub fn write_ok(&mut self) -> io::IoResult<()> {
484 self.write_pretty("ok", term::color::GREEN)
487 pub fn write_failed(&mut self) -> io::IoResult<()> {
488 self.write_pretty("FAILED", term::color::RED)
491 pub fn write_ignored(&mut self) -> io::IoResult<()> {
492 self.write_pretty("ignored", term::color::YELLOW)
495 pub fn write_metric(&mut self) -> io::IoResult<()> {
496 self.write_pretty("metric", term::color::CYAN)
499 pub fn write_bench(&mut self) -> io::IoResult<()> {
500 self.write_pretty("bench", term::color::CYAN)
503 pub fn write_pretty(&mut self,
505 color: term::color::Color) -> io::IoResult<()> {
507 Pretty(ref mut term) => {
509 try!(term.fg(color));
511 try!(term.write(word.as_bytes()));
517 Raw(ref mut stdout) => stdout.write(word.as_bytes())
521 pub fn write_plain(&mut self, s: &str) -> io::IoResult<()> {
523 Pretty(ref mut term) => term.write(s.as_bytes()),
524 Raw(ref mut stdout) => stdout.write(s.as_bytes())
528 pub fn write_run_start(&mut self, len: uint) -> io::IoResult<()> {
530 let noun = if len != 1 { "tests" } else { "test" };
531 self.write_plain(format!("\nrunning {} {}\n", len, noun).as_slice())
534 pub fn write_test_start(&mut self, test: &TestDesc,
535 align: NamePadding) -> io::IoResult<()> {
536 let name = test.padded_name(self.max_name_len, align);
537 self.write_plain(format!("test {} ... ", name).as_slice())
540 pub fn write_result(&mut self, result: &TestResult) -> io::IoResult<()> {
542 TrOk => self.write_ok(),
543 TrFailed => self.write_failed(),
544 TrIgnored => self.write_ignored(),
545 TrMetrics(ref mm) => {
546 try!(self.write_metric());
547 self.write_plain(format!(": {}", mm.fmt_metrics()).as_slice())
550 try!(self.write_bench());
552 if self.show_boxplot {
553 let mut wr = Vec::new();
555 try!(stats::write_boxplot(&mut wr, &bs.ns_iter_summ, self.boxplot_width));
557 let s = String::from_utf8(wr).unwrap();
559 try!(self.write_plain(format!(": {}", s).as_slice()));
562 if self.show_all_stats {
563 let mut wr = Vec::new();
565 try!(stats::write_5_number_summary(&mut wr, &bs.ns_iter_summ));
567 let s = String::from_utf8(wr).unwrap();
569 try!(self.write_plain(format!(": {}", s).as_slice()));
571 try!(self.write_plain(format!(": {}",
572 fmt_bench_samples(bs)).as_slice()));
578 self.write_plain("\n")
581 pub fn write_log(&mut self, test: &TestDesc,
582 result: &TestResult) -> io::IoResult<()> {
586 let s = format!("{} {}\n", match *result {
587 TrOk => "ok".to_string(),
588 TrFailed => "failed".to_string(),
589 TrIgnored => "ignored".to_string(),
590 TrMetrics(ref mm) => mm.fmt_metrics(),
591 TrBench(ref bs) => fmt_bench_samples(bs)
592 }, test.name.as_slice());
593 o.write(s.as_bytes())
598 pub fn write_failures(&mut self) -> io::IoResult<()> {
599 try!(self.write_plain("\nfailures:\n"));
600 let mut failures = Vec::new();
601 let mut fail_out = String::new();
602 for &(ref f, ref stdout) in self.failures.iter() {
603 failures.push(f.name.to_string());
604 if stdout.len() > 0 {
605 fail_out.push_str(format!("---- {} stdout ----\n\t",
606 f.name.as_slice()).as_slice());
607 let output = String::from_utf8_lossy(stdout.as_slice());
608 fail_out.push_str(output.as_slice());
609 fail_out.push_str("\n");
612 if fail_out.len() > 0 {
613 try!(self.write_plain("\n"));
614 try!(self.write_plain(fail_out.as_slice()));
617 try!(self.write_plain("\nfailures:\n"));
619 for name in failures.iter() {
620 try!(self.write_plain(format!(" {}\n",
621 name.as_slice()).as_slice()));
626 pub fn write_run_finish(&mut self) -> io::IoResult<bool> {
627 assert!(self.passed + self.failed + self.ignored + self.measured == self.total);
629 let success = self.failed == 0u;
631 try!(self.write_failures());
634 try!(self.write_plain("\ntest result: "));
636 // There's no parallelism at this point so it's safe to use color
637 try!(self.write_ok());
639 try!(self.write_failed());
641 let s = format!(". {} passed; {} failed; {} ignored; {} measured\n\n",
642 self.passed, self.failed, self.ignored, self.measured);
643 try!(self.write_plain(s.as_slice()));
// Render one benchmark's summary as "median ns/iter (+/- spread)"; when a
// throughput figure is available (presumably when bs.mb_s > 0 — the guard
// line is missing from this listing, TODO confirm) an "= N MB/s" suffix is
// appended. `{:>9}` right-aligns the median for column alignment.
648 pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
650 format!("{:>9} ns/iter (+/- {}) = {} MB/s",
651 bs.ns_iter_summ.median as uint,
// Spread is reported as the full max-min range of the samples.
652 (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint,
655 format!("{:>9} ns/iter (+/- {})",
656 bs.ns_iter_summ.median as uint,
657 (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint)
661 // A simple console test runner
662 pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn> ) -> io::IoResult<bool> {
664 fn callback<T: Writer>(event: &TestEvent, st: &mut ConsoleTestState<T>) -> io::IoResult<()> {
665 match (*event).clone() {
666 TeFiltered(ref filtered_tests) => st.write_run_start(filtered_tests.len()),
667 TeWait(ref test, padding) => st.write_test_start(test, padding),
668 TeResult(test, result, stdout) => {
669 try!(st.write_log(&test, &result));
670 try!(st.write_result(&result));
672 TrOk => st.passed += 1,
673 TrIgnored => st.ignored += 1,
675 let tname = test.name.as_slice();
676 let MetricMap(mm) = mm;
677 for (k,v) in mm.iter() {
679 .insert_metric(format!("{}.{}",
688 st.metrics.insert_metric(test.name.as_slice(),
689 bs.ns_iter_summ.median,
690 bs.ns_iter_summ.max - bs.ns_iter_summ.min);
695 st.failures.push((test, stdout));
703 let mut st = try!(ConsoleTestState::new(opts, None::<StdWriter>));
704 fn len_if_padded(t: &TestDescAndFn) -> uint {
705 match t.testfn.padding() {
707 PadOnLeft | PadOnRight => t.desc.name.as_slice().len(),
710 match tests.iter().max_by(|t|len_if_padded(*t)) {
712 let n = t.desc.name.as_slice();
713 st.max_name_len = n.len();
717 try!(run_tests(opts, tests, |x| callback(&x, &mut st)));
718 return st.write_run_finish();
722 fn should_sort_failures_before_printing_them() {
723 let test_a = TestDesc {
724 name: StaticTestName("a"),
726 should_fail: ShouldFail::No
729 let test_b = TestDesc {
730 name: StaticTestName("b"),
732 should_fail: ShouldFail::No
735 let mut st = ConsoleTestState {
737 out: Raw(Vec::new()),
741 show_all_stats: false,
748 metrics: MetricMap::new(),
749 failures: vec!((test_b, Vec::new()), (test_a, Vec::new()))
752 st.write_failures().unwrap();
753 let s = match st.out {
754 Raw(ref m) => String::from_utf8_lossy(&m[]),
755 Pretty(_) => unreachable!()
758 let apos = s.find_str("a").unwrap();
759 let bpos = s.find_str("b").unwrap();
760 assert!(apos < bpos);
763 fn use_color(opts: &TestOpts) -> bool {
765 AutoColor => get_concurrency() == 1 && io::stdout().get_ref().isatty(),
773 TeFiltered(Vec<TestDesc> ),
774 TeWait(TestDesc, NamePadding),
775 TeResult(TestDesc, TestResult, Vec<u8> ),
778 pub type MonitorMsg = (TestDesc, TestResult, Vec<u8> );
781 fn run_tests<F>(opts: &TestOpts,
782 tests: Vec<TestDescAndFn> ,
783 mut callback: F) -> io::IoResult<()> where
784 F: FnMut(TestEvent) -> io::IoResult<()>,
786 let filtered_tests = filter_tests(opts, tests);
787 let filtered_descs = filtered_tests.iter()
788 .map(|t| t.desc.clone())
791 try!(callback(TeFiltered(filtered_descs)));
793 let (filtered_tests, filtered_benchs_and_metrics): (Vec<_>, _) =
794 filtered_tests.into_iter().partition(|e| {
796 StaticTestFn(_) | DynTestFn(_) => true,
801 // It's tempting to just spawn all the tests at once, but since we have
802 // many tests that run in other processes we would be making a big mess.
803 let concurrency = get_concurrency();
805 let mut remaining = filtered_tests;
809 let (tx, rx) = channel::<MonitorMsg>();
811 while pending > 0 || !remaining.is_empty() {
812 while pending < concurrency && !remaining.is_empty() {
813 let test = remaining.pop().unwrap();
814 if concurrency == 1 {
815 // We are doing one test at a time so we can print the name
816 // of the test before we run it. Useful for debugging tests
817 // that hang forever.
818 try!(callback(TeWait(test.desc.clone(), test.testfn.padding())));
820 run_test(opts, !opts.run_tests, test, tx.clone());
824 let (desc, result, stdout) = rx.recv().unwrap();
825 if concurrency != 1 {
826 try!(callback(TeWait(desc.clone(), PadNone)));
828 try!(callback(TeResult(desc, result, stdout)));
832 // All benchmarks run at the end, in serial.
833 // (this includes metric fns)
834 for b in filtered_benchs_and_metrics.into_iter() {
835 try!(callback(TeWait(b.desc.clone(), b.testfn.padding())));
836 run_test(opts, !opts.run_benchmarks, b, tx.clone());
837 let (test, result, stdout) = rx.recv().unwrap();
838 try!(callback(TeResult(test, result, stdout)));
// Number of tests to run in parallel. The RUST_TEST_TASKS environment
// variable overrides the default; it must parse as a positive integer.
843 fn get_concurrency() -> uint {
845 match os::getenv("RUST_TEST_TASKS") {
847 let opt_n: Option<uint> = FromStr::from_str(s.as_slice());
849 Some(n) if n > 0 => n,
// Zero or unparseable values are hard errors rather than silently ignored.
850 _ => panic!("RUST_TEST_TASKS is `{}`, should be a positive integer.", s)
// Unset: fall back to the runtime's default scheduler thread count.
854 rt::default_sched_threads()
// Narrow and order the test list according to `opts`: apply the name-regex
// filter, handle --ignored (keep only ignored tests and flip their flag),
// then sort alphabetically by test name.
859 pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
860 let mut filtered = tests;
862 // Remove tests that don't match the test filter
863 filtered = match opts.filter {
867 .filter(|test| re.is_match(test.desc.name.as_slice())).collect()
871 // Maybe pull out the ignored test and unignore them
872 filtered = if !opts.run_ignored {
// In --ignored mode: keep only tests marked #[ignore], rebuilding each desc
// with ignore cleared so the runner will actually execute them.
875 fn filter(test: TestDescAndFn) -> Option<TestDescAndFn> {
876 if test.desc.ignore {
877 let TestDescAndFn {desc, testfn} = test;
879 desc: TestDesc {ignore: false, ..desc},
886 filtered.into_iter().filter_map(|x| filter(x)).collect()
889 // Sort the tests alphabetically
890 filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));
895 pub fn run_test(opts: &TestOpts,
898 monitor_ch: Sender<MonitorMsg>) {
900 let TestDescAndFn {desc, testfn} = test;
902 if force_ignore || desc.ignore {
903 monitor_ch.send((desc, TrIgnored, Vec::new())).unwrap();
907 fn run_test_inner(desc: TestDesc,
908 monitor_ch: Sender<MonitorMsg>,
911 Thread::spawn(move || {
912 let (tx, rx) = channel();
913 let mut reader = ChanReader::new(rx);
914 let stdout = ChanWriter::new(tx.clone());
915 let stderr = ChanWriter::new(tx);
916 let mut cfg = thread::Builder::new().name(match desc.name {
917 DynTestName(ref name) => name.clone().to_string(),
918 StaticTestName(name) => name.to_string(),
921 drop((stdout, stderr));
923 cfg = cfg.stdout(box stdout as Box<Writer + Send>);
924 cfg = cfg.stderr(box stderr as Box<Writer + Send>);
927 let result_guard = cfg.scoped(move || { testfn.invoke(()) });
928 let stdout = reader.read_to_end().unwrap().into_iter().collect();
929 let test_result = calc_result(&desc, result_guard.join());
930 monitor_ch.send((desc.clone(), test_result, stdout)).unwrap();
935 DynBenchFn(bencher) => {
936 let bs = ::bench::benchmark(|harness| bencher.run(harness));
937 monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
940 StaticBenchFn(benchfn) => {
941 let bs = ::bench::benchmark(|harness| (benchfn.clone())(harness));
942 monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
946 let mut mm = MetricMap::new();
948 monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
951 StaticMetricFn(f) => {
952 let mut mm = MetricMap::new();
954 monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
957 DynTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture, f),
958 StaticTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture,
959 Thunk::new(move|| f()))
// Translate a finished task's Result into a TestResult, honoring the
// test's #[should_fail] expectation.
963 fn calc_result(desc: &TestDesc, task_result: Result<(), Box<Any+Send>>) -> TestResult {
964 match (&desc.should_fail, task_result) {
// Ok: either the test wasn't supposed to fail and didn't, or it was
// supposed to fail (no particular message) and did.
965 (&ShouldFail::No, Ok(())) |
966 (&ShouldFail::Yes(None), Err(_)) => TrOk,
// should_fail with an expected message: the panic payload (a String or a
// &'static str, downcast from Box<Any>) must contain that substring.
967 (&ShouldFail::Yes(Some(msg)), Err(ref err))
968 if err.downcast_ref::<String>()
970 .or_else(|| err.downcast_ref::<&'static str>().map(|e| *e))
971 .map(|e| e.contains(msg))
972 .unwrap_or(false) => TrOk,
// NOTE(review): the remaining catch-all arm (presumably `_ => TrFailed`)
// is missing from this decimated listing.
979 pub fn new() -> MetricMap {
980 MetricMap(BTreeMap::new())
983 /// Insert a named `value` (+/- `noise`) metric into the map. The value
984 /// must be non-negative. The `noise` indicates the uncertainty of the
985 /// metric, which doubles as the "noise range" of acceptable
986 /// pairwise-regressions on this named value, when comparing from one
987 /// metric to the next using `compare_to_old`.
989 /// If `noise` is positive, then it means this metric is of a value
990 /// you want to see grow smaller, so a change larger than `noise` in the
991 /// positive direction represents a regression.
993 /// If `noise` is negative, then it means this metric is of a value
994 /// you want to see grow larger, so a change larger than `noise` in the
995 /// negative direction represents a regression.
996 pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
// NOTE(review): the construction of `m` (original lines 997–1000) is
// missing from this listing; presumably `Metric::new(value, noise)`.
// Destructure the newtype to reach the underlying BTreeMap; inserting
// overwrites any previous metric recorded under `name`.
1001 let MetricMap(ref mut map) = *self;
1002 map.insert(name.to_string(), m);
// Format every metric in the map as "name: value (+/- noise)"; iteration
// over the BTreeMap yields entries in sorted key order.
1005 pub fn fmt_metrics(&self) -> String {
1006 let MetricMap(ref mm) = *self;
1007 let v : Vec<String> = mm.iter()
1008 .map(|(k,v)| format!("{}: {} (+/- {})", *k,
1009 v.value as f64, v.noise as f64))
// NOTE(review): the joining of `v` into the returned String (original
// lines after 1009) is missing from this decimated listing.
1018 /// A function that is opaque to the optimizer, to allow benchmarks to
1019 /// pretend to use outputs to assist in avoiding dead-code
1022 /// This function is a no-op, and does not even read from `dummy`.
1023 pub fn black_box<T>(dummy: T) -> T {
1024 // we need to "use" the argument in some way LLVM can't
// SAFETY/how it works: an empty inline-asm block takes the address of
// `dummy` as an "r" input, so LLVM must assume the value is observed and
// cannot dead-code-eliminate the computation that produced it.
1026 unsafe {asm!("" : : "r"(&dummy))}
1032 /// Callback for benchmark functions to run in their body.
1033 pub fn iter<T, F>(&mut self, mut inner: F) where F: FnMut() -> T {
1034 self.dur = Duration::span(|| {
1035 let k = self.iterations;
1036 for _ in range(0u64, k) {
1042 pub fn ns_elapsed(&mut self) -> u64 {
1043 self.dur.num_nanoseconds().unwrap() as u64
1046 pub fn ns_per_iter(&mut self) -> u64 {
1047 if self.iterations == 0 {
1050 self.ns_elapsed() / cmp::max(self.iterations, 1)
1054 pub fn bench_n<F>(&mut self, n: u64, f: F) where F: FnOnce(&mut Bencher) {
1055 self.iterations = n;
1059 // This is a more statistics-driven benchmark algorithm
1060 pub fn auto_bench<F>(&mut self, mut f: F) -> stats::Summary<f64> where F: FnMut(&mut Bencher) {
1061 // Initial bench run to get ballpark figure.
1063 self.bench_n(n, |x| f(x));
1065 // Try to estimate iter count for 1ms falling back to 1m
1066 // iterations if first run took < 1ns.
1067 if self.ns_per_iter() == 0 {
1070 n = 1_000_000 / cmp::max(self.ns_per_iter(), 1);
1072 // if the first run took more than 1ms we don't want to just
1073 // be left doing 0 iterations on every loop. The unfortunate
1074 // side effect of not being able to do as many runs is
1075 // automatically handled by the statistical analysis below
1076 // (i.e. larger error bars).
1077 if n == 0 { n = 1; }
1079 let mut total_run = Duration::nanoseconds(0);
1080 let samples : &mut [f64] = &mut [0.0_f64; 50];
1082 let mut summ = None;
1083 let mut summ5 = None;
1085 let loop_run = Duration::span(|| {
1087 for p in samples.iter_mut() {
1088 self.bench_n(n, |x| f(x));
1089 *p = self.ns_per_iter() as f64;
1092 stats::winsorize(samples, 5.0);
1093 summ = Some(stats::Summary::new(samples));
1095 for p in samples.iter_mut() {
1096 self.bench_n(5 * n, |x| f(x));
1097 *p = self.ns_per_iter() as f64;
1100 stats::winsorize(samples, 5.0);
1101 summ5 = Some(stats::Summary::new(samples));
1103 let summ = summ.unwrap();
1104 let summ5 = summ5.unwrap();
1106 // If we've run for 100ms and seem to have converged to a
1108 if loop_run.num_milliseconds() > 100 &&
1109 summ.median_abs_dev_pct < 1.0 &&
1110 summ.median - summ5.median < summ5.median_abs_dev {
1114 total_run = total_run + loop_run;
1115 // Longest we ever run for is 3s.
1116 if total_run.num_seconds() > 3 {
1127 use std::time::Duration;
1128 use super::{Bencher, BenchSamples};
1130 pub fn benchmark<F>(f: F) -> BenchSamples where F: FnMut(&mut Bencher) {
1131 let mut bs = Bencher {
1133 dur: Duration::nanoseconds(0),
1137 let ns_iter_summ = bs.auto_bench(f);
1139 let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
1140 let iter_s = 1_000_000_000 / ns_iter;
1141 let mb_s = (bs.bytes * iter_s) / 1_000_000;
1144 ns_iter_summ: ns_iter_summ,
1152 use test::{TrFailed, TrIgnored, TrOk, filter_tests, parse_opts,
1153 TestDesc, TestDescAndFn, TestOpts, run_test,
1155 StaticTestName, DynTestName, DynTestFn, ShouldFail};
1156 use std::io::TempDir;
1157 use std::thunk::Thunk;
1158 use std::sync::mpsc::channel;
1161 pub fn do_not_run_ignored_tests() {
1162 fn f() { panic!(); }
1163 let desc = TestDescAndFn {
1165 name: StaticTestName("whatever"),
1167 should_fail: ShouldFail::No,
1169 testfn: DynTestFn(Thunk::new(move|| f())),
1171 let (tx, rx) = channel();
1172 run_test(&TestOpts::new(), false, desc, tx);
1173 let (_, res, _) = rx.recv().unwrap();
1174 assert!(res != TrOk);
1178 pub fn ignored_tests_result_in_ignored() {
1180 let desc = TestDescAndFn {
1182 name: StaticTestName("whatever"),
1184 should_fail: ShouldFail::No,
1186 testfn: DynTestFn(Thunk::new(move|| f())),
1188 let (tx, rx) = channel();
1189 run_test(&TestOpts::new(), false, desc, tx);
1190 let (_, res, _) = rx.recv().unwrap();
1191 assert!(res == TrIgnored);
1195 fn test_should_fail() {
1196 fn f() { panic!(); }
1197 let desc = TestDescAndFn {
1199 name: StaticTestName("whatever"),
1201 should_fail: ShouldFail::Yes(None)
1203 testfn: DynTestFn(Thunk::new(move|| f())),
1205 let (tx, rx) = channel();
1206 run_test(&TestOpts::new(), false, desc, tx);
1207 let (_, res, _) = rx.recv().unwrap();
1208 assert!(res == TrOk);
1212 fn test_should_fail_good_message() {
1213 fn f() { panic!("an error message"); }
1214 let desc = TestDescAndFn {
1216 name: StaticTestName("whatever"),
1218 should_fail: ShouldFail::Yes(Some("error message"))
1220 testfn: DynTestFn(Thunk::new(move|| f())),
1222 let (tx, rx) = channel();
1223 run_test(&TestOpts::new(), false, desc, tx);
1224 let (_, res, _) = rx.recv().unwrap();
1225 assert!(res == TrOk);
1229 fn test_should_fail_bad_message() {
1230 fn f() { panic!("an error message"); }
1231 let desc = TestDescAndFn {
1233 name: StaticTestName("whatever"),
1235 should_fail: ShouldFail::Yes(Some("foobar"))
1237 testfn: DynTestFn(Thunk::new(move|| f())),
1239 let (tx, rx) = channel();
1240 run_test(&TestOpts::new(), false, desc, tx);
1241 let (_, res, _) = rx.recv().unwrap();
1242 assert!(res == TrFailed);
1246 fn test_should_fail_but_succeeds() {
1248 let desc = TestDescAndFn {
1250 name: StaticTestName("whatever"),
1252 should_fail: ShouldFail::Yes(None)
1254 testfn: DynTestFn(Thunk::new(move|| f())),
1256 let (tx, rx) = channel();
1257 run_test(&TestOpts::new(), false, desc, tx);
1258 let (_, res, _) = rx.recv().unwrap();
1259 assert!(res == TrFailed);
1263 fn first_free_arg_should_be_a_filter() {
1264 let args = vec!("progname".to_string(), "some_regex_filter".to_string());
1265 let opts = match parse_opts(args.as_slice()) {
1267 _ => panic!("Malformed arg in first_free_arg_should_be_a_filter")
1269 assert!(opts.filter.expect("should've found filter").is_match("some_regex_filter"))
1273 fn parse_ignored_flag() {
1274 let args = vec!("progname".to_string(),
1275 "filter".to_string(),
1276 "--ignored".to_string());
1277 let opts = match parse_opts(args.as_slice()) {
1279 _ => panic!("Malformed arg in parse_ignored_flag")
1281 assert!((opts.run_ignored));
1285 pub fn filter_for_ignored_option() {
1286 // When we run ignored tests the test filter should filter out all the
1287 // unignored tests and flip the ignore flag on the rest to false
1289 let mut opts = TestOpts::new();
1290 opts.run_tests = true;
1291 opts.run_ignored = true;
1296 name: StaticTestName("1"),
1298 should_fail: ShouldFail::No,
1300 testfn: DynTestFn(Thunk::new(move|| {})),
1304 name: StaticTestName("2"),
1306 should_fail: ShouldFail::No,
1308 testfn: DynTestFn(Thunk::new(move|| {})),
1310 let filtered = filter_tests(&opts, tests);
1312 assert_eq!(filtered.len(), 1);
1313 assert_eq!(filtered[0].desc.name.to_string(),
1315 assert!(filtered[0].desc.ignore == false);
1319 pub fn sort_tests() {
1320 let mut opts = TestOpts::new();
1321 opts.run_tests = true;
1324 vec!("sha1::test".to_string(),
1325 "int::test_to_str".to_string(),
1326 "int::test_pow".to_string(),
1327 "test::do_not_run_ignored_tests".to_string(),
1328 "test::ignored_tests_result_in_ignored".to_string(),
1329 "test::first_free_arg_should_be_a_filter".to_string(),
1330 "test::parse_ignored_flag".to_string(),
1331 "test::filter_for_ignored_option".to_string(),
1332 "test::sort_tests".to_string());
1336 let mut tests = Vec::new();
1337 for name in names.iter() {
1338 let test = TestDescAndFn {
1340 name: DynTestName((*name).clone()),
1342 should_fail: ShouldFail::No,
1344 testfn: DynTestFn(Thunk::new(testfn)),
1350 let filtered = filter_tests(&opts, tests);
1353 vec!("int::test_pow".to_string(),
1354 "int::test_to_str".to_string(),
1355 "sha1::test".to_string(),
1356 "test::do_not_run_ignored_tests".to_string(),
1357 "test::filter_for_ignored_option".to_string(),
1358 "test::first_free_arg_should_be_a_filter".to_string(),
1359 "test::ignored_tests_result_in_ignored".to_string(),
1360 "test::parse_ignored_flag".to_string(),
1361 "test::sort_tests".to_string());
1363 for (a, b) in expected.iter().zip(filtered.iter()) {
1364 assert!(*a == b.desc.name.to_string());
1369 pub fn filter_tests_regex() {
1370 let mut opts = TestOpts::new();
1371 opts.filter = Some(::regex::Regex::new("a.*b.+c").unwrap());
1373 let mut names = ["yes::abXc", "yes::aXXXbXXXXc",
1374 "no::XYZ", "no::abc"];
1378 let tests = names.iter().map(|name| {
1381 name: DynTestName(name.to_string()),
1383 should_fail: ShouldFail::No,
1385 testfn: DynTestFn(Thunk::new(test_fn))
1388 let filtered = filter_tests(&opts, tests);
1390 let expected: Vec<&str> =
1391 names.iter().map(|&s| s).filter(|name| name.starts_with("yes")).collect();
1393 assert_eq!(filtered.len(), expected.len());
1394 for (test, expected_name) in filtered.iter().zip(expected.iter()) {
1395 assert_eq!(test.desc.name.as_slice(), *expected_name);
1400 pub fn test_metricmap_compare() {
1401 let mut m1 = MetricMap::new();
1402 let mut m2 = MetricMap::new();
1403 m1.insert_metric("in-both-noise", 1000.0, 200.0);
1404 m2.insert_metric("in-both-noise", 1100.0, 200.0);
1406 m1.insert_metric("in-first-noise", 1000.0, 2.0);
1407 m2.insert_metric("in-second-noise", 1000.0, 2.0);
1409 m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
1410 m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);
1412 m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
1413 m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);
1415 m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
1416 m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);
1418 m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
1419 m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);