1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 //! Support code for rustc's built in unit-test and micro-benchmarking
14 //! Almost all user code will only be interested in `Bencher` and
15 //! `black_box`. All other interactions (such as writing tests and
16 //! benchmarks themselves) should be done via the `#[test]` and
17 //! `#[bench]` attributes.
19 //! See the [Testing Guide](../guide-testing.html) for more details.
21 // Currently, not much of this is meant for users. It is intended to
22 // support the simplest interface possible for representing and
23 // running tests while providing a base that other test frameworks may
26 #![crate_name = "test"]
27 #![unstable(feature = "test")]
29 #![crate_type = "rlib"]
30 #![crate_type = "dylib"]
31 #![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
32 html_favicon_url = "http://www.rust-lang.org/favicon.ico",
33 html_root_url = "http://doc.rust-lang.org/nightly/")]
35 #![feature(asm, slicing_syntax)]
36 #![feature(box_syntax)]
37 #![feature(collections)]
44 #![feature(rustc_private)]
45 #![feature(staged_api)]
49 extern crate serialize;
50 extern crate "serialize" as rustc_serialize;
53 pub use self::TestFn::*;
54 pub use self::ColorConfig::*;
55 pub use self::TestResult::*;
56 pub use self::TestName::*;
57 use self::TestEvent::*;
58 use self::NamePadding::*;
59 use self::OutputLocation::*;
62 use getopts::{OptGroup, optflag, optopt};
63 use serialize::Encodable;
65 use term::color::{Color, RED, YELLOW, GREEN, CYAN};
69 use std::collections::BTreeMap;
71 use std::old_io::stdio::StdWriter;
72 use std::old_io::{File, ChanReader, ChanWriter};
74 use std::iter::repeat;
75 use std::num::{Float, Int};
77 use std::sync::mpsc::{channel, Sender};
78 use std::thread::{self, Thread};
79 use std::thunk::{Thunk, Invoke};
80 use std::time::Duration;
82 // to be used by rustc to compile tests in libtest
84 pub use {Bencher, TestName, TestResult, TestDesc,
85 TestDescAndFn, TestOpts, TrFailed, TrIgnored, TrOk,
87 StaticTestFn, StaticTestName, DynTestName, DynTestFn,
88 run_test, test_main, test_main_static, filter_tests,
89 parse_opts, StaticBenchFn, ShouldFail};
94 // The name of a test. By convention this follows the rules for rust
95 // paths; i.e. it should be a series of identifiers separated by double
96 // colons. This way if some test runner wants to arrange the tests
97 // hierarchically it may.
99 #[derive(Clone, PartialEq, Eq, Hash, Debug)]
// NOTE(review): the `pub enum TestName {` header and the `DynTestName(String)`
// variant appear to be elided from this view of the source.
101 StaticTestName(&'static str),
// Borrow the test name as a `&str`, regardless of which variant holds it.
105 fn as_slice<'a>(&'a self) -> &'a str {
107 StaticTestName(s) => s,
108 DynTestName(ref s) => s.as_slice()
// Display a `TestName` exactly as its underlying string.
112 impl fmt::Display for TestName {
113 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
114 fmt::Display::fmt(self.as_slice(), f)
118 #[derive(Clone, Copy)]
// Pad `self.name` with spaces up to `column_count` characters, on the side
// selected by `align`, so test names line up in console output.
// NOTE(review): the enclosing impl header and the `match align` arms are
// elided here; the two push_str lines below are the left/right pad branches.
126 fn padded_name(&self, column_count: uint, align: NamePadding) -> String {
127 let mut name = String::from_str(self.name.as_slice());
128 let fill = column_count.saturating_sub(name.len());
129 let mut pad = repeat(" ").take(fill).collect::<String>();
133 pad.push_str(name.as_slice());
137 name.push_str(pad.as_slice());
144 /// Represents a benchmark function.
// Object-safe trait so heap-allocated (dynamic) benchmark closures can be
// stored in `TestFn::DynBenchFn` and invoked through a trait object.
145 pub trait TDynBenchFn {
146 fn run(&self, harness: &mut Bencher);
149 // A function that runs a test. If the function returns successfully,
150 // the test succeeds; if the function panics then the test fails. We
151 // may need to come up with a more clever definition of test in order
152 // to support isolation of tests into tasks.
// NOTE(review): the `pub enum TestFn {` header plus the `StaticTestFn` and
// `DynTestFn` variants appear elided from this view; the `Static*` variants
// hold plain fn pointers while the `Dyn*` variants box closures/trait objects.
155 StaticBenchFn(fn(&mut Bencher)),
156 StaticMetricFn(fn(&mut MetricMap)),
158 DynMetricFn(Box<for<'a> Invoke<&'a mut MetricMap>+'static>),
159 DynBenchFn(Box<TDynBenchFn+'static>)
// Choose name padding per test kind: benches and metrics are right-padded so
// their numeric results column-align; plain tests get no padding.
163 fn padding(&self) -> NamePadding {
165 &StaticTestFn(..) => PadNone,
166 &StaticBenchFn(..) => PadOnRight,
167 &StaticMetricFn(..) => PadOnRight,
168 &DynTestFn(..) => PadNone,
169 &DynMetricFn(..) => PadOnRight,
170 &DynBenchFn(..) => PadOnRight,
// Manual Debug impl: the Dyn* variants box unnameable closures, so we can't
// derive Debug; print the variant name only, never the payload.
175 impl fmt::Debug for TestFn {
176 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
177 f.write_str(match *self {
178 StaticTestFn(..) => "StaticTestFn(..)",
179 StaticBenchFn(..) => "StaticBenchFn(..)",
180 StaticMetricFn(..) => "StaticMetricFn(..)",
181 DynTestFn(..) => "DynTestFn(..)",
182 DynMetricFn(..) => "DynMetricFn(..)",
183 DynBenchFn(..) => "DynBenchFn(..)"
188 /// Manager of the benchmarking runs.
190 /// This is fed into functions marked with `#[bench]` to allow for
191 /// set-up & tear-down before running a piece of code repeatedly via a
// Whether a test is expected to panic; `Yes(Some(msg))` additionally requires
// the panic message to contain `msg` (see `calc_result`).
200 #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
201 pub enum ShouldFail {
203 Yes(Option<&'static str>)
206 // The definition of a single test. A test runner will run a list of
208 #[derive(Clone, Debug, PartialEq, Eq, Hash)]
209 pub struct TestDesc {
212 pub should_fail: ShouldFail,
// Send is asserted manually; presumably sound because all visible fields are
// Send — TODO(review): confirm against the elided field list.
215 unsafe impl Send for TestDesc {}
// A test's description paired with the function that runs it.
218 pub struct TestDescAndFn {
// A measured value plus its noise band (see `insert_metric` for semantics).
223 #[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Debug, Copy)]
230 pub fn new(value: f64, noise: f64) -> Metric {
231 Metric {value: value, noise: noise}
// Named metrics, kept in a BTreeMap so iteration order is deterministic.
236 pub struct MetricMap(BTreeMap<String,Metric>);
// Manual Clone: destructure the newtype and clone the inner map.
238 impl Clone for MetricMap {
239 fn clone(&self) -> MetricMap {
240 let MetricMap(ref map) = *self;
241 MetricMap(map.clone())
245 // The default console test runner. It accepts the command line
246 // arguments and a vector of test_descs. Panics on option-parse errors,
247 pub fn test_main(args: &[String], tests: Vec<TestDescAndFn> ) {
249 match parse_opts(args) {
// A malformed command line is fatal.
251 Some(Err(msg)) => panic!("{:?}", msg),
254 match run_tests_console(&opts, tests) {
// Ok(false) = the run completed but at least one test failed.
256 Ok(false) => panic!("Some tests failed"),
257 Err(e) => panic!("io error when running tests: {:?}", e),
261 // A variant optimized for invocation with a static test vector.
262 // This will panic (intentionally) when fed any dynamic tests, because
263 // it is copying the static values out into a dynamic vector and cannot
264 // copy dynamic values. It is doing this because from this point on
265 // a ~[TestDescAndFn] is used in order to effect ownership-transfer
266 // semantics into parallel test runners, which in turn requires a ~[]
267 // rather than a &[].
268 pub fn test_main_static(args: &[String], tests: &[TestDescAndFn]) {
269 let owned_tests = tests.iter().map(|t| {
// Static fn pointers are Copy, so rebuilding the TestDescAndFn is cheap.
271 StaticTestFn(f) => TestDescAndFn { testfn: StaticTestFn(f), desc: t.desc.clone() },
272 StaticBenchFn(f) => TestDescAndFn { testfn: StaticBenchFn(f), desc: t.desc.clone() },
273 _ => panic!("non-static tests passed to test::test_main_static")
276 test_main(args, owned_tests)
// Output-coloring policy chosen via --color (auto/always/never).
280 pub enum ColorConfig {
// Parsed command-line options controlling a test run.
286 pub struct TestOpts {
287 pub filter: Option<String>,
288 pub run_ignored: bool,
290 pub run_benchmarks: bool,
291 pub logfile: Option<Path>,
293 pub color: ColorConfig,
// Defaults: no filter, tests only, no benches, auto color (other field
// initializers are elided from this view).
298 fn new() -> TestOpts {
303 run_benchmarks: false,
311 /// Result of parsing the options.
312 pub type OptRes = Result<TestOpts, String>;
// The getopts option table shared by parse_opts() and usage().
314 fn optgroups() -> Vec<getopts::OptGroup> {
315 vec!(getopts::optflag("", "ignored", "Run ignored tests"),
316 getopts::optflag("", "test", "Run tests and not benchmarks"),
317 getopts::optflag("", "bench", "Run benchmarks instead of tests"),
318 getopts::optflag("h", "help", "Display this message (longer with --help)"),
319 getopts::optopt("", "logfile", "Write logs to the specified file instead \
321 getopts::optflag("", "nocapture", "don't capture stdout/stderr of each \
322 task, allow printing directly"),
323 getopts::optopt("", "color", "Configure coloring of output:
324 auto = colorize if stdout is a tty and tests are run on serially (default);
325 always = always colorize output;
326 never = never colorize output;", "auto|always|never"))
// Print the --help text: usage line, runner behaviour notes, and the
// test-related attributes, followed by the generated option descriptions.
329 fn usage(binary: &str) {
330 let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
333 The FILTER regex is tested against the name of all tests to run, and
334 only those tests that match are run.
336 By default, all tests are run in parallel. This can be altered with the
337 RUST_TEST_TASKS environment variable when running tests (set it to 1).
339 All tests have their standard output and standard error captured by default.
340 This can be overridden with the --nocapture flag or the RUST_TEST_NOCAPTURE=1
341 environment variable. Logging is not captured by default.
345 #[test] - Indicates a function is a test to be run. This function
347 #[bench] - Indicates a function is a benchmark to be run. This
348 function takes one argument (test::Bencher).
349 #[should_fail] - This function (also labeled with #[test]) will only pass if
350 the code causes a failure (an assertion failure or panic!)
351 A message may be provided, which the failure string must
352 contain: #[should_fail(expected = "foo")].
353 #[ignore] - When applied to a function which is already attributed as a
354 test, then the test runner will ignore these tests during
355 normal test runs. Running with --ignored will run these
357 usage = getopts::usage(message.as_slice(),
358 optgroups().as_slice()));
361 // Parses command line arguments into test options.
362 // Returns None when --help was requested (after printing usage),
363 // Some(Err(..)) on a bad option, Some(Ok(opts)) otherwise.
362 pub fn parse_opts(args: &[String]) -> Option<OptRes> {
// args[0] is the binary name; parse only the rest.
363 let args_ = args.tail();
365 match getopts::getopts(args_.as_slice(), optgroups().as_slice()) {
367 Err(f) => return Some(Err(f.to_string()))
370 if matches.opt_present("h") { usage(args[0].as_slice()); return None; }
// First free argument is the name filter.
372 let filter = if matches.free.len() > 0 {
373 Some(matches.free[0].clone())
378 let run_ignored = matches.opt_present("ignored");
380 let logfile = matches.opt_str("logfile");
381 let logfile = logfile.map(|s| Path::new(s));
// --bench alone suppresses normal tests unless --test is also given.
383 let run_benchmarks = matches.opt_present("bench");
384 let run_tests = ! run_benchmarks ||
385 matches.opt_present("test");
// RUST_TEST_NOCAPTURE=1 can force --nocapture from the environment.
387 let mut nocapture = matches.opt_present("nocapture");
389 nocapture = os::getenv("RUST_TEST_NOCAPTURE").is_some();
392 let color = match matches.opt_str("color").as_ref().map(|s| s.as_slice()) {
393 Some("auto") | None => AutoColor,
394 Some("always") => AlwaysColor,
395 Some("never") => NeverColor,
397 Some(v) => return Some(Err(format!("argument for --color must be \
398 auto, always, or never (was {})",
402 let test_opts = TestOpts {
404 run_ignored: run_ignored,
405 run_tests: run_tests,
406 run_benchmarks: run_benchmarks,
408 nocapture: nocapture,
// Summary statistics gathered for one benchmark run.
415 #[derive(Clone, PartialEq)]
416 pub struct BenchSamples {
417 ns_iter_summ: stats::Summary<f64>,
// Outcome of a single test/bench/metric run (Ok/Failed/Ignored variants are
// elided from this view).
421 #[derive(Clone, PartialEq)]
422 pub enum TestResult {
426 TrMetrics(MetricMap),
427 TrBench(BenchSamples),
// Send asserted manually — TODO(review): confirm all payloads are Send.
430 unsafe impl Send for TestResult {}
// Where console output goes: a color-capable terminal, or a raw writer.
432 enum OutputLocation<T> {
433 Pretty(Box<term::Terminal<term::WriterWrapper> + Send>),
// Mutable state threaded through the console runner: output sinks, counters,
// collected failures, and the column width for name alignment.
437 struct ConsoleTestState<T> {
438 log_out: Option<File>,
439 out: OutputLocation<T>,
447 failures: Vec<(TestDesc, Vec<u8> )> ,
448 max_name_len: uint, // number of columns to fill when aligning names
451 impl<T: Writer> ConsoleTestState<T> {
// Build the console state: open the logfile if requested, and pick a
// color-capable terminal when available, else raw stdout. The unused
// Option<T> parameter only pins the type parameter for callers.
452 pub fn new(opts: &TestOpts,
453 _: Option<T>) -> old_io::IoResult<ConsoleTestState<StdWriter>> {
454 let log_out = match opts.logfile {
455 Some(ref path) => Some(try!(File::create(path))),
458 let out = match term::stdout() {
459 None => Raw(old_io::stdio::stdout_raw()),
463 Ok(ConsoleTestState {
466 use_color: use_color(opts),
// Counters (elided here) start at zero; collections start empty.
472 metrics: MetricMap::new(),
473 failures: Vec::new(),
// One-word colored status writers, all delegating to write_pretty().
478 pub fn write_ok(&mut self) -> old_io::IoResult<()> {
479 self.write_pretty("ok", term::color::GREEN)
482 pub fn write_failed(&mut self) -> old_io::IoResult<()> {
483 self.write_pretty("FAILED", term::color::RED)
486 pub fn write_ignored(&mut self) -> old_io::IoResult<()> {
487 self.write_pretty("ignored", term::color::YELLOW)
490 pub fn write_metric(&mut self) -> old_io::IoResult<()> {
491 self.write_pretty("metric", term::color::CYAN)
494 pub fn write_bench(&mut self) -> old_io::IoResult<()> {
495 self.write_pretty("bench", term::color::CYAN)
// Write `word` in `color` when the sink is a terminal (and, per the elided
// guard, presumably only when use_color is set — TODO confirm); raw sinks
// always get plain bytes.
498 pub fn write_pretty(&mut self,
500 color: term::color::Color) -> old_io::IoResult<()> {
502 Pretty(ref mut term) => {
504 try!(term.fg(color));
506 try!(term.write_all(word.as_bytes()));
512 Raw(ref mut stdout) => stdout.write_all(word.as_bytes())
// Write `s` uncolored to whichever sink is active.
516 pub fn write_plain(&mut self, s: &str) -> old_io::IoResult<()> {
518 Pretty(ref mut term) => term.write_all(s.as_bytes()),
519 Raw(ref mut stdout) => stdout.write_all(s.as_bytes())
// Header line: "running N test(s)".
523 pub fn write_run_start(&mut self, len: uint) -> old_io::IoResult<()> {
525 let noun = if len != 1 { "tests" } else { "test" };
526 self.write_plain(format!("\nrunning {} {}\n", len, noun).as_slice())
// "test <name> ... " prefix, padded to the precomputed column width.
529 pub fn write_test_start(&mut self, test: &TestDesc,
530 align: NamePadding) -> old_io::IoResult<()> {
531 let name = test.padded_name(self.max_name_len, align);
532 self.write_plain(format!("test {} ... ", name).as_slice())
// Finish the test line with the colored outcome (plus metric/bench details).
535 pub fn write_result(&mut self, result: &TestResult) -> old_io::IoResult<()> {
537 TrOk => self.write_ok(),
538 TrFailed => self.write_failed(),
539 TrIgnored => self.write_ignored(),
540 TrMetrics(ref mm) => {
541 try!(self.write_metric());
542 self.write_plain(format!(": {}", mm.fmt_metrics()).as_slice())
545 try!(self.write_bench());
547 try!(self.write_plain(format!(": {}",
548 fmt_bench_samples(bs)).as_slice()));
553 self.write_plain("\n")
// Append "<outcome> <test name>" to the --logfile sink, if one was opened.
556 pub fn write_log(&mut self, test: &TestDesc,
557 result: &TestResult) -> old_io::IoResult<()> {
561 let s = format!("{} {}\n", match *result {
562 TrOk => "ok".to_string(),
563 TrFailed => "failed".to_string(),
564 TrIgnored => "ignored".to_string(),
565 TrMetrics(ref mm) => mm.fmt_metrics(),
566 TrBench(ref bs) => fmt_bench_samples(bs)
567 }, test.name.as_slice());
568 o.write_all(s.as_bytes())
// Print each failed test's captured stdout, then the list of failed names.
// (An elided `failures.sort()` presumably orders the name list — the unit
// test below asserts alphabetical output.)
573 pub fn write_failures(&mut self) -> old_io::IoResult<()> {
574 try!(self.write_plain("\nfailures:\n"));
575 let mut failures = Vec::new();
576 let mut fail_out = String::new();
// First pass: collect names and accumulate any captured stdout blocks.
577 for &(ref f, ref stdout) in &self.failures {
578 failures.push(f.name.to_string());
579 if stdout.len() > 0 {
580 fail_out.push_str(format!("---- {} stdout ----\n\t",
581 f.name.as_slice()).as_slice());
582 let output = String::from_utf8_lossy(stdout.as_slice());
583 fail_out.push_str(output.as_slice());
584 fail_out.push_str("\n");
// Emit captured output only when some test actually produced any.
587 if fail_out.len() > 0 {
588 try!(self.write_plain("\n"));
589 try!(self.write_plain(fail_out.as_slice()));
592 try!(self.write_plain("\nfailures:\n"));
594 for name in &failures {
595 try!(self.write_plain(format!(" {}\n",
596 name.as_slice()).as_slice()));
// Final summary line; returns Ok(true) iff no test failed.
601 pub fn write_run_finish(&mut self) -> old_io::IoResult<bool> {
// Sanity check: every filtered test must be accounted for exactly once.
602 assert!(self.passed + self.failed + self.ignored + self.measured == self.total);
604 let success = self.failed == 0u;
606 try!(self.write_failures());
609 try!(self.write_plain("\ntest result: "));
611 // There's no parallelism at this point so it's safe to use color
612 try!(self.write_ok());
614 try!(self.write_failed());
616 let s = format!(". {} passed; {} failed; {} ignored; {} measured\n\n",
617 self.passed, self.failed, self.ignored, self.measured);
618 try!(self.write_plain(s.as_slice()));
// Render bench stats as "median ns/iter (+/- spread)", appending MB/s
// throughput in the (elided) branch where a byte count was provided.
623 pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
625 format!("{:>9} ns/iter (+/- {}) = {} MB/s",
626 bs.ns_iter_summ.median as uint,
627 (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint,
630 format!("{:>9} ns/iter (+/- {})",
631 bs.ns_iter_summ.median as uint,
632 (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint)
636 // A simple console test runner: drives run_tests() and renders each
637 // TestEvent through a ConsoleTestState, returning overall success.
637 pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn> ) -> old_io::IoResult<bool> {
// Translate one runner event into console output and counter updates.
639 fn callback<T: Writer>(event: &TestEvent,
640 st: &mut ConsoleTestState<T>) -> old_io::IoResult<()> {
641 match (*event).clone() {
642 TeFiltered(ref filtered_tests) => st.write_run_start(filtered_tests.len()),
643 TeWait(ref test, padding) => st.write_test_start(test, padding),
644 TeResult(test, result, stdout) => {
645 try!(st.write_log(&test, &result));
646 try!(st.write_result(&result));
648 TrOk => st.passed += 1,
649 TrIgnored => st.ignored += 1,
// Metric results: merge each entry under "<test name>.<key>".
651 let tname = test.name.as_slice();
652 let MetricMap(mm) = mm;
655 .insert_metric(format!("{}.{}",
// Bench results are recorded as a single metric: median +/- spread.
664 st.metrics.insert_metric(test.name.as_slice(),
665 bs.ns_iter_summ.median,
666 bs.ns_iter_summ.max - bs.ns_iter_summ.min);
// Failures keep their captured stdout for the final report.
671 st.failures.push((test, stdout));
679 let mut st = try!(ConsoleTestState::new(opts, None::<StdWriter>));
// Only padded (bench/metric) names count toward the alignment width.
680 fn len_if_padded(t: &TestDescAndFn) -> uint {
681 match t.testfn.padding() {
683 PadOnLeft | PadOnRight => t.desc.name.as_slice().len(),
686 match tests.iter().max_by(|t|len_if_padded(*t)) {
688 let n = t.desc.name.as_slice();
689 st.max_name_len = n.len();
693 try!(run_tests(opts, tests, |x| callback(&x, &mut st)));
694 return st.write_run_finish();
// Regression test: write_failures() must list failed test names in sorted
// order even when they were recorded out of order (b before a here).
698 fn should_sort_failures_before_printing_them() {
699 let test_a = TestDesc {
700 name: StaticTestName("a"),
702 should_fail: ShouldFail::No
705 let test_b = TestDesc {
706 name: StaticTestName("b"),
708 should_fail: ShouldFail::No
// Raw(Vec<u8>) sink lets the test inspect exactly what was written.
711 let mut st = ConsoleTestState {
713 out: Raw(Vec::new()),
721 metrics: MetricMap::new(),
722 failures: vec!((test_b, Vec::new()), (test_a, Vec::new()))
725 st.write_failures().unwrap();
726 let s = match st.out {
727 Raw(ref m) => String::from_utf8_lossy(&m[]),
728 Pretty(_) => unreachable!()
731 let apos = s.find_str("a").unwrap();
732 let bpos = s.find_str("b").unwrap();
733 assert!(apos < bpos);
// Decide whether to colorize: "auto" requires serial execution AND a tty
// (parallel runs interleave output; color escapes would garble it).
736 fn use_color(opts: &TestOpts) -> bool {
738 AutoColor => get_concurrency() == 1 && old_io::stdout().get_ref().isatty(),
// Events emitted by run_tests() to its callback, plus the message type the
// per-test worker threads send back over the channel.
746 TeFiltered(Vec<TestDesc> ),
747 TeWait(TestDesc, NamePadding),
748 TeResult(TestDesc, TestResult, Vec<u8> ),
751 pub type MonitorMsg = (TestDesc, TestResult, Vec<u8> );
// Core scheduler: filters tests, runs plain tests with up to
// get_concurrency() in flight, then runs benches/metrics serially,
// reporting progress through `callback`.
754 fn run_tests<F>(opts: &TestOpts,
755 tests: Vec<TestDescAndFn> ,
756 mut callback: F) -> old_io::IoResult<()> where
757 F: FnMut(TestEvent) -> old_io::IoResult<()>,
759 let filtered_tests = filter_tests(opts, tests);
760 let filtered_descs = filtered_tests.iter()
761 .map(|t| t.desc.clone())
764 try!(callback(TeFiltered(filtered_descs)));
// Split plain tests (parallelizable) from benches/metrics (serial).
766 let (filtered_tests, filtered_benchs_and_metrics): (Vec<_>, _) =
767 filtered_tests.into_iter().partition(|e| {
769 StaticTestFn(_) | DynTestFn(_) => true,
774 // It's tempting to just spawn all the tests at once, but since we have
775 // many tests that run in other processes we would be making a big mess.
776 let concurrency = get_concurrency();
778 let mut remaining = filtered_tests;
782 let (tx, rx) = channel::<MonitorMsg>();
// Keep up to `concurrency` tests in flight; reap one result per iteration.
784 while pending > 0 || !remaining.is_empty() {
785 while pending < concurrency && !remaining.is_empty() {
786 let test = remaining.pop().unwrap();
787 if concurrency == 1 {
788 // We are doing one test at a time so we can print the name
789 // of the test before we run it. Useful for debugging tests
790 // that hang forever.
791 try!(callback(TeWait(test.desc.clone(), test.testfn.padding())));
793 run_test(opts, !opts.run_tests, test, tx.clone());
797 let (desc, result, stdout) = rx.recv().unwrap();
// In parallel mode the name wasn't printed up front; print it now.
798 if concurrency != 1 {
799 try!(callback(TeWait(desc.clone(), PadNone)));
801 try!(callback(TeResult(desc, result, stdout)));
805 // All benchmarks run at the end, in serial.
806 // (this includes metric fns)
807 for b in filtered_benchs_and_metrics {
808 try!(callback(TeWait(b.desc.clone(), b.testfn.padding())));
809 run_test(opts, !opts.run_benchmarks, b, tx.clone());
810 let (test, result, stdout) = rx.recv().unwrap();
811 try!(callback(TeResult(test, result, stdout)));
// Number of tests to run in parallel: RUST_TEST_TASKS if set (must be a
// positive integer), otherwise the runtime's default scheduler thread count.
816 fn get_concurrency() -> uint {
818 match os::getenv("RUST_TEST_TASKS") {
820 let opt_n: Option<uint> = s.parse().ok();
822 Some(n) if n > 0 => n,
823 _ => panic!("RUST_TEST_TASKS is `{}`, should be a positive integer.", s)
827 rt::default_sched_threads()
// Apply the name filter, handle --ignored (keep only ignored tests and
// un-ignore them), and sort the survivors by name.
832 pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
833 let mut filtered = tests;
835 // Remove tests that don't match the test filter
836 filtered = match opts.filter {
838 Some(ref filter) => {
// Plain substring match, despite usage() calling FILTER a regex.
839 filtered.into_iter().filter(|test| {
840 test.desc.name.as_slice().contains(&filter[])
845 // Maybe pull out the ignored test and unignore them
846 filtered = if !opts.run_ignored {
// With --ignored: keep only ignored tests, flipping their flag off so
// the runner actually executes them.
849 fn filter(test: TestDescAndFn) -> Option<TestDescAndFn> {
850 if test.desc.ignore {
851 let TestDescAndFn {desc, testfn} = test;
853 desc: TestDesc {ignore: false, ..desc},
860 filtered.into_iter().filter_map(|x| filter(x)).collect()
863 // Sort the tests alphabetically
864 filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));
// Run a single test/bench/metric, sending (desc, result, captured stdout)
// on `monitor_ch`. `force_ignore` short-circuits to TrIgnored.
869 pub fn run_test(opts: &TestOpts,
872 monitor_ch: Sender<MonitorMsg>) {
874 let TestDescAndFn {desc, testfn} = test;
876 if force_ignore || desc.ignore {
877 monitor_ch.send((desc, TrIgnored, Vec::new())).unwrap();
// Spawn the test body in its own thread with stdout/stderr redirected
// through a channel so output can be captured (unless nocapture).
881 fn run_test_inner(desc: TestDesc,
882 monitor_ch: Sender<MonitorMsg>,
885 Thread::spawn(move || {
886 let (tx, rx) = channel();
887 let mut reader = ChanReader::new(rx);
888 let stdout = ChanWriter::new(tx.clone());
889 let stderr = ChanWriter::new(tx);
890 let mut cfg = thread::Builder::new().name(match desc.name {
891 DynTestName(ref name) => name.clone().to_string(),
892 StaticTestName(name) => name.to_string(),
// nocapture: drop the redirect writers so output goes to the terminal.
895 drop((stdout, stderr));
897 cfg = cfg.stdout(box stdout as Box<Writer + Send>);
898 cfg = cfg.stderr(box stderr as Box<Writer + Send>);
// Join the test thread; a panic there is translated by calc_result.
901 let result_guard = cfg.scoped(move || { testfn.invoke(()) });
902 let stdout = reader.read_to_end().unwrap().into_iter().collect();
903 let test_result = calc_result(&desc, result_guard.join());
904 monitor_ch.send((desc.clone(), test_result, stdout)).unwrap();
// Benches and metric fns run inline (no capture thread needed).
909 DynBenchFn(bencher) => {
910 let bs = ::bench::benchmark(|harness| bencher.run(harness));
911 monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
914 StaticBenchFn(benchfn) => {
915 let bs = ::bench::benchmark(|harness| (benchfn.clone())(harness));
916 monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
920 let mut mm = MetricMap::new();
922 monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
925 StaticMetricFn(f) => {
926 let mut mm = MetricMap::new();
928 monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
931 DynTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture, f),
932 StaticTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture,
933 Thunk::new(move|| f()))
// Translate the joined thread outcome into a TestResult honoring
// should_fail: success = clean exit (No) or a panic (Yes), where a
// Yes(Some(msg)) panic payload (String or &'static str) must contain msg.
// The fall-through TrFailed arm is elided from this view.
937 fn calc_result(desc: &TestDesc, task_result: Result<(), Box<Any+Send>>) -> TestResult {
938 match (&desc.should_fail, task_result) {
939 (&ShouldFail::No, Ok(())) |
940 (&ShouldFail::Yes(None), Err(_)) => TrOk,
941 (&ShouldFail::Yes(Some(msg)), Err(ref err))
942 if err.downcast_ref::<String>()
944 .or_else(|| err.downcast_ref::<&'static str>().map(|e| *e))
945 .map(|e| e.contains(msg))
946 .unwrap_or(false) => TrOk,
// Construct an empty metric map.
953 pub fn new() -> MetricMap {
954 MetricMap(BTreeMap::new())
957 /// Insert a named `value` (+/- `noise`) metric into the map. The value
958 /// must be non-negative. The `noise` indicates the uncertainty of the
959 /// metric, which doubles as the "noise range" of acceptable
960 /// pairwise-regressions on this named value, when comparing from one
961 /// metric to the next using `compare_to_old`.
963 /// If `noise` is positive, then it means this metric is of a value
964 /// you want to see grow smaller, so a change larger than `noise` in the
965 /// positive direction represents a regression.
967 /// If `noise` is negative, then it means this metric is of a value
968 /// you want to see grow larger, so a change larger than `noise` in the
969 /// negative direction represents a regression.
970 pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
// Overwrites any existing metric of the same name.
975 let MetricMap(ref mut map) = *self;
976 map.insert(name.to_string(), m);
// Render all metrics as "name: value (+/- noise)"; BTreeMap iteration
// makes the order deterministic (sorted by name).
979 pub fn fmt_metrics(&self) -> String {
980 let MetricMap(ref mm) = *self;
981 let v : Vec<String> = mm.iter()
982 .map(|(k,v)| format!("{}: {} (+/- {})", *k,
983 v.value as f64, v.noise as f64))
992 /// A function that is opaque to the optimizer, to allow benchmarks to
993 /// pretend to use outputs to assist in avoiding dead-code
996 /// This function is a no-op, and does not even read from `dummy`.
997 pub fn black_box<T>(dummy: T) -> T {
998 // we need to "use" the argument in some way LLVM can't
// SAFETY: empty asm that only takes `&dummy` as an input; it performs no
// memory access and has no observable effect beyond defeating the optimizer.
1000 unsafe {asm!("" : : "r"(&dummy))}
1006 /// Callback for benchmark functions to run in their body.
// Times `self.iterations` runs of `inner`, storing the elapsed Duration.
1007 pub fn iter<T, F>(&mut self, mut inner: F) where F: FnMut() -> T {
1008 self.dur = Duration::span(|| {
1009 let k = self.iterations;
// Total nanoseconds of the last `iter` run.
1016 pub fn ns_elapsed(&mut self) -> u64 {
1017 self.dur.num_nanoseconds().unwrap() as u64
// Nanoseconds per iteration; 0 iterations yields 0 (elided early branch),
// and the max(…, 1) guards the division.
1020 pub fn ns_per_iter(&mut self) -> u64 {
1021 if self.iterations == 0 {
1024 self.ns_elapsed() / cmp::max(self.iterations, 1)
// Run `f` once with the iteration count pinned to `n`.
1028 pub fn bench_n<F>(&mut self, n: u64, f: F) where F: FnOnce(&mut Bencher) {
1029 self.iterations = n;
1033 // This is a more statistics-driven benchmark algorithm: calibrate an
1034 // iteration count, then sample repeatedly until the results converge
1034 // (or a time budget is exhausted), returning the winsorized summary.
1034 pub fn auto_bench<F>(&mut self, mut f: F) -> stats::Summary<f64> where F: FnMut(&mut Bencher) {
1035 // Initial bench run to get ballpark figure.
1037 self.bench_n(n, |x| f(x));
1039 // Try to estimate iter count for 1ms falling back to 1m
1040 // iterations if first run took < 1ns.
1041 if self.ns_per_iter() == 0 {
1044 n = 1_000_000 / cmp::max(self.ns_per_iter(), 1);
1046 // if the first run took more than 1ms we don't want to just
1047 // be left doing 0 iterations on every loop. The unfortunate
1048 // side effect of not being able to do as many runs is
1049 // automatically handled by the statistical analysis below
1050 // (i.e. larger error bars).
1051 if n == 0 { n = 1; }
1053 let mut total_run = Duration::nanoseconds(0);
// Fixed 50-sample batches per convergence check.
1054 let samples : &mut [f64] = &mut [0.0_f64; 50];
1056 let mut summ = None;
1057 let mut summ5 = None;
// Each pass: 50 samples at n iterations and 50 at 5n, both winsorized
// at 5% to clip outliers before summarizing.
1059 let loop_run = Duration::span(|| {
1061 for p in &mut *samples {
1062 self.bench_n(n, |x| f(x));
1063 *p = self.ns_per_iter() as f64;
1066 stats::winsorize(samples, 5.0);
1067 summ = Some(stats::Summary::new(samples));
1069 for p in &mut *samples {
1070 self.bench_n(5 * n, |x| f(x));
1071 *p = self.ns_per_iter() as f64;
1074 stats::winsorize(samples, 5.0);
1075 summ5 = Some(stats::Summary::new(samples));
1077 let summ = summ.unwrap();
1078 let summ5 = summ5.unwrap();
1080 // If we've run for 100ms and seem to have converged to a
1082 if loop_run.num_milliseconds() > 100 &&
1083 summ.median_abs_dev_pct < 1.0 &&
1084 summ.median - summ5.median < summ5.median_abs_dev {
1088 total_run = total_run + loop_run;
1089 // Longest we ever run for is 3s.
1090 if total_run.num_seconds() > 3 {
1101 use std::time::Duration;
1102 use super::{Bencher, BenchSamples};
// Drive one #[bench] function through auto_bench and package the summary,
// deriving MB/s from the Bencher's (elided) `bytes` field.
1104 pub fn benchmark<F>(f: F) -> BenchSamples where F: FnMut(&mut Bencher) {
1105 let mut bs = Bencher {
1107 dur: Duration::nanoseconds(0),
1111 let ns_iter_summ = bs.auto_bench(f);
// Clamp to 1ns/iter to avoid divide-by-zero on very fast bodies.
1113 let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
1114 let iter_s = 1_000_000_000 / ns_iter;
1115 let mb_s = (bs.bytes * iter_s) / 1_000_000;
1118 ns_iter_summ: ns_iter_summ,
1126 use test::{TrFailed, TrIgnored, TrOk, filter_tests, parse_opts,
1127 TestDesc, TestDescAndFn, TestOpts, run_test,
1129 StaticTestName, DynTestName, DynTestFn, ShouldFail};
1130 use std::thunk::Thunk;
1131 use std::sync::mpsc::channel;
// An ignored test must not be executed: if f() ran, the panic would make
// the result TrFailed; either way it must not be TrOk.
1134 pub fn do_not_run_ignored_tests() {
1135 fn f() { panic!(); }
1136 let desc = TestDescAndFn {
1138 name: StaticTestName("whatever"),
1140 should_fail: ShouldFail::No,
1142 testfn: DynTestFn(Thunk::new(move|| f())),
1144 let (tx, rx) = channel();
1145 run_test(&TestOpts::new(), false, desc, tx);
1146 let (_, res, _) = rx.recv().unwrap();
1147 assert!(res != TrOk);
// An ignored test should be reported as exactly TrIgnored.
1151 pub fn ignored_tests_result_in_ignored() {
1153 let desc = TestDescAndFn {
1155 name: StaticTestName("whatever"),
1157 should_fail: ShouldFail::No,
1159 testfn: DynTestFn(Thunk::new(move|| f())),
1161 let (tx, rx) = channel();
1162 run_test(&TestOpts::new(), false, desc, tx);
1163 let (_, res, _) = rx.recv().unwrap();
1164 assert!(res == TrIgnored);
// should_fail(None): any panic counts as success.
1168 fn test_should_fail() {
1169 fn f() { panic!(); }
1170 let desc = TestDescAndFn {
1172 name: StaticTestName("whatever"),
1174 should_fail: ShouldFail::Yes(None)
1176 testfn: DynTestFn(Thunk::new(move|| f())),
1178 let (tx, rx) = channel();
1179 run_test(&TestOpts::new(), false, desc, tx);
1180 let (_, res, _) = rx.recv().unwrap();
1181 assert!(res == TrOk);
// should_fail(Some(msg)): passes when the panic message contains msg.
1185 fn test_should_fail_good_message() {
1186 fn f() { panic!("an error message"); }
1187 let desc = TestDescAndFn {
1189 name: StaticTestName("whatever"),
1191 should_fail: ShouldFail::Yes(Some("error message"))
1193 testfn: DynTestFn(Thunk::new(move|| f())),
1195 let (tx, rx) = channel();
1196 run_test(&TestOpts::new(), false, desc, tx);
1197 let (_, res, _) = rx.recv().unwrap();
1198 assert!(res == TrOk);
// ...and fails when the panic message does not contain msg.
1202 fn test_should_fail_bad_message() {
1203 fn f() { panic!("an error message"); }
1204 let desc = TestDescAndFn {
1206 name: StaticTestName("whatever"),
1208 should_fail: ShouldFail::Yes(Some("foobar"))
1210 testfn: DynTestFn(Thunk::new(move|| f())),
1212 let (tx, rx) = channel();
1213 run_test(&TestOpts::new(), false, desc, tx);
1214 let (_, res, _) = rx.recv().unwrap();
1215 assert!(res == TrFailed);
// A should_fail test that exits cleanly is itself a failure.
1219 fn test_should_fail_but_succeeds() {
1221 let desc = TestDescAndFn {
1223 name: StaticTestName("whatever"),
1225 should_fail: ShouldFail::Yes(None)
1227 testfn: DynTestFn(Thunk::new(move|| f())),
1229 let (tx, rx) = channel();
1230 run_test(&TestOpts::new(), false, desc, tx);
1231 let (_, res, _) = rx.recv().unwrap();
1232 assert!(res == TrFailed);
// --ignored on the command line must set opts.run_ignored.
1236 fn parse_ignored_flag() {
1237 let args = vec!("progname".to_string(),
1238 "filter".to_string(),
1239 "--ignored".to_string());
1240 let opts = match parse_opts(args.as_slice()) {
1242 _ => panic!("Malformed arg in parse_ignored_flag")
1244 assert!((opts.run_ignored));
1248 pub fn filter_for_ignored_option() {
1249 // When we run ignored tests the test filter should filter out all the
1250 // unignored tests and flip the ignore flag on the rest to false
1252 let mut opts = TestOpts::new();
1253 opts.run_tests = true;
1254 opts.run_ignored = true;
// Two tests: one ignored ("1"), one not ("2") — ignore flags are on the
// elided desc lines.
1259 name: StaticTestName("1"),
1261 should_fail: ShouldFail::No,
1263 testfn: DynTestFn(Thunk::new(move|| {})),
1267 name: StaticTestName("2"),
1269 should_fail: ShouldFail::No,
1271 testfn: DynTestFn(Thunk::new(move|| {})),
1273 let filtered = filter_tests(&opts, tests);
// Only the ignored test survives, and its ignore flag is flipped off.
1275 assert_eq!(filtered.len(), 1);
1276 assert_eq!(filtered[0].desc.name.to_string(),
1278 assert!(filtered[0].desc.ignore == false);
// filter_tests() must return tests sorted alphabetically by name.
1282 pub fn sort_tests() {
1283 let mut opts = TestOpts::new();
1284 opts.run_tests = true;
// Deliberately unsorted input names.
1287 vec!("sha1::test".to_string(),
1288 "int::test_to_str".to_string(),
1289 "int::test_pow".to_string(),
1290 "test::do_not_run_ignored_tests".to_string(),
1291 "test::ignored_tests_result_in_ignored".to_string(),
1292 "test::first_free_arg_should_be_a_filter".to_string(),
1293 "test::parse_ignored_flag".to_string(),
1294 "test::filter_for_ignored_option".to_string(),
1295 "test::sort_tests".to_string());
1299 let mut tests = Vec::new();
1300 for name in &names {
1301 let test = TestDescAndFn {
1303 name: DynTestName((*name).clone()),
1305 should_fail: ShouldFail::No,
1307 testfn: DynTestFn(Thunk::new(testfn)),
1313 let filtered = filter_tests(&opts, tests);
// The same names in lexicographic order.
1316 vec!("int::test_pow".to_string(),
1317 "int::test_to_str".to_string(),
1318 "sha1::test".to_string(),
1319 "test::do_not_run_ignored_tests".to_string(),
1320 "test::filter_for_ignored_option".to_string(),
1321 "test::first_free_arg_should_be_a_filter".to_string(),
1322 "test::ignored_tests_result_in_ignored".to_string(),
1323 "test::parse_ignored_flag".to_string(),
1324 "test::sort_tests".to_string());
1326 for (a, b) in expected.iter().zip(filtered.iter()) {
1327 assert!(*a == b.desc.name.to_string());
1332 pub fn test_metricmap_compare() {
1333 let mut m1 = MetricMap::new();
1334 let mut m2 = MetricMap::new();
1335 m1.insert_metric("in-both-noise", 1000.0, 200.0);
1336 m2.insert_metric("in-both-noise", 1100.0, 200.0);
1338 m1.insert_metric("in-first-noise", 1000.0, 2.0);
1339 m2.insert_metric("in-second-noise", 1000.0, 2.0);
1341 m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
1342 m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);
1344 m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
1345 m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);
1347 m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
1348 m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);
1350 m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
1351 m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);