1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 //! Support code for rustc's built in unit-test and micro-benchmarking
14 //! Almost all user code will only be interested in `Bencher` and
15 //! `black_box`. All other interactions (such as writing tests and
16 //! benchmarks themselves) should be done via the `#[test]` and
17 //! `#[bench]` attributes.
19 //! See the [Testing Guide](../guide-testing.html) for more details.
21 // Currently, not much of this is meant for users. It is intended to
22 // support the simplest interface possible for representing and
23 // running tests while providing a base that other test frameworks may
26 #![crate_name = "test"]
29 #![crate_type = "rlib"]
30 #![crate_type = "dylib"]
31 #![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
32 html_favicon_url = "http://www.rust-lang.org/favicon.ico",
33 html_root_url = "http://doc.rust-lang.org/nightly/")]
34 #![allow(unknown_features)]
35 #![feature(asm, slicing_syntax)]
36 #![feature(box_syntax)]
37 #![allow(unknown_features)] #![feature(int_uint)]
41 extern crate serialize;
42 extern crate "serialize" as rustc_serialize;
45 pub use self::TestFn::*;
46 pub use self::ColorConfig::*;
47 pub use self::TestResult::*;
48 pub use self::TestName::*;
49 use self::TestEvent::*;
50 use self::NamePadding::*;
51 use self::OutputLocation::*;
54 use getopts::{OptGroup, optflag, optopt};
55 use serialize::Encodable;
57 use term::color::{Color, RED, YELLOW, GREEN, CYAN};
61 use std::collections::BTreeMap;
63 use std::io::stdio::StdWriter;
64 use std::io::{File, ChanReader, ChanWriter};
66 use std::iter::repeat;
67 use std::num::{Float, Int};
69 use std::str::FromStr;
70 use std::sync::mpsc::{channel, Sender};
71 use std::thread::{self, Thread};
72 use std::thunk::{Thunk, Invoke};
73 use std::time::Duration;
75 // to be used by rustc to compile tests in libtest
77 pub use {Bencher, TestName, TestResult, TestDesc,
78 TestDescAndFn, TestOpts, TrFailed, TrIgnored, TrOk,
80 StaticTestFn, StaticTestName, DynTestName, DynTestFn,
81 run_test, test_main, test_main_static, filter_tests,
82 parse_opts, StaticBenchFn, ShouldFail};
87 // The name of a test. By convention this follows the rules for rust
88 // paths; i.e. it should be a series of identifiers separated by double
89 // colons. This way if some test runner wants to arrange the tests
90 // hierarchically it may.
// The name of a test: a `&'static str` for tests declared statically, or
// (per the DynTestName arm below) an owned String for dynamically built tests.
// NOTE(review): sampled listing — the enum header, the DynTestName variant,
// the impl header for as_slice, the match scaffolding and closing braces are
// missing from this span.
92 #[derive(Clone, PartialEq, Eq, Hash, Show)]
94 StaticTestName(&'static str),
// Borrow the name as a &str regardless of variant.
98 fn as_slice<'a>(&'a self) -> &'a str {
100 StaticTestName(s) => s,
101 DynTestName(ref s) => s.as_slice()
// Display simply forwards to the underlying &str.
105 impl fmt::Display for TestName {
106 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
107 fmt::Display::fmt(self.as_slice(), f)
111 #[derive(Clone, Copy)]
// Pad the test's name with spaces up to `column_count` columns so console
// output lines up. NOTE(review): sampled listing — the match on `align`
// (presumably PadNone / PadOnLeft / PadOnRight arms) is missing here; the
// surviving lines show the left-pad path (spaces then name) at original
// line 126 and the right-pad path (name then spaces) at line 130.
119 fn padded_name(&self, column_count: uint, align: NamePadding) -> String {
120 let mut name = String::from_str(self.name.as_slice());
// saturating_sub: no padding (rather than underflow) when the name is
// already wider than the column.
121 let fill = column_count.saturating_sub(name.len());
122 let mut pad = repeat(" ").take(fill).collect::<String>();
126 pad.push_str(name.as_slice());
130 name.push_str(pad.as_slice());
137 /// Represents a benchmark function.
// Object-safe trait so boxed, dynamically-created benchmarks can be stored
// in TestFn::DynBenchFn and driven through the same `Bencher` harness as
// static `fn(&mut Bencher)` benchmarks.
138 pub trait TDynBenchFn {
139 fn run(&self, harness: &mut Bencher);
142 // A function that runs a test. If the function returns successfully,
143 // the test succeeds; if the function panics then the test fails. We
144 // may need to come up with a more clever definition of test in order
145 // to support isolation of tests into tasks.
// NOTE(review): sampled listing — the `pub enum TestFn {` header and the
// StaticTestFn / DynTestFn variants are missing from this span, but they are
// referenced by the `padding` match below.
148 StaticBenchFn(fn(&mut Bencher)),
149 StaticMetricFn(fn(&mut MetricMap)),
151 DynMetricFn(Box<for<'a> Invoke<&'a mut MetricMap>+'static>),
152 DynBenchFn(Box<TDynBenchFn+'static>)
// Choose the console name-padding policy per kind: plain tests get no
// padding, benchmarks and metric fns are right-padded so their timing
// columns line up.
156 fn padding(&self) -> NamePadding {
158 &StaticTestFn(..) => PadNone,
159 &StaticBenchFn(..) => PadOnRight,
160 &StaticMetricFn(..) => PadOnRight,
161 &DynTestFn(..) => PadNone,
162 &DynMetricFn(..) => PadOnRight,
163 &DynBenchFn(..) => PadOnRight,
// Manual Debug impl: TestFn holds function pointers and boxed closures that
// cannot derive Debug, so each variant prints a fixed placeholder string.
// NOTE(review): sampled listing — closing braces of the match/fn/impl are
// missing from this span.
168 impl fmt::Debug for TestFn {
169 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
170 f.write_str(match *self {
171 StaticTestFn(..) => "StaticTestFn(..)",
172 StaticBenchFn(..) => "StaticBenchFn(..)",
173 StaticMetricFn(..) => "StaticMetricFn(..)",
174 DynTestFn(..) => "DynTestFn(..)",
175 DynMetricFn(..) => "DynMetricFn(..)",
176 DynBenchFn(..) => "DynBenchFn(..)"
181 /// Manager of the benchmarking runs.
183 /// This is fed into functions marked with `#[bench]` to allow for
184 /// set-up & tear-down before running a piece of code repeatedly via a
// NOTE(review): sampled listing — the `pub struct Bencher { ... }` body that
// these doc comments describe is missing from this span.
// Whether a test is expected to panic (`#[should_fail]`): `Yes` optionally
// carries a substring the panic message must contain (see calc_result).
// NOTE(review): the `No` variant (original line 195) is missing here.
193 #[derive(Copy, Clone, Show, PartialEq, Eq, Hash)]
194 pub enum ShouldFail {
196 Yes(Option<&'static str>)
199 // The definition of a single test. A test runner will run a list of
// NOTE(review): sampled listing — the `name` and `ignore` fields of TestDesc,
// and the `desc`/`testfn` fields of TestDescAndFn, are missing from this span.
201 #[derive(Clone, Show, PartialEq, Eq, Hash)]
202 pub struct TestDesc {
205 pub should_fail: ShouldFail,
// Manual Send marker so descs can cross the monitor channel; presumably the
// &'static str inside TestName/ShouldFail made the auto impl unavailable in
// this compiler era — TODO confirm against the original source.
208 unsafe impl Send for TestDesc {}
// Pairs a test's metadata with the function that runs it.
211 pub struct TestDescAndFn {
// A single measured value with its noise (uncertainty) band.
// NOTE(review): sampled listing — the `pub struct Metric { value, noise }`
// declaration between the derive and the constructor is missing from view.
216 #[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Show, Copy)]
223 pub fn new(value: f64, noise: f64) -> Metric {
224 Metric {value: value, noise: noise}
// Newtype over an ordered map so metric output is deterministic by name.
229 pub struct MetricMap(BTreeMap<String,Metric>);
// Manual Clone: destructure the newtype and clone the inner BTreeMap.
231 impl Clone for MetricMap {
232 fn clone(&self) -> MetricMap {
233 let MetricMap(ref map) = *self;
234 MetricMap(map.clone())
238 // The default console test runner. It accepts the command line
239 // arguments and a vector of test_descs.
// Panics (non-zero exit) on option-parse failure, on any failing test, and
// on I/O errors from the console runner; returns quietly when --help was
// printed (parse_opts returned None) or all tests passed.
240 pub fn test_main(args: &[String], tests: Vec<TestDescAndFn> ) {
// NOTE(review): sampled listing — the Some(Ok(o)) and None arms of this
// match, and the Ok(true) arm below, are missing from this span.
242 match parse_opts(args) {
244 Some(Err(msg)) => panic!("{:?}", msg),
247 match run_tests_console(&opts, tests) {
249 Ok(false) => panic!("Some tests failed"),
250 Err(e) => panic!("io error when running tests: {:?}", e),
254 // A variant optimized for invocation with a static test vector.
255 // This will panic (intentionally) when fed any dynamic tests, because
256 // it is copying the static values out into a dynamic vector and cannot
257 // copy dynamic values. It is doing this because from this point on
258 // a ~[TestDescAndFn] is used in order to effect ownership-transfer
259 // semantics into parallel test runners, which in turn requires a ~[]
260 // rather than a &[].
// This is the entry point emitted by the compiler for `--test` builds.
261 pub fn test_main_static(args: &[String], tests: &[TestDescAndFn]) {
// Copy each static fn pointer into an owned TestDescAndFn; only the
// Static* variants are Copy-able, hence the panic on anything else.
262 let owned_tests = tests.iter().map(|t| {
264 StaticTestFn(f) => TestDescAndFn { testfn: StaticTestFn(f), desc: t.desc.clone() },
265 StaticBenchFn(f) => TestDescAndFn { testfn: StaticBenchFn(f), desc: t.desc.clone() },
266 _ => panic!("non-static tests passed to test::test_main_static")
269 test_main(args, owned_tests)
// Console coloring policy selected by --color.
// NOTE(review): sampled listing — the AutoColor/AlwaysColor/NeverColor
// variants are missing from this span (they appear in parse_opts below).
273 pub enum ColorConfig {
// Parsed command-line options for a test run. Several fields (run_tests,
// nocapture) are missing from this view of the struct.
279 pub struct TestOpts {
280 pub filter: Option<String>,
281 pub run_ignored: bool,
283 pub run_benchmarks: bool,
284 pub logfile: Option<Path>,
286 pub color: ColorConfig,
// Default options used by the unit tests in this file; most fields default
// to off/None (only the surviving run_benchmarks: false line is visible).
291 fn new() -> TestOpts {
296 run_benchmarks: false,
304 /// Result of parsing the options.
305 pub type OptRes = Result<TestOpts, String>;
// Declare the getopts option table shared by parse_opts() and usage().
307 fn optgroups() -> Vec<getopts::OptGroup> {
308 vec!(getopts::optflag("", "ignored", "Run ignored tests"),
309 getopts::optflag("", "test", "Run tests and not benchmarks"),
310 getopts::optflag("", "bench", "Run benchmarks instead of tests"),
311 getopts::optflag("h", "help", "Display this message (longer with --help)"),
312 getopts::optopt("", "logfile", "Write logs to the specified file instead \
314 getopts::optflag("", "nocapture", "don't capture stdout/stderr of each \
315 task, allow printing directly"),
316 getopts::optopt("", "color", "Configure coloring of output:
317 auto = colorize if stdout is a tty and tests are run on serially (default);
318 always = always colorize output;
319 never = never colorize output;", "auto|always|never"))
// Print the --help text: usage line, filter/parallelism/capture notes, the
// test-related attributes, then the generated getopts option summary.
// NOTE(review): sampled listing — the println!/format! scaffolding around
// this help text is missing from this span.
322 fn usage(binary: &str) {
323 let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
326 The FILTER regex is tested against the name of all tests to run, and
327 only those tests that match are run.
329 By default, all tests are run in parallel. This can be altered with the
330 RUST_TEST_TASKS environment variable when running tests (set it to 1).
332 All tests have their standard output and standard error captured by default.
333 This can be overridden with the --nocapture flag or the RUST_TEST_NOCAPTURE=1
334 environment variable. Logging is not captured by default.
338 #[test] - Indicates a function is a test to be run. This function
340 #[bench] - Indicates a function is a benchmark to be run. This
341 function takes one argument (test::Bencher).
342 #[should_fail] - This function (also labeled with #[test]) will only pass if
343 the code causes a failure (an assertion failure or panic!)
344 A message may be provided, which the failure string must
345 contain: #[should_fail(expected = "foo")].
346 #[ignore] - When applied to a function which is already attributed as a
347 test, then the test runner will ignore these tests during
348 normal test runs. Running with --ignored will run these
350 usage = getopts::usage(message.as_slice(),
351 optgroups().as_slice()));
354 // Parses command line arguments into test options
// Returns None when --help was handled, Some(Err(..)) on bad input, and
// Some(Ok(TestOpts)) otherwise. args[0] is the binary name (skipped by
// tail()); the first free argument becomes the name filter.
355 pub fn parse_opts(args: &[String]) -> Option<OptRes> {
356 let args_ = args.tail();
358 match getopts::getopts(args_.as_slice(), optgroups().as_slice()) {
360 Err(f) => return Some(Err(f.to_string()))
363 if matches.opt_present("h") { usage(args[0].as_slice()); return None; }
365 let filter = if matches.free.len() > 0 {
366 Some(matches.free[0].clone())
371 let run_ignored = matches.opt_present("ignored");
373 let logfile = matches.opt_str("logfile");
374 let logfile = logfile.map(|s| Path::new(s));
376 let run_benchmarks = matches.opt_present("bench");
// Tests run unless --bench was given without --test.
377 let run_tests = ! run_benchmarks ||
378 matches.opt_present("test");
380 let mut nocapture = matches.opt_present("nocapture");
// The env var is a second way to disable capture (original line 381, the
// guard around this assignment, is missing from this sampled span).
382 nocapture = os::getenv("RUST_TEST_NOCAPTURE").is_some();
385 let color = match matches.opt_str("color").as_ref().map(|s| s.as_slice()) {
386 Some("auto") | None => AutoColor,
387 Some("always") => AlwaysColor,
388 Some("never") => NeverColor,
390 Some(v) => return Some(Err(format!("argument for --color must be \
391 auto, always, or never (was {})",
395 let test_opts = TestOpts {
397 run_ignored: run_ignored,
398 run_tests: run_tests,
399 run_benchmarks: run_benchmarks,
401 nocapture: nocapture,
// Statistical summary of one benchmark's timings (plus, per original source,
// a throughput field not visible in this sampled span).
408 #[derive(Clone, PartialEq)]
409 pub struct BenchSamples {
410 ns_iter_summ: stats::Summary<f64>,
// Outcome of running one test. NOTE(review): the TrOk/TrFailed/TrIgnored
// variants are missing from this span but used throughout the file.
414 #[derive(Clone, PartialEq)]
415 pub enum TestResult {
419 TrMetrics(MetricMap),
420 TrBench(BenchSamples),
423 unsafe impl Send for TestResult {}
// Where console output goes: a color-capable terminal or a raw writer.
425 enum OutputLocation<T> {
426 Pretty(Box<term::Terminal<term::WriterWrapper> + Send>),
// Accumulated state of a console test run (counters such as passed/failed/
// ignored/measured/total exist in the original but are missing here).
430 struct ConsoleTestState<T> {
431 log_out: Option<File>,
432 out: OutputLocation<T>,
440 failures: Vec<(TestDesc, Vec<u8> )> ,
441 max_name_len: uint, // number of columns to fill when aligning names
// Console output methods. NOTE(review): sampled listing — match headers,
// else-arms and closing braces are missing throughout this impl; code lines
// are reproduced as-is.
444 impl<T: Writer> ConsoleTestState<T> {
// Build the run state: open the logfile if requested, pick a color-capable
// terminal or fall back to raw stdout. The Option<T> argument only pins the
// writer type parameter.
445 pub fn new(opts: &TestOpts,
446 _: Option<T>) -> io::IoResult<ConsoleTestState<StdWriter>> {
447 let log_out = match opts.logfile {
448 Some(ref path) => Some(try!(File::create(path))),
451 let out = match term::stdout() {
452 None => Raw(io::stdio::stdout_raw()),
456 Ok(ConsoleTestState {
459 use_color: use_color(opts),
465 metrics: MetricMap::new(),
466 failures: Vec::new(),
// Fixed status words, each in its conventional color.
471 pub fn write_ok(&mut self) -> io::IoResult<()> {
472 self.write_pretty("ok", term::color::GREEN)
475 pub fn write_failed(&mut self) -> io::IoResult<()> {
476 self.write_pretty("FAILED", term::color::RED)
479 pub fn write_ignored(&mut self) -> io::IoResult<()> {
480 self.write_pretty("ignored", term::color::YELLOW)
483 pub fn write_metric(&mut self) -> io::IoResult<()> {
484 self.write_pretty("metric", term::color::CYAN)
487 pub fn write_bench(&mut self) -> io::IoResult<()> {
488 self.write_pretty("bench", term::color::CYAN)
// Write `word` in `color` when on a terminal with color enabled; plain
// write otherwise (the use_color guard lines are missing from this span).
491 pub fn write_pretty(&mut self,
493 color: term::color::Color) -> io::IoResult<()> {
495 Pretty(ref mut term) => {
497 try!(term.fg(color));
499 try!(term.write(word.as_bytes()));
505 Raw(ref mut stdout) => stdout.write(word.as_bytes())
// Uncolored write to whichever sink is active.
509 pub fn write_plain(&mut self, s: &str) -> io::IoResult<()> {
511 Pretty(ref mut term) => term.write(s.as_bytes()),
512 Raw(ref mut stdout) => stdout.write(s.as_bytes())
// "running N test(s)" banner, with singular/plural noun.
516 pub fn write_run_start(&mut self, len: uint) -> io::IoResult<()> {
518 let noun = if len != 1 { "tests" } else { "test" };
519 self.write_plain(format!("\nrunning {} {}\n", len, noun).as_slice())
// "test <name> ... " prefix, name padded to the longest test name.
522 pub fn write_test_start(&mut self, test: &TestDesc,
523 align: NamePadding) -> io::IoResult<()> {
524 let name = test.padded_name(self.max_name_len, align);
525 self.write_plain(format!("test {} ... ", name).as_slice())
// Colored per-result word, plus formatted metrics/bench numbers.
528 pub fn write_result(&mut self, result: &TestResult) -> io::IoResult<()> {
530 TrOk => self.write_ok(),
531 TrFailed => self.write_failed(),
532 TrIgnored => self.write_ignored(),
533 TrMetrics(ref mm) => {
534 try!(self.write_metric());
535 self.write_plain(format!(": {}", mm.fmt_metrics()).as_slice())
538 try!(self.write_bench());
540 try!(self.write_plain(format!(": {}",
541 fmt_bench_samples(bs)).as_slice()));
546 self.write_plain("\n")
// Append "<status> <name>" to the --logfile, if one is open (the
// Option match around `o` is missing from this span).
549 pub fn write_log(&mut self, test: &TestDesc,
550 result: &TestResult) -> io::IoResult<()> {
554 let s = format!("{} {}\n", match *result {
555 TrOk => "ok".to_string(),
556 TrFailed => "failed".to_string(),
557 TrIgnored => "ignored".to_string(),
558 TrMetrics(ref mm) => mm.fmt_metrics(),
559 TrBench(ref bs) => fmt_bench_samples(bs)
560 }, test.name.as_slice());
561 o.write(s.as_bytes())
// Failure report: captured stdout of each failing test, then the sorted
// list of failing names (the failures.sort() call at original line 586 is
// missing from this sampled span — the unit test below relies on it).
566 pub fn write_failures(&mut self) -> io::IoResult<()> {
567 try!(self.write_plain("\nfailures:\n"));
568 let mut failures = Vec::new();
569 let mut fail_out = String::new();
570 for &(ref f, ref stdout) in self.failures.iter() {
571 failures.push(f.name.to_string());
572 if stdout.len() > 0 {
573 fail_out.push_str(format!("---- {} stdout ----\n\t",
574 f.name.as_slice()).as_slice());
575 let output = String::from_utf8_lossy(stdout.as_slice());
576 fail_out.push_str(output.as_slice());
577 fail_out.push_str("\n");
580 if fail_out.len() > 0 {
581 try!(self.write_plain("\n"));
582 try!(self.write_plain(fail_out.as_slice()));
585 try!(self.write_plain("\nfailures:\n"));
587 for name in failures.iter() {
588 try!(self.write_plain(format!(" {}\n",
589 name.as_slice()).as_slice()));
// Final summary line; returns Ok(true) iff no test failed.
594 pub fn write_run_finish(&mut self) -> io::IoResult<bool> {
595 assert!(self.passed + self.failed + self.ignored + self.measured == self.total);
597 let success = self.failed == 0u;
599 try!(self.write_failures());
602 try!(self.write_plain("\ntest result: "));
604 // There's no parallelism at this point so it's safe to use color
605 try!(self.write_ok());
607 try!(self.write_failed());
609 let s = format!(". {} passed; {} failed; {} ignored; {} measured\n\n",
610 self.passed, self.failed, self.ignored, self.measured);
611 try!(self.write_plain(s.as_slice()));
// Format a benchmark summary as "median ns/iter (+/- spread)", appending a
// MB/s throughput figure when byte-count data is available (the branch
// condition around these two format! calls is missing from this span).
616 pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
618 format!("{:>9} ns/iter (+/- {}) = {} MB/s",
619 bs.ns_iter_summ.median as uint,
620 (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint,
623 format!("{:>9} ns/iter (+/- {})",
624 bs.ns_iter_summ.median as uint,
625 (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint)
629 // A simple console test runner
// Drives run_tests() and renders each TestEvent into console output and
// counters; returns Ok(true) iff every test passed.
630 pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn> ) -> io::IoResult<bool> {
// Per-event handler: log, print, and tally each result; record failures
// (with captured stdout) and fold metric/bench results into st.metrics.
// NOTE(review): sampled listing — several match arms (TrFailed, TrMetrics/
// TrBench headers) and closing braces are missing from this span.
632 fn callback<T: Writer>(event: &TestEvent, st: &mut ConsoleTestState<T>) -> io::IoResult<()> {
633 match (*event).clone() {
634 TeFiltered(ref filtered_tests) => st.write_run_start(filtered_tests.len()),
635 TeWait(ref test, padding) => st.write_test_start(test, padding),
636 TeResult(test, result, stdout) => {
637 try!(st.write_log(&test, &result));
638 try!(st.write_result(&result));
640 TrOk => st.passed += 1,
641 TrIgnored => st.ignored += 1,
643 let tname = test.name.as_slice();
644 let MetricMap(mm) = mm;
645 for (k,v) in mm.iter() {
647 .insert_metric(format!("{}.{}",
// Benchmarks are recorded as a metric keyed by test name: value =
// median, noise = max-min spread.
656 st.metrics.insert_metric(test.name.as_slice(),
657 bs.ns_iter_summ.median,
658 bs.ns_iter_summ.max - bs.ns_iter_summ.min);
663 st.failures.push((test, stdout));
671 let mut st = try!(ConsoleTestState::new(opts, None::<StdWriter>));
// Column width contribution of a test: 0 for PadNone tests, name length
// for padded (bench/metric) entries.
672 fn len_if_padded(t: &TestDescAndFn) -> uint {
673 match t.testfn.padding() {
675 PadOnLeft | PadOnRight => t.desc.name.as_slice().len(),
// Size the name column to the widest padded test name.
678 match tests.iter().max_by(|t|len_if_padded(*t)) {
680 let n = t.desc.name.as_slice();
681 st.max_name_len = n.len();
685 try!(run_tests(opts, tests, |x| callback(&x, &mut st)));
686 return st.write_run_finish();
// Unit test: write_failures() must list failing test names in sorted order
// ("a" before "b") even though they were pushed in reverse.
// NOTE(review): sampled listing — the #[test] attribute, the ignore/
// should_fail fields of the descs, and most ConsoleTestState fields are
// missing from this span.
690 fn should_sort_failures_before_printing_them() {
691 let test_a = TestDesc {
692 name: StaticTestName("a"),
694 should_fail: ShouldFail::No
697 let test_b = TestDesc {
698 name: StaticTestName("b"),
700 should_fail: ShouldFail::No
703 let mut st = ConsoleTestState {
// Raw(Vec::new()) captures output in memory for inspection below.
705 out: Raw(Vec::new()),
713 metrics: MetricMap::new(),
714 failures: vec!((test_b, Vec::new()), (test_a, Vec::new()))
717 st.write_failures().unwrap();
718 let s = match st.out {
719 Raw(ref m) => String::from_utf8_lossy(&m[]),
720 Pretty(_) => unreachable!()
723 let apos = s.find_str("a").unwrap();
724 let bpos = s.find_str("b").unwrap();
725 assert!(apos < bpos);
// Decide whether to colorize: AutoColor requires serial execution and a tty
// (the AlwaysColor/NeverColor arms are missing from this sampled span).
728 fn use_color(opts: &TestOpts) -> bool {
730 AutoColor => get_concurrency() == 1 && io::stdout().get_ref().isatty(),
// Events streamed from run_tests() to the console callback: the filtered
// test list, "about to run", and "finished with result + captured output".
738 TeFiltered(Vec<TestDesc> ),
739 TeWait(TestDesc, NamePadding),
740 TeResult(TestDesc, TestResult, Vec<u8> ),
// Message sent back from each test task over the monitor channel.
743 pub type MonitorMsg = (TestDesc, TestResult, Vec<u8> );
// Core scheduler: filter, partition tests from benches/metrics, run tests
// with up to `concurrency` in flight, then run benches/metrics serially,
// reporting every step through `callback`.
746 fn run_tests<F>(opts: &TestOpts,
747 tests: Vec<TestDescAndFn> ,
748 mut callback: F) -> io::IoResult<()> where
749 F: FnMut(TestEvent) -> io::IoResult<()>,
751 let filtered_tests = filter_tests(opts, tests);
752 let filtered_descs = filtered_tests.iter()
753 .map(|t| t.desc.clone())
756 try!(callback(TeFiltered(filtered_descs)));
// Plain tests go in the parallel pool; benches/metrics are held back.
758 let (filtered_tests, filtered_benchs_and_metrics): (Vec<_>, _) =
759 filtered_tests.into_iter().partition(|e| {
761 StaticTestFn(_) | DynTestFn(_) => true,
766 // It's tempting to just spawn all the tests at once, but since we have
767 // many tests that run in other processes we would be making a big mess.
768 let concurrency = get_concurrency();
770 let mut remaining = filtered_tests;
// Results come back over one mpsc channel; `pending` (declared on a
// missing line) tracks in-flight tests.
774 let (tx, rx) = channel::<MonitorMsg>();
776 while pending > 0 || !remaining.is_empty() {
777 while pending < concurrency && !remaining.is_empty() {
778 let test = remaining.pop().unwrap();
779 if concurrency == 1 {
780 // We are doing one test at a time so we can print the name
781 // of the test before we run it. Useful for debugging tests
782 // that hang forever.
783 try!(callback(TeWait(test.desc.clone(), test.testfn.padding())));
// !opts.run_tests turns these into forced-ignored runs (--bench only).
785 run_test(opts, !opts.run_tests, test, tx.clone());
789 let (desc, result, stdout) = rx.recv().unwrap();
790 if concurrency != 1 {
791 try!(callback(TeWait(desc.clone(), PadNone)));
793 try!(callback(TeResult(desc, result, stdout)));
797 // All benchmarks run at the end, in serial.
798 // (this includes metric fns)
799 for b in filtered_benchs_and_metrics.into_iter() {
800 try!(callback(TeWait(b.desc.clone(), b.testfn.padding())));
801 run_test(opts, !opts.run_benchmarks, b, tx.clone());
802 let (test, result, stdout) = rx.recv().unwrap();
803 try!(callback(TeResult(test, result, stdout)));
// Number of tests to run in parallel: RUST_TEST_TASKS (must parse as a
// positive integer) wins; otherwise the runtime's default thread count.
808 fn get_concurrency() -> uint {
810 match os::getenv("RUST_TEST_TASKS") {
812 let opt_n: Option<uint> = FromStr::from_str(s.as_slice());
814 Some(n) if n > 0 => n,
815 _ => panic!("RUST_TEST_TASKS is `{}`, should be a positive integer.", s)
819 rt::default_sched_threads()
// Apply the name filter, the --ignored mode, and alphabetical ordering to
// the test list. Note: despite usage() calling FILTER a regex, the check
// here is a plain substring `contains`.
824 pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
825 let mut filtered = tests;
827 // Remove tests that don't match the test filter
828 filtered = match opts.filter {
830 Some(ref filter) => {
831 filtered.into_iter().filter(|test| {
832 test.desc.name.as_slice().contains(&filter[])
837 // Maybe pull out the ignored test and unignore them
// With --ignored: keep only ignored tests, flipping their flag to false
// so they actually run; non-ignored tests map to None and are dropped.
838 filtered = if !opts.run_ignored {
841 fn filter(test: TestDescAndFn) -> Option<TestDescAndFn> {
842 if test.desc.ignore {
843 let TestDescAndFn {desc, testfn} = test;
845 desc: TestDesc {ignore: false, ..desc},
852 filtered.into_iter().filter_map(|x| filter(x)).collect()
855 // Sort the tests alphabetically
856 filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));
// Execute one test/bench/metric and send (desc, result, captured-stdout)
// over `monitor_ch`. `force_ignore` short-circuits to TrIgnored (used when
// running only benches or only tests).
861 pub fn run_test(opts: &TestOpts,
864 monitor_ch: Sender<MonitorMsg>) {
866 let TestDescAndFn {desc, testfn} = test;
868 if force_ignore || desc.ignore {
869 monitor_ch.send((desc, TrIgnored, Vec::new())).unwrap();
// Run a plain test in its own thread, with stdout/stderr redirected into
// an in-memory channel unless --nocapture was given (the nocapture guard
// lines are missing from this sampled span).
873 fn run_test_inner(desc: TestDesc,
874 monitor_ch: Sender<MonitorMsg>,
877 Thread::spawn(move || {
878 let (tx, rx) = channel();
879 let mut reader = ChanReader::new(rx);
880 let stdout = ChanWriter::new(tx.clone());
881 let stderr = ChanWriter::new(tx);
882 let mut cfg = thread::Builder::new().name(match desc.name {
883 DynTestName(ref name) => name.clone().to_string(),
884 StaticTestName(name) => name.to_string(),
// nocapture path: discard the redirectors so output goes to the real
// console (presumably guarded by `if nocapture` on a missing line).
887 drop((stdout, stderr));
889 cfg = cfg.stdout(box stdout as Box<Writer + Send>);
890 cfg = cfg.stderr(box stderr as Box<Writer + Send>);
// Run the test body on a scoped child thread; join() yields Err on panic.
893 let result_guard = cfg.scoped(move || { testfn.invoke(()) });
894 let stdout = reader.read_to_end().unwrap().into_iter().collect();
895 let test_result = calc_result(&desc, result_guard.join());
896 monitor_ch.send((desc.clone(), test_result, stdout)).unwrap();
// Dispatch by test kind. Benches and metric fns run inline on this thread;
// only plain tests go through run_test_inner's capture machinery.
901 DynBenchFn(bencher) => {
902 let bs = ::bench::benchmark(|harness| bencher.run(harness));
903 monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
906 StaticBenchFn(benchfn) => {
907 let bs = ::bench::benchmark(|harness| (benchfn.clone())(harness));
908 monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
912 let mut mm = MetricMap::new();
914 monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
917 StaticMetricFn(f) => {
918 let mut mm = MetricMap::new();
920 monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
923 DynTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture, f),
924 StaticTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture,
925 Thunk::new(move|| f()))
// Map (should_fail expectation, thread outcome) to a TestResult:
// - normal test that returned Ok, or should-fail test that panicked with no
//   expected message => TrOk;
// - should-fail with expected message => TrOk only if the panic payload (a
//   String or &'static str) contains that substring;
// - everything else (missing catch-all arm in this sampled span) => TrFailed.
929 fn calc_result(desc: &TestDesc, task_result: Result<(), Box<Any+Send>>) -> TestResult {
930 match (&desc.should_fail, task_result) {
931 (&ShouldFail::No, Ok(())) |
932 (&ShouldFail::Yes(None), Err(_)) => TrOk,
933 (&ShouldFail::Yes(Some(msg)), Err(ref err))
934 if err.downcast_ref::<String>()
936 .or_else(|| err.downcast_ref::<&'static str>().map(|e| *e))
937 .map(|e| e.contains(msg))
938 .unwrap_or(false) => TrOk,
// Empty metric map (the `impl MetricMap {` header is missing from this
// sampled span).
945 pub fn new() -> MetricMap {
946 MetricMap(BTreeMap::new())
949 /// Insert a named `value` (+/- `noise`) metric into the map. The value
950 /// must be non-negative. The `noise` indicates the uncertainty of the
951 /// metric, which doubles as the "noise range" of acceptable
952 /// pairwise-regressions on this named value, when comparing from one
953 /// metric to the next using `compare_to_old`.
955 /// If `noise` is positive, then it means this metric is of a value
956 /// you want to see grow smaller, so a change larger than `noise` in the
957 /// positive direction represents a regression.
959 /// If `noise` is negative, then it means this metric is of a value
960 /// you want to see grow larger, so a change larger than `noise` in the
961 /// negative direction represents a regression.
962 pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
// The Metric construction (`let m = Metric { ... }`) sits on lines missing
// from this sampled span; `m` is inserted under an owned copy of `name`.
967 let MetricMap(ref mut map) = *self;
968 map.insert(name.to_string(), m);
// Render every metric as "name: value (+/- noise)" (the final join of `v`
// into one string is on lines missing from this span).
971 pub fn fmt_metrics(&self) -> String {
972 let MetricMap(ref mm) = *self;
973 let v : Vec<String> = mm.iter()
974 .map(|(k,v)| format!("{}: {} (+/- {})", *k,
975 v.value as f64, v.noise as f64))
984 /// A function that is opaque to the optimizer, to allow benchmarks to
985 /// pretend to use outputs to assist in avoiding dead-code
988 /// This function is a no-op, and does not even read from `dummy`.
989 pub fn black_box<T>(dummy: T) -> T {
990 // we need to "use" the argument in some way LLVM can't
// Empty inline asm taking &dummy as an input defeats constant folding and
// dead-code elimination (the `dummy` return expression is on a line missing
// from this sampled span).
992 unsafe {asm!("" : : "r"(&dummy))}
998 /// Callback for benchmark functions to run in their body.
// Times `iterations` executions of `inner`, storing the wall-clock span in
// self.dur (the loop body calling inner() is on lines missing from this
// sampled span).
999 pub fn iter<T, F>(&mut self, mut inner: F) where F: FnMut() -> T {
1000 self.dur = Duration::span(|| {
1001 let k = self.iterations;
1002 for _ in range(0u64, k) {
// Total elapsed nanoseconds of the last iter() run.
1008 pub fn ns_elapsed(&mut self) -> u64 {
1009 self.dur.num_nanoseconds().unwrap() as u64
// Average ns per iteration; the max(…, 1) guards the 0-iteration case.
1012 pub fn ns_per_iter(&mut self) -> u64 {
1013 if self.iterations == 0 {
1016 self.ns_elapsed() / cmp::max(self.iterations, 1)
// Run the benchmark body exactly n iterations.
1020 pub fn bench_n<F>(&mut self, n: u64, f: F) where F: FnOnce(&mut Bencher) {
1021 self.iterations = n;
1025 // This is a more statistics-driven benchmark algorithm
// Adaptively choose an iteration count, then repeatedly take 50-sample
// batches (at n and 5n iterations), winsorize them, and stop once the
// measurements converge or a 3s budget is exhausted.
// NOTE(review): sampled listing — the initial `let mut n`, the enclosing
// `loop`, and the early-return lines are missing from this span.
1026 pub fn auto_bench<F>(&mut self, mut f: F) -> stats::Summary<f64> where F: FnMut(&mut Bencher) {
1027 // Initial bench run to get ballpark figure.
1029 self.bench_n(n, |x| f(x));
1031 // Try to estimate iter count for 1ms falling back to 1m
1032 // iterations if first run took < 1ns.
1033 if self.ns_per_iter() == 0 {
1036 n = 1_000_000 / cmp::max(self.ns_per_iter(), 1);
1038 // if the first run took more than 1ms we don't want to just
1039 // be left doing 0 iterations on every loop. The unfortunate
1040 // side effect of not being able to do as many runs is
1041 // automatically handled by the statistical analysis below
1042 // (i.e. larger error bars).
1043 if n == 0 { n = 1; }
1045 let mut total_run = Duration::nanoseconds(0);
1046 let samples : &mut [f64] = &mut [0.0_f64; 50];
1048 let mut summ = None;
1049 let mut summ5 = None;
// One convergence round: 50 samples at n iterations, then 50 at 5n.
1051 let loop_run = Duration::span(|| {
1053 for p in samples.iter_mut() {
1054 self.bench_n(n, |x| f(x));
1055 *p = self.ns_per_iter() as f64;
// winsorize clamps the top/bottom 5% of samples to damp outliers.
1058 stats::winsorize(samples, 5.0);
1059 summ = Some(stats::Summary::new(samples));
1061 for p in samples.iter_mut() {
1062 self.bench_n(5 * n, |x| f(x));
1063 *p = self.ns_per_iter() as f64;
1066 stats::winsorize(samples, 5.0);
1067 summ5 = Some(stats::Summary::new(samples));
1069 let summ = summ.unwrap();
1070 let summ5 = summ5.unwrap();
1072 // If we've run for 100ms and seem to have converged to a
// ... stable median (n-batch and 5n-batch medians agree within noise).
1074 if loop_run.num_milliseconds() > 100 &&
1075 summ.median_abs_dev_pct < 1.0 &&
1076 summ.median - summ5.median < summ5.median_abs_dev {
1080 total_run = total_run + loop_run;
1081 // Longest we ever run for is 3s.
1082 if total_run.num_seconds() > 3 {
1093 use std::time::Duration;
1094 use super::{Bencher, BenchSamples};
// Public entry used by run_test: build a fresh Bencher, run the adaptive
// loop, and derive MB/s throughput from the median ns/iter and bs.bytes
// (the Bencher's iterations/bytes fields and the returned BenchSamples
// literal are partially missing from this sampled span).
1096 pub fn benchmark<F>(f: F) -> BenchSamples where F: FnMut(&mut Bencher) {
1097 let mut bs = Bencher {
1099 dur: Duration::nanoseconds(0),
1103 let ns_iter_summ = bs.auto_bench(f);
// Guard against ns_iter == 0 before dividing.
1105 let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
1106 let iter_s = 1_000_000_000 / ns_iter;
1107 let mb_s = (bs.bytes * iter_s) / 1_000_000;
1110 ns_iter_summ: ns_iter_summ,
// Unit tests for the runner itself. NOTE(review): sampled listing — the
// #[test] attributes, struct closing braces, and several field lines are
// missing throughout this module; code lines are reproduced as-is.
1118 use test::{TrFailed, TrIgnored, TrOk, filter_tests, parse_opts,
1119 TestDesc, TestDescAndFn, TestOpts, run_test,
1121 StaticTestName, DynTestName, DynTestFn, ShouldFail};
1122 use std::io::TempDir;
1123 use std::thunk::Thunk;
1124 use std::sync::mpsc::channel;
// An ignored, panicking test must not report TrOk.
1127 pub fn do_not_run_ignored_tests() {
1128 fn f() { panic!(); }
1129 let desc = TestDescAndFn {
1131 name: StaticTestName("whatever"),
1133 should_fail: ShouldFail::No,
1135 testfn: DynTestFn(Thunk::new(move|| f())),
1137 let (tx, rx) = channel();
1138 run_test(&TestOpts::new(), false, desc, tx);
1139 let (_, res, _) = rx.recv().unwrap();
1140 assert!(res != TrOk);
// An ignored test must report exactly TrIgnored.
1144 pub fn ignored_tests_result_in_ignored() {
1146 let desc = TestDescAndFn {
1148 name: StaticTestName("whatever"),
1150 should_fail: ShouldFail::No,
1152 testfn: DynTestFn(Thunk::new(move|| f())),
1154 let (tx, rx) = channel();
1155 run_test(&TestOpts::new(), false, desc, tx);
1156 let (_, res, _) = rx.recv().unwrap();
1157 assert!(res == TrIgnored);
// should_fail(None) + panic => TrOk.
1161 fn test_should_fail() {
1162 fn f() { panic!(); }
1163 let desc = TestDescAndFn {
1165 name: StaticTestName("whatever"),
1167 should_fail: ShouldFail::Yes(None)
1169 testfn: DynTestFn(Thunk::new(move|| f())),
1171 let (tx, rx) = channel();
1172 run_test(&TestOpts::new(), false, desc, tx);
1173 let (_, res, _) = rx.recv().unwrap();
1174 assert!(res == TrOk);
// Expected message that IS a substring of the panic => TrOk.
1178 fn test_should_fail_good_message() {
1179 fn f() { panic!("an error message"); }
1180 let desc = TestDescAndFn {
1182 name: StaticTestName("whatever"),
1184 should_fail: ShouldFail::Yes(Some("error message"))
1186 testfn: DynTestFn(Thunk::new(move|| f())),
1188 let (tx, rx) = channel();
1189 run_test(&TestOpts::new(), false, desc, tx);
1190 let (_, res, _) = rx.recv().unwrap();
1191 assert!(res == TrOk);
// Expected message that is NOT in the panic => TrFailed.
1195 fn test_should_fail_bad_message() {
1196 fn f() { panic!("an error message"); }
1197 let desc = TestDescAndFn {
1199 name: StaticTestName("whatever"),
1201 should_fail: ShouldFail::Yes(Some("foobar"))
1203 testfn: DynTestFn(Thunk::new(move|| f())),
1205 let (tx, rx) = channel();
1206 run_test(&TestOpts::new(), false, desc, tx);
1207 let (_, res, _) = rx.recv().unwrap();
1208 assert!(res == TrFailed);
// should_fail test that returns normally => TrFailed.
1212 fn test_should_fail_but_succeeds() {
1214 let desc = TestDescAndFn {
1216 name: StaticTestName("whatever"),
1218 should_fail: ShouldFail::Yes(None)
1220 testfn: DynTestFn(Thunk::new(move|| {})),
1222 let (tx, rx) = channel();
1223 run_test(&TestOpts::new(), false, desc, tx);
1224 let (_, res, _) = rx.recv().unwrap();
1225 assert!(res == TrFailed);
// "--ignored" on the command line sets opts.run_ignored.
1229 fn parse_ignored_flag() {
1230 let args = vec!("progname".to_string(),
1231 "filter".to_string(),
1232 "--ignored".to_string());
1233 let opts = match parse_opts(args.as_slice()) {
1235 _ => panic!("Malformed arg in parse_ignored_flag")
1237 assert!((opts.run_ignored));
// With run_ignored, only the ignored test survives filtering, un-ignored.
1241 pub fn filter_for_ignored_option() {
1242 // When we run ignored tests the test filter should filter out all the
1243 // unignored tests and flip the ignore flag on the rest to false
1245 let mut opts = TestOpts::new();
1246 opts.run_tests = true;
1247 opts.run_ignored = true;
// Test "1" (not ignored) and test "2" (ignored) — the ignore: fields sit
// on lines missing from this sampled span.
1252 name: StaticTestName("1"),
1254 should_fail: ShouldFail::No,
1256 testfn: DynTestFn(Thunk::new(move|| {})),
1260 name: StaticTestName("2"),
1262 should_fail: ShouldFail::No,
1264 testfn: DynTestFn(Thunk::new(move|| {})),
1266 let filtered = filter_tests(&opts, tests);
1268 assert_eq!(filtered.len(), 1);
1269 assert_eq!(filtered[0].desc.name.to_string(),
1271 assert!(filtered[0].desc.ignore == false);
// filter_tests must return names in ascending lexicographic order.
1275 pub fn sort_tests() {
1276 let mut opts = TestOpts::new();
1277 opts.run_tests = true;
// Deliberately unsorted input names...
1280 vec!("sha1::test".to_string(),
1281 "int::test_to_str".to_string(),
1282 "int::test_pow".to_string(),
1283 "test::do_not_run_ignored_tests".to_string(),
1284 "test::ignored_tests_result_in_ignored".to_string(),
1285 "test::first_free_arg_should_be_a_filter".to_string(),
1286 "test::parse_ignored_flag".to_string(),
1287 "test::filter_for_ignored_option".to_string(),
1288 "test::sort_tests".to_string());
1292 let mut tests = Vec::new();
1293 for name in names.iter() {
1294 let test = TestDescAndFn {
1296 name: DynTestName((*name).clone()),
1298 should_fail: ShouldFail::No,
1300 testfn: DynTestFn(Thunk::new(testfn)),
1306 let filtered = filter_tests(&opts, tests);
// ...and the same names in their expected sorted order.
1309 vec!("int::test_pow".to_string(),
1310 "int::test_to_str".to_string(),
1311 "sha1::test".to_string(),
1312 "test::do_not_run_ignored_tests".to_string(),
1313 "test::filter_for_ignored_option".to_string(),
1314 "test::first_free_arg_should_be_a_filter".to_string(),
1315 "test::ignored_tests_result_in_ignored".to_string(),
1316 "test::parse_ignored_flag".to_string(),
1317 "test::sort_tests".to_string());
1319 for (a, b) in expected.iter().zip(filtered.iter()) {
1320 assert!(*a == b.desc.name.to_string());
// Exercise insert_metric across the noise-sign conventions documented on
// insert_metric (the assertions comparing m1 and m2 are on lines past the
// end of this sampled span).
1325 pub fn test_metricmap_compare() {
1326 let mut m1 = MetricMap::new();
1327 let mut m2 = MetricMap::new();
1328 m1.insert_metric("in-both-noise", 1000.0, 200.0);
1329 m2.insert_metric("in-both-noise", 1100.0, 200.0);
1331 m1.insert_metric("in-first-noise", 1000.0, 2.0);
1332 m2.insert_metric("in-second-noise", 1000.0, 2.0);
1334 m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
1335 m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);
1337 m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
1338 m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);
1340 m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
1341 m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);
1343 m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
1344 m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);