1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 //! Support code for rustc's built-in unit-test and micro-benchmarking
14 //! Almost all user code will only be interested in `Bencher` and
15 //! `black_box`. All other interactions (such as writing tests and
16 //! benchmarks themselves) should be done via the `#[test]` and
17 //! `#[bench]` attributes.
19 //! See the [Testing Chapter](../book/testing.html) of the book for more details.
21 // Currently, not much of this is meant for users. It is intended to
22 // support the simplest interface possible for representing and
23 // running tests while providing a base that other test frameworks may
26 // Do not remove on snapshot creation. Needed for bootstrap. (Issue #22364)
27 #![cfg_attr(stage0, feature(custom_attribute))]
28 #![crate_name = "test"]
29 #![unstable(feature = "test")]
31 #![crate_type = "rlib"]
32 #![crate_type = "dylib"]
33 #![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
34 html_favicon_url = "http://www.rust-lang.org/favicon.ico",
35 html_root_url = "http://doc.rust-lang.org/nightly/")]
38 #![feature(box_syntax)]
39 #![feature(collections)]
41 #![feature(rustc_private)]
42 #![feature(staged_api)]
45 #![feature(set_stdio)]
48 #![cfg_attr(test, feature(old_io))]
51 extern crate serialize;
52 extern crate serialize as rustc_serialize;
56 pub use self::TestFn::*;
57 pub use self::ColorConfig::*;
58 pub use self::TestResult::*;
59 pub use self::TestName::*;
60 use self::TestEvent::*;
61 use self::NamePadding::*;
62 use self::OutputLocation::*;
65 use getopts::{OptGroup, optflag, optopt};
66 use serialize::Encodable;
68 use term::color::{Color, RED, YELLOW, GREEN, CYAN};
72 use std::collections::BTreeMap;
76 use std::io::prelude::*;
78 use std::iter::repeat;
79 use std::num::{Float, Int};
80 use std::path::{PathBuf};
81 use std::sync::mpsc::{channel, Sender};
82 use std::sync::{Arc, Mutex};
84 use std::thunk::{Thunk, Invoke};
85 use std::time::Duration;
87 // to be used by rustc to compile tests in libtest
89 pub use {Bencher, TestName, TestResult, TestDesc,
90 TestDescAndFn, TestOpts, TrFailed, TrIgnored, TrOk,
92 StaticTestFn, StaticTestName, DynTestName, DynTestFn,
93 run_test, test_main, test_main_static, filter_tests,
94 parse_opts, StaticBenchFn, ShouldPanic};
99 // The name of a test. By convention this follows the rules for rust
100 // paths; i.e. it should be a series of identifiers separated by double
101 // colons. This way if some test runner wants to arrange the tests
102 // hierarchically it may.
104 #[derive(Clone, PartialEq, Eq, Hash, Debug)]
106 StaticTestName(&'static str),
// Borrow the test's name as a `&str`, erasing the static/dynamic
// distinction between the two `TestName` variants.
// NOTE(review): this excerpt is gap-sampled — the enclosing `match` line
// and closing braces are not visible here.
110 fn as_slice<'a>(&'a self) -> &'a str {
112 StaticTestName(s) => s,
113 DynTestName(ref s) => s
// `Display` delegates to the underlying `&str`, so test names print
// verbatim with no extra decoration.
117 impl fmt::Display for TestName {
118 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
119 fmt::Display::fmt(self.as_slice(), f)
123 #[derive(Clone, Copy)]
131 fn padded_name(&self, column_count: usize, align: NamePadding) -> String {
132 let mut name = String::from_str(self.name.as_slice());
133 let fill = column_count.saturating_sub(name.len());
134 let mut pad = repeat(" ").take(fill).collect::<String>();
149 /// Represents a benchmark function.
150 pub trait TDynBenchFn {
151 fn run(&self, harness: &mut Bencher);
154 // A function that runs a test. If the function returns successfully,
155 // the test succeeds; if the function panics then the test fails. We
156 // may need to come up with a more clever definition of test in order
157 // to support isolation of tests into tasks.
160 StaticBenchFn(fn(&mut Bencher)),
161 StaticMetricFn(fn(&mut MetricMap)),
162 DynTestFn(Thunk<'static>),
163 DynMetricFn(Box<for<'a> Invoke<&'a mut MetricMap>+'static>),
164 DynBenchFn(Box<TDynBenchFn+'static>)
168 fn padding(&self) -> NamePadding {
170 &StaticTestFn(..) => PadNone,
171 &StaticBenchFn(..) => PadOnRight,
172 &StaticMetricFn(..) => PadOnRight,
173 &DynTestFn(..) => PadNone,
174 &DynMetricFn(..) => PadOnRight,
175 &DynBenchFn(..) => PadOnRight,
180 impl fmt::Debug for TestFn {
181 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
182 f.write_str(match *self {
183 StaticTestFn(..) => "StaticTestFn(..)",
184 StaticBenchFn(..) => "StaticBenchFn(..)",
185 StaticMetricFn(..) => "StaticMetricFn(..)",
186 DynTestFn(..) => "DynTestFn(..)",
187 DynMetricFn(..) => "DynMetricFn(..)",
188 DynBenchFn(..) => "DynBenchFn(..)"
193 /// Manager of the benchmarking runs.
195 /// This is fed into functions marked with `#[bench]` to allow for
196 /// set-up & tear-down before running a piece of code repeatedly via a
205 #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
206 pub enum ShouldPanic {
208 Yes(Option<&'static str>)
211 // The definition of a single test. A test runner will run a list of
213 #[derive(Clone, Debug, PartialEq, Eq, Hash)]
214 pub struct TestDesc {
217 pub should_panic: ShouldPanic,
220 unsafe impl Send for TestDesc {}
223 pub struct TestDescAndFn {
228 #[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Debug, Copy)]
235 pub fn new(value: f64, noise: f64) -> Metric {
236 Metric {value: value, noise: noise}
241 pub struct MetricMap(BTreeMap<String,Metric>);
243 impl Clone for MetricMap {
244 fn clone(&self) -> MetricMap {
245 let MetricMap(ref map) = *self;
246 MetricMap(map.clone())
250 // The default console test runner. It accepts the command line
251 // arguments and a vector of test_descs.
// Entry point for the default console test runner: parse the CLI args,
// run the tests, and panic (failing the process) on bad args, test
// failures, or I/O errors while reporting.
252 pub fn test_main(args: &[String], tests: Vec<TestDescAndFn> ) {
254 match parse_opts(args) {
// Malformed command line: abort with the getopts error message.
256 Some(Err(msg)) => panic!("{:?}", msg),
259 match run_tests_console(&opts, tests) {
// `Ok(false)` means the run completed but at least one test failed.
261 Ok(false) => panic!("Some tests failed"),
262 Err(e) => panic!("io error when running tests: {:?}", e),
266 // A variant optimized for invocation with a static test vector.
267 // This will panic (intentionally) when fed any dynamic tests, because
268 // it is copying the static values out into a dynamic vector and cannot
269 // copy dynamic values. It is doing this because from this point on
270 // a ~[TestDescAndFn] is used in order to effect ownership-transfer
271 // semantics into parallel test runners, which in turn requires a ~[]
272 // rather than a &[].
273 pub fn test_main_static(args: env::Args, tests: &[TestDescAndFn]) {
274 let args = args.collect::<Vec<_>>();
// Copy each static test descriptor into an owned vector; only the
// `Static*` variants are `Copy`-able fn pointers, so anything else panics.
275 let owned_tests = tests.iter().map(|t| {
277 StaticTestFn(f) => TestDescAndFn { testfn: StaticTestFn(f), desc: t.desc.clone() },
278 StaticBenchFn(f) => TestDescAndFn { testfn: StaticBenchFn(f), desc: t.desc.clone() },
279 _ => panic!("non-static tests passed to test::test_main_static")
282 test_main(&args, owned_tests)
286 pub enum ColorConfig {
292 pub struct TestOpts {
293 pub filter: Option<String>,
294 pub run_ignored: bool,
296 pub run_benchmarks: bool,
297 pub logfile: Option<PathBuf>,
299 pub color: ColorConfig,
304 fn new() -> TestOpts {
309 run_benchmarks: false,
317 /// Result of parsing the options.
318 pub type OptRes = Result<TestOpts, String>;
320 fn optgroups() -> Vec<getopts::OptGroup> {
321 vec!(getopts::optflag("", "ignored", "Run ignored tests"),
322 getopts::optflag("", "test", "Run tests and not benchmarks"),
323 getopts::optflag("", "bench", "Run benchmarks instead of tests"),
324 getopts::optflag("h", "help", "Display this message (longer with --help)"),
325 getopts::optopt("", "logfile", "Write logs to the specified file instead \
327 getopts::optflag("", "nocapture", "don't capture stdout/stderr of each \
328 task, allow printing directly"),
329 getopts::optopt("", "color", "Configure coloring of output:
330 auto = colorize if stdout is a tty and tests are run serially (default);
331 always = always colorize output;
332 never = never colorize output;", "auto|always|never"))
335 fn usage(binary: &str) {
336 let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
339 The FILTER regex is tested against the name of all tests to run, and
340 only those tests that match are run.
342 By default, all tests are run in parallel. This can be altered with the
343 RUST_TEST_THREADS environment variable when running tests (set it to 1).
345 All tests have their standard output and standard error captured by default.
346 This can be overridden with the --nocapture flag or the RUST_TEST_NOCAPTURE=1
347 environment variable. Logging is not captured by default.
351 #[test] - Indicates a function is a test to be run. This function
353 #[bench] - Indicates a function is a benchmark to be run. This
354 function takes one argument (test::Bencher).
355 #[should_panic] - This function (also labeled with #[test]) will only pass if
356 the code causes a panic (an assertion failure or panic!)
357 A message may be provided, which the failure string must
358 contain: #[should_panic(expected = "foo")].
359 #[ignore] - When applied to a function which is already attributed as a
360 test, then the test runner will ignore these tests during
361 normal test runs. Running with --ignored will run these
363 usage = getopts::usage(&message, &optgroups()));
366 // Parses command line arguments into test options
// Parse command-line arguments into a `TestOpts`.
// Returns `None` when `-h/--help` was requested (usage already printed),
// `Some(Err(..))` on a malformed command line, and `Some(Ok(opts))` otherwise.
367 pub fn parse_opts(args: &[String]) -> Option<OptRes> {
// args[0] is the binary name; only the tail is option input.
368 let args_ = args.tail();
370 match getopts::getopts(args_, &optgroups()) {
372 Err(f) => return Some(Err(f.to_string()))
375 if matches.opt_present("h") { usage(&args[0]); return None; }
// The first free (non-option) argument, if any, is the test-name filter.
377 let filter = if matches.free.len() > 0 {
378 Some(matches.free[0].clone())
383 let run_ignored = matches.opt_present("ignored");
385 let logfile = matches.opt_str("logfile");
386 let logfile = logfile.map(|s| PathBuf::from(&s));
388 let run_benchmarks = matches.opt_present("bench");
// Tests run by default; `--bench` alone suppresses them unless `--test`
// is also given explicitly.
389 let run_tests = ! run_benchmarks ||
390 matches.opt_present("test");
// `--nocapture` flag, with RUST_TEST_NOCAPTURE env var as an override
// (the env check on the line below runs conditionally in the elided code).
392 let mut nocapture = matches.opt_present("nocapture");
394 nocapture = env::var("RUST_TEST_NOCAPTURE").is_ok();
// `--color` accepts exactly auto/always/never; anything else is an error.
397 let color = match matches.opt_str("color").as_ref().map(|s| &**s) {
398 Some("auto") | None => AutoColor,
399 Some("always") => AlwaysColor,
400 Some("never") => NeverColor,
402 Some(v) => return Some(Err(format!("argument for --color must be \
403 auto, always, or never (was {})",
407 let test_opts = TestOpts {
409 run_ignored: run_ignored,
410 run_tests: run_tests,
411 run_benchmarks: run_benchmarks,
413 nocapture: nocapture,
420 #[derive(Clone, PartialEq)]
421 pub struct BenchSamples {
422 ns_iter_summ: stats::Summary<f64>,
426 #[derive(Clone, PartialEq)]
427 pub enum TestResult {
431 TrMetrics(MetricMap),
432 TrBench(BenchSamples),
435 unsafe impl Send for TestResult {}
437 enum OutputLocation<T> {
438 Pretty(Box<term::Terminal<term::WriterWrapper> + Send>),
442 struct ConsoleTestState<T> {
443 log_out: Option<File>,
444 out: OutputLocation<T>,
452 failures: Vec<(TestDesc, Vec<u8> )> ,
453 max_name_len: usize, // number of columns to fill when aligning names
456 impl<T: Write> ConsoleTestState<T> {
457 pub fn new(opts: &TestOpts,
458 _: Option<T>) -> io::Result<ConsoleTestState<io::Stdout>> {
459 let log_out = match opts.logfile {
460 Some(ref path) => Some(try!(File::create(path))),
463 let out = match term::stdout() {
464 None => Raw(io::stdout()),
468 Ok(ConsoleTestState {
471 use_color: use_color(opts),
477 metrics: MetricMap::new(),
478 failures: Vec::new(),
// Small result-word writers: each prints a fixed status word in a
// conventional color (ok=green, FAILED=red, ignored=yellow,
// metric/bench=cyan) via `write_pretty`.
483 pub fn write_ok(&mut self) -> io::Result<()> {
484 self.write_pretty("ok", term::color::GREEN)
487 pub fn write_failed(&mut self) -> io::Result<()> {
488 self.write_pretty("FAILED", term::color::RED)
491 pub fn write_ignored(&mut self) -> io::Result<()> {
492 self.write_pretty("ignored", term::color::YELLOW)
495 pub fn write_metric(&mut self) -> io::Result<()> {
496 self.write_pretty("metric", term::color::CYAN)
499 pub fn write_bench(&mut self) -> io::Result<()> {
500 self.write_pretty("bench", term::color::CYAN)
// Write `word`, colored when output is a terminal (`Pretty`), plain
// bytes otherwise (`Raw`). Color is applied conditionally in elided code
// (presumably gated on `self.use_color` — gap-sampled here).
503 pub fn write_pretty(&mut self,
505 color: term::color::Color) -> io::Result<()> {
507 Pretty(ref mut term) => {
509 try!(term.fg(color));
511 try!(term.write_all(word.as_bytes()));
517 Raw(ref mut stdout) => {
518 try!(stdout.write_all(word.as_bytes()));
// Write `s` with no coloring to whichever output location is active.
524 pub fn write_plain(&mut self, s: &str) -> io::Result<()> {
526 Pretty(ref mut term) => {
527 try!(term.write_all(s.as_bytes()));
530 Raw(ref mut stdout) => {
531 try!(stdout.write_all(s.as_bytes()));
537 pub fn write_run_start(&mut self, len: usize) -> io::Result<()> {
539 let noun = if len != 1 { "tests" } else { "test" };
540 self.write_plain(&format!("\nrunning {} {}\n", len, noun))
543 pub fn write_test_start(&mut self, test: &TestDesc,
544 align: NamePadding) -> io::Result<()> {
545 let name = test.padded_name(self.max_name_len, align);
546 self.write_plain(&format!("test {} ... ", name))
549 pub fn write_result(&mut self, result: &TestResult) -> io::Result<()> {
551 TrOk => self.write_ok(),
552 TrFailed => self.write_failed(),
553 TrIgnored => self.write_ignored(),
554 TrMetrics(ref mm) => {
555 try!(self.write_metric());
556 self.write_plain(&format!(": {}", mm.fmt_metrics()))
559 try!(self.write_bench());
561 try!(self.write_plain(&format!(": {}", fmt_bench_samples(bs))));
566 self.write_plain("\n")
569 pub fn write_log(&mut self, test: &TestDesc,
570 result: &TestResult) -> io::Result<()> {
574 let s = format!("{} {}\n", match *result {
575 TrOk => "ok".to_string(),
576 TrFailed => "failed".to_string(),
577 TrIgnored => "ignored".to_string(),
578 TrMetrics(ref mm) => mm.fmt_metrics(),
579 TrBench(ref bs) => fmt_bench_samples(bs)
581 o.write_all(s.as_bytes())
// Print the failure report: first each failed test's captured stdout
// (when non-empty), then the list of failed test names.
586 pub fn write_failures(&mut self) -> io::Result<()> {
587 try!(self.write_plain("\nfailures:\n"));
588 let mut failures = Vec::new();
589 let mut fail_out = String::new();
// Collect names and captured output in one pass over recorded failures.
590 for &(ref f, ref stdout) in &self.failures {
591 failures.push(f.name.to_string());
592 if stdout.len() > 0 {
593 fail_out.push_str(&format!("---- {} stdout ----\n\t", f.name));
// Captured output may not be valid UTF-8; render it lossily.
594 let output = String::from_utf8_lossy(stdout);
595 fail_out.push_str(&output);
596 fail_out.push_str("\n");
// Only emit the stdout section when at least one failure produced output.
599 if fail_out.len() > 0 {
600 try!(self.write_plain("\n"));
601 try!(self.write_plain(&fail_out));
604 try!(self.write_plain("\nfailures:\n"));
606 for name in &failures {
607 try!(self.write_plain(&format!(" {}\n", name)));
// Print the final summary line and report overall success.
// Returns Ok(true) iff no test failed.
612 pub fn write_run_finish(&mut self) -> io::Result<bool> {
// Sanity check: every filtered test must be accounted for exactly once.
613 assert!(self.passed + self.failed + self.ignored + self.measured == self.total);
615 let success = self.failed == 0;
617 try!(self.write_failures());
620 try!(self.write_plain("\ntest result: "));
622 // There's no parallelism at this point so it's safe to use color
623 try!(self.write_ok());
625 try!(self.write_failed());
627 let s = format!(". {} passed; {} failed; {} ignored; {} measured\n\n",
628 self.passed, self.failed, self.ignored, self.measured);
629 try!(self.write_plain(&s));
// Render one benchmark's samples as `median ns/iter (+/- spread)`,
// appending MB/s throughput when a byte count is available (the branch
// condition is in elided, gap-sampled code).
634 pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
636 format!("{:>9} ns/iter (+/- {}) = {} MB/s",
637 bs.ns_iter_summ.median as usize,
// "+/-" is the full max-min spread of the per-iteration timings.
638 (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize,
641 format!("{:>9} ns/iter (+/- {})",
642 bs.ns_iter_summ.median as usize,
643 (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize)
647 // A simple console test runner
// A simple console test runner: drives `run_tests` and renders each
// `TestEvent` to the console, tallying results in `ConsoleTestState`.
// Returns Ok(true) iff every test passed.
648 pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn> ) -> io::Result<bool> {
// Per-event callback: print progress and update counters/metrics.
650 fn callback<T: Write>(event: &TestEvent,
651 st: &mut ConsoleTestState<T>) -> io::Result<()> {
652 match (*event).clone() {
653 TeFiltered(ref filtered_tests) => st.write_run_start(filtered_tests.len()),
654 TeWait(ref test, padding) => st.write_test_start(test, padding),
655 TeResult(test, result, stdout) => {
// Log to the logfile (if any) and to the console, then tally.
656 try!(st.write_log(&test, &result));
657 try!(st.write_result(&result));
659 TrOk => st.passed += 1,
660 TrIgnored => st.ignored += 1,
// Metric results: fold each named metric into the run-wide map,
// namespaced by the test's name.
662 let tname = test.name;
663 let MetricMap(mm) = mm;
666 .insert_metric(&format!("{}.{}",
// Bench results: record median and max-min spread as a metric.
675 st.metrics.insert_metric(test.name.as_slice(),
676 bs.ns_iter_summ.median,
677 bs.ns_iter_summ.max - bs.ns_iter_summ.min);
// Failures keep their captured stdout for the final report.
682 st.failures.push((test, stdout));
690 let mut st = try!(ConsoleTestState::new(opts, None::<io::Stdout>));
// Column width for name alignment: longest name among padded tests.
691 fn len_if_padded(t: &TestDescAndFn) -> usize {
692 match t.testfn.padding() {
694 PadOnLeft | PadOnRight => t.desc.name.as_slice().len(),
697 match tests.iter().max_by(|t|len_if_padded(*t)) {
699 let n = t.desc.name.as_slice();
700 st.max_name_len = n.len();
704 try!(run_tests(opts, tests, |x| callback(&x, &mut st)));
705 return st.write_run_finish();
709 fn should_sort_failures_before_printing_them() {
710 let test_a = TestDesc {
711 name: StaticTestName("a"),
713 should_panic: ShouldPanic::No
716 let test_b = TestDesc {
717 name: StaticTestName("b"),
719 should_panic: ShouldPanic::No
722 let mut st = ConsoleTestState {
724 out: Raw(Vec::new()),
732 metrics: MetricMap::new(),
733 failures: vec!((test_b, Vec::new()), (test_a, Vec::new()))
736 st.write_failures().unwrap();
737 let s = match st.out {
738 Raw(ref m) => String::from_utf8_lossy(&m[..]),
739 Pretty(_) => unreachable!()
742 let apos = s.find("a").unwrap();
743 let bpos = s.find("b").unwrap();
744 assert!(apos < bpos);
747 fn use_color(opts: &TestOpts) -> bool {
749 AutoColor => get_concurrency() == 1 && stdout_isatty(),
756 fn stdout_isatty() -> bool {
757 unsafe { libc::isatty(libc::STDOUT_FILENO) != 0 }
760 fn stdout_isatty() -> bool {
761 const STD_OUTPUT_HANDLE: libc::DWORD = -11;
763 fn GetStdHandle(which: libc::DWORD) -> libc::HANDLE;
764 fn GetConsoleMode(hConsoleHandle: libc::HANDLE,
765 lpMode: libc::LPDWORD) -> libc::BOOL;
768 let handle = GetStdHandle(STD_OUTPUT_HANDLE);
770 GetConsoleMode(handle, &mut out) != 0
776 TeFiltered(Vec<TestDesc> ),
777 TeWait(TestDesc, NamePadding),
778 TeResult(TestDesc, TestResult, Vec<u8> ),
781 pub type MonitorMsg = (TestDesc, TestResult, Vec<u8> );
// Core scheduler: filter the tests, run plain tests with up to
// `get_concurrency()` in flight at once (results collected over an mpsc
// channel), then run benchmarks and metric fns serially at the end.
// Every lifecycle step is reported to `callback` as a `TestEvent`.
784 fn run_tests<F>(opts: &TestOpts,
785 tests: Vec<TestDescAndFn> ,
786 mut callback: F) -> io::Result<()> where
787 F: FnMut(TestEvent) -> io::Result<()>,
789 let filtered_tests = filter_tests(opts, tests);
790 let filtered_descs = filtered_tests.iter()
791 .map(|t| t.desc.clone())
// Announce the post-filter test list before anything runs.
794 try!(callback(TeFiltered(filtered_descs)));
// Split plain tests (run concurrently) from benches/metrics (run last,
// serially).
796 let (filtered_tests, filtered_benchs_and_metrics): (Vec<_>, _) =
797 filtered_tests.into_iter().partition(|e| {
799 StaticTestFn(_) | DynTestFn(_) => true,
804 // It's tempting to just spawn all the tests at once, but since we have
805 // many tests that run in other processes we would be making a big mess.
806 let concurrency = get_concurrency();
808 let mut remaining = filtered_tests;
812 let (tx, rx) = channel::<MonitorMsg>();
// Keep the pipeline full: top up to `concurrency` in-flight tests, then
// block on one result at a time.
814 while pending > 0 || !remaining.is_empty() {
815 while pending < concurrency && !remaining.is_empty() {
816 let test = remaining.pop().unwrap();
817 if concurrency == 1 {
818 // We are doing one test at a time so we can print the name
819 // of the test before we run it. Useful for debugging tests
820 // that hang forever.
821 try!(callback(TeWait(test.desc.clone(), test.testfn.padding())));
// `!opts.run_tests` forces tests into the ignored path when only
// benchmarks were requested.
823 run_test(opts, !opts.run_tests, test, tx.clone());
827 let (desc, result, stdout) = rx.recv().unwrap();
// With concurrency, names are interleaved, so announce on completion.
828 if concurrency != 1 {
829 try!(callback(TeWait(desc.clone(), PadNone)));
831 try!(callback(TeResult(desc, result, stdout)));
835 // All benchmarks run at the end, in serial.
836 // (this includes metric fns)
837 for b in filtered_benchs_and_metrics {
838 try!(callback(TeWait(b.desc.clone(), b.testfn.padding())));
839 run_test(opts, !opts.run_benchmarks, b, tx.clone());
840 let (test, result, stdout) = rx.recv().unwrap();
841 try!(callback(TeResult(test, result, stdout)));
// Number of tests to run in parallel. RUST_TEST_THREADS, when set, must
// parse as a positive integer and overrides the default (the default
// computation is in elided, gap-sampled code).
847 fn get_concurrency() -> usize {
848 match env::var("RUST_TEST_THREADS") {
850 let opt_n: Option<usize> = s.parse().ok();
852 Some(n) if n > 0 => n,
// Zero or non-numeric values are a hard error, not silently ignored.
853 _ => panic!("RUST_TEST_THREADS is `{}`, should be a positive integer.", s)
857 if std::rt::util::limit_thread_creation_due_to_osx_and_valgrind() {
// Apply the user's filter/ignored options to the test list and return it
// sorted alphabetically by test name.
866 pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
867 let mut filtered = tests;
869 // Remove tests that don't match the test filter
870 filtered = match opts.filter {
872 Some(ref filter) => {
// Note: plain substring match on the test name, not a regex.
873 filtered.into_iter().filter(|test| {
874 test.desc.name.as_slice().contains(&filter[..])
879 // Maybe pull out the ignored tests and unignore them
880 filtered = if !opts.run_ignored {
// With --ignored: keep only tests marked `ignore`, flipping the flag
// off so they actually run.
883 fn filter(test: TestDescAndFn) -> Option<TestDescAndFn> {
884 if test.desc.ignore {
885 let TestDescAndFn {desc, testfn} = test;
887 desc: TestDesc {ignore: false, ..desc},
894 filtered.into_iter().filter_map(|x| filter(x)).collect()
897 // Sort the tests alphabetically
898 filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));
// Run a single test/bench/metric function and report its outcome over
// `monitor_ch` as a (desc, result, captured-stdout) triple.
903 pub fn run_test(opts: &TestOpts,
906 monitor_ch: Sender<MonitorMsg>) {
908 let TestDescAndFn {desc, testfn} = test;
// Ignored tests (or tests suppressed by the caller) are reported
// immediately without running anything.
910 if force_ignore || desc.ignore {
911 monitor_ch.send((desc, TrIgnored, Vec::new())).unwrap();
// Spawn the test body in its own thread so a panic is isolated and the
// result can be derived from the join handle.
915 fn run_test_inner(desc: TestDesc,
916 monitor_ch: Sender<MonitorMsg>,
918 testfn: Thunk<'static>) {
// A Write sink that appends into a shared, mutex-guarded byte buffer;
// used to capture the test's stdout/panic output.
919 struct Sink(Arc<Mutex<Vec<u8>>>);
920 impl Write for Sink {
921 fn write(&mut self, data: &[u8]) -> io::Result<usize> {
922 Write::write(&mut *self.0.lock().unwrap(), data)
924 fn flush(&mut self) -> io::Result<()> { Ok(()) }
927 thread::spawn(move || {
928 let data = Arc::new(Mutex::new(Vec::new()));
929 let data2 = data.clone();
// Name the spawned thread after the test so panic messages identify it.
930 let cfg = thread::Builder::new().name(match desc.name {
931 DynTestName(ref name) => name.clone().to_string(),
932 StaticTestName(name) => name.to_string(),
935 let result_guard = cfg.spawn(move || {
// Redirect print/panic output into the capture buffer (elided code
// presumably skips this when `nocapture` is set — gap-sampled here).
937 io::set_print(box Sink(data2.clone()));
938 io::set_panic(box Sink(data2));
// The join result (Ok vs panic payload) decides pass/fail via
// `calc_result`, honoring `should_panic`.
942 let test_result = calc_result(&desc, result_guard.join());
943 let stdout = data.lock().unwrap().to_vec();
944 monitor_ch.send((desc.clone(), test_result, stdout)).unwrap();
// Benchmarks and metric fns run inline (no capture thread) and report
// TrBench / TrMetrics directly.
949 DynBenchFn(bencher) => {
950 let bs = ::bench::benchmark(|harness| bencher.run(harness));
951 monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
954 StaticBenchFn(benchfn) => {
955 let bs = ::bench::benchmark(|harness| (benchfn.clone())(harness));
956 monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
960 let mut mm = MetricMap::new();
962 monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
965 StaticMetricFn(f) => {
966 let mut mm = MetricMap::new();
968 monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
// Plain tests go through the capture/isolation path above; static fns
// are wrapped into a Thunk first.
971 DynTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture, f),
972 StaticTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture,
973 Thunk::new(move|| f()))
// Turn the joined thread outcome into a TestResult, honoring
// `should_panic`: a clean return passes unless a panic was required; a
// panic passes when one was expected, and — when an expected message was
// given — only if the panic payload (String or &'static str) contains it.
977 fn calc_result(desc: &TestDesc, task_result: Result<(), Box<Any+Send>>) -> TestResult {
978 match (&desc.should_panic, task_result) {
979 (&ShouldPanic::No, Ok(())) |
980 (&ShouldPanic::Yes(None), Err(_)) => TrOk,
981 (&ShouldPanic::Yes(Some(msg)), Err(ref err))
// Panic payloads are `Any`; try String first, then &'static str.
982 if err.downcast_ref::<String>()
984 .or_else(|| err.downcast_ref::<&'static str>().map(|e| *e))
// Substring containment, matching #[should_panic(expected = "...")].
985 .map(|e| e.contains(msg))
986 .unwrap_or(false) => TrOk,
// Construct an empty metric map (BTreeMap keeps metrics name-sorted).
993 pub fn new() -> MetricMap {
994 MetricMap(BTreeMap::new())
997 /// Insert a named `value` (+/- `noise`) metric into the map. The value
998 /// must be non-negative. The `noise` indicates the uncertainty of the
999 /// metric, which doubles as the "noise range" of acceptable
1000 /// pairwise-regressions on this named value, when comparing from one
1001 /// metric to the next using `compare_to_old`.
1003 /// If `noise` is positive, then it means this metric is of a value
1004 /// you want to see grow smaller, so a change larger than `noise` in the
1005 /// positive direction represents a regression.
1007 /// If `noise` is negative, then it means this metric is of a value
1008 /// you want to see grow larger, so a change larger than `noise` in the
1009 /// negative direction represents a regression.
1010 pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
// Re-inserting an existing name overwrites the previous entry.
1015 let MetricMap(ref mut map) = *self;
1016 map.insert(name.to_string(), m);
// Render all metrics as "name: value (+/- noise)"; name order is the
// BTreeMap's sorted key order.
1019 pub fn fmt_metrics(&self) -> String {
1020 let MetricMap(ref mm) = *self;
1021 let v : Vec<String> = mm.iter()
1022 .map(|(k,v)| format!("{}: {} (+/- {})", *k,
1032 /// A function that is opaque to the optimizer, to allow benchmarks to
1033 /// pretend to use outputs to assist in avoiding dead-code
1036 /// This function is a no-op, and does not even read from `dummy`.
// Identity function that is opaque to the optimizer: the empty asm block
// claims to read `&dummy`, so LLVM cannot const-fold or dead-code-eliminate
// the computation that produced it. No code is emitted and `dummy` is
// returned unchanged.
1037 pub fn black_box<T>(dummy: T) -> T {
1038 // we need to "use" the argument in some way LLVM can't
1040 unsafe {asm!("" : : "r"(&dummy))}
1046 /// Callback for benchmark functions to run in their body.
1047 pub fn iter<T, F>(&mut self, mut inner: F) where F: FnMut() -> T {
1048 self.dur = Duration::span(|| {
1049 let k = self.iterations;
1056 pub fn ns_elapsed(&mut self) -> u64 {
1057 self.dur.num_nanoseconds().unwrap() as u64
1060 pub fn ns_per_iter(&mut self) -> u64 {
1061 if self.iterations == 0 {
1064 self.ns_elapsed() / cmp::max(self.iterations, 1)
1068 pub fn bench_n<F>(&mut self, n: u64, f: F) where F: FnOnce(&mut Bencher) {
1069 self.iterations = n;
1073 // This is a more statistics-driven benchmark algorithm
1074 pub fn auto_bench<F>(&mut self, mut f: F) -> stats::Summary<f64> where F: FnMut(&mut Bencher) {
1075 // Initial bench run to get ballpark figure.
1077 self.bench_n(n, |x| f(x));
1079 // Try to estimate iter count for 1ms falling back to 1m
1080 // iterations if first run took < 1ns.
1081 if self.ns_per_iter() == 0 {
1084 n = 1_000_000 / cmp::max(self.ns_per_iter(), 1);
1086 // if the first run took more than 1ms we don't want to just
1087 // be left doing 0 iterations on every loop. The unfortunate
1088 // side effect of not being able to do as many runs is
1089 // automatically handled by the statistical analysis below
1090 // (i.e. larger error bars).
1091 if n == 0 { n = 1; }
1093 let mut total_run = Duration::nanoseconds(0);
1094 let samples : &mut [f64] = &mut [0.0_f64; 50];
1096 let mut summ = None;
1097 let mut summ5 = None;
1099 let loop_run = Duration::span(|| {
1101 for p in &mut *samples {
1102 self.bench_n(n, |x| f(x));
1103 *p = self.ns_per_iter() as f64;
1106 stats::winsorize(samples, 5.0);
1107 summ = Some(stats::Summary::new(samples));
1109 for p in &mut *samples {
1110 self.bench_n(5 * n, |x| f(x));
1111 *p = self.ns_per_iter() as f64;
1114 stats::winsorize(samples, 5.0);
1115 summ5 = Some(stats::Summary::new(samples));
1117 let summ = summ.unwrap();
1118 let summ5 = summ5.unwrap();
1120 // If we've run for 100ms and seem to have converged to a
1122 if loop_run.num_milliseconds() > 100 &&
1123 summ.median_abs_dev_pct < 1.0 &&
1124 summ.median - summ5.median < summ5.median_abs_dev {
1128 total_run = total_run + loop_run;
1129 // Longest we ever run for is 3s.
1130 if total_run.num_seconds() > 3 {
1134 // If we overflow here just return the results so far. We check a
1135 // multiplier of 10 because we're about to multiply by 2 and the
1136 // next iteration of the loop will also multiply by 5 (to calculate
1137 // the summ5 result)
1138 n = match n.checked_mul(10) {
1140 None => return summ5,
1148 use std::time::Duration;
1149 use super::{Bencher, BenchSamples};
// Run a #[bench] body under the adaptive `auto_bench` loop and package
// the timing summary (plus MB/s throughput derived from `bs.bytes`) into
// a BenchSamples.
1151 pub fn benchmark<F>(f: F) -> BenchSamples where F: FnMut(&mut Bencher) {
1152 let mut bs = Bencher {
1154 dur: Duration::nanoseconds(0),
1158 let ns_iter_summ = bs.auto_bench(f);
// Clamp to >= 1 ns so the throughput division below can't divide by zero.
1160 let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
1161 let iter_s = 1_000_000_000 / ns_iter;
// bytes/sec scaled to MB/s; zero when the bench declared no byte count.
1162 let mb_s = (bs.bytes * iter_s) / 1_000_000;
1165 ns_iter_summ: ns_iter_summ,
1173 use test::{TrFailed, TrIgnored, TrOk, filter_tests, parse_opts,
1174 TestDesc, TestDescAndFn, TestOpts, run_test,
1176 StaticTestName, DynTestName, DynTestFn, ShouldPanic};
1177 use std::thunk::Thunk;
1178 use std::sync::mpsc::channel;
1181 pub fn do_not_run_ignored_tests() {
1182 fn f() { panic!(); }
1183 let desc = TestDescAndFn {
1185 name: StaticTestName("whatever"),
1187 should_panic: ShouldPanic::No,
1189 testfn: DynTestFn(Thunk::new(move|| f())),
1191 let (tx, rx) = channel();
1192 run_test(&TestOpts::new(), false, desc, tx);
1193 let (_, res, _) = rx.recv().unwrap();
1194 assert!(res != TrOk);
1198 pub fn ignored_tests_result_in_ignored() {
1200 let desc = TestDescAndFn {
1202 name: StaticTestName("whatever"),
1204 should_panic: ShouldPanic::No,
1206 testfn: DynTestFn(Thunk::new(move|| f())),
1208 let (tx, rx) = channel();
1209 run_test(&TestOpts::new(), false, desc, tx);
1210 let (_, res, _) = rx.recv().unwrap();
1211 assert!(res == TrIgnored);
1215 fn test_should_panic() {
1216 fn f() { panic!(); }
1217 let desc = TestDescAndFn {
1219 name: StaticTestName("whatever"),
1221 should_panic: ShouldPanic::Yes(None)
1223 testfn: DynTestFn(Thunk::new(move|| f())),
1225 let (tx, rx) = channel();
1226 run_test(&TestOpts::new(), false, desc, tx);
1227 let (_, res, _) = rx.recv().unwrap();
1228 assert!(res == TrOk);
1232 fn test_should_panic_good_message() {
1233 fn f() { panic!("an error message"); }
1234 let desc = TestDescAndFn {
1236 name: StaticTestName("whatever"),
1238 should_panic: ShouldPanic::Yes(Some("error message"))
1240 testfn: DynTestFn(Thunk::new(move|| f())),
1242 let (tx, rx) = channel();
1243 run_test(&TestOpts::new(), false, desc, tx);
1244 let (_, res, _) = rx.recv().unwrap();
1245 assert!(res == TrOk);
1249 fn test_should_panic_bad_message() {
1250 fn f() { panic!("an error message"); }
1251 let desc = TestDescAndFn {
1253 name: StaticTestName("whatever"),
1255 should_panic: ShouldPanic::Yes(Some("foobar"))
1257 testfn: DynTestFn(Thunk::new(move|| f())),
1259 let (tx, rx) = channel();
1260 run_test(&TestOpts::new(), false, desc, tx);
1261 let (_, res, _) = rx.recv().unwrap();
1262 assert!(res == TrFailed);
1266 fn test_should_panic_but_succeeds() {
1268 let desc = TestDescAndFn {
1270 name: StaticTestName("whatever"),
1272 should_panic: ShouldPanic::Yes(None)
1274 testfn: DynTestFn(Thunk::new(move|| f())),
1276 let (tx, rx) = channel();
1277 run_test(&TestOpts::new(), false, desc, tx);
1278 let (_, res, _) = rx.recv().unwrap();
1279 assert!(res == TrFailed);
1283 fn parse_ignored_flag() {
1284 let args = vec!("progname".to_string(),
1285 "filter".to_string(),
1286 "--ignored".to_string());
1287 let opts = match parse_opts(&args) {
1289 _ => panic!("Malformed arg in parse_ignored_flag")
1291 assert!((opts.run_ignored));
1295 pub fn filter_for_ignored_option() {
1296 // When we run ignored tests the test filter should filter out all the
1297 // unignored tests and flip the ignore flag on the rest to false
1299 let mut opts = TestOpts::new();
1300 opts.run_tests = true;
1301 opts.run_ignored = true;
1306 name: StaticTestName("1"),
1308 should_panic: ShouldPanic::No,
1310 testfn: DynTestFn(Thunk::new(move|| {})),
1314 name: StaticTestName("2"),
1316 should_panic: ShouldPanic::No,
1318 testfn: DynTestFn(Thunk::new(move|| {})),
1320 let filtered = filter_tests(&opts, tests);
1322 assert_eq!(filtered.len(), 1);
1323 assert_eq!(filtered[0].desc.name.to_string(),
1325 assert!(filtered[0].desc.ignore == false);
1329 pub fn sort_tests() {
1330 let mut opts = TestOpts::new();
1331 opts.run_tests = true;
1334 vec!("sha1::test".to_string(),
1335 "isize::test_to_str".to_string(),
1336 "isize::test_pow".to_string(),
1337 "test::do_not_run_ignored_tests".to_string(),
1338 "test::ignored_tests_result_in_ignored".to_string(),
1339 "test::first_free_arg_should_be_a_filter".to_string(),
1340 "test::parse_ignored_flag".to_string(),
1341 "test::filter_for_ignored_option".to_string(),
1342 "test::sort_tests".to_string());
1346 let mut tests = Vec::new();
1347 for name in &names {
1348 let test = TestDescAndFn {
1350 name: DynTestName((*name).clone()),
1352 should_panic: ShouldPanic::No,
1354 testfn: DynTestFn(Thunk::new(testfn)),
1360 let filtered = filter_tests(&opts, tests);
1363 vec!("isize::test_pow".to_string(),
1364 "isize::test_to_str".to_string(),
1365 "sha1::test".to_string(),
1366 "test::do_not_run_ignored_tests".to_string(),
1367 "test::filter_for_ignored_option".to_string(),
1368 "test::first_free_arg_should_be_a_filter".to_string(),
1369 "test::ignored_tests_result_in_ignored".to_string(),
1370 "test::parse_ignored_flag".to_string(),
1371 "test::sort_tests".to_string());
1373 for (a, b) in expected.iter().zip(filtered.iter()) {
1374 assert!(*a == b.desc.name.to_string());
1379 pub fn test_metricmap_compare() {
1380 let mut m1 = MetricMap::new();
1381 let mut m2 = MetricMap::new();
1382 m1.insert_metric("in-both-noise", 1000.0, 200.0);
1383 m2.insert_metric("in-both-noise", 1100.0, 200.0);
1385 m1.insert_metric("in-first-noise", 1000.0, 2.0);
1386 m2.insert_metric("in-second-noise", 1000.0, 2.0);
1388 m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
1389 m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);
1391 m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
1392 m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);
1394 m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
1395 m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);
1397 m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
1398 m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);