1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 //! Support code for rustc's built-in unit-test and micro-benchmarking framework.
14 //! Almost all user code will only be interested in `Bencher` and
15 //! `black_box`. All other interactions (such as writing tests and
16 //! benchmarks themselves) should be done via the `#[test]` and
17 //! `#[bench]` attributes.
19 //! See the [Testing Chapter](../book/testing.html) of the book for more details.
21 // Currently, not much of this is meant for users. It is intended to
22 // support the simplest interface possible for representing and
23 // running tests while providing a base that other test frameworks may build on.
26 // Do not remove on snapshot creation. Needed for bootstrap. (Issue #22364)
27 #![cfg_attr(stage0, feature(custom_attribute))]
28 #![crate_name = "test"]
29 #![unstable(feature = "test")]
31 #![crate_type = "rlib"]
32 #![crate_type = "dylib"]
33 #![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
34 html_favicon_url = "https://doc.rust-lang.org/favicon.ico",
35 html_root_url = "http://doc.rust-lang.org/nightly/")]
38 #![feature(box_syntax)]
40 #![feature(duration_span)]
45 #![feature(rustc_private)]
46 #![feature(set_stdio)]
47 #![feature(slice_extras)]
48 #![feature(staged_api)]
51 extern crate serialize;
52 extern crate serialize as rustc_serialize;
56 pub use self::TestFn::*;
57 pub use self::ColorConfig::*;
58 pub use self::TestResult::*;
59 pub use self::TestName::*;
60 use self::TestEvent::*;
61 use self::NamePadding::*;
62 use self::OutputLocation::*;
65 use getopts::{OptGroup, optflag, optopt};
66 use serialize::Encodable;
67 use std::boxed::FnBox;
69 use term::color::{Color, RED, YELLOW, GREEN, CYAN};
73 use std::collections::BTreeMap;
77 use std::io::prelude::*;
79 use std::iter::repeat;
80 use std::path::PathBuf;
81 use std::sync::mpsc::{channel, Sender};
82 use std::sync::{Arc, Mutex};
84 use std::time::Duration;
86 // to be used by rustc to compile tests in libtest
88 pub use {Bencher, TestName, TestResult, TestDesc,
89 TestDescAndFn, TestOpts, TrFailed, TrIgnored, TrOk,
91 StaticTestFn, StaticTestName, DynTestName, DynTestFn,
92 run_test, test_main, test_main_static, filter_tests,
93 parse_opts, StaticBenchFn, ShouldPanic};
98 // The name of a test. By convention this follows the rules for rust
99 // paths; i.e. it should be a series of identifiers separated by double
100 // colons. This way if some test runner wants to arrange the tests
101 // hierarchically it may.
103 #[derive(Clone, PartialEq, Eq, Hash, Debug)]
105 StaticTestName(&'static str),
109 fn as_slice<'a>(&'a self) -> &'a str {
111 StaticTestName(s) => s,
112 DynTestName(ref s) => s
116 impl fmt::Display for TestName {
117 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
118 fmt::Display::fmt(self.as_slice(), f)
122 #[derive(Clone, Copy)]
129 fn padded_name(&self, column_count: usize, align: NamePadding) -> String {
130 let mut name = String::from(self.name.as_slice());
131 let fill = column_count.saturating_sub(name.len());
132 let pad = repeat(" ").take(fill).collect::<String>();
143 /// Represents a benchmark function.
144 pub trait TDynBenchFn: Send {
145 fn run(&self, harness: &mut Bencher);
148 // A function that runs a test. If the function returns successfully,
149 // the test succeeds; if the function panics then the test fails. We
150 // may need to come up with a more clever definition of test in order
151 // to support isolation of tests into threads.
154 StaticBenchFn(fn(&mut Bencher)),
155 StaticMetricFn(fn(&mut MetricMap)),
156 DynTestFn(Box<FnBox() + Send>),
157 DynMetricFn(Box<FnBox(&mut MetricMap)+Send>),
158 DynBenchFn(Box<TDynBenchFn+'static>)
162 fn padding(&self) -> NamePadding {
164 &StaticTestFn(..) => PadNone,
165 &StaticBenchFn(..) => PadOnRight,
166 &StaticMetricFn(..) => PadOnRight,
167 &DynTestFn(..) => PadNone,
168 &DynMetricFn(..) => PadOnRight,
169 &DynBenchFn(..) => PadOnRight,
174 impl fmt::Debug for TestFn {
175 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
176 f.write_str(match *self {
177 StaticTestFn(..) => "StaticTestFn(..)",
178 StaticBenchFn(..) => "StaticBenchFn(..)",
179 StaticMetricFn(..) => "StaticMetricFn(..)",
180 DynTestFn(..) => "DynTestFn(..)",
181 DynMetricFn(..) => "DynMetricFn(..)",
182 DynBenchFn(..) => "DynBenchFn(..)"
187 /// Manager of the benchmarking runs.
189 /// This is fed into functions marked with `#[bench]` to allow for
190 /// set-up & tear-down before running a piece of code repeatedly via a
192 #[derive(Copy, Clone)]
199 #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
200 pub enum ShouldPanic {
202 Yes(Option<&'static str>)
205 // The definition of a single test. A test runner will run a list of
207 #[derive(Clone, Debug, PartialEq, Eq, Hash)]
208 pub struct TestDesc {
211 pub should_panic: ShouldPanic,
214 unsafe impl Send for TestDesc {}
217 pub struct TestDescAndFn {
222 #[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Debug, Copy)]
229 pub fn new(value: f64, noise: f64) -> Metric {
230 Metric {value: value, noise: noise}
235 pub struct MetricMap(BTreeMap<String,Metric>);
237 impl Clone for MetricMap {
238 fn clone(&self) -> MetricMap {
239 let MetricMap(ref map) = *self;
240 MetricMap(map.clone())
244 // The default console test runner. It accepts the command line
245 // arguments and a vector of test_descs.
246 pub fn test_main(args: &[String], tests: Vec<TestDescAndFn> ) {
248 match parse_opts(args) {
250 Some(Err(msg)) => panic!("{:?}", msg),
253 match run_tests_console(&opts, tests) {
255 Ok(false) => panic!("Some tests failed"),
256 Err(e) => panic!("io error when running tests: {:?}", e),
260 // A variant optimized for invocation with a static test vector.
261 // This will panic (intentionally) when fed any dynamic tests, because
262 // it is copying the static values out into a dynamic vector and cannot
263 // copy dynamic values. It is doing this because from this point on
264 // a Vec<TestDescAndFn> is used in order to effect ownership-transfer
265 // semantics into parallel test runners, which in turn requires a Vec<>
266 // rather than a &[].
267 pub fn test_main_static(args: env::Args, tests: &[TestDescAndFn]) {
268 let args = args.collect::<Vec<_>>();
269 let owned_tests = tests.iter().map(|t| {
271 StaticTestFn(f) => TestDescAndFn { testfn: StaticTestFn(f), desc: t.desc.clone() },
272 StaticBenchFn(f) => TestDescAndFn { testfn: StaticBenchFn(f), desc: t.desc.clone() },
273 _ => panic!("non-static tests passed to test::test_main_static")
276 test_main(&args, owned_tests)
279 #[derive(Copy, Clone)]
280 pub enum ColorConfig {
286 pub struct TestOpts {
287 pub filter: Option<String>,
288 pub run_ignored: bool,
290 pub bench_benchmarks: bool,
291 pub logfile: Option<PathBuf>,
293 pub color: ColorConfig,
298 fn new() -> TestOpts {
303 bench_benchmarks: false,
311 /// Result of parsing the options.
312 pub type OptRes = Result<TestOpts, String>;
314 fn optgroups() -> Vec<getopts::OptGroup> {
315 vec!(getopts::optflag("", "ignored", "Run ignored tests"),
316 getopts::optflag("", "test", "Run tests and not benchmarks"),
317 getopts::optflag("", "bench", "Run benchmarks instead of tests"),
318 getopts::optflag("h", "help", "Display this message (longer with --help)"),
319 getopts::optopt("", "logfile", "Write logs to the specified file instead \
321 getopts::optflag("", "nocapture", "don't capture stdout/stderr of each \
322 task, allow printing directly"),
323 getopts::optopt("", "color", "Configure coloring of output:
324 auto = colorize if stdout is a tty and tests are run on serially (default);
325 always = always colorize output;
326 never = never colorize output;", "auto|always|never"))
329 fn usage(binary: &str) {
330 let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
333 The FILTER regex is tested against the name of all tests to run, and
334 only those tests that match are run.
336 By default, all tests are run in parallel. This can be altered with the
337 RUST_TEST_THREADS environment variable when running tests (set it to 1).
339 All tests have their standard output and standard error captured by default.
340 This can be overridden with the --nocapture flag or the RUST_TEST_NOCAPTURE=1
341 environment variable. Logging is not captured by default.
345 #[test] - Indicates a function is a test to be run. This function
347 #[bench] - Indicates a function is a benchmark to be run. This
348 function takes one argument (test::Bencher).
349 #[should_panic] - This function (also labeled with #[test]) will only pass if
350 the code causes a panic (an assertion failure or panic!)
351 A message may be provided, which the failure string must
352 contain: #[should_panic(expected = "foo")].
353 #[ignore] - When applied to a function which is already attributed as a
354 test, then the test runner will ignore these tests during
355 normal test runs. Running with --ignored will run these
357 usage = getopts::usage(&message, &optgroups()));
360 // Parses command line arguments into test options
361 pub fn parse_opts(args: &[String]) -> Option<OptRes> {
362 let args_ = args.tail();
364 match getopts::getopts(args_, &optgroups()) {
366 Err(f) => return Some(Err(f.to_string()))
369 if matches.opt_present("h") { usage(&args[0]); return None; }
371 let filter = if !matches.free.is_empty() {
372 Some(matches.free[0].clone())
377 let run_ignored = matches.opt_present("ignored");
379 let logfile = matches.opt_str("logfile");
380 let logfile = logfile.map(|s| PathBuf::from(&s));
382 let bench_benchmarks = matches.opt_present("bench");
383 let run_tests = ! bench_benchmarks ||
384 matches.opt_present("test");
386 let mut nocapture = matches.opt_present("nocapture");
388 nocapture = env::var("RUST_TEST_NOCAPTURE").is_ok();
391 let color = match matches.opt_str("color").as_ref().map(|s| &**s) {
392 Some("auto") | None => AutoColor,
393 Some("always") => AlwaysColor,
394 Some("never") => NeverColor,
396 Some(v) => return Some(Err(format!("argument for --color must be \
397 auto, always, or never (was {})",
401 let test_opts = TestOpts {
403 run_ignored: run_ignored,
404 run_tests: run_tests,
405 bench_benchmarks: bench_benchmarks,
407 nocapture: nocapture,
414 #[derive(Clone, PartialEq)]
415 pub struct BenchSamples {
416 ns_iter_summ: stats::Summary,
420 #[derive(Clone, PartialEq)]
421 pub enum TestResult {
425 TrMetrics(MetricMap),
426 TrBench(BenchSamples),
429 unsafe impl Send for TestResult {}
431 enum OutputLocation<T> {
432 Pretty(Box<term::Terminal<term::WriterWrapper> + Send>),
436 struct ConsoleTestState<T> {
437 log_out: Option<File>,
438 out: OutputLocation<T>,
446 failures: Vec<(TestDesc, Vec<u8> )> ,
447 max_name_len: usize, // number of columns to fill when aligning names
450 impl<T: Write> ConsoleTestState<T> {
451 pub fn new(opts: &TestOpts,
452 _: Option<T>) -> io::Result<ConsoleTestState<io::Stdout>> {
453 let log_out = match opts.logfile {
454 Some(ref path) => Some(try!(File::create(path))),
457 let out = match term::stdout() {
458 None => Raw(io::stdout()),
462 Ok(ConsoleTestState {
465 use_color: use_color(opts),
471 metrics: MetricMap::new(),
472 failures: Vec::new(),
477 pub fn write_ok(&mut self) -> io::Result<()> {
478 self.write_pretty("ok", term::color::GREEN)
481 pub fn write_failed(&mut self) -> io::Result<()> {
482 self.write_pretty("FAILED", term::color::RED)
485 pub fn write_ignored(&mut self) -> io::Result<()> {
486 self.write_pretty("ignored", term::color::YELLOW)
489 pub fn write_metric(&mut self) -> io::Result<()> {
490 self.write_pretty("metric", term::color::CYAN)
493 pub fn write_bench(&mut self) -> io::Result<()> {
494 self.write_pretty("bench", term::color::CYAN)
497 pub fn write_pretty(&mut self,
499 color: term::color::Color) -> io::Result<()> {
501 Pretty(ref mut term) => {
503 try!(term.fg(color));
505 try!(term.write_all(word.as_bytes()));
511 Raw(ref mut stdout) => {
512 try!(stdout.write_all(word.as_bytes()));
518 pub fn write_plain(&mut self, s: &str) -> io::Result<()> {
520 Pretty(ref mut term) => {
521 try!(term.write_all(s.as_bytes()));
524 Raw(ref mut stdout) => {
525 try!(stdout.write_all(s.as_bytes()));
531 pub fn write_run_start(&mut self, len: usize) -> io::Result<()> {
533 let noun = if len != 1 { "tests" } else { "test" };
534 self.write_plain(&format!("\nrunning {} {}\n", len, noun))
537 pub fn write_test_start(&mut self, test: &TestDesc,
538 align: NamePadding) -> io::Result<()> {
539 let name = test.padded_name(self.max_name_len, align);
540 self.write_plain(&format!("test {} ... ", name))
543 pub fn write_result(&mut self, result: &TestResult) -> io::Result<()> {
545 TrOk => self.write_ok(),
546 TrFailed => self.write_failed(),
547 TrIgnored => self.write_ignored(),
548 TrMetrics(ref mm) => {
549 try!(self.write_metric());
550 self.write_plain(&format!(": {}", mm.fmt_metrics()))
553 try!(self.write_bench());
555 try!(self.write_plain(&format!(": {}", fmt_bench_samples(bs))));
560 self.write_plain("\n")
563 pub fn write_log(&mut self, test: &TestDesc,
564 result: &TestResult) -> io::Result<()> {
568 let s = format!("{} {}\n", match *result {
569 TrOk => "ok".to_string(),
570 TrFailed => "failed".to_string(),
571 TrIgnored => "ignored".to_string(),
572 TrMetrics(ref mm) => mm.fmt_metrics(),
573 TrBench(ref bs) => fmt_bench_samples(bs)
575 o.write_all(s.as_bytes())
580 pub fn write_failures(&mut self) -> io::Result<()> {
581 try!(self.write_plain("\nfailures:\n"));
582 let mut failures = Vec::new();
583 let mut fail_out = String::new();
584 for &(ref f, ref stdout) in &self.failures {
585 failures.push(f.name.to_string());
586 if !stdout.is_empty() {
587 fail_out.push_str(&format!("---- {} stdout ----\n\t", f.name));
588 let output = String::from_utf8_lossy(stdout);
589 fail_out.push_str(&output);
590 fail_out.push_str("\n");
593 if !fail_out.is_empty() {
594 try!(self.write_plain("\n"));
595 try!(self.write_plain(&fail_out));
598 try!(self.write_plain("\nfailures:\n"));
600 for name in &failures {
601 try!(self.write_plain(&format!(" {}\n", name)));
606 pub fn write_run_finish(&mut self) -> io::Result<bool> {
607 assert!(self.passed + self.failed + self.ignored + self.measured == self.total);
609 let success = self.failed == 0;
611 try!(self.write_failures());
614 try!(self.write_plain("\ntest result: "));
616 // There's no parallelism at this point so it's safe to use color
617 try!(self.write_ok());
619 try!(self.write_failed());
621 let s = format!(". {} passed; {} failed; {} ignored; {} measured\n\n",
622 self.passed, self.failed, self.ignored, self.measured);
623 try!(self.write_plain(&s));
628 // Format a number with thousands separators
629 fn fmt_thousands_sep(mut n: usize, sep: char) -> String {
631 let mut output = String::new();
632 let mut trailing = false;
633 for &pow in &[9, 6, 3, 0] {
634 let base = 10_usize.pow(pow);
635 if pow == 0 || trailing || n / base != 0 {
637 output.write_fmt(format_args!("{}", n / base)).unwrap();
639 output.write_fmt(format_args!("{:03}", n / base)).unwrap();
652 pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
654 let mut output = String::new();
656 let median = bs.ns_iter_summ.median as usize;
657 let deviation = (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize;
659 output.write_fmt(format_args!("{:>11} ns/iter (+/- {})",
660 fmt_thousands_sep(median, ','),
661 fmt_thousands_sep(deviation, ','))).unwrap();
663 output.write_fmt(format_args!(" = {} MB/s", bs.mb_s)).unwrap();
668 // A simple console test runner
669 pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn> ) -> io::Result<bool> {
671 fn callback<T: Write>(event: &TestEvent,
672 st: &mut ConsoleTestState<T>) -> io::Result<()> {
673 match (*event).clone() {
674 TeFiltered(ref filtered_tests) => st.write_run_start(filtered_tests.len()),
675 TeWait(ref test, padding) => st.write_test_start(test, padding),
676 TeResult(test, result, stdout) => {
677 try!(st.write_log(&test, &result));
678 try!(st.write_result(&result));
680 TrOk => st.passed += 1,
681 TrIgnored => st.ignored += 1,
683 let tname = test.name;
684 let MetricMap(mm) = mm;
687 .insert_metric(&format!("{}.{}",
696 st.metrics.insert_metric(test.name.as_slice(),
697 bs.ns_iter_summ.median,
698 bs.ns_iter_summ.max - bs.ns_iter_summ.min);
703 st.failures.push((test, stdout));
711 let mut st = try!(ConsoleTestState::new(opts, None::<io::Stdout>));
712 fn len_if_padded(t: &TestDescAndFn) -> usize {
713 match t.testfn.padding() {
715 PadOnRight => t.desc.name.as_slice().len(),
718 match tests.iter().max_by(|t|len_if_padded(*t)) {
720 let n = t.desc.name.as_slice();
721 st.max_name_len = n.len();
725 try!(run_tests(opts, tests, |x| callback(&x, &mut st)));
726 return st.write_run_finish();
730 fn should_sort_failures_before_printing_them() {
731 let test_a = TestDesc {
732 name: StaticTestName("a"),
734 should_panic: ShouldPanic::No
737 let test_b = TestDesc {
738 name: StaticTestName("b"),
740 should_panic: ShouldPanic::No
743 let mut st = ConsoleTestState {
745 out: Raw(Vec::new()),
753 metrics: MetricMap::new(),
754 failures: vec!((test_b, Vec::new()), (test_a, Vec::new()))
757 st.write_failures().unwrap();
758 let s = match st.out {
759 Raw(ref m) => String::from_utf8_lossy(&m[..]),
760 Pretty(_) => unreachable!()
763 let apos = s.find("a").unwrap();
764 let bpos = s.find("b").unwrap();
765 assert!(apos < bpos);
768 fn use_color(opts: &TestOpts) -> bool {
770 AutoColor => !opts.nocapture && stdout_isatty(),
777 fn stdout_isatty() -> bool {
778 unsafe { libc::isatty(libc::STDOUT_FILENO) != 0 }
781 fn stdout_isatty() -> bool {
782 const STD_OUTPUT_HANDLE: libc::DWORD = -11i32 as libc::DWORD;
784 fn GetStdHandle(which: libc::DWORD) -> libc::HANDLE;
785 fn GetConsoleMode(hConsoleHandle: libc::HANDLE,
786 lpMode: libc::LPDWORD) -> libc::BOOL;
789 let handle = GetStdHandle(STD_OUTPUT_HANDLE);
791 GetConsoleMode(handle, &mut out) != 0
797 TeFiltered(Vec<TestDesc> ),
798 TeWait(TestDesc, NamePadding),
799 TeResult(TestDesc, TestResult, Vec<u8> ),
802 pub type MonitorMsg = (TestDesc, TestResult, Vec<u8> );
805 fn run_tests<F>(opts: &TestOpts,
806 tests: Vec<TestDescAndFn> ,
807 mut callback: F) -> io::Result<()> where
808 F: FnMut(TestEvent) -> io::Result<()>,
810 let mut filtered_tests = filter_tests(opts, tests);
811 if !opts.bench_benchmarks {
812 filtered_tests = convert_benchmarks_to_tests(filtered_tests);
815 let filtered_descs = filtered_tests.iter()
816 .map(|t| t.desc.clone())
819 try!(callback(TeFiltered(filtered_descs)));
821 let (filtered_tests, filtered_benchs_and_metrics): (Vec<_>, _) =
822 filtered_tests.into_iter().partition(|e| {
824 StaticTestFn(_) | DynTestFn(_) => true,
829 // It's tempting to just spawn all the tests at once, but since we have
830 // many tests that run in other processes we would be making a big mess.
831 let concurrency = get_concurrency();
833 let mut remaining = filtered_tests;
837 let (tx, rx) = channel::<MonitorMsg>();
839 while pending > 0 || !remaining.is_empty() {
840 while pending < concurrency && !remaining.is_empty() {
841 let test = remaining.pop().unwrap();
842 if concurrency == 1 {
843 // We are doing one test at a time so we can print the name
844 // of the test before we run it. Useful for debugging tests
845 // that hang forever.
846 try!(callback(TeWait(test.desc.clone(), test.testfn.padding())));
848 run_test(opts, !opts.run_tests, test, tx.clone());
852 let (desc, result, stdout) = rx.recv().unwrap();
853 if concurrency != 1 {
854 try!(callback(TeWait(desc.clone(), PadNone)));
856 try!(callback(TeResult(desc, result, stdout)));
860 if opts.bench_benchmarks {
861 // All benchmarks run at the end, in serial.
862 // (this includes metric fns)
863 for b in filtered_benchs_and_metrics {
864 try!(callback(TeWait(b.desc.clone(), b.testfn.padding())));
865 run_test(opts, false, b, tx.clone());
866 let (test, result, stdout) = rx.recv().unwrap();
867 try!(callback(TeResult(test, result, stdout)));
874 fn get_concurrency() -> usize {
875 return match env::var("RUST_TEST_THREADS") {
877 let opt_n: Option<usize> = s.parse().ok();
879 Some(n) if n > 0 => n,
880 _ => panic!("RUST_TEST_THREADS is `{}`, should be a positive integer.", s)
884 if std::rt::util::limit_thread_creation_due_to_osx_and_valgrind() {
893 fn num_cpus() -> usize {
895 let mut sysinfo = std::mem::zeroed();
896 libc::GetSystemInfo(&mut sysinfo);
897 sysinfo.dwNumberOfProcessors as usize
902 fn num_cpus() -> usize {
903 extern { fn rust_get_num_cpus() -> libc::uintptr_t; }
904 unsafe { rust_get_num_cpus() as usize }
908 pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
909 let mut filtered = tests;
911 // Remove tests that don't match the test filter
912 filtered = match opts.filter {
914 Some(ref filter) => {
915 filtered.into_iter().filter(|test| {
916 test.desc.name.as_slice().contains(&filter[..])
921 // Maybe pull out the ignored test and unignore them
922 filtered = if !opts.run_ignored {
925 fn filter(test: TestDescAndFn) -> Option<TestDescAndFn> {
926 if test.desc.ignore {
927 let TestDescAndFn {desc, testfn} = test;
929 desc: TestDesc {ignore: false, ..desc},
936 filtered.into_iter().filter_map(|x| filter(x)).collect()
939 // Sort the tests alphabetically
940 filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));
945 pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
946 // convert benchmarks to tests, if we're not benchmarking them
947 tests.into_iter().map(|x| {
948 let testfn = match x.testfn {
949 DynBenchFn(bench) => {
950 DynTestFn(Box::new(move || bench::run_once(|b| bench.run(b))))
952 StaticBenchFn(benchfn) => {
953 DynTestFn(Box::new(move || bench::run_once(|b| benchfn(b))))
957 TestDescAndFn { desc: x.desc, testfn: testfn }
961 pub fn run_test(opts: &TestOpts,
964 monitor_ch: Sender<MonitorMsg>) {
966 let TestDescAndFn {desc, testfn} = test;
968 if force_ignore || desc.ignore {
969 monitor_ch.send((desc, TrIgnored, Vec::new())).unwrap();
973 fn run_test_inner(desc: TestDesc,
974 monitor_ch: Sender<MonitorMsg>,
976 testfn: Box<FnBox() + Send>) {
977 struct Sink(Arc<Mutex<Vec<u8>>>);
978 impl Write for Sink {
979 fn write(&mut self, data: &[u8]) -> io::Result<usize> {
980 Write::write(&mut *self.0.lock().unwrap(), data)
982 fn flush(&mut self) -> io::Result<()> { Ok(()) }
985 thread::spawn(move || {
986 let data = Arc::new(Mutex::new(Vec::new()));
987 let data2 = data.clone();
988 let cfg = thread::Builder::new().name(match desc.name {
989 DynTestName(ref name) => name.clone().to_string(),
990 StaticTestName(name) => name.to_string(),
993 let result_guard = cfg.spawn(move || {
995 io::set_print(box Sink(data2.clone()));
996 io::set_panic(box Sink(data2));
1000 let test_result = calc_result(&desc, result_guard.join());
1001 let stdout = data.lock().unwrap().to_vec();
1002 monitor_ch.send((desc.clone(), test_result, stdout)).unwrap();
1007 DynBenchFn(bencher) => {
1008 let bs = ::bench::benchmark(|harness| bencher.run(harness));
1009 monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
1012 StaticBenchFn(benchfn) => {
1013 let bs = ::bench::benchmark(|harness| (benchfn.clone())(harness));
1014 monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
1018 let mut mm = MetricMap::new();
1019 f.call_box((&mut mm,));
1020 monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
1023 StaticMetricFn(f) => {
1024 let mut mm = MetricMap::new();
1026 monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
1029 DynTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture, f),
1030 StaticTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture,
1031 Box::new(move|| f()))
1035 fn calc_result(desc: &TestDesc, task_result: Result<(), Box<Any+Send>>) -> TestResult {
1036 match (&desc.should_panic, task_result) {
1037 (&ShouldPanic::No, Ok(())) |
1038 (&ShouldPanic::Yes(None), Err(_)) => TrOk,
1039 (&ShouldPanic::Yes(Some(msg)), Err(ref err))
1040 if err.downcast_ref::<String>()
1042 .or_else(|| err.downcast_ref::<&'static str>().map(|e| *e))
1043 .map(|e| e.contains(msg))
1044 .unwrap_or(false) => TrOk,
1051 pub fn new() -> MetricMap {
1052 MetricMap(BTreeMap::new())
1055 /// Insert a named `value` (+/- `noise`) metric into the map. The value
1056 /// must be non-negative. The `noise` indicates the uncertainty of the
1057 /// metric, which doubles as the "noise range" of acceptable
1058 /// pairwise-regressions on this named value, when comparing from one
1059 /// metric to the next using `compare_to_old`.
1061 /// If `noise` is positive, then it means this metric is of a value
1062 /// you want to see grow smaller, so a change larger than `noise` in the
1063 /// positive direction represents a regression.
1065 /// If `noise` is negative, then it means this metric is of a value
1066 /// you want to see grow larger, so a change larger than `noise` in the
1067 /// negative direction represents a regression.
1068 pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
1073 let MetricMap(ref mut map) = *self;
1074 map.insert(name.to_string(), m);
1077 pub fn fmt_metrics(&self) -> String {
1078 let MetricMap(ref mm) = *self;
1079 let v : Vec<String> = mm.iter()
1080 .map(|(k,v)| format!("{}: {} (+/- {})", *k,
1090 /// A function that is opaque to the optimizer, to allow benchmarks to
1091 /// pretend to use outputs to assist in avoiding dead-code
1094 /// This function is a no-op, and does not even read from `dummy`.
1095 pub fn black_box<T>(dummy: T) -> T {
1096 // we need to "use" the argument in some way LLVM can't
1098 unsafe {asm!("" : : "r"(&dummy))}
1104 /// Callback for benchmark functions to run in their body.
1105 pub fn iter<T, F>(&mut self, mut inner: F) where F: FnMut() -> T {
1106 self.dur = Duration::span(|| {
1107 let k = self.iterations;
1114 pub fn ns_elapsed(&mut self) -> u64 {
1115 self.dur.secs() * 1_000_000_000 + (self.dur.extra_nanos() as u64)
1118 pub fn ns_per_iter(&mut self) -> u64 {
1119 if self.iterations == 0 {
1122 self.ns_elapsed() / cmp::max(self.iterations, 1)
1126 pub fn bench_n<F>(&mut self, n: u64, f: F) where F: FnOnce(&mut Bencher) {
1127 self.iterations = n;
1131 // This is a more statistics-driven benchmark algorithm
1132 pub fn auto_bench<F>(&mut self, mut f: F) -> stats::Summary where F: FnMut(&mut Bencher) {
1133 // Initial bench run to get ballpark figure.
1135 self.bench_n(n, |x| f(x));
1137 // Try to estimate iter count for 1ms falling back to 1m
1138 // iterations if first run took < 1ns.
1139 if self.ns_per_iter() == 0 {
1142 n = 1_000_000 / cmp::max(self.ns_per_iter(), 1);
1144 // if the first run took more than 1ms we don't want to just
1145 // be left doing 0 iterations on every loop. The unfortunate
1146 // side effect of not being able to do as many runs is
1147 // automatically handled by the statistical analysis below
1148 // (i.e. larger error bars).
1149 if n == 0 { n = 1; }
1151 let mut total_run = Duration::new(0, 0);
1152 let samples : &mut [f64] = &mut [0.0_f64; 50];
1154 let mut summ = None;
1155 let mut summ5 = None;
1157 let loop_run = Duration::span(|| {
1159 for p in &mut *samples {
1160 self.bench_n(n, |x| f(x));
1161 *p = self.ns_per_iter() as f64;
1164 stats::winsorize(samples, 5.0);
1165 summ = Some(stats::Summary::new(samples));
1167 for p in &mut *samples {
1168 self.bench_n(5 * n, |x| f(x));
1169 *p = self.ns_per_iter() as f64;
1172 stats::winsorize(samples, 5.0);
1173 summ5 = Some(stats::Summary::new(samples));
1175 let summ = summ.unwrap();
1176 let summ5 = summ5.unwrap();
1178 // If we've run for 100ms and seem to have converged to a
1180 if loop_run > Duration::from_millis(100) &&
1181 summ.median_abs_dev_pct < 1.0 &&
1182 summ.median - summ5.median < summ5.median_abs_dev {
1186 total_run = total_run + loop_run;
1187 // Longest we ever run for is 3s.
1188 if total_run > Duration::from_secs(3) {
1192 // If we overflow here just return the results so far. We check a
1193 // multiplier of 10 because we're about to multiply by 2 and the
1194 // next iteration of the loop will also multiply by 5 (to calculate
1195 // the summ5 result)
1196 n = match n.checked_mul(10) {
1198 None => return summ5,
1206 use std::time::Duration;
1207 use super::{Bencher, BenchSamples};
1209 pub fn benchmark<F>(f: F) -> BenchSamples where F: FnMut(&mut Bencher) {
1210 let mut bs = Bencher {
1212 dur: Duration::new(0, 0),
1216 let ns_iter_summ = bs.auto_bench(f);
1218 let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
1219 let iter_s = 1_000_000_000 / ns_iter;
1220 let mb_s = (bs.bytes * iter_s) / 1_000_000;
1223 ns_iter_summ: ns_iter_summ,
1228 pub fn run_once<F>(f: F) where F: FnOnce(&mut Bencher) {
1229 let mut bs = Bencher {
1231 dur: Duration::new(0, 0),
1240 use test::{TrFailed, TrIgnored, TrOk, filter_tests, parse_opts,
1241 TestDesc, TestDescAndFn, TestOpts, run_test,
1243 StaticTestName, DynTestName, DynTestFn, ShouldPanic};
1244 use std::sync::mpsc::channel;
1247 pub fn do_not_run_ignored_tests() {
1248 fn f() { panic!(); }
1249 let desc = TestDescAndFn {
1251 name: StaticTestName("whatever"),
1253 should_panic: ShouldPanic::No,
1255 testfn: DynTestFn(Box::new(move|| f())),
1257 let (tx, rx) = channel();
1258 run_test(&TestOpts::new(), false, desc, tx);
1259 let (_, res, _) = rx.recv().unwrap();
1260 assert!(res != TrOk);
1264 pub fn ignored_tests_result_in_ignored() {
1266 let desc = TestDescAndFn {
1268 name: StaticTestName("whatever"),
1270 should_panic: ShouldPanic::No,
1272 testfn: DynTestFn(Box::new(move|| f())),
1274 let (tx, rx) = channel();
1275 run_test(&TestOpts::new(), false, desc, tx);
1276 let (_, res, _) = rx.recv().unwrap();
1277 assert!(res == TrIgnored);
1281 fn test_should_panic() {
1282 fn f() { panic!(); }
1283 let desc = TestDescAndFn {
1285 name: StaticTestName("whatever"),
1287 should_panic: ShouldPanic::Yes(None)
1289 testfn: DynTestFn(Box::new(move|| f())),
1291 let (tx, rx) = channel();
1292 run_test(&TestOpts::new(), false, desc, tx);
1293 let (_, res, _) = rx.recv().unwrap();
1294 assert!(res == TrOk);
1298 fn test_should_panic_good_message() {
1299 fn f() { panic!("an error message"); }
1300 let desc = TestDescAndFn {
1302 name: StaticTestName("whatever"),
1304 should_panic: ShouldPanic::Yes(Some("error message"))
1306 testfn: DynTestFn(Box::new(move|| f())),
1308 let (tx, rx) = channel();
1309 run_test(&TestOpts::new(), false, desc, tx);
1310 let (_, res, _) = rx.recv().unwrap();
1311 assert!(res == TrOk);
1315 fn test_should_panic_bad_message() {
1316 fn f() { panic!("an error message"); }
1317 let desc = TestDescAndFn {
1319 name: StaticTestName("whatever"),
1321 should_panic: ShouldPanic::Yes(Some("foobar"))
1323 testfn: DynTestFn(Box::new(move|| f())),
1325 let (tx, rx) = channel();
1326 run_test(&TestOpts::new(), false, desc, tx);
1327 let (_, res, _) = rx.recv().unwrap();
1328 assert!(res == TrFailed);
1332 fn test_should_panic_but_succeeds() {
1334 let desc = TestDescAndFn {
1336 name: StaticTestName("whatever"),
1338 should_panic: ShouldPanic::Yes(None)
1340 testfn: DynTestFn(Box::new(move|| f())),
1342 let (tx, rx) = channel();
1343 run_test(&TestOpts::new(), false, desc, tx);
1344 let (_, res, _) = rx.recv().unwrap();
1345 assert!(res == TrFailed);
1349 fn parse_ignored_flag() {
1350 let args = vec!("progname".to_string(),
1351 "filter".to_string(),
1352 "--ignored".to_string());
1353 let opts = match parse_opts(&args) {
1355 _ => panic!("Malformed arg in parse_ignored_flag")
1357 assert!((opts.run_ignored));
1361 pub fn filter_for_ignored_option() {
1362 // When we run ignored tests the test filter should filter out all the
1363 // unignored tests and flip the ignore flag on the rest to false
1365 let mut opts = TestOpts::new();
1366 opts.run_tests = true;
1367 opts.run_ignored = true;
1372 name: StaticTestName("1"),
1374 should_panic: ShouldPanic::No,
1376 testfn: DynTestFn(Box::new(move|| {})),
1380 name: StaticTestName("2"),
1382 should_panic: ShouldPanic::No,
1384 testfn: DynTestFn(Box::new(move|| {})),
1386 let filtered = filter_tests(&opts, tests);
1388 assert_eq!(filtered.len(), 1);
1389 assert_eq!(filtered[0].desc.name.to_string(),
1391 assert!(filtered[0].desc.ignore == false);
1395 pub fn sort_tests() {
1396 let mut opts = TestOpts::new();
1397 opts.run_tests = true;
1400 vec!("sha1::test".to_string(),
1401 "isize::test_to_str".to_string(),
1402 "isize::test_pow".to_string(),
1403 "test::do_not_run_ignored_tests".to_string(),
1404 "test::ignored_tests_result_in_ignored".to_string(),
1405 "test::first_free_arg_should_be_a_filter".to_string(),
1406 "test::parse_ignored_flag".to_string(),
1407 "test::filter_for_ignored_option".to_string(),
1408 "test::sort_tests".to_string());
1412 let mut tests = Vec::new();
1413 for name in &names {
1414 let test = TestDescAndFn {
1416 name: DynTestName((*name).clone()),
1418 should_panic: ShouldPanic::No,
1420 testfn: DynTestFn(Box::new(testfn)),
1426 let filtered = filter_tests(&opts, tests);
1429 vec!("isize::test_pow".to_string(),
1430 "isize::test_to_str".to_string(),
1431 "sha1::test".to_string(),
1432 "test::do_not_run_ignored_tests".to_string(),
1433 "test::filter_for_ignored_option".to_string(),
1434 "test::first_free_arg_should_be_a_filter".to_string(),
1435 "test::ignored_tests_result_in_ignored".to_string(),
1436 "test::parse_ignored_flag".to_string(),
1437 "test::sort_tests".to_string());
1439 for (a, b) in expected.iter().zip(filtered) {
1440 assert!(*a == b.desc.name.to_string());
1445 pub fn test_metricmap_compare() {
1446 let mut m1 = MetricMap::new();
1447 let mut m2 = MetricMap::new();
1448 m1.insert_metric("in-both-noise", 1000.0, 200.0);
1449 m2.insert_metric("in-both-noise", 1100.0, 200.0);
1451 m1.insert_metric("in-first-noise", 1000.0, 2.0);
1452 m2.insert_metric("in-second-noise", 1000.0, 2.0);
1454 m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
1455 m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);
1457 m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
1458 m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);
1460 m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
1461 m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);
1463 m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
1464 m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);