1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 //! Support code for rustc's built in unit-test and micro-benchmarking
14 //! Almost all user code will only be interested in `Bencher` and
15 //! `black_box`. All other interactions (such as writing tests and
16 //! benchmarks themselves) should be done via the `#[test]` and
17 //! `#[bench]` attributes.
19 //! See the [Testing Chapter](../book/testing.html) of the book for more details.
21 // Currently, not much of this is meant for users. It is intended to
22 // support the simplest interface possible for representing and
23 // running tests while providing a base that other test frameworks may
26 // Do not remove on snapshot creation. Needed for bootstrap. (Issue #22364)
27 #![cfg_attr(stage0, feature(custom_attribute))]
28 #![crate_name = "test"]
29 #![unstable(feature = "test", issue = "27812")]
31 #![crate_type = "rlib"]
32 #![crate_type = "dylib"]
33 #![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
34 html_favicon_url = "https://doc.rust-lang.org/favicon.ico",
35 html_root_url = "https://doc.rust-lang.org/nightly/")]
38 #![feature(box_syntax)]
39 #![feature(duration_span)]
43 #![feature(rustc_private)]
44 #![feature(set_stdio)]
45 #![feature(staged_api)]
48 extern crate serialize;
49 extern crate serialize as rustc_serialize;
53 pub use self::TestFn::*;
54 pub use self::ColorConfig::*;
55 pub use self::TestResult::*;
56 pub use self::TestName::*;
57 use self::TestEvent::*;
58 use self::NamePadding::*;
59 use self::OutputLocation::*;
62 use getopts::{OptGroup, optflag, optopt};
63 use serialize::Encodable;
64 use std::boxed::FnBox;
66 use term::color::{Color, RED, YELLOW, GREEN, CYAN};
70 use std::collections::BTreeMap;
74 use std::io::prelude::*;
76 use std::iter::repeat;
77 use std::path::PathBuf;
78 use std::sync::mpsc::{channel, Sender};
79 use std::sync::{Arc, Mutex};
81 use std::time::Duration;
83 // to be used by rustc to compile tests in libtest
85 pub use {Bencher, TestName, TestResult, TestDesc,
86 TestDescAndFn, TestOpts, TrFailed, TrIgnored, TrOk,
88 StaticTestFn, StaticTestName, DynTestName, DynTestFn,
89 run_test, test_main, test_main_static, filter_tests,
90 parse_opts, StaticBenchFn, ShouldPanic};
95 // The name of a test. By convention this follows the rules for rust
96 // paths; i.e. it should be a series of identifiers separated by double
97 // colons. This way if some test runner wants to arrange the tests
98 // hierarchically it may.
100 #[derive(Clone, PartialEq, Eq, Hash, Debug)]
102 StaticTestName(&'static str),
106 fn as_slice<'a>(&'a self) -> &'a str {
108 StaticTestName(s) => s,
109 DynTestName(ref s) => s
113 impl fmt::Display for TestName {
114 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
115 fmt::Display::fmt(self.as_slice(), f)
119 #[derive(Clone, Copy)]
126 fn padded_name(&self, column_count: usize, align: NamePadding) -> String {
127 let mut name = String::from(self.name.as_slice());
128 let fill = column_count.saturating_sub(name.len());
129 let pad = repeat(" ").take(fill).collect::<String>();
140 /// Represents a benchmark function.
141 pub trait TDynBenchFn: Send {
142 fn run(&self, harness: &mut Bencher);
145 // A function that runs a test. If the function returns successfully,
146 // the test succeeds; if the function panics then the test fails. We
147 // may need to come up with a more clever definition of test in order
148 // to support isolation of tests into threads.
151 StaticBenchFn(fn(&mut Bencher)),
152 StaticMetricFn(fn(&mut MetricMap)),
153 DynTestFn(Box<FnBox() + Send>),
154 DynMetricFn(Box<FnBox(&mut MetricMap)+Send>),
155 DynBenchFn(Box<TDynBenchFn+'static>)
159 fn padding(&self) -> NamePadding {
161 &StaticTestFn(..) => PadNone,
162 &StaticBenchFn(..) => PadOnRight,
163 &StaticMetricFn(..) => PadOnRight,
164 &DynTestFn(..) => PadNone,
165 &DynMetricFn(..) => PadOnRight,
166 &DynBenchFn(..) => PadOnRight,
171 impl fmt::Debug for TestFn {
172 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
173 f.write_str(match *self {
174 StaticTestFn(..) => "StaticTestFn(..)",
175 StaticBenchFn(..) => "StaticBenchFn(..)",
176 StaticMetricFn(..) => "StaticMetricFn(..)",
177 DynTestFn(..) => "DynTestFn(..)",
178 DynMetricFn(..) => "DynMetricFn(..)",
179 DynBenchFn(..) => "DynBenchFn(..)"
184 /// Manager of the benchmarking runs.
186 /// This is fed into functions marked with `#[bench]` to allow for
187 /// set-up & tear-down before running a piece of code repeatedly via a
189 #[derive(Copy, Clone)]
196 #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
197 pub enum ShouldPanic {
200 YesWithMessage(&'static str)
203 // The definition of a single test. A test runner will run a list of
205 #[derive(Clone, Debug, PartialEq, Eq, Hash)]
206 pub struct TestDesc {
209 pub should_panic: ShouldPanic,
212 unsafe impl Send for TestDesc {}
215 pub struct TestDescAndFn {
220 #[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Debug, Copy)]
227 pub fn new(value: f64, noise: f64) -> Metric {
228 Metric {value: value, noise: noise}
233 pub struct MetricMap(BTreeMap<String,Metric>);
235 impl Clone for MetricMap {
236 fn clone(&self) -> MetricMap {
237 let MetricMap(ref map) = *self;
238 MetricMap(map.clone())
242 // The default console test runner. It accepts the command line
243 // arguments and a vector of test_descs.
244 pub fn test_main(args: &[String], tests: Vec<TestDescAndFn> ) {
246 match parse_opts(args) {
248 Some(Err(msg)) => panic!("{:?}", msg),
251 match run_tests_console(&opts, tests) {
253 Ok(false) => std::process::exit(101),
254 Err(e) => panic!("io error when running tests: {:?}", e),
258 // A variant optimized for invocation with a static test vector.
259 // This will panic (intentionally) when fed any dynamic tests, because
260 // it is copying the static values out into a dynamic vector and cannot
261 // copy dynamic values. It is doing this because from this point on
262 // a Vec<TestDescAndFn> is used in order to effect ownership-transfer
263 // semantics into parallel test runners, which in turn requires a Vec<>
264 // rather than a &[].
265 pub fn test_main_static(tests: &[TestDescAndFn]) {
266 let args = env::args().collect::<Vec<_>>();
267 let owned_tests = tests.iter().map(|t| {
269 StaticTestFn(f) => TestDescAndFn { testfn: StaticTestFn(f), desc: t.desc.clone() },
270 StaticBenchFn(f) => TestDescAndFn { testfn: StaticBenchFn(f), desc: t.desc.clone() },
271 _ => panic!("non-static tests passed to test::test_main_static")
274 test_main(&args, owned_tests)
277 #[derive(Copy, Clone)]
278 pub enum ColorConfig {
284 pub struct TestOpts {
285 pub filter: Option<String>,
286 pub run_ignored: bool,
288 pub bench_benchmarks: bool,
289 pub logfile: Option<PathBuf>,
291 pub color: ColorConfig,
296 fn new() -> TestOpts {
301 bench_benchmarks: false,
309 /// Result of parsing the options.
310 pub type OptRes = Result<TestOpts, String>;
312 fn optgroups() -> Vec<getopts::OptGroup> {
313 vec!(getopts::optflag("", "ignored", "Run ignored tests"),
314 getopts::optflag("", "test", "Run tests and not benchmarks"),
315 getopts::optflag("", "bench", "Run benchmarks instead of tests"),
316 getopts::optflag("h", "help", "Display this message (longer with --help)"),
317 getopts::optopt("", "logfile", "Write logs to the specified file instead \
319 getopts::optflag("", "nocapture", "don't capture stdout/stderr of each \
320 task, allow printing directly"),
321 getopts::optopt("", "color", "Configure coloring of output:
322 auto = colorize if stdout is a tty and tests are run on serially (default);
323 always = always colorize output;
324 never = never colorize output;", "auto|always|never"))
327 fn usage(binary: &str) {
328 let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
331 The FILTER regex is tested against the name of all tests to run, and
332 only those tests that match are run.
334 By default, all tests are run in parallel. This can be altered with the
335 RUST_TEST_THREADS environment variable when running tests (set it to 1).
337 All tests have their standard output and standard error captured by default.
338 This can be overridden with the --nocapture flag or the RUST_TEST_NOCAPTURE=1
339 environment variable. Logging is not captured by default.
343 #[test] - Indicates a function is a test to be run. This function
345 #[bench] - Indicates a function is a benchmark to be run. This
346 function takes one argument (test::Bencher).
347 #[should_panic] - This function (also labeled with #[test]) will only pass if
348 the code causes a panic (an assertion failure or panic!)
349 A message may be provided, which the failure string must
350 contain: #[should_panic(expected = "foo")].
351 #[ignore] - When applied to a function which is already attributed as a
352 test, then the test runner will ignore these tests during
353 normal test runs. Running with --ignored will run these
355 usage = getopts::usage(&message, &optgroups()));
358 // Parses command line arguments into test options
359 pub fn parse_opts(args: &[String]) -> Option<OptRes> {
360 let args_ = &args[1..];
362 match getopts::getopts(args_, &optgroups()) {
364 Err(f) => return Some(Err(f.to_string()))
367 if matches.opt_present("h") { usage(&args[0]); return None; }
369 let filter = if !matches.free.is_empty() {
370 Some(matches.free[0].clone())
375 let run_ignored = matches.opt_present("ignored");
377 let logfile = matches.opt_str("logfile");
378 let logfile = logfile.map(|s| PathBuf::from(&s));
380 let bench_benchmarks = matches.opt_present("bench");
381 let run_tests = ! bench_benchmarks ||
382 matches.opt_present("test");
384 let mut nocapture = matches.opt_present("nocapture");
386 nocapture = env::var("RUST_TEST_NOCAPTURE").is_ok();
389 let color = match matches.opt_str("color").as_ref().map(|s| &**s) {
390 Some("auto") | None => AutoColor,
391 Some("always") => AlwaysColor,
392 Some("never") => NeverColor,
394 Some(v) => return Some(Err(format!("argument for --color must be \
395 auto, always, or never (was {})",
399 let test_opts = TestOpts {
401 run_ignored: run_ignored,
402 run_tests: run_tests,
403 bench_benchmarks: bench_benchmarks,
405 nocapture: nocapture,
412 #[derive(Clone, PartialEq)]
413 pub struct BenchSamples {
414 ns_iter_summ: stats::Summary,
418 #[derive(Clone, PartialEq)]
419 pub enum TestResult {
423 TrMetrics(MetricMap),
424 TrBench(BenchSamples),
427 unsafe impl Send for TestResult {}
429 enum OutputLocation<T> {
430 Pretty(Box<term::Terminal<term::WriterWrapper> + Send>),
434 struct ConsoleTestState<T> {
435 log_out: Option<File>,
436 out: OutputLocation<T>,
444 failures: Vec<(TestDesc, Vec<u8> )> ,
445 max_name_len: usize, // number of columns to fill when aligning names
448 impl<T: Write> ConsoleTestState<T> {
449 pub fn new(opts: &TestOpts,
450 _: Option<T>) -> io::Result<ConsoleTestState<io::Stdout>> {
451 let log_out = match opts.logfile {
452 Some(ref path) => Some(try!(File::create(path))),
455 let out = match term::stdout() {
456 None => Raw(io::stdout()),
460 Ok(ConsoleTestState {
463 use_color: use_color(opts),
469 metrics: MetricMap::new(),
470 failures: Vec::new(),
475 pub fn write_ok(&mut self) -> io::Result<()> {
476 self.write_pretty("ok", term::color::GREEN)
479 pub fn write_failed(&mut self) -> io::Result<()> {
480 self.write_pretty("FAILED", term::color::RED)
483 pub fn write_ignored(&mut self) -> io::Result<()> {
484 self.write_pretty("ignored", term::color::YELLOW)
487 pub fn write_metric(&mut self) -> io::Result<()> {
488 self.write_pretty("metric", term::color::CYAN)
491 pub fn write_bench(&mut self) -> io::Result<()> {
492 self.write_pretty("bench", term::color::CYAN)
495 pub fn write_pretty(&mut self,
497 color: term::color::Color) -> io::Result<()> {
499 Pretty(ref mut term) => {
501 try!(term.fg(color));
503 try!(term.write_all(word.as_bytes()));
509 Raw(ref mut stdout) => {
510 try!(stdout.write_all(word.as_bytes()));
516 pub fn write_plain(&mut self, s: &str) -> io::Result<()> {
518 Pretty(ref mut term) => {
519 try!(term.write_all(s.as_bytes()));
522 Raw(ref mut stdout) => {
523 try!(stdout.write_all(s.as_bytes()));
529 pub fn write_run_start(&mut self, len: usize) -> io::Result<()> {
531 let noun = if len != 1 { "tests" } else { "test" };
532 self.write_plain(&format!("\nrunning {} {}\n", len, noun))
535 pub fn write_test_start(&mut self, test: &TestDesc,
536 align: NamePadding) -> io::Result<()> {
537 let name = test.padded_name(self.max_name_len, align);
538 self.write_plain(&format!("test {} ... ", name))
541 pub fn write_result(&mut self, result: &TestResult) -> io::Result<()> {
543 TrOk => self.write_ok(),
544 TrFailed => self.write_failed(),
545 TrIgnored => self.write_ignored(),
546 TrMetrics(ref mm) => {
547 try!(self.write_metric());
548 self.write_plain(&format!(": {}", mm.fmt_metrics()))
551 try!(self.write_bench());
553 try!(self.write_plain(&format!(": {}", fmt_bench_samples(bs))));
558 self.write_plain("\n")
561 pub fn write_log(&mut self, test: &TestDesc,
562 result: &TestResult) -> io::Result<()> {
566 let s = format!("{} {}\n", match *result {
567 TrOk => "ok".to_string(),
568 TrFailed => "failed".to_string(),
569 TrIgnored => "ignored".to_string(),
570 TrMetrics(ref mm) => mm.fmt_metrics(),
571 TrBench(ref bs) => fmt_bench_samples(bs)
573 o.write_all(s.as_bytes())
578 pub fn write_failures(&mut self) -> io::Result<()> {
579 try!(self.write_plain("\nfailures:\n"));
580 let mut failures = Vec::new();
581 let mut fail_out = String::new();
582 for &(ref f, ref stdout) in &self.failures {
583 failures.push(f.name.to_string());
584 if !stdout.is_empty() {
585 fail_out.push_str(&format!("---- {} stdout ----\n\t", f.name));
586 let output = String::from_utf8_lossy(stdout);
587 fail_out.push_str(&output);
588 fail_out.push_str("\n");
591 if !fail_out.is_empty() {
592 try!(self.write_plain("\n"));
593 try!(self.write_plain(&fail_out));
596 try!(self.write_plain("\nfailures:\n"));
598 for name in &failures {
599 try!(self.write_plain(&format!(" {}\n", name)));
604 pub fn write_run_finish(&mut self) -> io::Result<bool> {
605 assert!(self.passed + self.failed + self.ignored + self.measured == self.total);
607 let success = self.failed == 0;
609 try!(self.write_failures());
612 try!(self.write_plain("\ntest result: "));
614 // There's no parallelism at this point so it's safe to use color
615 try!(self.write_ok());
617 try!(self.write_failed());
619 let s = format!(". {} passed; {} failed; {} ignored; {} measured\n\n",
620 self.passed, self.failed, self.ignored, self.measured);
621 try!(self.write_plain(&s));
// Format a number with thousands separators, e.g. 1234567 -> "1,234,567".
// Handles values up to the billions group (10^9); larger inputs keep all
// higher digits in the leading group.
fn fmt_thousands_sep(mut n: usize, sep: char) -> String {
    use std::fmt::Write;
    let mut output = String::new();
    // `trailing` flips once the first (most significant) nonzero group has
    // been printed; every later group is zero-padded to three digits.
    let mut trailing = false;
    for &pow in &[9, 6, 3, 0] {
        let base = 10_usize.pow(pow);
        if pow == 0 || trailing || n / base != 0 {
            if !trailing {
                output.write_fmt(format_args!("{}", n / base)).unwrap();
            } else {
                output.write_fmt(format_args!("{:03}", n / base)).unwrap();
            }
            if pow != 0 {
                output.push(sep);
            }
            trailing = true;
        }
        n %= base;
    }
    output
}
650 pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
652 let mut output = String::new();
654 let median = bs.ns_iter_summ.median as usize;
655 let deviation = (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize;
657 output.write_fmt(format_args!("{:>11} ns/iter (+/- {})",
658 fmt_thousands_sep(median, ','),
659 fmt_thousands_sep(deviation, ','))).unwrap();
661 output.write_fmt(format_args!(" = {} MB/s", bs.mb_s)).unwrap();
666 // A simple console test runner
667 pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn> ) -> io::Result<bool> {
669 fn callback<T: Write>(event: &TestEvent,
670 st: &mut ConsoleTestState<T>) -> io::Result<()> {
671 match (*event).clone() {
672 TeFiltered(ref filtered_tests) => st.write_run_start(filtered_tests.len()),
673 TeWait(ref test, padding) => st.write_test_start(test, padding),
674 TeResult(test, result, stdout) => {
675 try!(st.write_log(&test, &result));
676 try!(st.write_result(&result));
678 TrOk => st.passed += 1,
679 TrIgnored => st.ignored += 1,
681 let tname = test.name;
682 let MetricMap(mm) = mm;
685 .insert_metric(&format!("{}.{}",
694 st.metrics.insert_metric(test.name.as_slice(),
695 bs.ns_iter_summ.median,
696 bs.ns_iter_summ.max - bs.ns_iter_summ.min);
701 st.failures.push((test, stdout));
709 let mut st = try!(ConsoleTestState::new(opts, None::<io::Stdout>));
710 fn len_if_padded(t: &TestDescAndFn) -> usize {
711 match t.testfn.padding() {
713 PadOnRight => t.desc.name.as_slice().len(),
716 match tests.iter().max_by(|t|len_if_padded(*t)) {
718 let n = t.desc.name.as_slice();
719 st.max_name_len = n.len();
723 try!(run_tests(opts, tests, |x| callback(&x, &mut st)));
724 return st.write_run_finish();
728 fn should_sort_failures_before_printing_them() {
729 let test_a = TestDesc {
730 name: StaticTestName("a"),
732 should_panic: ShouldPanic::No
735 let test_b = TestDesc {
736 name: StaticTestName("b"),
738 should_panic: ShouldPanic::No
741 let mut st = ConsoleTestState {
743 out: Raw(Vec::new()),
751 metrics: MetricMap::new(),
752 failures: vec!((test_b, Vec::new()), (test_a, Vec::new()))
755 st.write_failures().unwrap();
756 let s = match st.out {
757 Raw(ref m) => String::from_utf8_lossy(&m[..]),
758 Pretty(_) => unreachable!()
761 let apos = s.find("a").unwrap();
762 let bpos = s.find("b").unwrap();
763 assert!(apos < bpos);
766 fn use_color(opts: &TestOpts) -> bool {
768 AutoColor => !opts.nocapture && stdout_isatty(),
775 fn stdout_isatty() -> bool {
776 unsafe { libc::isatty(libc::STDOUT_FILENO) != 0 }
779 fn stdout_isatty() -> bool {
780 const STD_OUTPUT_HANDLE: libc::DWORD = -11i32 as libc::DWORD;
782 fn GetStdHandle(which: libc::DWORD) -> libc::HANDLE;
783 fn GetConsoleMode(hConsoleHandle: libc::HANDLE,
784 lpMode: libc::LPDWORD) -> libc::BOOL;
787 let handle = GetStdHandle(STD_OUTPUT_HANDLE);
789 GetConsoleMode(handle, &mut out) != 0
795 TeFiltered(Vec<TestDesc> ),
796 TeWait(TestDesc, NamePadding),
797 TeResult(TestDesc, TestResult, Vec<u8> ),
800 pub type MonitorMsg = (TestDesc, TestResult, Vec<u8> );
803 fn run_tests<F>(opts: &TestOpts,
804 tests: Vec<TestDescAndFn> ,
805 mut callback: F) -> io::Result<()> where
806 F: FnMut(TestEvent) -> io::Result<()>,
808 let mut filtered_tests = filter_tests(opts, tests);
809 if !opts.bench_benchmarks {
810 filtered_tests = convert_benchmarks_to_tests(filtered_tests);
813 let filtered_descs = filtered_tests.iter()
814 .map(|t| t.desc.clone())
817 try!(callback(TeFiltered(filtered_descs)));
819 let (filtered_tests, filtered_benchs_and_metrics): (Vec<_>, _) =
820 filtered_tests.into_iter().partition(|e| {
822 StaticTestFn(_) | DynTestFn(_) => true,
827 // It's tempting to just spawn all the tests at once, but since we have
828 // many tests that run in other processes we would be making a big mess.
829 let concurrency = get_concurrency();
831 let mut remaining = filtered_tests;
835 let (tx, rx) = channel::<MonitorMsg>();
837 while pending > 0 || !remaining.is_empty() {
838 while pending < concurrency && !remaining.is_empty() {
839 let test = remaining.pop().unwrap();
840 if concurrency == 1 {
841 // We are doing one test at a time so we can print the name
842 // of the test before we run it. Useful for debugging tests
843 // that hang forever.
844 try!(callback(TeWait(test.desc.clone(), test.testfn.padding())));
846 run_test(opts, !opts.run_tests, test, tx.clone());
850 let (desc, result, stdout) = rx.recv().unwrap();
851 if concurrency != 1 {
852 try!(callback(TeWait(desc.clone(), PadNone)));
854 try!(callback(TeResult(desc, result, stdout)));
858 if opts.bench_benchmarks {
859 // All benchmarks run at the end, in serial.
860 // (this includes metric fns)
861 for b in filtered_benchs_and_metrics {
862 try!(callback(TeWait(b.desc.clone(), b.testfn.padding())));
863 run_test(opts, false, b, tx.clone());
864 let (test, result, stdout) = rx.recv().unwrap();
865 try!(callback(TeResult(test, result, stdout)));
872 fn get_concurrency() -> usize {
873 return match env::var("RUST_TEST_THREADS") {
875 let opt_n: Option<usize> = s.parse().ok();
877 Some(n) if n > 0 => n,
878 _ => panic!("RUST_TEST_THREADS is `{}`, should be a positive integer.", s)
881 Err(..) => num_cpus(),
885 fn num_cpus() -> usize {
887 let mut sysinfo = std::mem::zeroed();
888 libc::GetSystemInfo(&mut sysinfo);
889 sysinfo.dwNumberOfProcessors as usize
894 fn num_cpus() -> usize {
895 extern { fn rust_get_num_cpus() -> libc::uintptr_t; }
896 unsafe { rust_get_num_cpus() as usize }
900 pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
901 let mut filtered = tests;
903 // Remove tests that don't match the test filter
904 filtered = match opts.filter {
906 Some(ref filter) => {
907 filtered.into_iter().filter(|test| {
908 test.desc.name.as_slice().contains(&filter[..])
913 // Maybe pull out the ignored test and unignore them
914 filtered = if !opts.run_ignored {
917 fn filter(test: TestDescAndFn) -> Option<TestDescAndFn> {
918 if test.desc.ignore {
919 let TestDescAndFn {desc, testfn} = test;
921 desc: TestDesc {ignore: false, ..desc},
928 filtered.into_iter().filter_map(|x| filter(x)).collect()
931 // Sort the tests alphabetically
932 filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));
937 pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
938 // convert benchmarks to tests, if we're not benchmarking them
939 tests.into_iter().map(|x| {
940 let testfn = match x.testfn {
941 DynBenchFn(bench) => {
942 DynTestFn(Box::new(move || bench::run_once(|b| bench.run(b))))
944 StaticBenchFn(benchfn) => {
945 DynTestFn(Box::new(move || bench::run_once(|b| benchfn(b))))
949 TestDescAndFn { desc: x.desc, testfn: testfn }
953 pub fn run_test(opts: &TestOpts,
956 monitor_ch: Sender<MonitorMsg>) {
958 let TestDescAndFn {desc, testfn} = test;
960 if force_ignore || desc.ignore {
961 monitor_ch.send((desc, TrIgnored, Vec::new())).unwrap();
965 fn run_test_inner(desc: TestDesc,
966 monitor_ch: Sender<MonitorMsg>,
968 testfn: Box<FnBox() + Send>) {
969 struct Sink(Arc<Mutex<Vec<u8>>>);
970 impl Write for Sink {
971 fn write(&mut self, data: &[u8]) -> io::Result<usize> {
972 Write::write(&mut *self.0.lock().unwrap(), data)
974 fn flush(&mut self) -> io::Result<()> { Ok(()) }
977 thread::spawn(move || {
978 let data = Arc::new(Mutex::new(Vec::new()));
979 let data2 = data.clone();
980 let cfg = thread::Builder::new().name(match desc.name {
981 DynTestName(ref name) => name.clone().to_string(),
982 StaticTestName(name) => name.to_string(),
985 let result_guard = cfg.spawn(move || {
987 io::set_print(box Sink(data2.clone()));
988 io::set_panic(box Sink(data2));
992 let test_result = calc_result(&desc, result_guard.join());
993 let stdout = data.lock().unwrap().to_vec();
994 monitor_ch.send((desc.clone(), test_result, stdout)).unwrap();
999 DynBenchFn(bencher) => {
1000 let bs = ::bench::benchmark(|harness| bencher.run(harness));
1001 monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
1004 StaticBenchFn(benchfn) => {
1005 let bs = ::bench::benchmark(|harness| (benchfn.clone())(harness));
1006 monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
1010 let mut mm = MetricMap::new();
1011 f.call_box((&mut mm,));
1012 monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
1015 StaticMetricFn(f) => {
1016 let mut mm = MetricMap::new();
1018 monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
1021 DynTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture, f),
1022 StaticTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture,
1023 Box::new(move|| f()))
1027 fn calc_result(desc: &TestDesc, task_result: Result<(), Box<Any+Send>>) -> TestResult {
1028 match (&desc.should_panic, task_result) {
1029 (&ShouldPanic::No, Ok(())) |
1030 (&ShouldPanic::Yes, Err(_)) => TrOk,
1031 (&ShouldPanic::YesWithMessage(msg), Err(ref err))
1032 if err.downcast_ref::<String>()
1034 .or_else(|| err.downcast_ref::<&'static str>().map(|e| *e))
1035 .map(|e| e.contains(msg))
1036 .unwrap_or(false) => TrOk,
1043 pub fn new() -> MetricMap {
1044 MetricMap(BTreeMap::new())
1047 /// Insert a named `value` (+/- `noise`) metric into the map. The value
1048 /// must be non-negative. The `noise` indicates the uncertainty of the
1049 /// metric, which doubles as the "noise range" of acceptable
1050 /// pairwise-regressions on this named value, when comparing from one
1051 /// metric to the next using `compare_to_old`.
1053 /// If `noise` is positive, then it means this metric is of a value
1054 /// you want to see grow smaller, so a change larger than `noise` in the
1055 /// positive direction represents a regression.
1057 /// If `noise` is negative, then it means this metric is of a value
1058 /// you want to see grow larger, so a change larger than `noise` in the
1059 /// negative direction represents a regression.
1060 pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
1065 let MetricMap(ref mut map) = *self;
1066 map.insert(name.to_string(), m);
1069 pub fn fmt_metrics(&self) -> String {
1070 let MetricMap(ref mm) = *self;
1071 let v : Vec<String> = mm.iter()
1072 .map(|(k,v)| format!("{}: {} (+/- {})", *k,
1082 /// A function that is opaque to the optimizer, to allow benchmarks to
1083 /// pretend to use outputs to assist in avoiding dead-code
1086 /// This function is a no-op, and does not even read from `dummy`.
1087 pub fn black_box<T>(dummy: T) -> T {
1088 // we need to "use" the argument in some way LLVM can't
1090 unsafe {asm!("" : : "r"(&dummy))}
1096 /// Callback for benchmark functions to run in their body.
1097 pub fn iter<T, F>(&mut self, mut inner: F) where F: FnMut() -> T {
1098 self.dur = Duration::span(|| {
1099 let k = self.iterations;
1106 pub fn ns_elapsed(&mut self) -> u64 {
1107 self.dur.as_secs() * 1_000_000_000 + (self.dur.subsec_nanos() as u64)
1110 pub fn ns_per_iter(&mut self) -> u64 {
1111 if self.iterations == 0 {
1114 self.ns_elapsed() / cmp::max(self.iterations, 1)
1118 pub fn bench_n<F>(&mut self, n: u64, f: F) where F: FnOnce(&mut Bencher) {
1119 self.iterations = n;
1123 // This is a more statistics-driven benchmark algorithm
1124 pub fn auto_bench<F>(&mut self, mut f: F) -> stats::Summary where F: FnMut(&mut Bencher) {
1125 // Initial bench run to get ballpark figure.
1127 self.bench_n(n, |x| f(x));
1129 // Try to estimate iter count for 1ms falling back to 1m
1130 // iterations if first run took < 1ns.
1131 if self.ns_per_iter() == 0 {
1134 n = 1_000_000 / cmp::max(self.ns_per_iter(), 1);
1136 // if the first run took more than 1ms we don't want to just
1137 // be left doing 0 iterations on every loop. The unfortunate
1138 // side effect of not being able to do as many runs is
1139 // automatically handled by the statistical analysis below
1140 // (i.e. larger error bars).
1141 if n == 0 { n = 1; }
1143 let mut total_run = Duration::new(0, 0);
1144 let samples : &mut [f64] = &mut [0.0_f64; 50];
1146 let mut summ = None;
1147 let mut summ5 = None;
1149 let loop_run = Duration::span(|| {
1151 for p in &mut *samples {
1152 self.bench_n(n, |x| f(x));
1153 *p = self.ns_per_iter() as f64;
1156 stats::winsorize(samples, 5.0);
1157 summ = Some(stats::Summary::new(samples));
1159 for p in &mut *samples {
1160 self.bench_n(5 * n, |x| f(x));
1161 *p = self.ns_per_iter() as f64;
1164 stats::winsorize(samples, 5.0);
1165 summ5 = Some(stats::Summary::new(samples));
1167 let summ = summ.unwrap();
1168 let summ5 = summ5.unwrap();
1170 // If we've run for 100ms and seem to have converged to a
1172 if loop_run > Duration::from_millis(100) &&
1173 summ.median_abs_dev_pct < 1.0 &&
1174 summ.median - summ5.median < summ5.median_abs_dev {
1178 total_run = total_run + loop_run;
1179 // Longest we ever run for is 3s.
1180 if total_run > Duration::from_secs(3) {
1184 // If we overflow here just return the results so far. We check a
1185 // multiplier of 10 because we're about to multiply by 2 and the
1186 // next iteration of the loop will also multiply by 5 (to calculate
1187 // the summ5 result)
1188 n = match n.checked_mul(10) {
1190 None => return summ5,
1198 use std::time::Duration;
1199 use super::{Bencher, BenchSamples};
1201 pub fn benchmark<F>(f: F) -> BenchSamples where F: FnMut(&mut Bencher) {
1202 let mut bs = Bencher {
1204 dur: Duration::new(0, 0),
1208 let ns_iter_summ = bs.auto_bench(f);
1210 let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
1211 let iter_s = 1_000_000_000 / ns_iter;
1212 let mb_s = (bs.bytes * iter_s) / 1_000_000;
1215 ns_iter_summ: ns_iter_summ,
1220 pub fn run_once<F>(f: F) where F: FnOnce(&mut Bencher) {
1221 let mut bs = Bencher {
1223 dur: Duration::new(0, 0),
1232 use test::{TrFailed, TrIgnored, TrOk, filter_tests, parse_opts,
1233 TestDesc, TestDescAndFn, TestOpts, run_test,
1235 StaticTestName, DynTestName, DynTestFn, ShouldPanic};
1236 use std::sync::mpsc::channel;
1239 pub fn do_not_run_ignored_tests() {
1240 fn f() { panic!(); }
1241 let desc = TestDescAndFn {
1243 name: StaticTestName("whatever"),
1245 should_panic: ShouldPanic::No,
1247 testfn: DynTestFn(Box::new(move|| f())),
1249 let (tx, rx) = channel();
1250 run_test(&TestOpts::new(), false, desc, tx);
1251 let (_, res, _) = rx.recv().unwrap();
1252 assert!(res != TrOk);
1256 pub fn ignored_tests_result_in_ignored() {
1258 let desc = TestDescAndFn {
1260 name: StaticTestName("whatever"),
1262 should_panic: ShouldPanic::No,
1264 testfn: DynTestFn(Box::new(move|| f())),
1266 let (tx, rx) = channel();
1267 run_test(&TestOpts::new(), false, desc, tx);
1268 let (_, res, _) = rx.recv().unwrap();
1269 assert!(res == TrIgnored);
1273 fn test_should_panic() {
1274 fn f() { panic!(); }
1275 let desc = TestDescAndFn {
1277 name: StaticTestName("whatever"),
1279 should_panic: ShouldPanic::Yes,
1281 testfn: DynTestFn(Box::new(move|| f())),
1283 let (tx, rx) = channel();
1284 run_test(&TestOpts::new(), false, desc, tx);
1285 let (_, res, _) = rx.recv().unwrap();
1286 assert!(res == TrOk);
1290 fn test_should_panic_good_message() {
1291 fn f() { panic!("an error message"); }
1292 let desc = TestDescAndFn {
1294 name: StaticTestName("whatever"),
1296 should_panic: ShouldPanic::YesWithMessage("error message"),
1298 testfn: DynTestFn(Box::new(move|| f())),
1300 let (tx, rx) = channel();
1301 run_test(&TestOpts::new(), false, desc, tx);
1302 let (_, res, _) = rx.recv().unwrap();
1303 assert!(res == TrOk);
1307 fn test_should_panic_bad_message() {
1308 fn f() { panic!("an error message"); }
1309 let desc = TestDescAndFn {
1311 name: StaticTestName("whatever"),
1313 should_panic: ShouldPanic::YesWithMessage("foobar"),
1315 testfn: DynTestFn(Box::new(move|| f())),
1317 let (tx, rx) = channel();
1318 run_test(&TestOpts::new(), false, desc, tx);
1319 let (_, res, _) = rx.recv().unwrap();
1320 assert!(res == TrFailed);
1324 fn test_should_panic_but_succeeds() {
1326 let desc = TestDescAndFn {
1328 name: StaticTestName("whatever"),
1330 should_panic: ShouldPanic::Yes,
1332 testfn: DynTestFn(Box::new(move|| f())),
1334 let (tx, rx) = channel();
1335 run_test(&TestOpts::new(), false, desc, tx);
1336 let (_, res, _) = rx.recv().unwrap();
1337 assert!(res == TrFailed);
1341 fn parse_ignored_flag() {
1342 let args = vec!("progname".to_string(),
1343 "filter".to_string(),
1344 "--ignored".to_string());
1345 let opts = match parse_opts(&args) {
1347 _ => panic!("Malformed arg in parse_ignored_flag")
1349 assert!((opts.run_ignored));
1353 pub fn filter_for_ignored_option() {
1354 // When we run ignored tests the test filter should filter out all the
1355 // unignored tests and flip the ignore flag on the rest to false
1357 let mut opts = TestOpts::new();
1358 opts.run_tests = true;
1359 opts.run_ignored = true;
1364 name: StaticTestName("1"),
1366 should_panic: ShouldPanic::No,
1368 testfn: DynTestFn(Box::new(move|| {})),
1372 name: StaticTestName("2"),
1374 should_panic: ShouldPanic::No,
1376 testfn: DynTestFn(Box::new(move|| {})),
1378 let filtered = filter_tests(&opts, tests);
1380 assert_eq!(filtered.len(), 1);
1381 assert_eq!(filtered[0].desc.name.to_string(),
1383 assert!(filtered[0].desc.ignore == false);
1387 pub fn sort_tests() {
1388 let mut opts = TestOpts::new();
1389 opts.run_tests = true;
1392 vec!("sha1::test".to_string(),
1393 "isize::test_to_str".to_string(),
1394 "isize::test_pow".to_string(),
1395 "test::do_not_run_ignored_tests".to_string(),
1396 "test::ignored_tests_result_in_ignored".to_string(),
1397 "test::first_free_arg_should_be_a_filter".to_string(),
1398 "test::parse_ignored_flag".to_string(),
1399 "test::filter_for_ignored_option".to_string(),
1400 "test::sort_tests".to_string());
1404 let mut tests = Vec::new();
1405 for name in &names {
1406 let test = TestDescAndFn {
1408 name: DynTestName((*name).clone()),
1410 should_panic: ShouldPanic::No,
1412 testfn: DynTestFn(Box::new(testfn)),
1418 let filtered = filter_tests(&opts, tests);
1421 vec!("isize::test_pow".to_string(),
1422 "isize::test_to_str".to_string(),
1423 "sha1::test".to_string(),
1424 "test::do_not_run_ignored_tests".to_string(),
1425 "test::filter_for_ignored_option".to_string(),
1426 "test::first_free_arg_should_be_a_filter".to_string(),
1427 "test::ignored_tests_result_in_ignored".to_string(),
1428 "test::parse_ignored_flag".to_string(),
1429 "test::sort_tests".to_string());
1431 for (a, b) in expected.iter().zip(filtered) {
1432 assert!(*a == b.desc.name.to_string());
1437 pub fn test_metricmap_compare() {
1438 let mut m1 = MetricMap::new();
1439 let mut m2 = MetricMap::new();
1440 m1.insert_metric("in-both-noise", 1000.0, 200.0);
1441 m2.insert_metric("in-both-noise", 1100.0, 200.0);
1443 m1.insert_metric("in-first-noise", 1000.0, 2.0);
1444 m2.insert_metric("in-second-noise", 1000.0, 2.0);
1446 m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
1447 m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);
1449 m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
1450 m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);
1452 m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
1453 m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);
1455 m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
1456 m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);