1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 //! Support code for rustc's built in unit-test and micro-benchmarking
14 //! Almost all user code will only be interested in `Bencher` and
15 //! `black_box`. All other interactions (such as writing tests and
16 //! benchmarks themselves) should be done via the `#[test]` and
17 //! `#[bench]` attributes.
19 //! See the [Testing Chapter](../book/testing.html) of the book for more details.
21 // Currently, not much of this is meant for users. It is intended to
22 // support the simplest interface possible for representing and
23 // running tests while providing a base that other test frameworks may
26 #![crate_name = "test"]
27 #![unstable(feature = "test", issue = "27812")]
28 #![crate_type = "rlib"]
29 #![crate_type = "dylib"]
30 #![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
31 html_favicon_url = "https://doc.rust-lang.org/favicon.ico",
32 html_root_url = "https://doc.rust-lang.org/nightly/",
33 test(attr(deny(warnings))))]
34 #![cfg_attr(not(stage0), deny(warnings))]
37 #![feature(box_syntax)]
40 #![feature(rustc_private)]
41 #![feature(set_stdio)]
42 #![feature(staged_api)]
43 #![feature(question_mark)]
44 #![feature(panic_unwind)]
49 extern crate panic_unwind;
51 pub use self::TestFn::*;
52 pub use self::ColorConfig::*;
53 pub use self::TestResult::*;
54 pub use self::TestName::*;
55 use self::TestEvent::*;
56 use self::NamePadding::*;
57 use self::OutputLocation::*;
59 use std::boxed::FnBox;
63 use std::collections::BTreeMap;
67 use std::io::prelude::*;
69 use std::iter::repeat;
70 use std::path::PathBuf;
71 use std::sync::mpsc::{channel, Sender};
72 use std::sync::{Arc, Mutex};
74 use std::time::{Instant, Duration};
// Warn (via `write_timeout`) once a single test has been running longer
// than this many seconds; it is also the per-test timeout registered in
// `run_tests`'s `running_tests` map.
76 const TEST_WARN_TIMEOUT_S: u64 = 60;
78 // to be used by rustc to compile tests in libtest
// Re-export the public surface under this crate's root so generated test
// harness code can refer to `test::…` items directly.
80 pub use {Bencher, TestName, TestResult, TestDesc, TestDescAndFn, TestOpts, TrFailed,
81 TrIgnored, TrOk, Metric, MetricMap, StaticTestFn, StaticTestName, DynTestName,
82 DynTestFn, run_test, test_main, test_main_static, filter_tests, parse_opts,
83 StaticBenchFn, ShouldPanic};
88 // The name of a test. By convention this follows the rules for rust
89 // paths; i.e. it should be a series of identifiers separated by double
90 // colons. This way if some test runner wants to arrange the tests
91 // hierarchically it may.
93 #[derive(Clone, PartialEq, Eq, Hash, Debug)]
// NOTE(review): the `pub enum TestName {` header and the `DynTestName(String)`
// variant appear to be elided from this dump (embedded numbering skips lines).
95 StaticTestName(&'static str),
// Borrow the underlying name as `&str`, uniformly over both variants.
99 fn as_slice(&self) -> &str {
101 StaticTestName(s) => s,
102 DynTestName(ref s) => s,
// `Display` for a test name simply forwards to the inner string.
106 impl fmt::Display for TestName {
107 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
108 fmt::Display::fmt(self.as_slice(), f)
112 #[derive(Clone, Copy, PartialEq, Eq)]
// NOTE(review): the `NamePadding` enum body and the enclosing impl header are
// elided here; only the derive and this helper are visible.
// Pad `self.name` with trailing spaces up to `column_count` columns so test
// names align in console output (used by `write_test_start`). `saturating_sub`
// keeps names longer than the column count from underflowing.
119 fn padded_name(&self, column_count: usize, align: NamePadding) -> String {
120 let mut name = String::from(self.name.as_slice());
121 let fill = column_count.saturating_sub(name.len());
122 let pad = repeat(" ").take(fill).collect::<String>();
133 /// Represents a benchmark function.
// Object-safe trait for dynamically-dispatched benchmarks; `Send` because the
// runner may execute it on a worker thread (see `run_test`).
134 pub trait TDynBenchFn: Send {
135 fn run(&self, harness: &mut Bencher);
138 // A function that runs a test. If the function returns successfully,
139 // the test succeeds; if the function panics then the test fails. We
140 // may need to come up with a more clever definition of test in order
141 // to support isolation of tests into threads.
// NOTE(review): the `pub enum TestFn {` header and `StaticTestFn` variant
// appear elided from this dump; the remaining variants are visible below.
144 StaticBenchFn(fn(&mut Bencher)),
145 StaticMetricFn(fn(&mut MetricMap)),
146 DynTestFn(Box<FnBox() + Send>),
147 DynMetricFn(Box<FnBox(&mut MetricMap) + Send>),
148 DynBenchFn(Box<TDynBenchFn + 'static>),
// Which padding style console output should use for this kind of test:
// plain tests get no padding, bench/metric entries are right-padded so the
// timing column lines up.
152 fn padding(&self) -> NamePadding {
154 StaticTestFn(..) => PadNone,
155 StaticBenchFn(..) => PadOnRight,
156 StaticMetricFn(..) => PadOnRight,
157 DynTestFn(..) => PadNone,
158 DynMetricFn(..) => PadOnRight,
159 DynBenchFn(..) => PadOnRight,
// Manual `Debug` impl: the function payloads are not `Debug`, so just print
// the variant name.
164 impl fmt::Debug for TestFn {
165 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
166 f.write_str(match *self {
167 StaticTestFn(..) => "StaticTestFn(..)",
168 StaticBenchFn(..) => "StaticBenchFn(..)",
169 StaticMetricFn(..) => "StaticMetricFn(..)",
170 DynTestFn(..) => "DynTestFn(..)",
171 DynMetricFn(..) => "DynMetricFn(..)",
172 DynBenchFn(..) => "DynBenchFn(..)",
177 /// Manager of the benchmarking runs.
179 /// This is fed into functions marked with `#[bench]` to allow for
180 /// set-up & tear-down before running a piece of code repeatedly via a
// NOTE(review): the `Bencher` struct body is elided from this dump; its
// fields (`iterations`, `dur`, `bytes`) are used by the `impl` further below.
182 #[derive(Copy, Clone)]
// Whether a test is expected to panic, and optionally a substring the panic
// message must contain (checked in `calc_result`).
189 #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
190 pub enum ShouldPanic {
193 YesWithMessage(&'static str),
196 // The definition of a single test. A test runner will run a list of
// NOTE(review): `TestDesc`'s `name` and `ignore` fields are elided here but
// referenced throughout the file.
198 #[derive(Clone, Debug, PartialEq, Eq, Hash)]
199 pub struct TestDesc {
202 pub should_panic: ShouldPanic,
// Paths relevant to a compiletest-style test invocation.
206 pub struct TestPaths {
207 pub file: PathBuf, // e.g., compile-test/foo/bar/baz.rs
208 pub base: PathBuf, // e.g., compile-test, auxiliary
209 pub relative_dir: PathBuf, // e.g., foo/bar
// A test description paired with the function that runs it.
213 pub struct TestDescAndFn {
// A single measured value plus its noise/uncertainty (see `insert_metric`).
218 #[derive(Clone, PartialEq, Debug, Copy)]
225 pub fn new(value: f64, noise: f64) -> Metric {
// Newtype over a name -> Metric map; BTreeMap keeps metrics sorted by name
// for deterministic formatting in `fmt_metrics`.
234 pub struct MetricMap(BTreeMap<String, Metric>);
// Manual `Clone`: destructure the newtype and clone the inner map.
236 impl Clone for MetricMap {
237 fn clone(&self) -> MetricMap {
238 let MetricMap(ref map) = *self;
239 MetricMap(map.clone())
243 // The default console test runner. It accepts the command line
244 // arguments and a vector of test_descs.
// Exits the process with code 101 when any test fails; panics on option
// parse errors or I/O errors from the console runner.
245 pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>) {
246 let opts = match parse_opts(args) {
248 Some(Err(msg)) => panic!("{:?}", msg),
251 match run_tests_console(&opts, tests) {
253 Ok(false) => std::process::exit(101),
254 Err(e) => panic!("io error when running tests: {:?}", e),
258 // A variant optimized for invocation with a static test vector.
259 // This will panic (intentionally) when fed any dynamic tests, because
260 // it is copying the static values out into a dynamic vector and cannot
261 // copy dynamic values. It is doing this because from this point on
262 // a Vec<TestDescAndFn> is used in order to effect ownership-transfer
263 // semantics into parallel test runners, which in turn requires a Vec<>
264 // rather than a &[].
265 pub fn test_main_static(tests: &[TestDescAndFn]) {
266 let args = env::args().collect::<Vec<_>>();
267 let owned_tests = tests.iter()
// NOTE(review): the `StaticTestFn(f)` match arm's header is elided from this
// dump; only its body remains visible below.
272 testfn: StaticTestFn(f),
273 desc: t.desc.clone(),
276 StaticBenchFn(f) => {
278 testfn: StaticBenchFn(f),
279 desc: t.desc.clone(),
282 _ => panic!("non-static tests passed to test::test_main_static"),
286 test_main(&args, owned_tests)
// Output coloring policy; `AutoColor`/`AlwaysColor`/`NeverColor` variants are
// elided here but matched on in `parse_opts` and `use_color`.
289 #[derive(Copy, Clone)]
290 pub enum ColorConfig {
// Parsed command-line options controlling a test run.
296 pub struct TestOpts {
297 pub filter: Option<String>,
298 pub run_ignored: bool,
300 pub bench_benchmarks: bool,
301 pub logfile: Option<PathBuf>,
303 pub color: ColorConfig,
// `None` means "pick a default" via RUST_TEST_THREADS / `num_cpus()`.
305 pub test_threads: Option<usize>,
// Default options, used by the unit tests at the bottom of this file.
310 fn new() -> TestOpts {
315 bench_benchmarks: false,
325 /// Result of parsing the options.
326 pub type OptRes = Result<TestOpts, String>;
// Declares every command-line flag/option the harness understands; keep in
// sync with the handling in `parse_opts` and the help text in `usage`.
328 #[cfg_attr(rustfmt, rustfmt_skip)]
329 fn optgroups() -> Vec<getopts::OptGroup> {
330 vec!(getopts::optflag("", "ignored", "Run ignored tests"),
331 getopts::optflag("", "test", "Run tests and not benchmarks"),
332 getopts::optflag("", "bench", "Run benchmarks instead of tests"),
333 getopts::optflag("h", "help", "Display this message (longer with --help)"),
334 getopts::optopt("", "logfile", "Write logs to the specified file instead \
336 getopts::optflag("", "nocapture", "don't capture stdout/stderr of each \
337 task, allow printing directly"),
338 getopts::optopt("", "test-threads", "Number of threads used for running tests \
339 in parallel", "n_threads"),
340 getopts::optflag("q", "quiet", "Display one character per test instead of one line"),
341 getopts::optopt("", "color", "Configure coloring of output:
342 auto = colorize if stdout is a tty and tests are run on serially (default);
343 always = always colorize output;
344 never = never colorize output;", "auto|always|never"))
// Print the long help text (shown for `-h`/`--help`); the option list itself
// comes from `getopts::usage` over `optgroups()`.
347 fn usage(binary: &str) {
348 let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
351 The FILTER string is tested against the name of all tests, and only those
352 tests whose names contain the filter are run.
354 By default, all tests are run in parallel. This can be altered with the
355 --test-threads flag or the RUST_TEST_THREADS environment variable when running
358 All tests have their standard output and standard error captured by default.
359 This can be overridden with the --nocapture flag or setting RUST_TEST_NOCAPTURE
360 environment variable to a value other than "0". Logging is not captured by default.
364 #[test] - Indicates a function is a test to be run. This function
366 #[bench] - Indicates a function is a benchmark to be run. This
367 function takes one argument (test::Bencher).
368 #[should_panic] - This function (also labeled with #[test]) will only pass if
369 the code causes a panic (an assertion failure or panic!)
370 A message may be provided, which the failure string must
371 contain: #[should_panic(expected = "foo")].
372 #[ignore] - When applied to a function which is already attributed as a
373 test, then the test runner will ignore these tests during
374 normal test runs. Running with --ignored will run these
376 usage = getopts::usage(&message, &optgroups()));
379 // Parses command line arguments into test options
// Returns `None` when no run should happen (e.g. `--help` was printed),
// `Some(Err(..))` on malformed options, `Some(Ok(opts))` otherwise.
380 pub fn parse_opts(args: &[String]) -> Option<OptRes> {
// Skip argv[0] (the binary name) before handing the rest to getopts.
381 let args_ = &args[1..];
382 let matches = match getopts::getopts(args_, &optgroups()) {
384 Err(f) => return Some(Err(f.to_string())),
387 if matches.opt_present("h") {
// The first free (non-flag) argument, if any, is the name filter.
392 let filter = if !matches.free.is_empty() {
393 Some(matches.free[0].clone())
398 let run_ignored = matches.opt_present("ignored");
399 let quiet = matches.opt_present("quiet");
401 let logfile = matches.opt_str("logfile");
402 let logfile = logfile.map(|s| PathBuf::from(&s));
404 let bench_benchmarks = matches.opt_present("bench");
405 let run_tests = !bench_benchmarks || matches.opt_present("test");
// The RUST_TEST_NOCAPTURE env var can force nocapture on even without the
// flag; any value other than "0" enables it.
407 let mut nocapture = matches.opt_present("nocapture");
409 nocapture = match env::var("RUST_TEST_NOCAPTURE") {
410 Ok(val) => &val != "0",
415 let test_threads = match matches.opt_str("test-threads") {
417 match n_str.parse::<usize>() {
420 return Some(Err(format!("argument for --test-threads must be a number > 0 \
427 let color = match matches.opt_str("color").as_ref().map(|s| &**s) {
428 Some("auto") | None => AutoColor,
429 Some("always") => AlwaysColor,
430 Some("never") => NeverColor,
433 return Some(Err(format!("argument for --color must be auto, always, or never (was \
439 let test_opts = TestOpts {
441 run_ignored: run_ignored,
442 run_tests: run_tests,
443 bench_benchmarks: bench_benchmarks,
445 nocapture: nocapture,
448 test_threads: test_threads,
// Summary statistics for one benchmark (populated by `bench::benchmark`).
454 #[derive(Clone, PartialEq)]
455 pub struct BenchSamples {
456 ns_iter_summ: stats::Summary,
// Outcome of running a single test; `TrOk`/`TrFailed`/`TrIgnored` variants
// are elided from this dump but used throughout.
460 #[derive(Clone, PartialEq)]
461 pub enum TestResult {
465 TrMetrics(MetricMap),
466 TrBench(BenchSamples),
// NOTE(review): unconditional unsafe Send impl — presumably sound because all
// payload types are Send, but the justification is not visible here; verify.
469 unsafe impl Send for TestResult {}
// Where console output goes: a color-capable terminal, or a raw writer
// (the `Raw(T)` variant is elided from this dump).
471 enum OutputLocation<T> {
472 Pretty(Box<term::StdoutTerminal>),
// Mutable state threaded through the console runner: counters, output sink,
// optional logfile, accumulated metrics and failures.
476 struct ConsoleTestState<T> {
477 log_out: Option<File>,
478 out: OutputLocation<T>,
487 failures: Vec<(TestDesc, Vec<u8>)>,
488 max_name_len: usize, // number of columns to fill when aligning names
491 impl<T: Write> ConsoleTestState<T> {
// Build console state: open the logfile if requested, and pick Pretty output
// when stdout is a real terminal, Raw otherwise.
492 pub fn new(opts: &TestOpts, _: Option<T>) -> io::Result<ConsoleTestState<io::Stdout>> {
493 let log_out = match opts.logfile {
494 Some(ref path) => Some(File::create(path)?),
497 let out = match term::stdout() {
498 None => Raw(io::stdout()),
499 Some(t) => Pretty(t),
502 Ok(ConsoleTestState {
505 use_color: use_color(opts),
512 metrics: MetricMap::new(),
513 failures: Vec::new(),
// Per-result writers: verbose word in color, or a single character in
// quiet mode (see `write_short_result`).
518 pub fn write_ok(&mut self) -> io::Result<()> {
519 self.write_short_result("ok", ".", term::color::GREEN)
522 pub fn write_failed(&mut self) -> io::Result<()> {
523 self.write_short_result("FAILED", "F", term::color::RED)
526 pub fn write_ignored(&mut self) -> io::Result<()> {
527 self.write_short_result("ignored", "i", term::color::YELLOW)
530 pub fn write_metric(&mut self) -> io::Result<()> {
531 self.write_pretty("metric", term::color::CYAN)
534 pub fn write_bench(&mut self) -> io::Result<()> {
535 self.write_pretty("bench", term::color::CYAN)
// Quiet mode prints the one-char form without a newline; verbose mode prints
// the word plus a newline.
538 pub fn write_short_result(&mut self, verbose: &str, quiet: &str, color: term::color::Color)
541 self.write_pretty(quiet, color)
543 self.write_pretty(verbose, color)?;
544 self.write_plain("\n")
// Write `word`, colored when the sink is a terminal and color is enabled.
548 pub fn write_pretty(&mut self, word: &str, color: term::color::Color) -> io::Result<()> {
550 Pretty(ref mut term) => {
554 term.write_all(word.as_bytes())?;
560 Raw(ref mut stdout) => {
561 stdout.write_all(word.as_bytes())?;
// Write `s` with no coloring, to whichever sink is active.
567 pub fn write_plain(&mut self, s: &str) -> io::Result<()> {
569 Pretty(ref mut term) => {
570 term.write_all(s.as_bytes())?;
573 Raw(ref mut stdout) => {
574 stdout.write_all(s.as_bytes())?;
// "running N test(s)" banner; pluralizes on len != 1.
580 pub fn write_run_start(&mut self, len: usize) -> io::Result<()> {
582 let noun = if len != 1 {
587 self.write_plain(&format!("\nrunning {} {}\n", len, noun))
// "test NAME ... " prefix; suppressed in quiet mode except for right-padded
// (bench/metric) entries.
590 pub fn write_test_start(&mut self, test: &TestDesc, align: NamePadding) -> io::Result<()> {
591 if self.quiet && align != PadOnRight {
594 let name = test.padded_name(self.max_name_len, align);
595 self.write_plain(&format!("test {} ... ", name))
599 pub fn write_result(&mut self, result: &TestResult) -> io::Result<()> {
601 TrOk => self.write_ok(),
602 TrFailed => self.write_failed(),
603 TrIgnored => self.write_ignored(),
604 TrMetrics(ref mm) => {
605 self.write_metric()?;
606 self.write_plain(&format!(": {}\n", mm.fmt_metrics()))
610 self.write_plain(&format!(": {}\n", fmt_bench_samples(bs)))
// Emitted when a test exceeds TEST_WARN_TIMEOUT_S (see `run_tests`).
615 pub fn write_timeout(&mut self, desc: &TestDesc) -> io::Result<()> {
616 self.write_plain(&format!("test {} has been running for over {} seconds\n",
618 TEST_WARN_TIMEOUT_S))
// Append one "<status> <name>" line to the logfile, if one was requested.
621 pub fn write_log(&mut self, test: &TestDesc, result: &TestResult) -> io::Result<()> {
625 let s = format!("{} {}\n",
627 TrOk => "ok".to_owned(),
628 TrFailed => "failed".to_owned(),
629 TrIgnored => "ignored".to_owned(),
630 TrMetrics(ref mm) => mm.fmt_metrics(),
631 TrBench(ref bs) => fmt_bench_samples(bs),
634 o.write_all(s.as_bytes())
// Print captured stdout of each failure, then the sorted list of failed
// test names (sorting is asserted by the unit test below).
639 pub fn write_failures(&mut self) -> io::Result<()> {
640 self.write_plain("\nfailures:\n")?;
641 let mut failures = Vec::new();
642 let mut fail_out = String::new();
643 for &(ref f, ref stdout) in &self.failures {
644 failures.push(f.name.to_string());
645 if !stdout.is_empty() {
646 fail_out.push_str(&format!("---- {} stdout ----\n\t", f.name));
647 let output = String::from_utf8_lossy(stdout);
648 fail_out.push_str(&output);
649 fail_out.push_str("\n");
652 if !fail_out.is_empty() {
653 self.write_plain("\n")?;
654 self.write_plain(&fail_out)?;
657 self.write_plain("\nfailures:\n")?;
659 for name in &failures {
660 self.write_plain(&format!(" {}\n", name))?;
// Final summary line; returns whether the whole run succeeded (no failures).
665 pub fn write_run_finish(&mut self) -> io::Result<bool> {
// Sanity check: every test must be accounted for in exactly one counter.
666 assert!(self.passed + self.failed + self.ignored + self.measured == self.total);
668 let success = self.failed == 0;
670 self.write_failures()?;
673 self.write_plain("\ntest result: ")?;
675 // There's no parallelism at this point so it's safe to use color
676 self.write_pretty("ok", term::color::GREEN)?;
678 self.write_pretty("FAILED", term::color::RED)?;
680 let s = format!(". {} passed; {} failed; {} ignored; {} measured\n\n",
685 self.write_plain(&s)?;
690 // Format a number with thousands separators
// Walks powers of ten from 10^9 down; the first emitted group is unpadded,
// subsequent groups are zero-padded to 3 digits. NOTE(review): the separator
// insertion and the `trailing` update are elided from this dump.
691 fn fmt_thousands_sep(mut n: usize, sep: char) -> String {
693 let mut output = String::new();
694 let mut trailing = false;
695 for &pow in &[9, 6, 3, 0] {
696 let base = 10_usize.pow(pow);
697 if pow == 0 || trailing || n / base != 0 {
699 output.write_fmt(format_args!("{}", n / base)).unwrap();
701 output.write_fmt(format_args!("{:03}", n / base)).unwrap();
// Render benchmark samples as "NNN ns/iter (+/- DDD)" with the deviation
// taken as max - min of the per-iteration summary; appends MB/s if bytes
// throughput was recorded.
714 pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
716 let mut output = String::new();
718 let median = bs.ns_iter_summ.median as usize;
719 let deviation = (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize;
721 output.write_fmt(format_args!("{:>11} ns/iter (+/- {})",
722 fmt_thousands_sep(median, ','),
723 fmt_thousands_sep(deviation, ',')))
726 output.write_fmt(format_args!(" = {} MB/s", bs.mb_s)).unwrap();
731 // A simple console test runner
// Drives `run_tests`, translating each TestEvent into console output and
// updating counters/metrics; returns Ok(true) if every test passed.
732 pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<bool> {
734 fn callback<T: Write>(event: &TestEvent, st: &mut ConsoleTestState<T>) -> io::Result<()> {
735 match (*event).clone() {
736 TeFiltered(ref filtered_tests) => st.write_run_start(filtered_tests.len()),
737 TeWait(ref test, padding) => st.write_test_start(test, padding),
738 TeTimeout(ref test) => st.write_timeout(test),
739 TeResult(test, result, stdout) => {
740 st.write_log(&test, &result)?;
741 st.write_result(&result)?;
743 TrOk => st.passed += 1,
744 TrIgnored => st.ignored += 1,
// Metric results are folded into the state's MetricMap, namespaced
// by the test's name.
746 let tname = test.name;
747 let MetricMap(mm) = mm;
750 .insert_metric(&format!("{}.{}", tname, k), v.value, v.noise);
755 st.metrics.insert_metric(test.name.as_slice(),
756 bs.ns_iter_summ.median,
757 bs.ns_iter_summ.max - bs.ns_iter_summ.min);
// Failures keep their captured stdout for `write_failures`.
762 st.failures.push((test, stdout));
770 let mut st = ConsoleTestState::new(opts, None::<io::Stdout>)?;
// Only right-padded entries (benches/metrics) count toward column width.
771 fn len_if_padded(t: &TestDescAndFn) -> usize {
772 match t.testfn.padding() {
774 PadOnRight => t.desc.name.as_slice().len(),
777 if let Some(t) = tests.iter().max_by_key(|t| len_if_padded(*t)) {
778 let n = t.desc.name.as_slice();
779 st.max_name_len = n.len();
781 run_tests(opts, tests, |x| callback(&x, &mut st))?;
782 return st.write_run_finish();
// Unit test: failures pushed out of order ("b" before "a") must appear
// sorted in `write_failures` output.
786 fn should_sort_failures_before_printing_them() {
787 let test_a = TestDesc {
788 name: StaticTestName("a"),
790 should_panic: ShouldPanic::No,
793 let test_b = TestDesc {
794 name: StaticTestName("b"),
796 should_panic: ShouldPanic::No,
799 let mut st = ConsoleTestState {
801 out: Raw(Vec::new()),
810 metrics: MetricMap::new(),
811 failures: vec![(test_b, Vec::new()), (test_a, Vec::new())],
814 st.write_failures().unwrap();
815 let s = match st.out {
816 Raw(ref m) => String::from_utf8_lossy(&m[..]),
817 Pretty(_) => unreachable!(),
820 let apos = s.find("a").unwrap();
821 let bpos = s.find("b").unwrap();
822 assert!(apos < bpos);
// Decide whether colored output should be used; AutoColor requires both an
// uncaptured run and a tty on stdout.
825 fn use_color(opts: &TestOpts) -> bool {
827 AutoColor => !opts.nocapture && stdout_isatty(),
// Unix tty detection via libc::isatty. NOTE(review): the platform #[cfg]
// attribute for this variant is elided from this dump.
834 fn stdout_isatty() -> bool {
835 unsafe { libc::isatty(libc::STDOUT_FILENO) != 0 }
// Windows variant: a handle is a console iff GetConsoleMode succeeds on it.
838 fn stdout_isatty() -> bool {
841 type HANDLE = *mut u8;
842 type LPDWORD = *mut u32;
843 const STD_OUTPUT_HANDLE: DWORD = -11i32 as DWORD;
845 fn GetStdHandle(which: DWORD) -> HANDLE;
846 fn GetConsoleMode(hConsoleHandle: HANDLE, lpMode: LPDWORD) -> BOOL;
849 let handle = GetStdHandle(STD_OUTPUT_HANDLE);
851 GetConsoleMode(handle, &mut out) != 0
// Events emitted by `run_tests` to its callback; the enum header and the
// `TeTimeout` variant are elided from this dump.
857 TeFiltered(Vec<TestDesc>),
858 TeWait(TestDesc, NamePadding),
859 TeResult(TestDesc, TestResult, Vec<u8>),
// Message a worker thread sends back on the channel when a test finishes:
// (description, outcome, captured stdout bytes).
863 pub type MonitorMsg = (TestDesc, TestResult, Vec<u8>);
// Core scheduler: runs tests up to `concurrency` at a time over an mpsc
// channel, reporting progress/timeouts via `callback`, then runs benches
// and metric fns serially at the end.
866 fn run_tests<F>(opts: &TestOpts, tests: Vec<TestDescAndFn>, mut callback: F) -> io::Result<()>
867 where F: FnMut(TestEvent) -> io::Result<()>
869 use std::collections::HashMap;
870 use std::sync::mpsc::RecvTimeoutError;
872 let mut filtered_tests = filter_tests(opts, tests);
873 if !opts.bench_benchmarks {
874 filtered_tests = convert_benchmarks_to_tests(filtered_tests);
877 let filtered_descs = filtered_tests.iter()
878 .map(|t| t.desc.clone())
881 callback(TeFiltered(filtered_descs))?;
// Plain tests run (possibly concurrently) first; benches/metrics are split
// off to run serially below.
883 let (filtered_tests, filtered_benchs_and_metrics): (Vec<_>, _) =
884 filtered_tests.into_iter().partition(|e| {
886 StaticTestFn(_) | DynTestFn(_) => true,
891 let concurrency = match opts.test_threads {
893 None => get_concurrency(),
896 let mut remaining = filtered_tests;
900 let (tx, rx) = channel::<MonitorMsg>();
// Maps each in-flight test to its warn deadline (now + TEST_WARN_TIMEOUT_S).
902 let mut running_tests: HashMap<TestDesc, Instant> = HashMap::new();
// Drain and return every test whose warn deadline has passed.
904 fn get_timed_out_tests(running_tests: &mut HashMap<TestDesc, Instant>) -> Vec<TestDesc> {
905 let now = Instant::now();
906 let timed_out = running_tests.iter()
907 .filter_map(|(desc, timeout)| if &now >= timeout { Some(desc.clone())} else { None })
909 for test in &timed_out {
910 running_tests.remove(test);
// How long we may block in recv_timeout before the earliest deadline fires.
915 fn calc_timeout(running_tests: &HashMap<TestDesc, Instant>) -> Option<Duration> {
916 running_tests.values().min().map(|next_timeout| {
917 let now = Instant::now();
918 if *next_timeout >= now {
925 while pending > 0 || !remaining.is_empty() {
926 while pending < concurrency && !remaining.is_empty() {
927 let test = remaining.pop().unwrap();
928 if concurrency == 1 {
929 // We are doing one test at a time so we can print the name
930 // of the test before we run it. Useful for debugging tests
931 // that hang forever.
932 callback(TeWait(test.desc.clone(), test.testfn.padding()))?;
934 let timeout = Instant::now() + Duration::from_secs(TEST_WARN_TIMEOUT_S);
935 running_tests.insert(test.desc.clone(), timeout);
936 run_test(opts, !opts.run_tests, test, tx.clone());
// Wait for a result, surfacing TeTimeout events for any test that has
// exceeded its warn deadline while we waited.
942 if let Some(timeout) = calc_timeout(&running_tests) {
943 res = rx.recv_timeout(timeout);
944 for test in get_timed_out_tests(&mut running_tests) {
945 callback(TeTimeout(test))?;
947 if res != Err(RecvTimeoutError::Timeout) {
951 res = rx.recv().map_err(|_| RecvTimeoutError::Disconnected);
956 let (desc, result, stdout) = res.unwrap();
957 running_tests.remove(&desc);
// With concurrency, the TeWait for this test wasn't printed when it
// started, so emit it (unpadded) just before its result.
959 if concurrency != 1 {
960 callback(TeWait(desc.clone(), PadNone))?;
962 callback(TeResult(desc, result, stdout))?;
966 if opts.bench_benchmarks {
967 // All benchmarks run at the end, in serial.
968 // (this includes metric fns)
969 for b in filtered_benchs_and_metrics {
970 callback(TeWait(b.desc.clone(), b.testfn.padding()))?;
971 run_test(opts, false, b, tx.clone());
972 let (test, result, stdout) = rx.recv().unwrap();
973 callback(TeResult(test, result, stdout))?;
// Number of tests to run in parallel: RUST_TEST_THREADS if set (must be a
// positive integer, else panic), otherwise the detected CPU count.
980 fn get_concurrency() -> usize {
981 return match env::var("RUST_TEST_THREADS") {
983 let opt_n: Option<usize> = s.parse().ok();
985 Some(n) if n > 0 => n,
987 panic!("RUST_TEST_THREADS is `{}`, should be a positive integer.",
992 Err(..) => num_cpus(),
// Windows: read dwNumberOfProcessors out of GetSystemInfo. NOTE(review): the
// #[cfg(windows)] attribute and SYSTEM_INFO struct header are elided here.
997 fn num_cpus() -> usize {
1000 wProcessorArchitecture: u16,
1003 lpMinimumApplicationAddress: *mut u8,
1004 lpMaximumApplicationAddress: *mut u8,
1005 dwActiveProcessorMask: *mut u8,
1006 dwNumberOfProcessors: u32,
1007 dwProcessorType: u32,
1008 dwAllocationGranularity: u32,
1009 wProcessorLevel: u16,
1010 wProcessorRevision: u16,
1013 fn GetSystemInfo(info: *mut SYSTEM_INFO) -> i32;
1016 let mut sysinfo = std::mem::zeroed();
1017 GetSystemInfo(&mut sysinfo);
1018 sysinfo.dwNumberOfProcessors as usize
// POSIX platforms with _SC_NPROCESSORS_ONLN support: one sysconf call.
1022 #[cfg(any(target_os = "linux",
1023 target_os = "macos",
1025 target_os = "android",
1026 target_os = "solaris",
1027 target_os = "emscripten"))]
1028 fn num_cpus() -> usize {
1029 unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as usize }
// BSDs: try sysconf first, fall back to the CTL_HW/HW_NCPU sysctl.
1032 #[cfg(any(target_os = "freebsd",
1033 target_os = "dragonfly",
1034 target_os = "bitrig",
1035 target_os = "netbsd"))]
1036 fn num_cpus() -> usize {
1039 let mut cpus: libc::c_uint = 0;
1040 let mut cpus_size = std::mem::size_of_val(&cpus);
1043 cpus = libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as libc::c_uint;
1046 let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
1048 libc::sysctl(mib.as_mut_ptr(),
1050 &mut cpus as *mut _ as *mut _,
1051 &mut cpus_size as *mut _ as *mut _,
// OpenBSD: sysctl only (no _SC_NPROCESSORS_ONLN path).
1062 #[cfg(target_os = "openbsd")]
1063 fn num_cpus() -> usize {
1066 let mut cpus: libc::c_uint = 0;
1067 let mut cpus_size = std::mem::size_of_val(&cpus);
1068 let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
1071 libc::sysctl(mib.as_mut_ptr(),
1073 &mut cpus as *mut _ as *mut _,
1074 &mut cpus_size as *mut _ as *mut _,
// Haiku variant; body elided from this dump.
1084 #[cfg(target_os = "haiku")]
1085 fn num_cpus() -> usize {
// Apply the name filter, handle --ignored (keep only ignored tests and
// un-ignore them), then sort tests alphabetically by name.
1091 pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
1092 let mut filtered = tests;
1094 // Remove tests that don't match the test filter
1095 filtered = match opts.filter {
1097 Some(ref filter) => {
1098 filtered.into_iter()
1099 .filter(|test| test.desc.name.as_slice().contains(&filter[..]))
1104 // Maybe pull out the ignored test and unignore them
1105 filtered = if !opts.run_ignored {
1108 fn filter(test: TestDescAndFn) -> Option<TestDescAndFn> {
1109 if test.desc.ignore {
1110 let TestDescAndFn {desc, testfn} = test;
1111 Some(TestDescAndFn {
1112 desc: TestDesc { ignore: false, ..desc },
1119 filtered.into_iter().filter_map(filter).collect()
1122 // Sort the tests alphabetically
1123 filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));
// Rewrap each benchmark as a DynTestFn that executes it exactly once via
// `bench::run_once`, so `--test` runs still exercise bench bodies.
1128 pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
1129 // convert benchmarks to tests, if we're not benchmarking them
1132 let testfn = match x.testfn {
1133 DynBenchFn(bench) => {
1134 DynTestFn(Box::new(move || bench::run_once(|b| bench.run(b))))
1136 StaticBenchFn(benchfn) => {
1137 DynTestFn(Box::new(move || bench::run_once(|b| benchfn(b))))
// Run a single test/bench/metric fn, sending its MonitorMsg on `monitor_ch`.
// Ignored tests short-circuit with TrIgnored; plain tests run on a fresh
// named thread with stdout/stderr captured unless nocapture is set.
1149 pub fn run_test(opts: &TestOpts,
1151 test: TestDescAndFn,
1152 monitor_ch: Sender<MonitorMsg>) {
1154 let TestDescAndFn {desc, testfn} = test;
1156 if force_ignore || desc.ignore {
1157 monitor_ch.send((desc, TrIgnored, Vec::new())).unwrap();
1161 fn run_test_inner(desc: TestDesc,
1162 monitor_ch: Sender<MonitorMsg>,
1164 testfn: Box<FnBox() + Send>) {
// Writer that appends into a shared buffer; installed as the thread's
// print/panic sink so test output can be captured.
1165 struct Sink(Arc<Mutex<Vec<u8>>>);
1166 impl Write for Sink {
1167 fn write(&mut self, data: &[u8]) -> io::Result<usize> {
1168 Write::write(&mut *self.0.lock().unwrap(), data)
1170 fn flush(&mut self) -> io::Result<()> {
1175 thread::spawn(move || {
1176 let data = Arc::new(Mutex::new(Vec::new()));
1177 let data2 = data.clone();
// Name the worker thread after the test so panics identify it.
1178 let cfg = thread::Builder::new().name(match desc.name {
1179 DynTestName(ref name) => name.clone(),
1180 StaticTestName(name) => name.to_owned(),
1183 let result_guard = cfg.spawn(move || {
1185 io::set_print(box Sink(data2.clone()));
1186 io::set_panic(box Sink(data2));
// join() yields Err on panic; calc_result maps that against should_panic.
1191 let test_result = calc_result(&desc, result_guard.join());
1192 let stdout = data.lock().unwrap().to_vec();
1193 monitor_ch.send((desc.clone(), test_result, stdout)).unwrap();
// Benches and metric fns run inline (no capture thread) and report
// TrBench / TrMetrics directly.
1198 DynBenchFn(bencher) => {
1199 let bs = ::bench::benchmark(|harness| bencher.run(harness));
1200 monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
1203 StaticBenchFn(benchfn) => {
1204 let bs = ::bench::benchmark(|harness| (benchfn.clone())(harness));
1205 monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
1209 let mut mm = MetricMap::new();
1210 f.call_box((&mut mm,));
1211 monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
1214 StaticMetricFn(f) => {
1215 let mut mm = MetricMap::new();
1217 monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
1220 DynTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture, f),
1221 StaticTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture, Box::new(f)),
// Map the joined thread result against the test's should_panic expectation:
// a clean return passes ShouldPanic::No; any panic passes ShouldPanic::Yes;
// YesWithMessage additionally requires the panic payload (String or &str)
// to contain the expected substring.
1225 fn calc_result(desc: &TestDesc, task_result: Result<(), Box<Any + Send>>) -> TestResult {
1226 match (&desc.should_panic, task_result) {
1227 (&ShouldPanic::No, Ok(())) |
1228 (&ShouldPanic::Yes, Err(_)) => TrOk,
1229 (&ShouldPanic::YesWithMessage(msg), Err(ref err))
1230 if err.downcast_ref::<String>()
1232 .or_else(|| err.downcast_ref::<&'static str>().map(|e| *e))
1233 .map(|e| e.contains(msg))
1234 .unwrap_or(false) => TrOk,
// NOTE(review): the `impl MetricMap {` header is elided from this dump.
// Create an empty metric map.
1240 pub fn new() -> MetricMap {
1241 MetricMap(BTreeMap::new())
1244 /// Insert a named `value` (+/- `noise`) metric into the map. The value
1245 /// must be non-negative. The `noise` indicates the uncertainty of the
1246 /// metric, which doubles as the "noise range" of acceptable
1247 /// pairwise-regressions on this named value, when comparing from one
1248 /// metric to the next using `compare_to_old`.
1250 /// If `noise` is positive, then it means this metric is of a value
1251 /// you want to see grow smaller, so a change larger than `noise` in the
1252 /// positive direction represents a regression.
1254 /// If `noise` is negative, then it means this metric is of a value
1255 /// you want to see grow larger, so a change larger than `noise` in the
1256 /// negative direction represents a regression.
1257 pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
1262 let MetricMap(ref mut map) = *self;
1263 map.insert(name.to_owned(), m);
// Render all metrics as "name: value (+/- noise)" entries; BTreeMap order
// makes the output deterministic and name-sorted.
1266 pub fn fmt_metrics(&self) -> String {
1267 let MetricMap(ref mm) = *self;
1268 let v: Vec<String> = mm.iter()
1269 .map(|(k, v)| format!("{}: {} (+/- {})", *k, v.value, v.noise))
1278 /// A function that is opaque to the optimizer, to allow benchmarks to
1279 /// pretend to use outputs to assist in avoiding dead-code
1282 /// This function is a no-op, and does not even read from `dummy`.
1283 #[cfg(not(any(all(target_os = "nacl", target_arch = "le32"),
1284 target_arch = "asmjs")))]
1285 pub fn black_box<T>(dummy: T) -> T {
1286 // we need to "use" the argument in some way LLVM can't
// SAFETY-relevant: the empty asm block only takes a pointer to `dummy` as an
// input; it performs no operation but forces LLVM to treat the value as used.
1288 unsafe { asm!("" : : "r"(&dummy)) }
// Fallback for targets without inline asm (nacl/le32, asmjs); body elided
// from this dump.
1291 #[cfg(any(all(target_os = "nacl", target_arch = "le32"),
1292 target_arch = "asmjs"))]
1294 pub fn black_box<T>(dummy: T) -> T {
// NOTE(review): the `impl Bencher {` header is elided from this dump.
1300 /// Callback for benchmark functions to run in their body.
// Times `self.iterations` calls of `inner` and records the elapsed wall time
// into `self.dur`.
1301 pub fn iter<T, F>(&mut self, mut inner: F)
1302 where F: FnMut() -> T
1304 let start = Instant::now();
1305 let k = self.iterations;
1309 self.dur = start.elapsed();
// Total elapsed nanoseconds of the last `iter` run.
1312 pub fn ns_elapsed(&mut self) -> u64 {
1313 self.dur.as_secs() * 1_000_000_000 + (self.dur.subsec_nanos() as u64)
// Average ns per iteration; `cmp::max` guards against division by zero.
1316 pub fn ns_per_iter(&mut self) -> u64 {
1317 if self.iterations == 0 {
1320 self.ns_elapsed() / cmp::max(self.iterations, 1)
// Run the benchmark body exactly once with `iterations = n`.
1324 pub fn bench_n<F>(&mut self, n: u64, f: F)
1325 where F: FnOnce(&mut Bencher)
1327 self.iterations = n;
1331 // This is a more statistics-driven benchmark algorithm
// Adaptive loop: grow the iteration count until timing stabilizes (low
// median absolute deviation and agreement between the n and 5n samples),
// bounded by a 3s total budget and checked multiplication overflow.
1332 pub fn auto_bench<F>(&mut self, mut f: F) -> stats::Summary
1333 where F: FnMut(&mut Bencher)
1335 // Initial bench run to get ballpark figure.
1337 self.bench_n(n, |x| f(x));
1339 // Try to estimate iter count for 1ms falling back to 1m
1340 // iterations if first run took < 1ns.
1341 if self.ns_per_iter() == 0 {
1344 n = 1_000_000 / cmp::max(self.ns_per_iter(), 1);
1346 // if the first run took more than 1ms we don't want to just
1347 // be left doing 0 iterations on every loop. The unfortunate
1348 // side effect of not being able to do as many runs is
1349 // automatically handled by the statistical analysis below
1350 // (i.e. larger error bars).
1355 let mut total_run = Duration::new(0, 0);
1356 let samples: &mut [f64] = &mut [0.0_f64; 50];
1358 let loop_start = Instant::now();
// 50 samples at n iterations each, winsorized at 5% to clip outliers.
1360 for p in &mut *samples {
1361 self.bench_n(n, |x| f(x));
1362 *p = self.ns_per_iter() as f64;
1365 stats::winsorize(samples, 5.0);
1366 let summ = stats::Summary::new(samples);
// Second pass at 5n iterations, used as a convergence cross-check.
1368 for p in &mut *samples {
1369 self.bench_n(5 * n, |x| f(x));
1370 *p = self.ns_per_iter() as f64;
1373 stats::winsorize(samples, 5.0);
1374 let summ5 = stats::Summary::new(samples);
1375 let loop_run = loop_start.elapsed();
1377 // If we've run for 100ms and seem to have converged to a
1379 if loop_run > Duration::from_millis(100) && summ.median_abs_dev_pct < 1.0 &&
1380 summ.median - summ5.median < summ5.median_abs_dev {
1384 total_run = total_run + loop_run;
1385 // Longest we ever run for is 3s.
1386 if total_run > Duration::from_secs(3) {
1390 // If we overflow here just return the results so far. We check a
1391 // multiplier of 10 because we're about to multiply by 2 and the
1392 // next iteration of the loop will also multiply by 5 (to calculate
1393 // the summ5 result)
1394 n = match n.checked_mul(10) {
1396 None => return summ5,
1404 use std::time::Duration;
1405 use super::{Bencher, BenchSamples};
// `bench::benchmark`: run closure `f` through the adaptive benchmark loop and
// package the result as `BenchSamples` (ns/iter summary plus MB/s throughput).
// NOTE(review): gapped extract — the `Bencher` initializer's remaining fields,
// the `BenchSamples` constructor line, and the closing braces are missing.
1407 pub fn benchmark<F>(f: F) -> BenchSamples
1408 where F: FnMut(&mut Bencher)
1410 let mut bs = Bencher {
1412 dur: Duration::new(0, 0),
// Adaptive benchmarking; returns a statistical summary of ns per iteration.
1416 let ns_iter_summ = bs.auto_bench(f);
// Clamp to at least 1 ns so the throughput division below cannot divide by 0.
1418 let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
// bytes/iter scaled to MB/s: bytes * 1000 / ns == bytes * 1e9 / (ns * 1e6).
1419 let mb_s = bs.bytes * 1000 / ns_iter;
1422 ns_iter_summ: ns_iter_summ,
1423 mb_s: mb_s as usize,
// `bench::run_once`: drive `f` exactly once with a fresh `Bencher` — takes
// `FnOnce` (vs `benchmark`'s `FnMut`) since no repetition is needed.
// NOTE(review): body truncated in this extract; remaining initializer fields
// and the actual single invocation are among the missing lines.
1427 pub fn run_once<F>(f: F)
1428 where F: FnOnce(&mut Bencher)
1430 let mut bs = Bencher {
1432 dur: Duration::new(0, 0),
1441 use test::{TrFailed, TrIgnored, TrOk, filter_tests, parse_opts, TestDesc, TestDescAndFn,
1442 TestOpts, run_test, MetricMap, StaticTestName, DynTestName, DynTestFn, ShouldPanic};
1443 use std::sync::mpsc::channel;
// Test: a test marked ignored must not be executed — its result is anything
// but `TrOk`. NOTE(review): gapped extract — the closure `f`, the
// `ignore: true` field, and closing braces are among the missing lines.
1446 pub fn do_not_run_ignored_tests() {
1450 let desc = TestDescAndFn {
1452 name: StaticTestName("whatever"),
1454 should_panic: ShouldPanic::No,
1456 testfn: DynTestFn(Box::new(move || f())),
1458 let (tx, rx) = channel();
// `false` presumably disables force-running ignored tests — confirm against
// `run_test`'s signature (not visible in this extract).
1459 run_test(&TestOpts::new(), false, desc, tx);
1460 let (_, res, _) = rx.recv().unwrap();
1461 assert!(res != TrOk);
// Test: an ignored test reports exactly `TrIgnored` (not merely non-Ok).
// NOTE(review): gapped extract — closure, `ignore: true` field, and braces
// are among the missing lines.
1465 pub fn ignored_tests_result_in_ignored() {
1467 let desc = TestDescAndFn {
1469 name: StaticTestName("whatever"),
1471 should_panic: ShouldPanic::No,
1473 testfn: DynTestFn(Box::new(move || f())),
1475 let (tx, rx) = channel();
1476 run_test(&TestOpts::new(), false, desc, tx);
1477 let (_, res, _) = rx.recv().unwrap();
1478 assert!(res == TrIgnored);
// Test: a panicking test marked `ShouldPanic::Yes` counts as success (`TrOk`).
// NOTE(review): gapped extract — the panicking closure body `f` is among the
// missing lines.
1482 fn test_should_panic() {
1486 let desc = TestDescAndFn {
1488 name: StaticTestName("whatever"),
1490 should_panic: ShouldPanic::Yes,
1492 testfn: DynTestFn(Box::new(move || f())),
1494 let (tx, rx) = channel();
1495 run_test(&TestOpts::new(), false, desc, tx);
1496 let (_, res, _) = rx.recv().unwrap();
1497 assert!(res == TrOk);
// Test: `ShouldPanic::YesWithMessage("error message")` succeeds when the
// closure panics with "an error message" — apparently a substring match, not
// equality; confirm against `run_test`'s panic-message handling.
1501 fn test_should_panic_good_message() {
1503 panic!("an error message");
1505 let desc = TestDescAndFn {
1507 name: StaticTestName("whatever"),
1509 should_panic: ShouldPanic::YesWithMessage("error message"),
1511 testfn: DynTestFn(Box::new(move || f())),
1513 let (tx, rx) = channel();
1514 run_test(&TestOpts::new(), false, desc, tx);
1515 let (_, res, _) = rx.recv().unwrap();
1516 assert!(res == TrOk);
// Test: the expected panic message "foobar" does not match the actual panic
// "an error message", so the test is reported `TrFailed`.
1520 fn test_should_panic_bad_message() {
1522 panic!("an error message");
1524 let desc = TestDescAndFn {
1526 name: StaticTestName("whatever"),
1528 should_panic: ShouldPanic::YesWithMessage("foobar"),
1530 testfn: DynTestFn(Box::new(move || f())),
1532 let (tx, rx) = channel();
1533 run_test(&TestOpts::new(), false, desc, tx);
1534 let (_, res, _) = rx.recv().unwrap();
1535 assert!(res == TrFailed);
// Test: `ShouldPanic::Yes` with a closure that returns normally (the
// non-panicking closure `f` is among this extract's missing lines) must be
// reported `TrFailed`.
1539 fn test_should_panic_but_succeeds() {
1541 let desc = TestDescAndFn {
1543 name: StaticTestName("whatever"),
1545 should_panic: ShouldPanic::Yes,
1547 testfn: DynTestFn(Box::new(move || f())),
1549 let (tx, rx) = channel();
1550 run_test(&TestOpts::new(), false, desc, tx);
1551 let (_, res, _) = rx.recv().unwrap();
1552 assert!(res == TrFailed);
// Test: `--ignored` on the command line sets `opts.run_ignored`.
// NOTE(review): gapped extract — the successful match arm (1559) and closing
// braces are missing.
1556 fn parse_ignored_flag() {
// argv[0] is the program name; "filter" is a positional test-name filter.
1557 let args = vec!["progname".to_string(), "filter".to_string(), "--ignored".to_string()];
1558 let opts = match parse_opts(&args) {
1560 _ => panic!("Malformed arg in parse_ignored_flag"),
1562 assert!((opts.run_ignored));
// Test: with `run_ignored` set, `filter_tests` keeps only previously-ignored
// tests and clears their `ignore` flags.
// NOTE(review): gapped extract — the `ignore:` fields distinguishing test "1"
// from test "2" are among the missing lines; presumably "1" is ignored and
// "2" is not, which is why only "1" survives below.
1566 pub fn filter_for_ignored_option() {
1567 // When we run ignored tests the test filter should filter out all the
1568 // unignored tests and flip the ignore flag on the rest to false
1570 let mut opts = TestOpts::new();
1571 opts.run_tests = true;
1572 opts.run_ignored = true;
1574 let tests = vec![TestDescAndFn {
1576 name: StaticTestName("1"),
1578 should_panic: ShouldPanic::No,
1580 testfn: DynTestFn(Box::new(move || {})),
1584 name: StaticTestName("2"),
1586 should_panic: ShouldPanic::No,
1588 testfn: DynTestFn(Box::new(move || {})),
1590 let filtered = filter_tests(&opts, tests);
// Exactly the ignored test remains, renamed to runnable (ignore == false).
1592 assert_eq!(filtered.len(), 1);
1593 assert_eq!(filtered[0].desc.name.to_string(), "1");
1594 assert!(!filtered[0].desc.ignore);
// Test: `filter_tests` returns the tests sorted by name.
// NOTE(review): gapped extract — the shared `testfn` definition, remaining
// `TestDesc` fields, the `tests.push(test)` line, and braces are missing.
1598 pub fn sort_tests() {
1599 let mut opts = TestOpts::new();
1600 opts.run_tests = true;
// Deliberately unsorted input names.
1602 let names = vec!["sha1::test".to_string(),
1603 "isize::test_to_str".to_string(),
1604 "isize::test_pow".to_string(),
1605 "test::do_not_run_ignored_tests".to_string(),
1606 "test::ignored_tests_result_in_ignored".to_string(),
1607 "test::first_free_arg_should_be_a_filter".to_string(),
1608 "test::parse_ignored_flag".to_string(),
1609 "test::filter_for_ignored_option".to_string(),
1610 "test::sort_tests".to_string()];
// Build one dynamic test per name (loop body partially elided in extract).
1613 let mut tests = Vec::new();
1614 for name in &names {
1615 let test = TestDescAndFn {
1617 name: DynTestName((*name).clone()),
1619 should_panic: ShouldPanic::No,
1621 testfn: DynTestFn(Box::new(testfn)),
1627 let filtered = filter_tests(&opts, tests);
// Same names in lexicographic order.
1629 let expected = vec!["isize::test_pow".to_string(),
1630 "isize::test_to_str".to_string(),
1631 "sha1::test".to_string(),
1632 "test::do_not_run_ignored_tests".to_string(),
1633 "test::filter_for_ignored_option".to_string(),
1634 "test::first_free_arg_should_be_a_filter".to_string(),
1635 "test::ignored_tests_result_in_ignored".to_string(),
1636 "test::parse_ignored_flag".to_string(),
1637 "test::sort_tests".to_string()];
// Pairwise comparison also implicitly checks lengths via zip truncation only
// if both sides match — the element-wise asserts carry the check here.
1639 for (a, b) in expected.iter().zip(filtered) {
1640 assert!(*a == b.desc.name.to_string());
// Test fixture for comparing two `MetricMap`s; the assertions on the
// comparison come after this extract's last visible line (definition is
// truncated here).
// insert_metric(name, value, noise): the sign of `noise` appears to encode
// desired direction (positive = lower-is-better, negative = higher-is-better,
// judging by the key names) — TODO confirm against MetricMap::insert_metric.
1645 pub fn test_metricmap_compare() {
1646 let mut m1 = MetricMap::new();
1647 let mut m2 = MetricMap::new();
// Within-noise change: 1000 -> 1100 with noise band 200.
1648 m1.insert_metric("in-both-noise", 1000.0, 200.0);
1649 m2.insert_metric("in-both-noise", 1100.0, 200.0);
// Metrics present in only one of the two maps.
1651 m1.insert_metric("in-first-noise", 1000.0, 2.0);
1652 m2.insert_metric("in-second-noise", 1000.0, 2.0);
// Lower-is-better metric that got worse (1000 -> 2000).
1654 m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
1655 m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);
// Lower-is-better metric that improved (2000 -> 1000).
1657 m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
1658 m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);
// Higher-is-better metric that got worse (2000 -> 1000).
1660 m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
1661 m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);
// Higher-is-better metric that improved (1000 -> 2000).
1663 m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
1664 m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);