1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 //! Support code for rustc's built in unit-test and micro-benchmarking
14 //! Almost all user code will only be interested in `Bencher` and
15 //! `black_box`. All other interactions (such as writing tests and
16 //! benchmarks themselves) should be done via the `#[test]` and
17 //! `#[bench]` attributes.
19 //! See the [Testing Chapter](../book/testing.html) of the book for more details.
21 // Currently, not much of this is meant for users. It is intended to
22 // support the simplest interface possible for representing and
23 // running tests while providing a base that other test frameworks may
26 #![crate_name = "test"]
27 #![unstable(feature = "test", issue = "27812")]
28 #![crate_type = "rlib"]
29 #![crate_type = "dylib"]
30 #![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
31 html_favicon_url = "https://doc.rust-lang.org/favicon.ico",
32 html_root_url = "https://doc.rust-lang.org/nightly/",
33 test(attr(deny(warnings))))]
34 #![cfg_attr(not(stage0), deny(warnings))]
37 #![feature(box_syntax)]
40 #![feature(rustc_private)]
41 #![feature(set_stdio)]
42 #![feature(staged_api)]
43 #![feature(question_mark)]
44 #![feature(panic_unwind)]
49 extern crate panic_unwind;
51 pub use self::TestFn::*;
52 pub use self::ColorConfig::*;
53 pub use self::TestResult::*;
54 pub use self::TestName::*;
55 use self::TestEvent::*;
56 use self::NamePadding::*;
57 use self::OutputLocation::*;
59 use std::boxed::FnBox;
63 use std::collections::BTreeMap;
67 use std::io::prelude::*;
69 use std::iter::repeat;
70 use std::path::PathBuf;
71 use std::sync::mpsc::{channel, Sender};
72 use std::sync::{Arc, Mutex};
74 use std::time::{Instant, Duration};
// Upper bound (in seconds) a test may run before the console runner prints
// a "has been running for over ... seconds" warning (see write_timeout).
const TEST_WARN_TIMEOUT_S: u64 = 60;
78 // to be used by rustc to compile tests in libtest
80 pub use {Bencher, TestName, TestResult, TestDesc, TestDescAndFn, TestOpts, TrFailed,
81 TrIgnored, TrOk, Metric, MetricMap, StaticTestFn, StaticTestName, DynTestName,
82 DynTestFn, run_test, test_main, test_main_static, filter_tests, parse_opts,
83 StaticBenchFn, ShouldPanic};
// The name of a test. By convention this follows the rules for rust
// paths; i.e. it should be a series of identifiers separated by double
// colons. This way if some test runner wants to arrange the tests
// hierarchically it may.
#[derive(Clone, PartialEq, Eq, Hash, Debug)]
    // Test name known at compile time (the common case for #[test] fns).
    StaticTestName(&'static str),
    // Borrow the name as a plain `&str`, whichever variant holds it.
    fn as_slice(&self) -> &str {
            StaticTestName(s) => s,
            DynTestName(ref s) => s,
// A TestName displays exactly as its underlying string.
impl fmt::Display for TestName {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Display::fmt(self.as_slice(), f)
#[derive(Clone, Copy, PartialEq, Eq)]
    // Pad this test's name with trailing spaces out to `column_count`
    // columns so per-test result lines align in console output.
    fn padded_name(&self, column_count: usize, align: NamePadding) -> String {
        let mut name = String::from(self.name.as_slice());
        // saturating_sub: a name longer than the column count simply gets
        // no padding instead of underflowing.
        let fill = column_count.saturating_sub(name.len());
        let pad = repeat(" ").take(fill).collect::<String>();
/// Represents a benchmark function.
pub trait TDynBenchFn: Send {
    fn run(&self, harness: &mut Bencher);
// A function that runs a test. If the function returns successfully,
// the test succeeds; if the function panics then the test fails. We
// may need to come up with a more clever definition of test in order
// to support isolation of tests into threads.
    // `Static*` variants hold plain `fn` pointers; `Dyn*` variants hold
    // boxed closures / trait objects constructed at runtime.
    StaticBenchFn(fn(&mut Bencher)),
    StaticMetricFn(fn(&mut MetricMap)),
    DynTestFn(Box<FnBox() + Send>),
    DynMetricFn(Box<FnBox(&mut MetricMap) + Send>),
    DynBenchFn(Box<TDynBenchFn + 'static>),
    // Name-padding style used when printing this kind of test: plain tests
    // get no padding, benchmarks and metric fns are padded on the right.
    fn padding(&self) -> NamePadding {
            StaticTestFn(..) => PadNone,
            StaticBenchFn(..) => PadOnRight,
            StaticMetricFn(..) => PadOnRight,
            DynTestFn(..) => PadNone,
            DynMetricFn(..) => PadOnRight,
            DynBenchFn(..) => PadOnRight,
// Debug output names only the variant; the contained functions/closures
// are not themselves Debug.
impl fmt::Debug for TestFn {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str(match *self {
            StaticTestFn(..) => "StaticTestFn(..)",
            StaticBenchFn(..) => "StaticBenchFn(..)",
            StaticMetricFn(..) => "StaticMetricFn(..)",
            DynTestFn(..) => "DynTestFn(..)",
            DynMetricFn(..) => "DynMetricFn(..)",
            DynBenchFn(..) => "DynBenchFn(..)",
/// Manager of the benchmarking runs.
/// This is fed into functions marked with `#[bench]` to allow for
/// set-up & tear-down before running a piece of code repeatedly via a
#[derive(Copy, Clone)]
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum ShouldPanic {
    // The expected panic payload must contain this substring to count as
    // a pass (see calc_result).
    YesWithMessage(&'static str),
// The definition of a single test. A test runner will run a list of
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct TestDesc {
    pub should_panic: ShouldPanic,
// Filesystem locations associated with one test input (examples in the
// field comments suggest compiletest-style layouts).
pub struct TestPaths {
    pub file: PathBuf, // e.g., compile-test/foo/bar/baz.rs
    pub base: PathBuf, // e.g., compile-test, auxiliary
    pub relative_dir: PathBuf, // e.g., foo/bar
pub struct TestDescAndFn {
#[derive(Clone, PartialEq, Debug, Copy)]
    pub fn new(value: f64, noise: f64) -> Metric {
// Named metrics, keyed by name in sorted (BTree) order.
pub struct MetricMap(BTreeMap<String, Metric>);
// Manual Clone impl: clones the underlying map.
impl Clone for MetricMap {
    fn clone(&self) -> MetricMap {
        let MetricMap(ref map) = *self;
        MetricMap(map.clone())
// The default console test runner. It accepts the command line
// arguments and a vector of test_descs.
pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>) {
    let opts = match parse_opts(args) {
        Some(Err(msg)) => panic!("{:?}", msg),
    match run_tests_console(&opts, tests) {
        // Any test failure exits the whole process with code 101.
        Ok(false) => std::process::exit(101),
        Err(e) => panic!("io error when running tests: {:?}", e),
// A variant optimized for invocation with a static test vector.
// This will panic (intentionally) when fed any dynamic tests, because
// it is copying the static values out into a dynamic vector and cannot
// copy dynamic values. It is doing this because from this point on
// a Vec<TestDescAndFn> is used in order to effect ownership-transfer
// semantics into parallel test runners, which in turn requires a Vec<>
// rather than a &[].
pub fn test_main_static(tests: &[TestDescAndFn]) {
    let args = env::args().collect::<Vec<_>>();
    let owned_tests = tests.iter()
                testfn: StaticTestFn(f),
                desc: t.desc.clone(),
            StaticBenchFn(f) => {
                testfn: StaticBenchFn(f),
                desc: t.desc.clone(),
            // Dyn* variants cannot be copied out of a shared slice.
            _ => panic!("non-static tests passed to test::test_main_static"),
    test_main(&args, owned_tests)
// Console color behavior, selected by the --color flag.
#[derive(Copy, Clone)]
pub enum ColorConfig {
// Options parsed from the command line (see parse_opts / optgroups).
pub struct TestOpts {
    pub filter: Option<String>,
    pub run_ignored: bool,
    pub bench_benchmarks: bool,
    pub logfile: Option<PathBuf>,
    pub color: ColorConfig,
    // None means "use the default concurrency" (see get_concurrency).
    pub test_threads: Option<usize>,
    fn new() -> TestOpts {
            bench_benchmarks: false,
/// Result of parsing the options.
pub type OptRes = Result<TestOpts, String>;
// getopts option table; one entry per supported flag. (No comments
// inside the vec! — several help strings span physical lines.)
#[cfg_attr(rustfmt, rustfmt_skip)]
fn optgroups() -> Vec<getopts::OptGroup> {
    vec!(getopts::optflag("", "ignored", "Run ignored tests"),
         getopts::optflag("", "test", "Run tests and not benchmarks"),
         getopts::optflag("", "bench", "Run benchmarks instead of tests"),
         getopts::optflag("h", "help", "Display this message (longer with --help)"),
         getopts::optopt("", "logfile", "Write logs to the specified file instead \
         getopts::optflag("", "nocapture", "don't capture stdout/stderr of each \
                                            task, allow printing directly"),
         getopts::optopt("", "test-threads", "Number of threads used for running tests \
                                              in parallel", "n_threads"),
         getopts::optflag("q", "quiet", "Display one character per test instead of one line"),
         getopts::optopt("", "color", "Configure coloring of output:
auto = colorize if stdout is a tty and tests are run on serially (default);
always = always colorize output;
never = never colorize output;", "auto|always|never"))
// Print the usage/help text. NOTE(review): most of the body below is the
// interior of a (raw) string literal — it is emitted verbatim at runtime,
// so no comments may be added inside it.
fn usage(binary: &str) {
    let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
The FILTER string is tested against the name of all tests, and only those
tests whose names contain the filter are run.
By default, all tests are run in parallel. This can be altered with the
--test-threads flag or the RUST_TEST_THREADS environment variable when running
All tests have their standard output and standard error captured by default.
This can be overridden with the --nocapture flag or setting RUST_TEST_NOCAPTURE
environment variable to a value other than "0". Logging is not captured by default.
#[test] - Indicates a function is a test to be run. This function
#[bench] - Indicates a function is a benchmark to be run. This
function takes one argument (test::Bencher).
#[should_panic] - This function (also labeled with #[test]) will only pass if
the code causes a panic (an assertion failure or panic!)
A message may be provided, which the failure string must
contain: #[should_panic(expected = "foo")].
#[ignore] - When applied to a function which is already attributed as a
test, then the test runner will ignore these tests during
normal test runs. Running with --ignored will run these
usage = getopts::usage(&message, &optgroups()));
// Parses command line arguments into test options
pub fn parse_opts(args: &[String]) -> Option<OptRes> {
    // args[0] is the binary name; getopts only sees the rest.
    let args_ = &args[1..];
    let matches = match getopts::getopts(args_, &optgroups()) {
        Err(f) => return Some(Err(f.to_string())),
    if matches.opt_present("h") {
    // The first free (non-flag) argument is the name filter.
    let filter = if !matches.free.is_empty() {
        Some(matches.free[0].clone())
    let run_ignored = matches.opt_present("ignored");
    let quiet = matches.opt_present("quiet");
    let logfile = matches.opt_str("logfile");
    let logfile = logfile.map(|s| PathBuf::from(&s));
    let bench_benchmarks = matches.opt_present("bench");
    let run_tests = !bench_benchmarks || matches.opt_present("test");
    let mut nocapture = matches.opt_present("nocapture");
    // The environment variable can also enable nocapture: any value
    // other than "0" counts as set.
    nocapture = match env::var("RUST_TEST_NOCAPTURE") {
        Ok(val) => &val != "0",
    // --test-threads must parse as a positive integer.
    let test_threads = match matches.opt_str("test-threads") {
            match n_str.parse::<usize>() {
                return Some(Err(format!("argument for --test-threads must be a number > 0 \
    let color = match matches.opt_str("color").as_ref().map(|s| &**s) {
        Some("auto") | None => AutoColor,
        Some("always") => AlwaysColor,
        Some("never") => NeverColor,
        return Some(Err(format!("argument for --color must be auto, always, or never (was \
    let test_opts = TestOpts {
        run_ignored: run_ignored,
        run_tests: run_tests,
        bench_benchmarks: bench_benchmarks,
        nocapture: nocapture,
        test_threads: test_threads,
// Summary statistics for one benchmark's timing samples.
#[derive(Clone, PartialEq)]
pub struct BenchSamples {
    ns_iter_summ: stats::Summary,
// Outcome of running a single test/bench/metric fn.
#[derive(Clone, PartialEq)]
pub enum TestResult {
    TrMetrics(MetricMap),
    TrBench(BenchSamples),
// NOTE(review): manual Send assertion so results can cross the monitor
// channel — confirm every variant's payload really is thread-safe.
unsafe impl Send for TestResult {}
// Where console output goes: a color-capable terminal or a raw writer.
enum OutputLocation<T> {
    Pretty(Box<term::StdoutTerminal>),
// Mutable state accumulated by the console runner while tests execute.
struct ConsoleTestState<T> {
    log_out: Option<File>,
    out: OutputLocation<T>,
    failures: Vec<(TestDesc, Vec<u8>)>,
    max_name_len: usize, // number of columns to fill when aligning names
impl<T: Write> ConsoleTestState<T> {
    // Build the initial console state: open the logfile (if requested)
    // and pick color-capable or raw stdout output.
    pub fn new(opts: &TestOpts, _: Option<T>) -> io::Result<ConsoleTestState<io::Stdout>> {
        let log_out = match opts.logfile {
            Some(ref path) => Some(File::create(path)?),
        let out = match term::stdout() {
            None => Raw(io::stdout()),
            Some(t) => Pretty(t),
        Ok(ConsoleTestState {
            use_color: use_color(opts),
            metrics: MetricMap::new(),
            failures: Vec::new(),
    // Per-result one-liners: a verbose word normally, a single character
    // in --quiet mode (see write_short_result).
    pub fn write_ok(&mut self) -> io::Result<()> {
        self.write_short_result("ok", ".", term::color::GREEN)
    pub fn write_failed(&mut self) -> io::Result<()> {
        self.write_short_result("FAILED", "F", term::color::RED)
    pub fn write_ignored(&mut self) -> io::Result<()> {
        self.write_short_result("ignored", "i", term::color::YELLOW)
    pub fn write_metric(&mut self) -> io::Result<()> {
        self.write_pretty("metric", term::color::CYAN)
    pub fn write_bench(&mut self) -> io::Result<()> {
        self.write_pretty("bench", term::color::CYAN)
    // Emit either the quiet single-character form or the verbose word
    // followed by a newline, in the given color.
    pub fn write_short_result(&mut self, verbose: &str, quiet: &str, color: term::color::Color)
        self.write_pretty(quiet, color)
        self.write_pretty(verbose, color)?;
        self.write_plain("\n")
    // Write `word`, honoring the color setting on a Pretty terminal;
    // a Raw stream gets a plain write.
    pub fn write_pretty(&mut self, word: &str, color: term::color::Color) -> io::Result<()> {
            Pretty(ref mut term) => {
                term.write_all(word.as_bytes())?;
            Raw(ref mut stdout) => {
                stdout.write_all(word.as_bytes())?;
    // Uncolored write to whichever output is active.
    pub fn write_plain(&mut self, s: &str) -> io::Result<()> {
            Pretty(ref mut term) => {
                term.write_all(s.as_bytes())?;
            Raw(ref mut stdout) => {
                stdout.write_all(s.as_bytes())?;
    // Header line: "running N <noun>".
    pub fn write_run_start(&mut self, len: usize) -> io::Result<()> {
        // Pluralize: "1 test" vs "n tests".
        let noun = if len != 1 {
        self.write_plain(&format!("\nrunning {} {}\n", len, noun))
    pub fn write_test_start(&mut self, test: &TestDesc, align: NamePadding) -> io::Result<()> {
        // In quiet mode only right-padded (bench/metric) entries print a
        // "test <name> ..." line.
        if self.quiet && align != PadOnRight {
        let name = test.padded_name(self.max_name_len, align);
        self.write_plain(&format!("test {} ... ", name))
    // Print the per-test outcome; metrics and benches append their data.
    pub fn write_result(&mut self, result: &TestResult) -> io::Result<()> {
            TrOk => self.write_ok(),
            TrFailed => self.write_failed(),
            TrIgnored => self.write_ignored(),
            TrMetrics(ref mm) => {
                self.write_metric()?;
                self.write_plain(&format!(": {}\n", mm.fmt_metrics()))
                self.write_plain(&format!(": {}\n", fmt_bench_samples(bs)))
    // Warn that a test exceeded TEST_WARN_TIMEOUT_S without finishing.
    pub fn write_timeout(&mut self, desc: &TestDesc) -> io::Result<()> {
        self.write_plain(&format!("test {} has been running for over {} seconds\n",
                                  TEST_WARN_TIMEOUT_S))
    // Append "<name> <result>" to the logfile, if one was requested.
    pub fn write_log(&mut self, test: &TestDesc, result: &TestResult) -> io::Result<()> {
        let s = format!("{} {}\n",
                            TrOk => "ok".to_owned(),
                            TrFailed => "failed".to_owned(),
                            TrIgnored => "ignored".to_owned(),
                            TrMetrics(ref mm) => mm.fmt_metrics(),
                            TrBench(ref bs) => fmt_bench_samples(bs),
        o.write_all(s.as_bytes())
    // Print the captured stdout of each failing test, then list the
    // failing test names.
    pub fn write_failures(&mut self) -> io::Result<()> {
        self.write_plain("\nfailures:\n")?;
        let mut failures = Vec::new();
        let mut fail_out = String::new();
        for &(ref f, ref stdout) in &self.failures {
            failures.push(f.name.to_string());
            // Only tests that actually produced output get a stdout block.
            if !stdout.is_empty() {
                fail_out.push_str(&format!("---- {} stdout ----\n\t", f.name));
                let output = String::from_utf8_lossy(stdout);
                fail_out.push_str(&output);
                fail_out.push_str("\n");
        if !fail_out.is_empty() {
            self.write_plain("\n")?;
            self.write_plain(&fail_out)?;
        self.write_plain("\nfailures:\n")?;
        for name in &failures {
            self.write_plain(&format!(" {}\n", name))?;
    // Final summary; returns Ok(true) iff no test failed.
    pub fn write_run_finish(&mut self) -> io::Result<bool> {
        // The tallies must add up; a mismatch means an event was dropped.
        assert!(self.passed + self.failed + self.ignored + self.measured == self.total);
        let success = self.failed == 0;
        self.write_failures()?;
        self.write_plain("\ntest result: ")?;
        // There's no parallelism at this point so it's safe to use color
        self.write_pretty("ok", term::color::GREEN)?;
        self.write_pretty("FAILED", term::color::RED)?;
        let s = format!(". {} passed; {} failed; {} ignored; {} measured\n\n",
        self.write_plain(&s)?;
// Format a number with thousands separators
fn fmt_thousands_sep(mut n: usize, sep: char) -> String {
    let mut output = String::new();
    let mut trailing = false;
    // Walk the 10^9, 10^6, 10^3, 10^0 digit groups, most significant first.
    for &pow in &[9, 6, 3, 0] {
        let base = 10_usize.pow(pow);
        // Skip leading all-zero groups; the units group always prints.
        if pow == 0 || trailing || n / base != 0 {
            output.write_fmt(format_args!("{}", n / base)).unwrap();
            // Groups after the first are zero-padded to three digits.
            output.write_fmt(format_args!("{:03}", n / base)).unwrap();
// Human-readable benchmark line: median ns/iter with +/- spread, plus
// MB/s throughput when a byte count was recorded.
pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
    let mut output = String::new();
    let median = bs.ns_iter_summ.median as usize;
    // Spread is reported as max - min over the per-round samples.
    let deviation = (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize;
    output.write_fmt(format_args!("{:>11} ns/iter (+/- {})",
                                  fmt_thousands_sep(median, ','),
                                  fmt_thousands_sep(deviation, ',')))
    output.write_fmt(format_args!(" = {} MB/s", bs.mb_s)).unwrap();
// A simple console test runner
pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<bool> {
    // Translate scheduler events into console output and tallies.
    fn callback<T: Write>(event: &TestEvent, st: &mut ConsoleTestState<T>) -> io::Result<()> {
        match (*event).clone() {
            TeFiltered(ref filtered_tests) => st.write_run_start(filtered_tests.len()),
            TeWait(ref test, padding) => st.write_test_start(test, padding),
            TeTimeout(ref test) => st.write_timeout(test),
            TeResult(test, result, stdout) => {
                // Log first, then print, then account for the outcome.
                st.write_log(&test, &result)?;
                st.write_result(&result)?;
                    TrOk => st.passed += 1,
                    TrIgnored => st.ignored += 1,
                        let tname = test.name;
                        let MetricMap(mm) = mm;
                        // Namespace each metric under the test's name.
                            .insert_metric(&format!("{}.{}", tname, k), v.value, v.noise);
                        st.metrics.insert_metric(test.name.as_slice(),
                                                 bs.ns_iter_summ.median,
                                                 bs.ns_iter_summ.max - bs.ns_iter_summ.min);
                        // Failures keep their captured stdout for the summary.
                        st.failures.push((test, stdout));
    let mut st = ConsoleTestState::new(opts, None::<io::Stdout>)?;
    // Column width contribution: only right-padded (bench/metric) names
    // count toward the alignment width.
    fn len_if_padded(t: &TestDescAndFn) -> usize {
        match t.testfn.padding() {
            PadOnRight => t.desc.name.as_slice().len(),
    if let Some(t) = tests.iter().max_by_key(|t| len_if_padded(*t)) {
        let n = t.desc.name.as_slice();
        st.max_name_len = n.len();
    run_tests(opts, tests, |x| callback(&x, &mut st))?;
    return st.write_run_finish();
// Regression test: write_failures must list failing tests by name, not
// in the order they were recorded.
fn should_sort_failures_before_printing_them() {
    let test_a = TestDesc {
        name: StaticTestName("a"),
        should_panic: ShouldPanic::No,
    let test_b = TestDesc {
        name: StaticTestName("b"),
        should_panic: ShouldPanic::No,
    let mut st = ConsoleTestState {
        // Raw(Vec) captures output in memory for inspection below.
        out: Raw(Vec::new()),
        metrics: MetricMap::new(),
        // Deliberately inserted out of order: b before a.
        failures: vec![(test_b, Vec::new()), (test_a, Vec::new())],
    st.write_failures().unwrap();
    let s = match st.out {
        Raw(ref m) => String::from_utf8_lossy(&m[..]),
        Pretty(_) => unreachable!(),
    // "a" must come before "b" in the rendered output.
    let apos = s.find("a").unwrap();
    let bpos = s.find("b").unwrap();
    assert!(apos < bpos);
// Decide whether to colorize, honoring --color and tty detection.
fn use_color(opts: &TestOpts) -> bool {
        AutoColor => !opts.nocapture && stdout_isatty(),
// tty check via libc::isatty (non-Windows variant; cfg attributes are in
// elided lines — the Windows counterpart follows).
fn stdout_isatty() -> bool {
    unsafe { libc::isatty(libc::STDOUT_FILENO) != 0 }
// Windows variant: a handle is attached to a console iff GetConsoleMode
// succeeds on it.
fn stdout_isatty() -> bool {
    type HANDLE = *mut u8;
    type LPDWORD = *mut u32;
    // (DWORD)-11 is the documented STD_OUTPUT_HANDLE constant.
    const STD_OUTPUT_HANDLE: DWORD = -11i32 as DWORD;
        fn GetStdHandle(which: DWORD) -> HANDLE;
        fn GetConsoleMode(hConsoleHandle: HANDLE, lpMode: LPDWORD) -> BOOL;
    let handle = GetStdHandle(STD_OUTPUT_HANDLE);
        GetConsoleMode(handle, &mut out) != 0
// Events emitted by run_tests and consumed by the console callback.
    TeFiltered(Vec<TestDesc>),
    TeWait(TestDesc, NamePadding),
    TeResult(TestDesc, TestResult, Vec<u8>),
// (description, outcome, captured stdout) sent back from test workers.
pub type MonitorMsg = (TestDesc, TestResult, Vec<u8>);
// Core scheduler: keeps up to `concurrency` tests in flight, reporting
// progress through `callback`; benchmarks/metric fns run serially at the
// end when --bench was given.
fn run_tests<F>(opts: &TestOpts, tests: Vec<TestDescAndFn>, mut callback: F) -> io::Result<()>
    where F: FnMut(TestEvent) -> io::Result<()>
    use std::collections::HashMap;
    use std::sync::mpsc::RecvTimeoutError;
    let mut filtered_tests = filter_tests(opts, tests);
    // When not benchmarking, benches degrade to run-once tests.
    if !opts.bench_benchmarks {
        filtered_tests = convert_benchmarks_to_tests(filtered_tests);
    let filtered_descs = filtered_tests.iter()
                                       .map(|t| t.desc.clone())
    // Announce the post-filter test list before anything runs.
    callback(TeFiltered(filtered_descs))?;
    // Plain tests may run in parallel; benches/metric fns are split off.
    let (filtered_tests, filtered_benchs_and_metrics): (Vec<_>, _) =
        filtered_tests.into_iter().partition(|e| {
                StaticTestFn(_) | DynTestFn(_) => true,
    let concurrency = match opts.test_threads {
        None => get_concurrency(),
    let mut remaining = filtered_tests;
    let (tx, rx) = channel::<MonitorMsg>();
    // Warn deadline (start + TEST_WARN_TIMEOUT_S) per in-flight test.
    let mut running_tests: HashMap<TestDesc, Instant> = HashMap::new();
    // Collect (and deregister) every test whose warn deadline has passed.
    fn get_timed_out_tests(running_tests: &mut HashMap<TestDesc, Instant>) -> Vec<TestDesc> {
        let now = Instant::now();
        let timed_out = running_tests.iter()
            .filter_map(|(desc, timeout)| if &now >= timeout { Some(desc.clone())} else { None })
        for test in &timed_out {
            running_tests.remove(test);
    // Time until the earliest in-flight deadline, if anything is running.
    fn calc_timeout(running_tests: &HashMap<TestDesc, Instant>) -> Option<Duration> {
        running_tests.values().min().map(|next_timeout| {
            let now = Instant::now();
            if *next_timeout >= now {
    while pending > 0 || !remaining.is_empty() {
        // Top up to `concurrency` in-flight tests.
        while pending < concurrency && !remaining.is_empty() {
            let test = remaining.pop().unwrap();
            if concurrency == 1 {
                // We are doing one test at a time so we can print the name
                // of the test before we run it. Useful for debugging tests
                // that hang forever.
                callback(TeWait(test.desc.clone(), test.testfn.padding()))?;
            let timeout = Instant::now() + Duration::from_secs(TEST_WARN_TIMEOUT_S);
            running_tests.insert(test.desc.clone(), timeout);
            run_test(opts, !opts.run_tests, test, tx.clone());
        // Wait for a result, waking early to emit timeout warnings.
        if let Some(timeout) = calc_timeout(&running_tests) {
            res = rx.recv_timeout(timeout);
            for test in get_timed_out_tests(&mut running_tests) {
                callback(TeTimeout(test))?;
            // A real result (or disconnect) ends the wait; a pure timeout
            // loops to wait again.
            if res != Err(RecvTimeoutError::Timeout) {
            res = rx.recv().map_err(|_| RecvTimeoutError::Disconnected);
        let (desc, result, stdout) = res.unwrap();
        running_tests.remove(&desc);
        // In parallel mode the name is printed on completion instead.
        if concurrency != 1 {
            callback(TeWait(desc.clone(), PadNone))?;
        callback(TeResult(desc, result, stdout))?;
    if opts.bench_benchmarks {
        // All benchmarks run at the end, in serial.
        // (this includes metric fns)
        for b in filtered_benchs_and_metrics {
            callback(TeWait(b.desc.clone(), b.testfn.padding()))?;
            run_test(opts, false, b, tx.clone());
            let (test, result, stdout) = rx.recv().unwrap();
            callback(TeResult(test, result, stdout))?;
// Default number of concurrent tests: RUST_TEST_THREADS if set and
// positive, otherwise the detected CPU count.
fn get_concurrency() -> usize {
    return match env::var("RUST_TEST_THREADS") {
            let opt_n: Option<usize> = s.parse().ok();
                Some(n) if n > 0 => n,
                panic!("RUST_TEST_THREADS is `{}`, should be a positive integer.",
        Err(..) => num_cpus(),
    // Windows: processor count from GetSystemInfo.
    fn num_cpus() -> usize {
            wProcessorArchitecture: u16,
            lpMinimumApplicationAddress: *mut u8,
            lpMaximumApplicationAddress: *mut u8,
            dwActiveProcessorMask: *mut u8,
            dwNumberOfProcessors: u32,
            dwProcessorType: u32,
            dwAllocationGranularity: u32,
            wProcessorLevel: u16,
            wProcessorRevision: u16,
        fn GetSystemInfo(info: *mut SYSTEM_INFO) -> i32;
        let mut sysinfo = std::mem::zeroed();
        GetSystemInfo(&mut sysinfo);
        sysinfo.dwNumberOfProcessors as usize
    #[cfg(any(target_os = "linux",
              target_os = "macos",
              target_os = "android",
              target_os = "solaris",
              target_os = "emscripten"))]
    // POSIX targets with a working sysconf.
    fn num_cpus() -> usize {
        unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as usize }
    #[cfg(any(target_os = "freebsd",
              target_os = "dragonfly",
              target_os = "bitrig",
              target_os = "netbsd"))]
    // BSDs: sysconf and/or the hw.ncpu sysctl (branching between the two
    // is in elided lines).
    fn num_cpus() -> usize {
        let mut cpus: libc::c_uint = 0;
        let mut cpus_size = std::mem::size_of_val(&cpus);
            cpus = libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as libc::c_uint;
        let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
            libc::sysctl(mib.as_mut_ptr(),
                         &mut cpus as *mut _ as *mut _,
                         &mut cpus_size as *mut _ as *mut _,
    #[cfg(target_os = "openbsd")]
    // OpenBSD: hw.ncpu sysctl only.
    fn num_cpus() -> usize {
        let mut cpus: libc::c_uint = 0;
        let mut cpus_size = std::mem::size_of_val(&cpus);
        let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
            libc::sysctl(mib.as_mut_ptr(),
                         &mut cpus as *mut _ as *mut _,
                         &mut cpus_size as *mut _ as *mut _,
// Apply --filter / --ignored handling, then sort the tests by name.
pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
    let mut filtered = tests;
    // Remove tests that don't match the test filter
    filtered = match opts.filter {
        Some(ref filter) => {
            // Substring match against the full test name/path.
            filtered.into_iter()
                    .filter(|test| test.desc.name.as_slice().contains(&filter[..]))
    // Maybe pull out the ignored test and unignore them
    filtered = if !opts.run_ignored {
        // Keep only ignored tests, flipped to runnable (ignore: false).
        fn filter(test: TestDescAndFn) -> Option<TestDescAndFn> {
            if test.desc.ignore {
                let TestDescAndFn {desc, testfn} = test;
                Some(TestDescAndFn {
                    desc: TestDesc { ignore: false, ..desc },
        filtered.into_iter().filter_map(filter).collect()
    // Sort the tests alphabetically
    filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));
// Rewrap every benchmark as a plain test that runs the bench body once
// (used when benchmarks are not being measured this run).
pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
    // convert benchmarks to tests, if we're not benchmarking them
    let testfn = match x.testfn {
        DynBenchFn(bench) => {
            DynTestFn(Box::new(move || bench::run_once(|b| bench.run(b))))
        StaticBenchFn(benchfn) => {
            DynTestFn(Box::new(move || bench::run_once(|b| benchfn(b))))
// Execute one test/bench/metric fn and send its result on `monitor_ch`.
pub fn run_test(opts: &TestOpts,
                test: TestDescAndFn,
                monitor_ch: Sender<MonitorMsg>) {
    let TestDescAndFn {desc, testfn} = test;
    // Ignored (or force-ignored) tests report TrIgnored without running.
    if force_ignore || desc.ignore {
        monitor_ch.send((desc, TrIgnored, Vec::new())).unwrap();
    // Run the test body on its own named thread, capturing its printed
    // and panic output for the report.
    fn run_test_inner(desc: TestDesc,
                      monitor_ch: Sender<MonitorMsg>,
                      testfn: Box<FnBox() + Send>) {
        // Writer that appends into a shared, mutex-guarded byte buffer.
        struct Sink(Arc<Mutex<Vec<u8>>>);
        impl Write for Sink {
            fn write(&mut self, data: &[u8]) -> io::Result<usize> {
                Write::write(&mut *self.0.lock().unwrap(), data)
            fn flush(&mut self) -> io::Result<()> {
        thread::spawn(move || {
            let data = Arc::new(Mutex::new(Vec::new()));
            let data2 = data.clone();
            // Name the worker thread after the test for readable panics.
            let cfg = thread::Builder::new().name(match desc.name {
                DynTestName(ref name) => name.clone(),
                StaticTestName(name) => name.to_owned(),
            let result_guard = cfg.spawn(move || {
                // Redirect print/panic output into the capture buffer.
                io::set_print(box Sink(data2.clone()));
                io::set_panic(box Sink(data2));
            // Join the test thread and translate its outcome.
            let test_result = calc_result(&desc, result_guard.join());
            let stdout = data.lock().unwrap().to_vec();
            monitor_ch.send((desc.clone(), test_result, stdout)).unwrap();
        DynBenchFn(bencher) => {
            let bs = ::bench::benchmark(|harness| bencher.run(harness));
            monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
        StaticBenchFn(benchfn) => {
            let bs = ::bench::benchmark(|harness| (benchfn.clone())(harness));
            monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
            let mut mm = MetricMap::new();
            f.call_box((&mut mm,));
            monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
        StaticMetricFn(f) => {
            let mut mm = MetricMap::new();
            monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
        DynTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture, f),
        StaticTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture, Box::new(f)),
// Map a joined thread result onto a TestResult, honoring should_panic.
fn calc_result(desc: &TestDesc, task_result: Result<(), Box<Any + Send>>) -> TestResult {
    match (&desc.should_panic, task_result) {
        (&ShouldPanic::No, Ok(())) |
        (&ShouldPanic::Yes, Err(_)) => TrOk,
        // YesWithMessage passes only if the panic payload (String or
        // &'static str) contains the expected substring.
        (&ShouldPanic::YesWithMessage(msg), Err(ref err))
            if err.downcast_ref::<String>()
                  .or_else(|| err.downcast_ref::<&'static str>().map(|e| *e))
                  .map(|e| e.contains(msg))
                  .unwrap_or(false) => TrOk,
    // Fresh, empty metric map.
    pub fn new() -> MetricMap {
        MetricMap(BTreeMap::new())
    /// Insert a named `value` (+/- `noise`) metric into the map. The value
    /// must be non-negative. The `noise` indicates the uncertainty of the
    /// metric, which doubles as the "noise range" of acceptable
    /// pairwise-regressions on this named value, when comparing from one
    /// metric to the next using `compare_to_old`.
    /// If `noise` is positive, then it means this metric is of a value
    /// you want to see grow smaller, so a change larger than `noise` in the
    /// positive direction represents a regression.
    /// If `noise` is negative, then it means this metric is of a value
    /// you want to see grow larger, so a change larger than `noise` in the
    /// negative direction represents a regression.
    pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
        let MetricMap(ref mut map) = *self;
        // Overwrites any existing entry with the same name.
        map.insert(name.to_owned(), m);
    // Render as "name: value (+/- noise)" pairs for console output.
    pub fn fmt_metrics(&self) -> String {
        let MetricMap(ref mm) = *self;
        let v: Vec<String> = mm.iter()
                               .map(|(k, v)| format!("{}: {} (+/- {})", *k, v.value, v.noise))
/// A function that is opaque to the optimizer, to allow benchmarks to
/// pretend to use outputs to assist in avoiding dead-code
/// This function is a no-op, and does not even read from `dummy`.
#[cfg(not(any(all(target_os = "nacl", target_arch = "le32"),
              target_arch = "asmjs")))]
pub fn black_box<T>(dummy: T) -> T {
    // we need to "use" the argument in some way LLVM can't
    // introspect: an empty asm block takes the address as an input.
    unsafe { asm!("" : : "r"(&dummy)) }
// Fallback for targets without inline-asm support: plain identity, so
// the value may be optimized more aggressively there.
#[cfg(any(all(target_os = "nacl", target_arch = "le32"),
          target_arch = "asmjs"))]
pub fn black_box<T>(dummy: T) -> T {
    /// Callback for benchmark functions to run in their body.
    pub fn iter<T, F>(&mut self, mut inner: F)
        where F: FnMut() -> T
        // Time `self.iterations` calls of `inner`, recording the duration.
        let start = Instant::now();
        let k = self.iterations;
        self.dur = start.elapsed();
    // Total nanoseconds measured by the last iter() run.
    pub fn ns_elapsed(&mut self) -> u64 {
        self.dur.as_secs() * 1_000_000_000 + (self.dur.subsec_nanos() as u64)
    // Nanoseconds per iteration; guards against zero iterations.
    pub fn ns_per_iter(&mut self) -> u64 {
        if self.iterations == 0 {
        self.ns_elapsed() / cmp::max(self.iterations, 1)
    // Run `f` with the iteration count fixed to `n`.
    pub fn bench_n<F>(&mut self, n: u64, f: F)
        where F: FnOnce(&mut Bencher)
        self.iterations = n;
    // This is a more statistics-driven benchmark algorithm:
    // grow the iteration count until timings converge (or ~3s elapse) and
    // return winsorized summary statistics of the ns/iter samples.
    pub fn auto_bench<F>(&mut self, mut f: F) -> stats::Summary
        where F: FnMut(&mut Bencher)
        // Initial bench run to get ballpark figure.
        self.bench_n(n, |x| f(x));
        // Try to estimate iter count for 1ms falling back to 1m
        // iterations if first run took < 1ns.
        if self.ns_per_iter() == 0 {
            n = 1_000_000 / cmp::max(self.ns_per_iter(), 1);
        // if the first run took more than 1ms we don't want to just
        // be left doing 0 iterations on every loop. The unfortunate
        // side effect of not being able to do as many runs is
        // automatically handled by the statistical analysis below
        // (i.e. larger error bars).
        let mut total_run = Duration::new(0, 0);
        // Fixed 50-sample window per measurement round.
        let samples: &mut [f64] = &mut [0.0_f64; 50];
            let loop_start = Instant::now();
            for p in &mut *samples {
                self.bench_n(n, |x| f(x));
                *p = self.ns_per_iter() as f64;
            // Clip the extreme 5% tails before summarizing.
            stats::winsorize(samples, 5.0);
            let summ = stats::Summary::new(samples);
            // Second pass at 5x the iterations as a stability check.
            for p in &mut *samples {
                self.bench_n(5 * n, |x| f(x));
                *p = self.ns_per_iter() as f64;
            stats::winsorize(samples, 5.0);
            let summ5 = stats::Summary::new(samples);
            let loop_run = loop_start.elapsed();
            // If we've run for 100ms and seem to have converged to a
            if loop_run > Duration::from_millis(100) && summ.median_abs_dev_pct < 1.0 &&
               summ.median - summ5.median < summ5.median_abs_dev {
            total_run = total_run + loop_run;
            // Longest we ever run for is 3s.
            if total_run > Duration::from_secs(3) {
            // If we overflow here just return the results so far. We check a
            // multiplier of 10 because we're about to multiply by 2 and the
            // next iteration of the loop will also multiply by 5 (to calculate
            // the summ5 result)
            n = match n.checked_mul(10) {
                None => return summ5,
1398 use std::time::Duration;
1399 use super::{Bencher, BenchSamples};
// NOTE(review): fragment of `bench::benchmark` — several struct-literal
// fields (e.g. the Bencher's iteration/byte counters and the returned
// BenchSamples' braces) fall in gaps of this extract.
// Visible behavior: construct a Bencher, run the adaptive loop via
// `auto_bench(f)`, then derive a MB/s figure from the recorded byte count.
1401 pub fn benchmark<F>(f: F) -> BenchSamples
1402 where F: FnMut(&mut Bencher)
1404 let mut bs = Bencher {
1406 dur: Duration::new(0, 0),
1410 let ns_iter_summ = bs.auto_bench(f);
// Clamp to >= 1 ns/iter so the MB/s division below can't divide by zero.
1412 let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
1413 let mb_s = bs.bytes * 1000 / ns_iter;
1416 ns_iter_summ: ns_iter_summ,
1417 mb_s: mb_s as usize,
// NOTE(review): fragment of `bench::run_once` — the actual single invocation
// of `f` and the remaining Bencher fields are in a gap of this extract.
// Takes FnOnce (vs. FnMut in `benchmark` above): the closure runs exactly
// once and no statistics are gathered.
1421 pub fn run_once<F>(f: F)
1422 where F: FnOnce(&mut Bencher)
1424 let mut bs = Bencher {
1426 dur: Duration::new(0, 0),
1435 use test::{TrFailed, TrIgnored, TrOk, filter_tests, parse_opts, TestDesc, TestDescAndFn,
1436 TestOpts, run_test, MetricMap, StaticTestName, DynTestName, DynTestFn, ShouldPanic};
1437 use std::sync::mpsc::channel;
// NOTE(review): test fragment. The closure `f` and the desc's `ignore` field
// are in gaps of this extract — presumably ignore=true given the test name;
// confirm against the full file.
// Visible behavior: run the test with default TestOpts over a channel and
// assert the reported result is anything but TrOk (ignored tests must not
// pass as "ok").
1440 pub fn do_not_run_ignored_tests() {
1444 let desc = TestDescAndFn {
1446 name: StaticTestName("whatever"),
1448 should_panic: ShouldPanic::No,
1450 testfn: DynTestFn(Box::new(move || f())),
1452 let (tx, rx) = channel();
1453 run_test(&TestOpts::new(), false, desc, tx);
// Result tuple: (TestDesc?, TestResult, stdout?) — middle field checked here.
1454 let (_, res, _) = rx.recv().unwrap();
1455 assert!(res != TrOk);
// NOTE(review): test fragment; the closure `f` and the `ignore:` field are in
// gaps of this extract. Complements the test above: an ignored test's result
// must be reported specifically as TrIgnored.
1459 pub fn ignored_tests_result_in_ignored() {
1461 let desc = TestDescAndFn {
1463 name: StaticTestName("whatever"),
1465 should_panic: ShouldPanic::No,
1467 testfn: DynTestFn(Box::new(move || f())),
1469 let (tx, rx) = channel();
1470 run_test(&TestOpts::new(), false, desc, tx);
1471 let (_, res, _) = rx.recv().unwrap();
1472 assert!(res == TrIgnored);
// NOTE(review): test fragment; the panicking closure `f` is defined on a line
// missing from this extract. With ShouldPanic::Yes, a test body that panics
// must be reported as TrOk.
1476 fn test_should_panic() {
1480 let desc = TestDescAndFn {
1482 name: StaticTestName("whatever"),
1484 should_panic: ShouldPanic::Yes,
1486 testfn: DynTestFn(Box::new(move || f())),
1488 let (tx, rx) = channel();
1489 run_test(&TestOpts::new(), false, desc, tx);
1490 let (_, res, _) = rx.recv().unwrap();
1491 assert!(res == TrOk);
// NOTE(review): test fragment (the fn wrapping the panic! is partly in a
// gap). The panic message "an error message" contains the expected substring
// "error message", so YesWithMessage matches and the result must be TrOk —
// i.e. the message check is substring-based, not exact-equality.
1495 fn test_should_panic_good_message() {
1497 panic!("an error message");
1499 let desc = TestDescAndFn {
1501 name: StaticTestName("whatever"),
1503 should_panic: ShouldPanic::YesWithMessage("error message"),
1505 testfn: DynTestFn(Box::new(move || f())),
1507 let (tx, rx) = channel();
1508 run_test(&TestOpts::new(), false, desc, tx);
1509 let (_, res, _) = rx.recv().unwrap();
1510 assert!(res == TrOk);
// NOTE(review): test fragment. Counterpart to the test above: the actual
// panic message ("an error message") does not contain the expected "foobar",
// so despite the test panicking as required, the mismatch must yield
// TrFailed.
1514 fn test_should_panic_bad_message() {
1516 panic!("an error message");
1518 let desc = TestDescAndFn {
1520 name: StaticTestName("whatever"),
1522 should_panic: ShouldPanic::YesWithMessage("foobar"),
1524 testfn: DynTestFn(Box::new(move || f())),
1526 let (tx, rx) = channel();
1527 run_test(&TestOpts::new(), false, desc, tx);
1528 let (_, res, _) = rx.recv().unwrap();
1529 assert!(res == TrFailed);
// NOTE(review): test fragment; the non-panicking closure `f` is on a line
// missing from this extract. With ShouldPanic::Yes, a body that returns
// normally (does NOT panic) must be reported as TrFailed.
1533 fn test_should_panic_but_succeeds() {
1535 let desc = TestDescAndFn {
1537 name: StaticTestName("whatever"),
1539 should_panic: ShouldPanic::Yes,
1541 testfn: DynTestFn(Box::new(move || f())),
1543 let (tx, rx) = channel();
1544 run_test(&TestOpts::new(), false, desc, tx);
1545 let (_, res, _) = rx.recv().unwrap();
1546 assert!(res == TrFailed);
// NOTE(review): test fragment — the Ok/Some match arm binding `opts` is in a
// gap of this extract. Visible behavior: parsing argv with a trailing
// "--ignored" must set opts.run_ignored.
1550 fn parse_ignored_flag() {
1551 let args = vec!["progname".to_string(), "filter".to_string(), "--ignored".to_string()];
1552 let opts = match parse_opts(&args) {
1554 _ => panic!("Malformed arg in parse_ignored_flag"),
1556 assert!((opts.run_ignored));
// NOTE(review): test fragment — the `ignore:` fields of the two test descs
// and the vec separators are in gaps of this extract; presumably test "1" is
// ignored and test "2" is not, matching the assertions below. Confirm against
// the full file.
1560 pub fn filter_for_ignored_option() {
1561 // When we run ignored tests the test filter should filter out all the
1562 // unignored tests and flip the ignore flag on the rest to false
1564 let mut opts = TestOpts::new();
1565 opts.run_tests = true;
1566 opts.run_ignored = true;
// Two tests: "1" and "2"; only one should survive the filter.
1568 let tests = vec![TestDescAndFn {
1570 name: StaticTestName("1"),
1572 should_panic: ShouldPanic::No,
1574 testfn: DynTestFn(Box::new(move || {})),
1578 name: StaticTestName("2"),
1580 should_panic: ShouldPanic::No,
1582 testfn: DynTestFn(Box::new(move || {})),
1584 let filtered = filter_tests(&opts, tests);
// Exactly one test remains, named "1", with its ignore flag flipped to false.
1586 assert_eq!(filtered.len(), 1);
1587 assert_eq!(filtered[0].desc.name.to_string(), "1");
1588 assert!(!filtered[0].desc.ignore);
// NOTE(review): test fragment — the `testfn` definition, the push into
// `tests`, and several struct fields/braces are in gaps of this extract.
// Visible behavior: build tests from a deliberately unsorted name list, run
// them through filter_tests, and check the output comes back in lexicographic
// name order (the `expected` list is the same names, sorted).
1592 pub fn sort_tests() {
1593 let mut opts = TestOpts::new();
1594 opts.run_tests = true;
1596 let names = vec!["sha1::test".to_string(),
1597 "isize::test_to_str".to_string(),
1598 "isize::test_pow".to_string(),
1599 "test::do_not_run_ignored_tests".to_string(),
1600 "test::ignored_tests_result_in_ignored".to_string(),
1601 "test::first_free_arg_should_be_a_filter".to_string(),
1602 "test::parse_ignored_flag".to_string(),
1603 "test::filter_for_ignored_option".to_string(),
1604 "test::sort_tests".to_string()];
1607 let mut tests = Vec::new();
1608 for name in &names {
1609 let test = TestDescAndFn {
1611 name: DynTestName((*name).clone()),
1613 should_panic: ShouldPanic::No,
1615 testfn: DynTestFn(Box::new(testfn)),
1621 let filtered = filter_tests(&opts, tests);
1623 let expected = vec!["isize::test_pow".to_string(),
1624 "isize::test_to_str".to_string(),
1625 "sha1::test".to_string(),
1626 "test::do_not_run_ignored_tests".to_string(),
1627 "test::filter_for_ignored_option".to_string(),
1628 "test::first_free_arg_should_be_a_filter".to_string(),
1629 "test::ignored_tests_result_in_ignored".to_string(),
1630 "test::parse_ignored_flag".to_string(),
1631 "test::sort_tests".to_string()];
// Pairwise comparison: filtered order must equal the sorted expected order.
1633 for (a, b) in expected.iter().zip(filtered) {
1634 assert!(*a == b.desc.name.to_string());
// NOTE(review): test fragment that runs past the end of this extract — the
// comparison/assertion part of the test is not visible here; only the metric
// setup is. Two MetricMaps are populated so each named case exercises one
// comparison outcome; the third insert_metric argument appears to be the
// noise/direction parameter (negative values paired with "want-upwards"
// names) — confirm its semantics against MetricMap's definition.
1639 pub fn test_metricmap_compare() {
1640 let mut m1 = MetricMap::new();
1641 let mut m2 = MetricMap::new();
// Same metric in both maps, difference (100) within the 200 noise band.
1642 m1.insert_metric("in-both-noise", 1000.0, 200.0);
1643 m2.insert_metric("in-both-noise", 1100.0, 200.0);
// Metrics present in only one of the two maps.
1645 m1.insert_metric("in-first-noise", 1000.0, 2.0);
1646 m2.insert_metric("in-second-noise", 1000.0, 2.0);
// Lower-is-better metric that got worse (1000 -> 2000)...
1648 m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
1649 m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);
// ...and one that improved (2000 -> 1000).
1651 m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
1652 m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);
// Higher-is-better (negative noise?) metric that regressed (2000 -> 1000)...
1654 m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
1655 m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);
// ...and one that improved (1000 -> 2000).
1657 m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
1658 m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);