1 //! Support code for rustc's built in unit-test and micro-benchmarking
4 //! Almost all user code will only be interested in `Bencher` and
5 //! `black_box`. All other interactions (such as writing tests and
6 //! benchmarks themselves) should be done via the `#[test]` and
7 //! `#[bench]` attributes.
9 //! See the [Testing Chapter](../book/ch11-00-testing.html) of the book for more details.
11 // Currently, not much of this is meant for users. It is intended to
12 // support the simplest interface possible for representing and
13 // running tests while providing a base that other test frameworks may
16 // N.B., this is also specified in this crate's Cargo.toml, but libsyntax contains logic specific to
17 // this crate, which relies on this attribute (rather than the value of `--crate-name` passed by
18 // cargo) to detect this crate.
20 #![crate_name = "test"]
21 #![unstable(feature = "test", issue = "50297")]
22 #![doc(html_root_url = "https://doc.rust-lang.org/nightly/", test(attr(deny(warnings))))]
24 #![cfg_attr(any(unix, target_os = "cloudabi"), feature(libc, rustc_private))]
26 #![feature(set_stdio)]
27 #![feature(panic_unwind)]
28 #![feature(staged_api)]
29 #![feature(termination_trait_lib)]
33 #[cfg(any(unix, target_os = "cloudabi"))]
37 // FIXME(#54291): rustc and/or LLVM don't yet support building with panic-unwind
38 // on aarch64-pc-windows-msvc, or thumbv7a-pc-windows-msvc
39 // so we don't link libtest against libunwind (for the time being)
40 // even though it means that libtest won't be fully functional on
43 // See also: https://github.com/rust-lang/rust/issues/54190#issuecomment-422904437
44 #[cfg(not(all(windows, any(target_arch = "aarch64", target_arch = "arm"))))]
45 extern crate panic_unwind;
47 pub use self::ColorConfig::*;
48 use self::NamePadding::*;
49 use self::OutputLocation::*;
50 use self::TestEvent::*;
51 pub use self::TestFn::*;
52 pub use self::TestName::*;
53 pub use self::TestResult::*;
58 use std::collections::BTreeMap;
63 use std::io::prelude::*;
64 use std::panic::{catch_unwind, AssertUnwindSafe};
65 use std::path::PathBuf;
67 use std::process::Termination;
68 use std::sync::mpsc::{channel, Sender};
69 use std::sync::{Arc, Mutex};
71 use std::time::{Duration, Instant};
// Seconds a test may run before the harness reports it via a `TeTimeout`
// event (see `run_tests` below). The timed-out test is only dropped from the
// bookkeeping map — it keeps running; this is a warning, not a failure.
76 const TEST_WARN_TIMEOUT_S: u64 = 60;
77 const QUIET_MODE_MAX_COLUMN: usize = 100; // insert a '\n' after 100 tests in quiet mode
79 // to be used by rustc to compile tests in libtest
82 assert_test_result, filter_tests, parse_opts, run_test, test_main, test_main_static,
83 Bencher, DynTestFn, DynTestName, Metric, MetricMap, Options, RunIgnored, ShouldPanic,
84 StaticBenchFn, StaticTestFn, StaticTestName, TestDesc, TestDescAndFn, TestName, TestOpts,
85 TestResult, TrFailed, TrFailedMsg, TrIgnored, TrOk,
92 use crate::formatters::{JsonFormatter, OutputFormatter, PrettyFormatter, TerseFormatter};
94 /// Whether to execute tests concurrently or not
95 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
// NOTE(review): the item these derives attach to (presumably the `Concurrent`
// enum used by `run_test` below) is missing — this extract has many dropped
// lines; confirm every fragment against the full source.
101 // The name of a test. By convention this follows the rules for rust
102 // paths; i.e., it should be a series of identifiers separated by double
103 // colons. This way if some test runner wants to arrange the tests
104 // hierarchically it may.
106 #[derive(Clone, PartialEq, Eq, Hash, Debug)]
// Variants of `TestName` (the `pub enum TestName {` header line is missing
// here). A `DynTestName(String)` variant presumably exists as well — it is
// matched below and re-exported at the top of the file; confirm.
108 StaticTestName(&'static str),
110 AlignedTestName(Cow<'static, str>, NamePadding),
// Borrow the name as a plain `&str`, whichever variant holds it.
113 fn as_slice(&self) -> &str {
115 StaticTestName(s) => s,
116 DynTestName(ref s) => s,
117 AlignedTestName(ref s, _) => &*s,
// Padding is carried only by `AlignedTestName`; the other match arms are
// missing from this extract (presumably defaulting to `PadNone` — confirm).
121 fn padding(&self) -> NamePadding {
123 &AlignedTestName(_, p) => p,
// Convert any variant into an `AlignedTestName` carrying the requested
// padding, moving/cloning the name into a `Cow` as needed.
128 fn with_padding(&self, padding: NamePadding) -> TestName {
129 let name = match self {
130 &TestName::StaticTestName(name) => Cow::Borrowed(name),
131 &TestName::DynTestName(ref name) => Cow::Owned(name.clone()),
132 &TestName::AlignedTestName(ref name, _) => name.clone(),
135 TestName::AlignedTestName(name, padding)
// Display a test name as its bare string form (delegates to `as_slice`).
138 impl fmt::Display for TestName {
139 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
140 fmt::Display::fmt(self.as_slice(), f)
144 #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
// Variant lines are missing; `PadNone` and `PadOnRight` are referenced by
// `TestFn::padding` below.
145 pub enum NamePadding {
// Right-pad the test's name with spaces up to `column_count` for aligned
// console output. `saturating_sub` avoids underflow when the name is already
// longer than the column.
151 fn padded_name(&self, column_count: usize, align: NamePadding) -> String {
152 let mut name = String::from(self.name.as_slice());
153 let fill = column_count.saturating_sub(name.len());
154 let pad = " ".repeat(fill);
165 /// Represents a benchmark function.
166 pub trait TDynBenchFn: Send {
167 fn run(&self, harness: &mut Bencher);
170 // A function that runs a test. If the function returns successfully,
171 // the test succeeds; if the function panics then the test fails. We
172 // may need to come up with a more clever definition of test in order
173 // to support isolation of tests into threads.
// Variants of `TestFn`; the enum header and the `StaticTestFn(fn())` variant
// line are missing from this extract (it is matched throughout the file).
176 StaticBenchFn(fn(&mut Bencher)),
177 DynTestFn(Box<dyn FnOnce() + Send>),
178 DynBenchFn(Box<dyn TDynBenchFn + 'static>),
// Benchmarks get right-padded names so their ns/iter columns line up in the
// console output; plain tests are not padded.
182 fn padding(&self) -> NamePadding {
184 StaticTestFn(..) => PadNone,
185 StaticBenchFn(..) => PadOnRight,
186 DynTestFn(..) => PadNone,
187 DynBenchFn(..) => PadOnRight,
// Manual Debug impl: the boxed closures/function pointers inside `TestFn`
// cannot derive `Debug`, so only the variant name is printed.
192 impl fmt::Debug for TestFn {
193 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
194 f.write_str(match *self {
195 StaticTestFn(..) => "StaticTestFn(..)",
196 StaticBenchFn(..) => "StaticBenchFn(..)",
197 DynTestFn(..) => "DynTestFn(..)",
198 DynBenchFn(..) => "DynBenchFn(..)",
203 /// Manager of the benchmarking runs.
205 /// This is fed into functions marked with `#[bench]` to allow for
206 /// set-up & tear-down before running a piece of code repeatedly via a
// Field of the `Bencher` struct (struct header missing here); holds the
// timing statistics produced by the `iter` loop below.
211 summary: Option<stats::Summary>,
215 #[derive(Clone, PartialEq, Eq)]
// NOTE(review): the item for the derive above (compared as `BenchMode::Single`
// in `Bencher::iter` below — presumably a `BenchMode` enum) is missing.
221 #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
// Whether a test is expected to panic; `No`/`Yes` variants are missing from
// this extract but are matched in `calc_result` below.
222 pub enum ShouldPanic {
225 YesWithMessage(&'static str),
228 // The definition of a single test. A test runner will run a list of
230 #[derive(Clone, Debug, PartialEq, Eq, Hash)]
// Several fields (`name`, `ignore`) are missing here; both are read elsewhere
// in this file.
231 pub struct TestDesc {
234 pub should_panic: ShouldPanic,
235 pub allow_fail: bool,
// Pairs a test's metadata with its runnable function.
239 pub struct TestDescAndFn {
244 #[derive(Clone, PartialEq, Debug, Copy)]
// Constructor for `Metric` (value plus its noise/uncertainty band).
251 pub fn new(value: f64, noise: f64) -> Metric {
252 Metric { value, noise }
256 /// In case we want to add other options as well, just add them in this struct.
257 #[derive(Copy, Clone, Debug)]
259 display_output: bool,
// Default options: captured test output is not displayed.
263 pub fn new() -> Options {
265 display_output: false,
// Builder-style setter; consumes and returns `self`.
269 pub fn display_output(mut self, display_output: bool) -> Options {
270 self.display_output = display_output;
275 // The default console test runner. It accepts the command line
276 // arguments and a vector of test_descs.
277 pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>, options: Options) {
278 let mut opts = match parse_opts(args) {
281 eprintln!("error: {}", msg);
287 opts.options = options;
// --list mode: print the test names instead of running anything.
289 if let Err(e) = list_tests_console(&opts, tests) {
290 eprintln!("error: io error when listing tests: {:?}", e);
294 match run_tests_console(&opts, tests) {
// Exit code 101 is the conventional Rust test-failure / panic exit status.
296 Ok(false) => process::exit(101),
// NOTE(review): this message says "listing" but this is the *run* path —
// looks copy-pasted from the branch above; confirm and fix upstream.
298 eprintln!("error: io error when listing tests: {:?}", e);
305 // A variant optimized for invocation with a static test vector.
306 // This will panic (intentionally) when fed any dynamic tests, because
307 // it is copying the static values out into a dynamic vector and cannot
308 // copy dynamic values. It is doing this because from this point on
309 // a Vec<TestDescAndFn> is used in order to effect ownership-transfer
310 // semantics into parallel test runners, which in turn requires a Vec<>
311 // rather than a &[].
312 pub fn test_main_static(tests: &[&TestDescAndFn]) {
313 let args = env::args().collect::<Vec<_>>();
314 let owned_tests = tests
315 .map(|t| match t.testfn {
317 StaticTestFn(f) => TestDescAndFn {
318 testfn: StaticTestFn(f),
319 desc: t.desc.clone(),
321 StaticBenchFn(f) => TestDescAndFn {
322 testfn: StaticBenchFn(f),
323 desc: t.desc.clone(),
325 _ => panic!("non-static tests passed to test::test_main_static"),
328 test_main(&args, owned_tests, Options::new())
331 /// Invoked when unit tests terminate. Should panic if the unit
332 /// Tests is considered a failure. By default, invokes `report()`
333 /// and checks for a `0` result.
334 pub fn assert_test_result<T: Termination>(result: T) {
335 let code = result.report();
338 "the test returned a termination value with a non-zero status code ({}) \
339 which indicates a failure",
// Console coloring modes; `AutoColor`/`AlwaysColor`/`NeverColor` variant
// lines are missing here (they are matched in `parse_opts` and `use_color`).
344 #[derive(Copy, Clone, Debug)]
345 pub enum ColorConfig {
// Output format selection; `Pretty`/`Terse`/`Json` variants (used below in
// `run_tests_console`) are missing from this extract.
351 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
352 pub enum OutputFormat {
// `Yes` (run ignored + normal), `Only` (ignored only), `No` — variant lines
// missing here; see `filter_tests` for their semantics.
358 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
359 pub enum RunIgnored {
// Parsed command-line options driving the whole harness. Some fields
// (`list`, `run_tests`, `nocapture`, …) are missing from this extract but are
// read elsewhere in the file.
366 pub struct TestOpts {
368 pub filter: Option<String>,
369 pub filter_exact: bool,
370 pub exclude_should_panic: bool,
371 pub run_ignored: RunIgnored,
373 pub bench_benchmarks: bool,
374 pub logfile: Option<PathBuf>,
376 pub color: ColorConfig,
377 pub format: OutputFormat,
378 pub test_threads: Option<usize>,
379 pub skip: Vec<String>,
380 pub options: Options,
383 /// Result of parsing the options.
384 pub type OptRes = Result<TestOpts, String>;
// Build the getopts option table for the test harness CLI. The interleaved
// bare string lines below are fragments of multi-line option descriptions
// (their surrounding `.optopt(...)` calls were dropped by this extract), so
// no comments are inserted among them.
386 fn optgroups() -> getopts::Options {
387 let mut opts = getopts::Options::new();
388 opts.optflag("", "include-ignored", "Run ignored and not ignored tests")
389 .optflag("", "ignored", "Run only ignored tests")
390 .optflag("", "exclude-should-panic", "Excludes tests marked as should_panic")
391 .optflag("", "test", "Run tests and not benchmarks")
392 .optflag("", "bench", "Run benchmarks instead of tests")
393 .optflag("", "list", "List all tests and benchmarks")
394 .optflag("h", "help", "Display this message (longer with --help)")
398 "Write logs to the specified file instead \
405 "don't capture stdout/stderr of each \
406 task, allow printing directly",
411 "Number of threads used for running tests \
418 "Skip tests whose names contain FILTER (this flag can \
419 be used multiple times)",
425 "Display one character per test instead of one line. \
426 Alias to --format=terse",
431 "Exactly match filters rather than by substring",
436 "Configure coloring of output:
437 auto = colorize if stdout is a tty and tests are run on serially (default);
438 always = always colorize output;
439 never = never colorize output;",
445 "Configure formatting of output:
446 pretty = Print verbose output;
447 terse = Display one character per test;
448 json = Output a json document",
454 "Enable nightly-only flags:
455 unstable-options = Allow use of experimental features",
// Print the full --help text. The bare text lines below are the interior of
// a multi-line usage string (its delimiters were dropped by this extract).
461 fn usage(binary: &str, options: &getopts::Options) {
462 let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
466 The FILTER string is tested against the name of all tests, and only those
467 tests whose names contain the filter are run.
469 By default, all tests are run in parallel. This can be altered with the
470 --test-threads flag or the RUST_TEST_THREADS environment variable when running
473 All tests have their standard output and standard error captured by default.
474 This can be overridden with the --nocapture flag or setting RUST_TEST_NOCAPTURE
475 environment variable to a value other than "0". Logging is not captured by default.
479 `#[test]` - Indicates a function is a test to be run. This function
481 `#[bench]` - Indicates a function is a benchmark to be run. This
482 function takes one argument (test::Bencher).
483 `#[should_panic]` - This function (also labeled with `#[test]`) will only pass if
484 the code causes a panic (an assertion failure or panic!)
485 A message may be provided, which the failure string must
486 contain: #[should_panic(expected = "foo")].
487 `#[ignore]` - When applied to a function which is already attributed as a
488 test, then the test runner will ignore these tests during
489 normal test runs. Running with --ignored or --include-ignored will run
491 usage = options.usage(&message)
495 // FIXME: Copied from libsyntax until linkage errors are resolved. Issue #47566
// Nightly detection: unstable flags are allowed unless this is a
// feature-staged (beta/stable) build, or always when bootstrapping.
496 fn is_nightly() -> bool {
497 // Whether this is a feature-staged build, i.e., on the beta or stable channel
498 let disable_unstable_features = option_env!("CFG_DISABLE_UNSTABLE_FEATURES").is_some();
499 // Whether we should enable unstable features for bootstrapping
500 let bootstrap = env::var("RUSTC_BOOTSTRAP").is_ok();
502 bootstrap || !disable_unstable_features
505 // Parses command line arguments into test options
// Returns `None` when --help was requested (usage already printed),
// `Some(Err(msg))` on invalid input, `Some(Ok(opts))` otherwise.
506 pub fn parse_opts(args: &[String]) -> Option<OptRes> {
507 let mut allow_unstable = false;
508 let opts = optgroups();
// Skip argv[0] (the binary name); fall back to the full slice if empty.
509 let args = args.get(1..).unwrap_or(args);
510 let matches = match opts.parse(args) {
512 Err(f) => return Some(Err(f.to_string())),
// -Z flags are gated on nightly (see `is_nightly` above).
515 if let Some(opt) = matches.opt_str("Z") {
518 "the option `Z` is only accepted on the nightly compiler".into(),
523 "unstable-options" => {
524 allow_unstable = true;
527 return Some(Err("Unrecognized option to `Z`".into()));
532 if matches.opt_present("h") {
533 usage(&args[0], &opts);
// The first free (non-flag) argument is the name filter.
537 let filter = if !matches.free.is_empty() {
538 Some(matches.free[0].clone())
// Both of these flags are nightly-gated unless -Zunstable-options was given.
543 let exclude_should_panic = matches.opt_present("exclude-should-panic");
544 if !allow_unstable && exclude_should_panic {
546 "The \"exclude-should-panic\" flag is only accepted on the nightly compiler".into(),
550 let include_ignored = matches.opt_present("include-ignored");
551 if !allow_unstable && include_ignored {
553 "The \"include-ignored\" flag is only accepted on the nightly compiler".into(),
557 let run_ignored = match (include_ignored, matches.opt_present("ignored")) {
560 "the options --include-ignored and --ignored are mutually exclusive".into(),
563 (true, false) => RunIgnored::Yes,
564 (false, true) => RunIgnored::Only,
565 (false, false) => RunIgnored::No,
567 let quiet = matches.opt_present("quiet");
568 let exact = matches.opt_present("exact");
569 let list = matches.opt_present("list");
571 let logfile = matches.opt_str("logfile");
572 let logfile = logfile.map(|s| PathBuf::from(&s));
// --bench alone disables ordinary tests unless --test is also given.
574 let bench_benchmarks = matches.opt_present("bench");
575 let run_tests = !bench_benchmarks || matches.opt_present("test");
// RUST_TEST_NOCAPTURE overrides the flag; any value other than "0" enables it.
577 let mut nocapture = matches.opt_present("nocapture");
579 nocapture = match env::var("RUST_TEST_NOCAPTURE") {
580 Ok(val) => &val != "0",
585 let test_threads = match matches.opt_str("test-threads") {
586 Some(n_str) => match n_str.parse::<usize>() {
587 Ok(0) => return Some(Err("argument for --test-threads must not be 0".to_string())),
590 return Some(Err(format!(
591 "argument for --test-threads must be a number > 0 \
600 let color = match matches.opt_str("color").as_ref().map(|s| &**s) {
601 Some("auto") | None => AutoColor,
602 Some("always") => AlwaysColor,
603 Some("never") => NeverColor,
606 return Some(Err(format!(
607 "argument for --color must be auto, always, or never (was \
// --quiet is an alias for --format=terse; --format=json is nightly-gated.
614 let format = match matches.opt_str("format").as_ref().map(|s| &**s) {
615 None if quiet => OutputFormat::Terse,
616 Some("pretty") | None => OutputFormat::Pretty,
617 Some("terse") => OutputFormat::Terse,
621 "The \"json\" format is only accepted on the nightly compiler".into(),
628 return Some(Err(format!(
629 "argument for --format must be pretty, terse, or json (was \
636 let test_opts = TestOpts {
640 exclude_should_panic,
649 skip: matches.opt_strs("skip"),
650 options: Options::new(),
// Timing statistics for one benchmark; other fields (e.g. `mb_s`, read in
// `fmt_bench_samples`) are missing from this extract.
656 #[derive(Clone, PartialEq)]
657 pub struct BenchSamples {
658 ns_iter_summ: stats::Summary,
// Outcome of a single test; `TrOk`/`TrFailed`/`TrFailedMsg`/`TrIgnored`/
// `TrAllowedFail` variant lines are missing here but are matched below.
662 #[derive(Clone, PartialEq)]
663 pub enum TestResult {
669 TrBench(BenchSamples),
// NOTE(review): unsafe impl with no SAFETY comment — the invariant that makes
// `TestResult` sendable across threads should be documented; confirm why the
// auto-impl doesn't apply here.
672 unsafe impl Send for TestResult {}
// Output sink: either a `term` terminal (capable of color) or a raw writer.
674 enum OutputLocation<T> {
675 Pretty(Box<term::StdoutTerminal>),
// Forward `Write` to whichever sink is active.
679 impl<T: Write> Write for OutputLocation<T> {
680 fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
682 Pretty(ref mut term) => term.write(buf),
683 Raw(ref mut stdout) => stdout.write(buf),
687 fn flush(&mut self) -> io::Result<()> {
689 Pretty(ref mut term) => term.flush(),
690 Raw(ref mut stdout) => stdout.flush(),
// Mutable counters and captured-output store accumulated while a run
// progresses; counter fields (passed/failed/ignored/…) are missing from this
// extract but are summed in `current_test_count`.
695 struct ConsoleTestState {
696 log_out: Option<File>,
705 failures: Vec<(TestDesc, Vec<u8>)>,
706 not_failures: Vec<(TestDesc, Vec<u8>)>,
710 impl ConsoleTestState {
// Opens the logfile eagerly (if requested) so I/O errors surface up front.
711 pub fn new(opts: &TestOpts) -> io::Result<ConsoleTestState> {
712 let log_out = match opts.logfile {
713 Some(ref path) => Some(File::create(path)?),
717 Ok(ConsoleTestState {
726 metrics: MetricMap::new(),
727 failures: Vec::new(),
728 not_failures: Vec::new(),
729 options: opts.options,
// Append a line to the logfile, if one was configured; no-op otherwise.
733 pub fn write_log<S: AsRef<str>>(&mut self, msg: S) -> io::Result<()> {
734 let msg = msg.as_ref();
737 Some(ref mut o) => o.write_all(msg.as_bytes()),
// Log one test's outcome in "status name" form.
741 pub fn write_log_result(&mut self, test: &TestDesc, result: &TestResult) -> io::Result<()> {
742 self.write_log(format!(
745 TrOk => "ok".to_owned(),
746 TrFailed => "failed".to_owned(),
747 TrFailedMsg(ref msg) => format!("failed: {}", msg),
748 TrIgnored => "ignored".to_owned(),
749 TrAllowedFail => "failed (allowed)".to_owned(),
750 TrBench(ref bs) => fmt_bench_samples(bs),
// Total tests accounted for so far; asserted against `total` at the end of
// `run_tests_console`.
756 fn current_test_count(&self) -> usize {
757 self.passed + self.failed + self.ignored + self.measured + self.allowed_fail
761 // Format a number with thousands separators
// Walks the 10^9, 10^6, 10^3, 10^0 groups; the first emitted group is
// unpadded, later groups are zero-padded to 3 digits. Note the table only
// covers values below 10^12. The lines consuming `sep` and updating
// `trailing`/`n` are missing from this extract.
762 fn fmt_thousands_sep(mut n: usize, sep: char) -> String {
764 let mut output = String::new();
765 let mut trailing = false;
766 for &pow in &[9, 6, 3, 0] {
767 let base = 10_usize.pow(pow);
768 if pow == 0 || trailing || n / base != 0 {
770 output.write_fmt(format_args!("{}", n / base)).unwrap();
772 output.write_fmt(format_args!("{:03}", n / base)).unwrap();
// Render a benchmark result line, e.g. "1,234 ns/iter (+/- 56)", with an
// " = N MB/s" suffix (conditional logic around it is missing here).
785 pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
787 let mut output = String::new();
789 let median = bs.ns_iter_summ.median as usize;
// Deviation reported as the full max-min spread, not a statistical stddev.
790 let deviation = (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize;
793 .write_fmt(format_args!(
794 "{:>11} ns/iter (+/- {})",
795 fmt_thousands_sep(median, ','),
796 fmt_thousands_sep(deviation, ',')
801 .write_fmt(format_args!(" = {} MB/s", bs.mb_s))
807 // List the tests to console, and optionally to logfile. Filters are honored.
808 pub fn list_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<()> {
// Prefer a color-capable terminal writer; fall back to plain stdout.
809 let mut output = match term::stdout() {
810 None => Raw(io::stdout()),
811 Some(t) => Pretty(t),
814 let quiet = opts.format == OutputFormat::Terse;
815 let mut st = ConsoleTestState::new(opts)?;
// Only tests surviving `filter_tests` are listed, mirroring what would run.
820 for test in filter_tests(&opts, tests) {
821 use crate::TestFn::*;
824 desc: TestDesc { name, .. },
// Classify each entry as "test" or "benchmark" for the listing (the counter
// increments inside these arms are missing from this extract).
828 let fntype = match testfn {
829 StaticTestFn(..) | DynTestFn(..) => {
833 StaticBenchFn(..) | DynBenchFn(..) => {
839 writeln!(output, "{}: {}", name, fntype)?;
840 st.write_log(format!("{} {}\n", fntype, name))?;
// Local helper: naive English pluralization for the summary line.
843 fn plural(count: u32, s: &str) -> String {
845 1 => format!("{} {}", 1, s),
846 n => format!("{} {}s", n, s),
// Summary footer, printed only in non-quiet mode with a non-empty listing
// (the `!quiet` guard line is missing from this extract — confirm).
851 if ntest != 0 || nbench != 0 {
852 writeln!(output, "")?;
858 plural(ntest, "test"),
859 plural(nbench, "benchmark")
866 // A simple console test runner
// Returns Ok(true) if every test passed, Ok(false) otherwise; the nested
// `callback` translates `TestEvent`s into formatter calls and state updates.
867 pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<bool> {
870 st: &mut ConsoleTestState,
871 out: &mut dyn OutputFormatter,
872 ) -> io::Result<()> {
// Events are cloned out of the borrow so state can be mutated below.
873 match (*event).clone() {
874 TeFiltered(ref filtered_tests) => {
875 st.total = filtered_tests.len();
876 out.write_run_start(filtered_tests.len())
878 TeFilteredOut(filtered_out) => Ok(st.filtered_out = filtered_out),
879 TeWait(ref test) => out.write_test_start(test),
880 TeTimeout(ref test) => out.write_timeout(test),
881 TeResult(test, result, stdout) => {
882 st.write_log_result(&test, &result)?;
883 out.write_result(&test, &result, &*stdout)?;
// Per-result bookkeeping: counter-increment lines for several arms are
// missing from this extract.
887 st.not_failures.push((test, stdout));
889 TrIgnored => st.ignored += 1,
890 TrAllowedFail => st.allowed_fail += 1,
// Benchmarks also record their median/spread into the metric map.
892 st.metrics.insert_metric(
893 test.name.as_slice(),
894 bs.ns_iter_summ.median,
895 bs.ns_iter_summ.max - bs.ns_iter_summ.min,
901 st.failures.push((test, stdout));
903 TrFailedMsg(msg) => {
// The failure message is appended to the captured output so the final
// failure report shows it alongside the test's own stdout.
905 let mut stdout = stdout;
906 stdout.extend_from_slice(format!("note: {}", msg).as_bytes());
907 st.failures.push((test, stdout));
915 let output = match term::stdout() {
916 None => Raw(io::stdout()),
917 Some(t) => Pretty(t),
// Widest padded name, used by the pretty/terse formatters for column layout.
920 let max_name_len = tests
922 .max_by_key(|t| len_if_padded(*t))
923 .map(|t| t.desc.name.as_slice().len())
926 let is_multithreaded = opts.test_threads.unwrap_or_else(get_concurrency) > 1;
928 let mut out: Box<dyn OutputFormatter> = match opts.format {
929 OutputFormat::Pretty => Box::new(PrettyFormatter::new(
935 OutputFormat::Terse => Box::new(TerseFormatter::new(
941 OutputFormat::Json => Box::new(JsonFormatter::new(output)),
943 let mut st = ConsoleTestState::new(opts)?;
// Only right-padded (benchmark) names count toward the column width.
944 fn len_if_padded(t: &TestDescAndFn) -> usize {
945 match t.testfn.padding() {
947 PadOnRight => t.desc.name.as_slice().len(),
951 run_tests(opts, tests, |x| callback(&x, &mut st, &mut *out))?;
// Sanity check: every filtered-in test must have produced exactly one result.
953 assert!(st.current_test_count() == st.total);
955 return out.write_run_finish(&st);
// Decide whether to colorize: in auto mode only when output isn't captured
// and stdout is a tty; Always/Never arms are missing from this extract.
958 fn use_color(opts: &TestOpts) -> bool {
960 AutoColor => !opts.nocapture && stdout_isatty(),
// Platform-specific tty detection; the three bodies below are selected by
// #[cfg] blocks whose opening lines are partly missing from this extract.
967 target_os = "cloudabi",
969 all(target_arch = "wasm32", not(target_os = "emscripten")),
970 all(target_vendor = "fortanix", target_env = "sgx")
// Stub for platforms with no isatty (returns a constant; value line missing).
972 fn stdout_isatty() -> bool {
973 // FIXME: Implement isatty on Redox and SGX
// Unix: defer to libc.
977 fn stdout_isatty() -> bool {
978 unsafe { libc::isatty(libc::STDOUT_FILENO) != 0 }
// Windows: a handle is a console iff GetConsoleMode succeeds on it.
981 fn stdout_isatty() -> bool {
984 type HANDLE = *mut u8;
985 type LPDWORD = *mut u32;
986 const STD_OUTPUT_HANDLE: DWORD = -11i32 as DWORD;
988 fn GetStdHandle(which: DWORD) -> HANDLE;
989 fn GetConsoleMode(hConsoleHandle: HANDLE, lpMode: LPDWORD) -> BOOL;
992 let handle = GetStdHandle(STD_OUTPUT_HANDLE);
994 GetConsoleMode(handle, &mut out) != 0
// Variants of the `TestEvent` enum consumed by `run_tests_console`'s
// callback; the enum header and `TeWait` variant are missing here.
1000 TeFiltered(Vec<TestDesc>),
1002 TeResult(TestDesc, TestResult, Vec<u8>),
1003 TeTimeout(TestDesc),
1004 TeFilteredOut(usize),
// One message per finished test, sent over the monitor channel.
1007 pub type MonitorMsg = (TestDesc, TestResult, Vec<u8>);
// Shared capture buffer installed as the thread's print/panic sink while a
// test runs (see `run_test_inner`).
1009 struct Sink(Arc<Mutex<Vec<u8>>>);
1010 impl Write for Sink {
1011 fn write(&mut self, data: &[u8]) -> io::Result<usize> {
1012 Write::write(&mut *self.0.lock().unwrap(), data)
1014 fn flush(&mut self) -> io::Result<()> {
// Core scheduler: filters tests, emits TeFiltered/TeFilteredOut, then runs
// tests (serially or up to `concurrency` at once) followed by benchmarks in
// serial, reporting everything through `callback`.
1019 pub fn run_tests<F>(opts: &TestOpts, tests: Vec<TestDescAndFn>, mut callback: F) -> io::Result<()>
1021 F: FnMut(TestEvent) -> io::Result<()>,
1023 use std::collections::{self, HashMap};
1024 use std::hash::BuildHasherDefault;
1025 use std::sync::mpsc::RecvTimeoutError;
1026 // Use a deterministic hasher
1028 HashMap<TestDesc, Instant, BuildHasherDefault<collections::hash_map::DefaultHasher>>;
1030 let tests_len = tests.len();
1032 let mut filtered_tests = filter_tests(opts, tests);
// Without --bench, benchmarks are demoted to single-iteration tests.
1033 if !opts.bench_benchmarks {
1034 filtered_tests = convert_benchmarks_to_tests(filtered_tests);
// Bake each test's padding into its name up front so formatters align.
1037 let filtered_tests = {
1038 let mut filtered_tests = filtered_tests;
1039 for test in filtered_tests.iter_mut() {
1040 test.desc.name = test.desc.name.with_padding(test.testfn.padding());
1046 let filtered_out = tests_len - filtered_tests.len();
1047 callback(TeFilteredOut(filtered_out))?;
1049 let filtered_descs = filtered_tests.iter().map(|t| t.desc.clone()).collect();
1051 callback(TeFiltered(filtered_descs))?;
// Split tests (run now, possibly concurrent) from benchmarks (run last).
1053 let (filtered_tests, filtered_benchs): (Vec<_>, _) =
1054 filtered_tests.into_iter().partition(|e| match e.testfn {
1055 StaticTestFn(_) | DynTestFn(_) => true,
1059 let concurrency = opts.test_threads.unwrap_or_else(get_concurrency);
// Reversed so `pop()` yields tests in original order.
1061 let mut remaining = filtered_tests;
1062 remaining.reverse();
1063 let mut pending = 0;
1065 let (tx, rx) = channel::<MonitorMsg>();
// Maps each in-flight test to its warn deadline.
1067 let mut running_tests: TestMap = HashMap::default();
// Collect (and forget) every test whose warn deadline has passed; the test
// itself keeps running — this only drives the TeTimeout warning event.
1069 fn get_timed_out_tests(running_tests: &mut TestMap) -> Vec<TestDesc> {
1070 let now = Instant::now();
1071 let timed_out = running_tests
1073 .filter_map(|(desc, timeout)| {
1074 if &now >= timeout {
1081 for test in &timed_out {
1082 running_tests.remove(test);
// Time until the nearest deadline, or None when nothing is in flight
// (the already-overdue branch is missing from this extract).
1087 fn calc_timeout(running_tests: &TestMap) -> Option<Duration> {
1088 running_tests.values().min().map(|next_timeout| {
1089 let now = Instant::now();
1090 if *next_timeout >= now {
// Serial path: run one test, block for its result, report, repeat.
1098 if concurrency == 1 {
1099 while !remaining.is_empty() {
1100 let test = remaining.pop().unwrap();
1101 callback(TeWait(test.desc.clone()))?;
1102 run_test(opts, !opts.run_tests, test, tx.clone(), Concurrent::No);
1103 let (test, result, stdout) = rx.recv().unwrap();
1104 callback(TeResult(test, result, stdout))?;
// Concurrent path: keep up to `concurrency` tests in flight, waking early
// to emit timeout warnings for long-running ones.
1107 while pending > 0 || !remaining.is_empty() {
1108 while pending < concurrency && !remaining.is_empty() {
1109 let test = remaining.pop().unwrap();
1110 let timeout = Instant::now() + Duration::from_secs(TEST_WARN_TIMEOUT_S);
1111 running_tests.insert(test.desc.clone(), timeout);
1112 callback(TeWait(test.desc.clone()))?; //here no pad
1113 run_test(opts, !opts.run_tests, test, tx.clone(), Concurrent::Yes);
// Wait for a result, but no longer than the nearest warn deadline; loop to
// re-arm after issuing warnings until a real result (or disconnect) arrives.
1119 if let Some(timeout) = calc_timeout(&running_tests) {
1120 res = rx.recv_timeout(timeout);
1121 for test in get_timed_out_tests(&mut running_tests) {
1122 callback(TeTimeout(test))?;
1124 if res != Err(RecvTimeoutError::Timeout) {
1128 res = rx.recv().map_err(|_| RecvTimeoutError::Disconnected);
1133 let (desc, result, stdout) = res.unwrap();
1134 running_tests.remove(&desc);
1136 callback(TeResult(desc, result, stdout))?;
1141 if opts.bench_benchmarks {
1142 // All benchmarks run at the end, in serial.
1143 for b in filtered_benchs {
1144 callback(TeWait(b.desc.clone()))?;
1145 run_test(opts, false, b, tx.clone(), Concurrent::No);
1146 let (test, result, stdout) = rx.recv().unwrap();
1147 callback(TeResult(test, result, stdout))?;
1153 #[allow(deprecated)]
// Default test-thread count: RUST_TEST_THREADS if set and positive (panic
// line for the invalid case is partially missing here), else CPU count.
1154 fn get_concurrency() -> usize {
1155 return match env::var("RUST_TEST_THREADS") {
1157 let opt_n: Option<usize> = s.parse().ok();
1159 Some(n) if n > 0 => n,
1161 "RUST_TEST_THREADS is `{}`, should be a positive integer.",
1166 Err(..) => num_cpus(),
// Windows: read dwNumberOfProcessors from GetSystemInfo. Field names follow
// the Win32 struct layout, hence #[allow(nonstandard_style)].
1170 #[allow(nonstandard_style)]
1171 fn num_cpus() -> usize {
1173 struct SYSTEM_INFO {
1174 wProcessorArchitecture: u16,
1177 lpMinimumApplicationAddress: *mut u8,
1178 lpMaximumApplicationAddress: *mut u8,
1179 dwActiveProcessorMask: *mut u8,
1180 dwNumberOfProcessors: u32,
1181 dwProcessorType: u32,
1182 dwAllocationGranularity: u32,
1183 wProcessorLevel: u16,
1184 wProcessorRevision: u16,
1187 fn GetSystemInfo(info: *mut SYSTEM_INFO) -> i32;
// NOTE(review): unsafe block with no SAFETY comment; zeroed() is valid here
// only because SYSTEM_INFO is all plain integers/pointers.
1190 let mut sysinfo = std::mem::zeroed();
1191 GetSystemInfo(&mut sysinfo);
1192 sysinfo.dwNumberOfProcessors as usize
// The stubs below return a hard-coded count (value lines missing here).
1196 #[cfg(target_os = "redox")]
1197 fn num_cpus() -> usize {
1198 // FIXME: Implement num_cpus on Redox
1202 #[cfg(target_os = "vxworks")]
1203 fn num_cpus() -> usize {
1204 // FIXME: Implement num_cpus on vxWorks
1209 all(target_arch = "wasm32", not(target_os = "emscripten")),
1210 all(target_vendor = "fortanix", target_env = "sgx")
1212 fn num_cpus() -> usize {
// Mainstream unix: sysconf(_SC_NPROCESSORS_ONLN).
1217 target_os = "android",
1218 target_os = "cloudabi",
1219 target_os = "emscripten",
1220 target_os = "fuchsia",
1222 target_os = "linux",
1223 target_os = "macos",
1224 target_os = "solaris"
1226 fn num_cpus() -> usize {
1227 unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as usize }
// BSDs: sysctl(CTL_HW, HW_NCPU) with a sysconf fallback; the sysctl call
// itself and the clamping logic are missing from this extract.
1231 target_os = "freebsd",
1232 target_os = "dragonfly",
1233 target_os = "netbsd"
1235 fn num_cpus() -> usize {
1238 let mut cpus: libc::c_uint = 0;
1239 let mut cpus_size = std::mem::size_of_val(&cpus);
1242 cpus = libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as libc::c_uint;
1245 let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
1250 &mut cpus as *mut _ as *mut _,
1251 &mut cpus_size as *mut _ as *mut _,
// OpenBSD: sysctl only (no sysconf fallback visible).
1263 #[cfg(target_os = "openbsd")]
1264 fn num_cpus() -> usize {
1267 let mut cpus: libc::c_uint = 0;
1268 let mut cpus_size = std::mem::size_of_val(&cpus);
1269 let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
1275 &mut cpus as *mut _ as *mut _,
1276 &mut cpus_size as *mut _ as *mut _,
1287 #[cfg(target_os = "haiku")]
1288 fn num_cpus() -> usize {
1293 #[cfg(target_os = "l4re")]
1294 fn num_cpus() -> usize {
// Apply the CLI filters in order: name filter, --skip filters,
// --exclude-should-panic, then the --ignored/--include-ignored policy;
// finally sort by name for deterministic output.
1300 pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
1301 let mut filtered = tests;
// --exact demands full-name equality; otherwise substring match.
1302 let matches_filter = |test: &TestDescAndFn, filter: &str| {
1303 let test_name = test.desc.name.as_slice();
1305 match opts.filter_exact {
1306 true => test_name == filter,
1307 false => test_name.contains(filter),
1311 // Remove tests that don't match the test filter
1312 if let Some(ref filter) = opts.filter {
1313 filtered.retain(|test| matches_filter(test, filter));
1316 // Skip tests that match any of the skip filters
1317 filtered.retain(|test| !opts.skip.iter().any(|sf| matches_filter(test, sf)));
1319 // Excludes #[should_panic] tests
1320 if opts.exclude_should_panic {
1321 filtered.retain(|test| test.desc.should_panic == ShouldPanic::No);
1324 // maybe unignore tests
1325 match opts.run_ignored {
// Yes: clear every ignore flag so ignored tests run alongside the rest.
1326 RunIgnored::Yes => {
1329 .for_each(|test| test.desc.ignore = false);
// Only: keep just the ignored tests, then un-ignore them so they run.
1331 RunIgnored::Only => {
1332 filtered.retain(|test| test.desc.ignore);
1335 .for_each(|test| test.desc.ignore = false);
1337 RunIgnored::No => {}
1340 // Sort the tests alphabetically
1341 filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));
// Rewrap each benchmark as a plain test that runs its body once
// (`bench::run_once`), used when --bench was not requested.
1346 pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
1347 // convert benchmarks to tests, if we're not benchmarking them
1351 let testfn = match x.testfn {
1352 DynBenchFn(bench) => DynTestFn(Box::new(move || {
1353 bench::run_once(|b| __rust_begin_short_backtrace(|| bench.run(b)))
1355 StaticBenchFn(benchfn) => DynTestFn(Box::new(move || {
1356 bench::run_once(|b| __rust_begin_short_backtrace(|| benchfn(b)))
// Tail of `run_test`'s signature (its opening `pub fn run_test(` line and
// the `force_ignore` parameter are missing from this extract). Dispatches a
// single test/bench and reports its outcome on `monitor_ch`.
1371 test: TestDescAndFn,
1372 monitor_ch: Sender<MonitorMsg>,
1373 concurrency: Concurrent,
1375 let TestDescAndFn { desc, testfn } = test;
// Non-emscripten wasm has panic=abort, so should_panic tests cannot be
// observed — treat them as ignored instead of aborting the whole run.
1377 let ignore_because_panic_abort = cfg!(target_arch = "wasm32")
1378 && !cfg!(target_os = "emscripten")
1379 && desc.should_panic != ShouldPanic::No;
1381 if force_ignore || desc.ignore || ignore_because_panic_abort {
1382 monitor_ch.send((desc, TrIgnored, Vec::new())).unwrap();
// Parameters of the nested `run_test_inner` helper (its opening line is
// missing): runs the boxed test closure with I/O capture and panic handling.
1388 monitor_ch: Sender<MonitorMsg>,
1390 testfn: Box<dyn FnOnce() + Send>,
1391 concurrency: Concurrent,
1393 // Buffer for capturing standard I/O
1394 let data = Arc::new(Mutex::new(Vec::new()));
1395 let data2 = data.clone();
1397 let name = desc.name.clone();
1398 let runtest = move || {
// Unless --nocapture, redirect this thread's print/panic output into the
// shared Sink buffer, remembering the old handlers for restoration.
1399 let oldio = if !nocapture {
1401 io::set_print(Some(Box::new(Sink(data2.clone())))),
1402 io::set_panic(Some(Box::new(Sink(data2)))),
// A panicking test is caught here and turned into a TestResult below.
1408 let result = catch_unwind(AssertUnwindSafe(testfn));
1410 if let Some((printio, panicio)) = oldio {
1411 io::set_print(printio);
1412 io::set_panic(panicio);
1415 let test_result = calc_result(&desc, result);
1416 let stdout = data.lock().unwrap().to_vec();
1418 .send((desc.clone(), test_result, stdout))
1422 // If the platform is single-threaded we're just going to run
1423 // the test synchronously, regardless of the concurrency
1425 let supports_threads = !cfg!(target_os = "emscripten") && !cfg!(target_arch = "wasm32");
// Spawned thread carries the test's name for nicer panic messages.
1426 if concurrency == Concurrent::Yes && supports_threads {
1427 let cfg = thread::Builder::new().name(name.as_slice().to_owned());
1428 cfg.spawn(runtest).unwrap();
// Dispatch arms of `run_test` (the surrounding match header is missing):
// benchmarks go through `bench::benchmark`, tests through `run_test_inner`.
1435 DynBenchFn(bencher) => {
1436 crate::bench::benchmark(desc, monitor_ch, opts.nocapture, |harness| {
1437 bencher.run(harness)
1440 StaticBenchFn(benchfn) => {
1441 crate::bench::benchmark(desc, monitor_ch, opts.nocapture, |harness| {
1442 (benchfn.clone())(harness)
1446 let cb = move || __rust_begin_short_backtrace(f);
1447 run_test_inner(desc, monitor_ch, opts.nocapture, Box::new(cb), concurrency)
1449 StaticTestFn(f) => run_test_inner(
1453 Box::new(move || __rust_begin_short_backtrace(f)),
1459 /// Fixed frame used to clean the backtrace with `RUST_BACKTRACE=1`.
1461 fn __rust_begin_short_backtrace<F: FnOnce()>(f: F) {
// Map a test's panic outcome against its should_panic/allow_fail settings
// to a TestResult; several arms are missing from this extract.
1465 fn calc_result(desc: &TestDesc, task_result: Result<(), Box<dyn Any + Send>>) -> TestResult {
1466 match (&desc.should_panic, task_result) {
1467 (&ShouldPanic::No, Ok(())) | (&ShouldPanic::Yes, Err(_)) => TrOk,
// Expected-message case: the panic payload may be a String (from format!)
// or a &'static str (from a literal); check both.
1468 (&ShouldPanic::YesWithMessage(msg), Err(ref err)) => {
1470 .downcast_ref::<String>()
1472 .or_else(|| err.downcast_ref::<&'static str>().map(|e| *e))
1473 .map(|e| e.contains(msg))
1478 if desc.allow_fail {
1481 TrFailedMsg(format!("panic did not include expected string '{}'", msg))
1485 _ if desc.allow_fail => TrAllowedFail,
1490 #[derive(Clone, PartialEq)]
1491 pub struct MetricMap(BTreeMap<String, Metric>);
1494 pub fn new() -> MetricMap {
1495 MetricMap(BTreeMap::new())
/// Insert a named `value` (+/- `noise`) metric into the map. The value
/// must be non-negative. The `noise` indicates the uncertainty of the
/// metric, which doubles as the "noise range" of acceptable
/// pairwise-regressions on this named value, when comparing from one
/// metric to the next using `compare_to_old`.
/// If `noise` is positive, then it means this metric is of a value
/// you want to see grow smaller, so a change larger than `noise` in the
/// positive direction represents a regression.
/// If `noise` is negative, then it means this metric is of a value
/// you want to see grow larger, so a change larger than `noise` in the
/// negative direction represents a regression.
// NOTE(review): `compare_to_old`, referenced above, is not visible in this
// chunk — verify it still exists before trusting that part of the doc.
pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
    let m = Metric { value, noise };
    // `BTreeMap::insert` silently replaces any existing metric that was
    // registered under the same name.
    self.0.insert(name.to_owned(), m);
/// Renders each metric as `"name: value (+/- noise)"`.
// NOTE(review): the iteration source of this chain (presumably
// `self.0.iter()`) and the final join/return are missing from this chunk.
pub fn fmt_metrics(&self) -> String {
    .map(|(k, v)| format!("{}: {} (+/- {})", *k, v.value, v.noise))
    .collect::<Vec<_>>();
// Re-export the standard optimizer barrier so benchmark authors can write
// `test::black_box(..)` to keep values from being constant-folded away.
pub use std::hint::black_box;
/// Callback for benchmark functions to run in their body.
// NOTE(review): the `where` clause, branch structure (`else`/`return`),
// and closing braces of this method are missing from this chunk; the two
// visible paths are annotated below.
pub fn iter<T, F>(&mut self, mut inner: F)
    // Single-shot mode (`bench -- --test`-style): time exactly one pass.
    if self.mode == BenchMode::Single {
        ns_iter_inner(&mut inner, 1);
    // Auto mode: run the full calibrating sampler and keep its summary.
    self.summary = Some(iter(&mut inner));
/// Runs the benchmark body `f` against this `Bencher` and returns the
/// summary that the body recorded via `iter` (`None` if `iter` was never
/// called).
// NOTE(review): the invocation of `f` (presumably `f(self);`) is missing
// from this chunk.
pub fn bench<F>(&mut self, mut f: F) -> Option<stats::Summary>
    F: FnMut(&mut Bencher),
    return self.summary;
/// Converts a `Duration` into a total nanosecond count.
///
/// Uses `Duration::as_nanos` (which computes in `u128`) so the
/// seconds-to-nanoseconds multiply cannot overflow; the previous
/// `as_secs() * 1_000_000_000 + subsec_nanos()` form panics in debug
/// builds for durations of roughly 584+ years. The narrowing cast only
/// truncates beyond that same bound, far outside any benchmark run.
fn ns_from_dur(dur: Duration) -> u64 {
    dur.as_nanos() as u64
}
/// Times `k` consecutive invocations of `inner`, returning the total
/// elapsed wall-clock time in nanoseconds.
// NOTE(review): the `where` clause and the k-iteration loop (presumably
// `for _ in 0..k { black_box(inner()); }`) are missing from this chunk.
fn ns_iter_inner<T, F>(inner: &mut F, k: u64) -> u64
    let start = Instant::now();
    return ns_from_dur(start.elapsed());
/// Repeatedly samples `inner`, growing the batch size until the timing
/// statistics converge (or a hard time budget runs out), and returns the
/// resulting `stats::Summary`.
// NOTE(review): this chunk omits a number of lines of the function (the
// `where` clause, the outer `loop` head, loop/branch closing braces, and
// the early/late return paths); new comments below annotate only the
// visible statements.
pub fn iter<T, F>(inner: &mut F) -> stats::Summary
    // Initial bench run to get ballpark figure.
    let ns_single = ns_iter_inner(inner, 1);

    // Try to estimate iter count for 1ms falling back to 1m
    // iterations if first run took < 1ns.
    let ns_target_total = 1_000_000; // 1ms
    let mut n = ns_target_total / cmp::max(1, ns_single);

    // if the first run took more than 1ms we don't want to just
    // be left doing 0 iterations on every loop. The unfortunate
    // side effect of not being able to do as many runs is
    // automatically handled by the statistical analysis below
    // (i.e., larger error bars).
    let mut total_run = Duration::new(0, 0);
    // Fixed 50-slot sample buffer, reused for every convergence round.
    let samples: &mut [f64] = &mut [0.0_f64; 50];
    let loop_start = Instant::now();

    // First sample set: per-iteration cost measured over batches of `n`.
    for p in &mut *samples {
        *p = ns_iter_inner(inner, n) as f64 / n as f64;
    // Tame outliers before summarizing (5.0 presumably a percentile trim —
    // confirm against `stats::winsorize`).
    stats::winsorize(samples, 5.0);
    let summ = stats::Summary::new(samples);

    // Second sample set at 5x the batch size, used to cross-check that the
    // measurement has stabilized.
    for p in &mut *samples {
        let ns = ns_iter_inner(inner, 5 * n);
        *p = ns as f64 / (5 * n) as f64;
    stats::winsorize(samples, 5.0);
    let summ5 = stats::Summary::new(samples);

    let loop_run = loop_start.elapsed();

    // If we've run for 100ms and seem to have converged to a
    if loop_run > Duration::from_millis(100)
        && summ.median_abs_dev_pct < 1.0
        && summ.median - summ5.median < summ5.median_abs_dev

    total_run = total_run + loop_run;
    // Longest we ever run for is 3s.
    if total_run > Duration::from_secs(3) {

    // If we overflow here just return the results so far. We check a
    // multiplier of 10 because we're about to multiply by 2 and the
    // next iteration of the loop will also multiply by 5 (to calculate
    // the summ5 result)
    n = match n.checked_mul(10) {
1638 use super::{BenchMode, BenchSamples, Bencher, MonitorMsg, Sender, Sink, TestDesc, TestResult};
1642 use std::panic::{catch_unwind, AssertUnwindSafe};
1643 use std::sync::{Arc, Mutex};
/// Runs benchmark body `f` to completion, capturing its printed/panicked
/// output unless `nocapture` is set, and sends the verdict plus captured
/// output over `monitor_ch`.
// NOTE(review): several lines of this function are missing from this
// chunk (struct-literal fields, tuple/else arms, closing braces); the
// comments below annotate only the visible statements.
pub fn benchmark<F>(desc: TestDesc, monitor_ch: Sender<MonitorMsg>, nocapture: bool, f: F)
    F: FnMut(&mut Bencher),
    let mut bs = Bencher {
        mode: BenchMode::Auto,
    // Shared buffer collecting everything written during the run.
    let data = Arc::new(Mutex::new(Vec::new()));
    let data2 = data.clone();
    // Redirect print/panic output into `data`, remembering the old sinks
    // so they can be restored afterwards.
    let oldio = if !nocapture {
        io::set_print(Some(Box::new(Sink(data2.clone())))),
        io::set_panic(Some(Box::new(Sink(data2)))),
    // Trap panics so the sinks are restored even if the benchmark blows up.
    let result = catch_unwind(AssertUnwindSafe(|| bs.bench(f)));

    // Restore the original print/panic sinks.
    if let Some((printio, panicio)) = oldio {
        io::set_print(printio);
        io::set_panic(panicio);
    let test_result = match result {
        Ok(Some(ns_iter_summ)) => {
            // Clamp to 1ns to avoid division by zero below.
            let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
            // Throughput: bytes/iter * 1000 / ns-per-iter == MB/s.
            let mb_s = bs.bytes * 1000 / ns_iter;
            let bs = BenchSamples {
                mb_s: mb_s as usize,
            TestResult::TrBench(bs)
            // iter not called, so no data.
            // FIXME: error in this case?
            let samples: &mut [f64] = &mut [0.0_f64; 1];
            let bs = BenchSamples {
                ns_iter_summ: stats::Summary::new(samples),
            TestResult::TrBench(bs)
        // Benchmark panicked: plain failure.
        Err(_) => TestResult::TrFailed,
    // Forward the captured output and verdict to the test monitor.
    let stdout = data.lock().unwrap().to_vec();
    monitor_ch.send((desc, test_result, stdout)).unwrap();
1703 pub fn run_once<F>(f: F)
1705 F: FnMut(&mut Bencher),
1707 let mut bs = Bencher {
1708 mode: BenchMode::Single,