1 //! Support code for rustc's built in unit-test and micro-benchmarking
4 //! Almost all user code will only be interested in `Bencher` and
5 //! `black_box`. All other interactions (such as writing tests and
6 //! benchmarks themselves) should be done via the `#[test]` and
7 //! `#[bench]` attributes.
9 //! See the [Testing Chapter](../book/ch11-00-testing.html) of the book for more details.
11 // Currently, not much of this is meant for users. It is intended to
12 // support the simplest interface possible for representing and
13 // running tests while providing a base that other test frameworks may
16 // N.B., this is also specified in this crate's Cargo.toml, but libsyntax contains logic specific to
17 // this crate, which relies on this attribute (rather than the value of `--crate-name` passed by
18 // cargo) to detect this crate.
20 #![crate_name = "test"]
21 #![unstable(feature = "test", issue = "50297")]
22 #![doc(html_root_url = "https://doc.rust-lang.org/nightly/", test(attr(deny(warnings))))]
24 #![cfg_attr(any(unix, target_os = "cloudabi"), feature(libc, rustc_private))]
26 #![feature(set_stdio)]
27 #![feature(panic_unwind)]
28 #![feature(staged_api)]
29 #![feature(termination_trait_lib)]
33 #[cfg(any(unix, target_os = "cloudabi"))]
37 // FIXME(#54291): rustc and/or LLVM don't yet support building with panic-unwind
38 // on aarch64-pc-windows-msvc, or thumbv7a-pc-windows-msvc
39 // so we don't link libtest against libunwind (for the time being)
40 // even though it means that libtest won't be fully functional on
43 // See also: https://github.com/rust-lang/rust/issues/54190#issuecomment-422904437
44 #[cfg(not(all(windows, any(target_arch = "aarch64", target_arch = "arm"))))]
45 extern crate panic_unwind;
47 pub use self::ColorConfig::*;
48 use self::NamePadding::*;
49 use self::OutputLocation::*;
50 use self::TestEvent::*;
51 pub use self::TestFn::*;
52 pub use self::TestName::*;
53 pub use self::TestResult::*;
58 use std::collections::BTreeMap;
63 use std::io::prelude::*;
64 use std::panic::{catch_unwind, AssertUnwindSafe};
65 use std::path::PathBuf;
67 use std::process::Termination;
68 use std::sync::mpsc::{channel, Sender};
69 use std::sync::{Arc, Mutex};
71 use std::time::{Duration, Instant};
76 const TEST_WARN_TIMEOUT_S: u64 = 60;
77 const QUIET_MODE_MAX_COLUMN: usize = 100; // insert a '\n' after 100 tests in quiet mode
79 // to be used by rustc to compile tests in libtest
82 assert_test_result, filter_tests, parse_opts, run_test, test_main, test_main_static,
83 Bencher, DynTestFn, DynTestName, Metric, MetricMap, Options, RunIgnored, ShouldPanic,
84 StaticBenchFn, StaticTestFn, StaticTestName, TestDesc, TestDescAndFn, TestName, TestOpts,
85 TestResult, TrFailed, TrFailedMsg, TrIgnored, TrOk,
92 use crate::formatters::{JsonFormatter, OutputFormatter, PrettyFormatter, TerseFormatter};
94 /// Whether to execute tests concurrently or not
95 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
101 // The name of a test. By convention this follows the rules for rust
102 // paths; i.e., it should be a series of identifiers separated by double
103 // colons. This way if some test runner wants to arrange the tests
104 // hierarchically it may.
106 #[derive(Clone, PartialEq, Eq, Hash, Debug)]
108 StaticTestName(&'static str),
110 AlignedTestName(Cow<'static, str>, NamePadding),
113 fn as_slice(&self) -> &str {
115 StaticTestName(s) => s,
116 DynTestName(ref s) => s,
117 AlignedTestName(ref s, _) => &*s,
121 fn padding(&self) -> NamePadding {
123 &AlignedTestName(_, p) => p,
128 fn with_padding(&self, padding: NamePadding) -> TestName {
129 let name = match self {
130 &TestName::StaticTestName(name) => Cow::Borrowed(name),
131 &TestName::DynTestName(ref name) => Cow::Owned(name.clone()),
132 &TestName::AlignedTestName(ref name, _) => name.clone(),
135 TestName::AlignedTestName(name, padding)
138 impl fmt::Display for TestName {
139 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
140 fmt::Display::fmt(self.as_slice(), f)
/// How a test name should be padded when printed in a column of results.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub enum NamePadding {
    /// Print the name as-is (used for plain tests).
    PadNone,
    /// Pad the name on the right to the column width (used for benchmarks).
    PadOnRight,
}
151 fn padded_name(&self, column_count: usize, align: NamePadding) -> String {
152 let mut name = String::from(self.name.as_slice());
153 let fill = column_count.saturating_sub(name.len());
154 let pad = " ".repeat(fill);
165 /// Represents a benchmark function.
166 pub trait TDynBenchFn: Send {
167 fn run(&self, harness: &mut Bencher);
170 // A function that runs a test. If the function returns successfully,
171 // the test succeeds; if the function panics then the test fails. We
172 // may need to come up with a more clever definition of test in order
173 // to support isolation of tests into threads.
176 StaticBenchFn(fn(&mut Bencher)),
177 DynTestFn(Box<dyn FnOnce() + Send>),
178 DynBenchFn(Box<dyn TDynBenchFn + 'static>),
182 fn padding(&self) -> NamePadding {
184 StaticTestFn(..) => PadNone,
185 StaticBenchFn(..) => PadOnRight,
186 DynTestFn(..) => PadNone,
187 DynBenchFn(..) => PadOnRight,
192 impl fmt::Debug for TestFn {
193 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
194 f.write_str(match *self {
195 StaticTestFn(..) => "StaticTestFn(..)",
196 StaticBenchFn(..) => "StaticBenchFn(..)",
197 DynTestFn(..) => "DynTestFn(..)",
198 DynBenchFn(..) => "DynBenchFn(..)",
203 /// Manager of the benchmarking runs.
205 /// This is fed into functions marked with `#[bench]` to allow for
206 /// set-up & tear-down before running a piece of code repeatedly via a
211 summary: Option<stats::Summary>,
215 #[derive(Clone, PartialEq, Eq)]
/// Whether a test is expected to panic, and optionally the message the
/// panic payload must contain for the test to pass.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum ShouldPanic {
    No,
    Yes,
    YesWithMessage(&'static str),
}
228 // The definition of a single test. A test runner will run a list of
230 #[derive(Clone, Debug, PartialEq, Eq, Hash)]
231 pub struct TestDesc {
234 pub should_panic: ShouldPanic,
235 pub allow_fail: bool,
239 pub struct TestDescAndFn {
244 #[derive(Clone, PartialEq, Debug, Copy)]
251 pub fn new(value: f64, noise: f64) -> Metric {
252 Metric { value, noise }
/// In case we want to add other options as well, just add them in this struct.
#[derive(Copy, Clone, Debug)]
pub struct Options {
    // Whether a test's captured output should be displayed even on success.
    display_output: bool,
}
263 pub fn new() -> Options {
265 display_output: false,
269 pub fn display_output(mut self, display_output: bool) -> Options {
270 self.display_output = display_output;
275 // The default console test runner. It accepts the command line
276 // arguments and a vector of test_descs.
277 pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>, options: Options) {
278 let mut opts = match parse_opts(args) {
281 eprintln!("error: {}", msg);
287 opts.options = options;
289 if let Err(e) = list_tests_console(&opts, tests) {
290 eprintln!("error: io error when listing tests: {:?}", e);
294 match run_tests_console(&opts, tests) {
296 Ok(false) => process::exit(101),
298 eprintln!("error: io error when listing tests: {:?}", e);
305 // A variant optimized for invocation with a static test vector.
306 // This will panic (intentionally) when fed any dynamic tests, because
307 // it is copying the static values out into a dynamic vector and cannot
308 // copy dynamic values. It is doing this because from this point on
309 // a Vec<TestDescAndFn> is used in order to effect ownership-transfer
310 // semantics into parallel test runners, which in turn requires a Vec<>
311 // rather than a &[].
312 pub fn test_main_static(tests: &[&TestDescAndFn]) {
313 let args = env::args().collect::<Vec<_>>();
314 let owned_tests = tests
316 .map(|t| match t.testfn {
317 StaticTestFn(f) => TestDescAndFn {
318 testfn: StaticTestFn(f),
319 desc: t.desc.clone(),
321 StaticBenchFn(f) => TestDescAndFn {
322 testfn: StaticBenchFn(f),
323 desc: t.desc.clone(),
325 _ => panic!("non-static tests passed to test::test_main_static"),
328 test_main(&args, owned_tests, Options::new())
331 /// Invoked when unit tests terminate. Should panic if the unit
332 /// Tests is considered a failure. By default, invokes `report()`
333 /// and checks for a `0` result.
334 pub fn assert_test_result<T: Termination>(result: T) {
335 let code = result.report();
338 "the test returned a termination value with a non-zero status code ({}) \
339 which indicates a failure",
/// When console output should be colorized (mirrors the `--color` flag).
#[derive(Copy, Clone, Debug)]
pub enum ColorConfig {
    AutoColor,
    AlwaysColor,
    NeverColor,
}
/// Output style for test results (mirrors the `--format` flag).
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum OutputFormat {
    /// One line per test.
    Pretty,
    /// One character per test.
    Terse,
    /// JSON events, one per line.
    Json,
}
/// How `#[ignore]`d tests should be treated (mirrors `--ignored` /
/// `--include-ignored`).
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum RunIgnored {
    /// Run both ignored and non-ignored tests.
    Yes,
    /// Default: skip ignored tests.
    No,
    /// Run only the ignored tests.
    Only,
}
366 pub struct TestOpts {
368 pub filter: Option<String>,
369 pub filter_exact: bool,
370 pub exclude_should_panic: bool,
371 pub run_ignored: RunIgnored,
373 pub bench_benchmarks: bool,
374 pub logfile: Option<PathBuf>,
376 pub color: ColorConfig,
377 pub format: OutputFormat,
378 pub test_threads: Option<usize>,
379 pub skip: Vec<String>,
380 pub options: Options,
383 /// Result of parsing the options.
384 pub type OptRes = Result<TestOpts, String>;
386 fn optgroups() -> getopts::Options {
387 let mut opts = getopts::Options::new();
388 opts.optflag("", "include-ignored", "Run ignored and not ignored tests")
389 .optflag("", "ignored", "Run only ignored tests")
390 .optflag("", "exclude-should-panic", "Excludes tests marked as should_panic")
391 .optflag("", "test", "Run tests and not benchmarks")
392 .optflag("", "bench", "Run benchmarks instead of tests")
393 .optflag("", "list", "List all tests and benchmarks")
394 .optflag("h", "help", "Display this message (longer with --help)")
398 "Write logs to the specified file instead \
405 "don't capture stdout/stderr of each \
406 task, allow printing directly",
411 "Number of threads used for running tests \
418 "Skip tests whose names contain FILTER (this flag can \
419 be used multiple times)",
425 "Display one character per test instead of one line. \
426 Alias to --format=terse",
431 "Exactly match filters rather than by substring",
436 "Configure coloring of output:
437 auto = colorize if stdout is a tty and tests are run on serially (default);
438 always = always colorize output;
439 never = never colorize output;",
445 "Configure formatting of output:
446 pretty = Print verbose output;
447 terse = Display one character per test;
448 json = Output a json document",
454 "Enable nightly-only flags:
455 unstable-options = Allow use of experimental features",
461 fn usage(binary: &str, options: &getopts::Options) {
462 let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
466 The FILTER string is tested against the name of all tests, and only those
467 tests whose names contain the filter are run.
469 By default, all tests are run in parallel. This can be altered with the
470 --test-threads flag or the RUST_TEST_THREADS environment variable when running
473 All tests have their standard output and standard error captured by default.
474 This can be overridden with the --nocapture flag or setting RUST_TEST_NOCAPTURE
475 environment variable to a value other than "0". Logging is not captured by default.
479 `#[test]` - Indicates a function is a test to be run. This function
481 `#[bench]` - Indicates a function is a benchmark to be run. This
482 function takes one argument (test::Bencher).
483 `#[should_panic]` - This function (also labeled with `#[test]`) will only pass if
484 the code causes a panic (an assertion failure or panic!)
485 A message may be provided, which the failure string must
486 contain: #[should_panic(expected = "foo")].
487 `#[ignore]` - When applied to a function which is already attributed as a
488 test, then the test runner will ignore these tests during
489 normal test runs. Running with --ignored or --include-ignored will run
491 usage = options.usage(&message)
495 // FIXME: Copied from libsyntax until linkage errors are resolved. Issue #47566
/// Whether nightly-only (unstable) flags may be accepted: true unless this is
/// a feature-staged (beta/stable) build, or always true when bootstrapping.
fn is_nightly() -> bool {
    // Whether this is a feature-staged build, i.e., on the beta or stable channel
    let disable_unstable_features = option_env!("CFG_DISABLE_UNSTABLE_FEATURES").is_some();
    // Whether we should enable unstable features for bootstrapping
    let bootstrap = env::var("RUSTC_BOOTSTRAP").is_ok();

    bootstrap || !disable_unstable_features
}
505 // Parses command line arguments into test options
506 pub fn parse_opts(args: &[String]) -> Option<OptRes> {
507 let mut allow_unstable = false;
508 let opts = optgroups();
509 let args = args.get(1..).unwrap_or(args);
510 let matches = match opts.parse(args) {
512 Err(f) => return Some(Err(f.to_string())),
515 if let Some(opt) = matches.opt_str("Z") {
518 "the option `Z` is only accepted on the nightly compiler".into(),
523 "unstable-options" => {
524 allow_unstable = true;
527 return Some(Err("Unrecognized option to `Z`".into()));
532 if matches.opt_present("h") {
533 usage(&args[0], &opts);
537 let filter = if !matches.free.is_empty() {
538 Some(matches.free[0].clone())
543 let exclude_should_panic = matches.opt_present("exclude-should-panic");
544 if !allow_unstable && exclude_should_panic {
546 "The \"exclude-should-panic\" flag is only accepted on the nightly compiler".into(),
550 let include_ignored = matches.opt_present("include-ignored");
551 if !allow_unstable && include_ignored {
553 "The \"include-ignored\" flag is only accepted on the nightly compiler".into(),
557 let run_ignored = match (include_ignored, matches.opt_present("ignored")) {
560 "the options --include-ignored and --ignored are mutually exclusive".into(),
563 (true, false) => RunIgnored::Yes,
564 (false, true) => RunIgnored::Only,
565 (false, false) => RunIgnored::No,
567 let quiet = matches.opt_present("quiet");
568 let exact = matches.opt_present("exact");
569 let list = matches.opt_present("list");
571 let logfile = matches.opt_str("logfile");
572 let logfile = logfile.map(|s| PathBuf::from(&s));
574 let bench_benchmarks = matches.opt_present("bench");
575 let run_tests = !bench_benchmarks || matches.opt_present("test");
577 let mut nocapture = matches.opt_present("nocapture");
579 nocapture = match env::var("RUST_TEST_NOCAPTURE") {
580 Ok(val) => &val != "0",
585 let test_threads = match matches.opt_str("test-threads") {
586 Some(n_str) => match n_str.parse::<usize>() {
587 Ok(0) => return Some(Err("argument for --test-threads must not be 0".to_string())),
590 return Some(Err(format!(
591 "argument for --test-threads must be a number > 0 \
600 let color = match matches.opt_str("color").as_ref().map(|s| &**s) {
601 Some("auto") | None => AutoColor,
602 Some("always") => AlwaysColor,
603 Some("never") => NeverColor,
606 return Some(Err(format!(
607 "argument for --color must be auto, always, or never (was \
614 let format = match matches.opt_str("format").as_ref().map(|s| &**s) {
615 None if quiet => OutputFormat::Terse,
616 Some("pretty") | None => OutputFormat::Pretty,
617 Some("terse") => OutputFormat::Terse,
621 "The \"json\" format is only accepted on the nightly compiler".into(),
628 return Some(Err(format!(
629 "argument for --format must be pretty, terse, or json (was \
636 let test_opts = TestOpts {
640 exclude_should_panic,
649 skip: matches.opt_strs("skip"),
650 options: Options::new(),
656 #[derive(Clone, PartialEq)]
657 pub struct BenchSamples {
658 ns_iter_summ: stats::Summary,
662 #[derive(Clone, PartialEq)]
663 pub enum TestResult {
669 TrBench(BenchSamples),
672 unsafe impl Send for TestResult {}
674 enum OutputLocation<T> {
675 Pretty(Box<term::StdoutTerminal>),
679 impl<T: Write> Write for OutputLocation<T> {
680 fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
682 Pretty(ref mut term) => term.write(buf),
683 Raw(ref mut stdout) => stdout.write(buf),
687 fn flush(&mut self) -> io::Result<()> {
689 Pretty(ref mut term) => term.flush(),
690 Raw(ref mut stdout) => stdout.flush(),
695 struct ConsoleTestState {
696 log_out: Option<File>,
705 failures: Vec<(TestDesc, Vec<u8>)>,
706 not_failures: Vec<(TestDesc, Vec<u8>)>,
710 impl ConsoleTestState {
711 pub fn new(opts: &TestOpts) -> io::Result<ConsoleTestState> {
712 let log_out = match opts.logfile {
713 Some(ref path) => Some(File::create(path)?),
717 Ok(ConsoleTestState {
726 metrics: MetricMap::new(),
727 failures: Vec::new(),
728 not_failures: Vec::new(),
729 options: opts.options,
733 pub fn write_log<S: AsRef<str>>(&mut self, msg: S) -> io::Result<()> {
734 let msg = msg.as_ref();
737 Some(ref mut o) => o.write_all(msg.as_bytes()),
741 pub fn write_log_result(&mut self, test: &TestDesc, result: &TestResult) -> io::Result<()> {
742 self.write_log(format!(
745 TrOk => "ok".to_owned(),
746 TrFailed => "failed".to_owned(),
747 TrFailedMsg(ref msg) => format!("failed: {}", msg),
748 TrIgnored => "ignored".to_owned(),
749 TrAllowedFail => "failed (allowed)".to_owned(),
750 TrBench(ref bs) => fmt_bench_samples(bs),
756 fn current_test_count(&self) -> usize {
757 self.passed + self.failed + self.ignored + self.measured + self.allowed_fail
761 // Format a number with thousands separators
// Format a number with thousands separators
fn fmt_thousands_sep(mut n: usize, sep: char) -> String {
    use std::fmt::Write;
    let mut output = String::new();
    let mut trailing = false;
    // Walk the 3-digit groups from most to least significant
    // (handles values below 10^12).
    for &pow in &[9, 6, 3, 0] {
        let base = 10_usize.pow(pow);
        // Emit a group once the first non-zero group has been seen, or at the
        // ones place so that `0` still prints. The leading group is printed
        // without zero-padding; subsequent groups are padded to 3 digits.
        if pow == 0 || trailing || n / base != 0 {
            if !trailing {
                output.write_fmt(format_args!("{}", n / base)).unwrap();
            } else {
                output.write_fmt(format_args!("{:03}", n / base)).unwrap();
            }
            if pow != 0 {
                output.push(sep);
            }
            trailing = true;
        }
        n %= base;
    }

    output
}
785 pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
787 let mut output = String::new();
789 let median = bs.ns_iter_summ.median as usize;
790 let deviation = (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize;
793 .write_fmt(format_args!(
794 "{:>11} ns/iter (+/- {})",
795 fmt_thousands_sep(median, ','),
796 fmt_thousands_sep(deviation, ',')
801 .write_fmt(format_args!(" = {} MB/s", bs.mb_s))
807 // List the tests to console, and optionally to logfile. Filters are honored.
808 pub fn list_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<()> {
809 let mut output = match term::stdout() {
810 None => Raw(io::stdout()),
811 Some(t) => Pretty(t),
814 let quiet = opts.format == OutputFormat::Terse;
815 let mut st = ConsoleTestState::new(opts)?;
820 for test in filter_tests(&opts, tests) {
821 use crate::TestFn::*;
824 desc: TestDesc { name, .. },
828 let fntype = match testfn {
829 StaticTestFn(..) | DynTestFn(..) => {
833 StaticBenchFn(..) | DynBenchFn(..) => {
839 writeln!(output, "{}: {}", name, fntype)?;
840 st.write_log(format!("{} {}\n", fntype, name))?;
/// Naively pluralize a count: `1 test`, otherwise `N tests` (just appends
/// an `s`; adequate for the words used here).
fn plural(count: u32, s: &str) -> String {
    match count {
        1 => format!("{} {}", 1, s),
        n => format!("{} {}s", n, s),
    }
}
851 if ntest != 0 || nbench != 0 {
852 writeln!(output, "")?;
858 plural(ntest, "test"),
859 plural(nbench, "benchmark")
866 // A simple console test runner
867 pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<bool> {
870 st: &mut ConsoleTestState,
871 out: &mut dyn OutputFormatter,
872 ) -> io::Result<()> {
873 match (*event).clone() {
874 TeFiltered(ref filtered_tests) => {
875 st.total = filtered_tests.len();
876 out.write_run_start(filtered_tests.len())
878 TeFilteredOut(filtered_out) => Ok(st.filtered_out = filtered_out),
879 TeWait(ref test) => out.write_test_start(test),
880 TeTimeout(ref test) => out.write_timeout(test),
881 TeResult(test, result, stdout) => {
882 st.write_log_result(&test, &result)?;
883 out.write_result(&test, &result, &*stdout)?;
887 st.not_failures.push((test, stdout));
889 TrIgnored => st.ignored += 1,
890 TrAllowedFail => st.allowed_fail += 1,
892 st.metrics.insert_metric(
893 test.name.as_slice(),
894 bs.ns_iter_summ.median,
895 bs.ns_iter_summ.max - bs.ns_iter_summ.min,
901 st.failures.push((test, stdout));
903 TrFailedMsg(msg) => {
905 let mut stdout = stdout;
906 stdout.extend_from_slice(format!("note: {}", msg).as_bytes());
907 st.failures.push((test, stdout));
915 let output = match term::stdout() {
916 None => Raw(io::stdout()),
917 Some(t) => Pretty(t),
920 let max_name_len = tests
922 .max_by_key(|t| len_if_padded(*t))
923 .map(|t| t.desc.name.as_slice().len())
926 let is_multithreaded = opts.test_threads.unwrap_or_else(get_concurrency) > 1;
928 let mut out: Box<dyn OutputFormatter> = match opts.format {
929 OutputFormat::Pretty => Box::new(PrettyFormatter::new(
935 OutputFormat::Terse => Box::new(TerseFormatter::new(
941 OutputFormat::Json => Box::new(JsonFormatter::new(output)),
943 let mut st = ConsoleTestState::new(opts)?;
944 fn len_if_padded(t: &TestDescAndFn) -> usize {
945 match t.testfn.padding() {
947 PadOnRight => t.desc.name.as_slice().len(),
951 run_tests(opts, tests, |x| callback(&x, &mut st, &mut *out))?;
953 assert!(st.current_test_count() == st.total);
955 return out.write_run_finish(&st);
958 fn use_color(opts: &TestOpts) -> bool {
960 AutoColor => !opts.nocapture && stdout_isatty(),
967 target_os = "cloudabi",
968 all(target_arch = "wasm32", not(target_os = "emscripten")),
969 all(target_vendor = "fortanix", target_env = "sgx")
971 fn stdout_isatty() -> bool {
972 // FIXME: Implement isatty on SGX
976 fn stdout_isatty() -> bool {
977 unsafe { libc::isatty(libc::STDOUT_FILENO) != 0 }
980 fn stdout_isatty() -> bool {
983 type HANDLE = *mut u8;
984 type LPDWORD = *mut u32;
985 const STD_OUTPUT_HANDLE: DWORD = -11i32 as DWORD;
987 fn GetStdHandle(which: DWORD) -> HANDLE;
988 fn GetConsoleMode(hConsoleHandle: HANDLE, lpMode: LPDWORD) -> BOOL;
991 let handle = GetStdHandle(STD_OUTPUT_HANDLE);
993 GetConsoleMode(handle, &mut out) != 0
999 TeFiltered(Vec<TestDesc>),
1001 TeResult(TestDesc, TestResult, Vec<u8>),
1002 TeTimeout(TestDesc),
1003 TeFilteredOut(usize),
1006 pub type MonitorMsg = (TestDesc, TestResult, Vec<u8>);
/// A `Write` sink that appends everything written to it into a shared,
/// mutex-guarded byte buffer — used to capture a test's stdout/stderr.
struct Sink(Arc<Mutex<Vec<u8>>>);

impl Write for Sink {
    fn write(&mut self, data: &[u8]) -> io::Result<usize> {
        Write::write(&mut *self.0.lock().unwrap(), data)
    }
    fn flush(&mut self) -> io::Result<()> {
        // Nothing is buffered locally; every write lands in the Vec at once.
        Ok(())
    }
}
1018 pub fn run_tests<F>(opts: &TestOpts, tests: Vec<TestDescAndFn>, mut callback: F) -> io::Result<()>
1020 F: FnMut(TestEvent) -> io::Result<()>,
1022 use std::collections::{self, HashMap};
1023 use std::hash::BuildHasherDefault;
1024 use std::sync::mpsc::RecvTimeoutError;
1025 // Use a deterministic hasher
1027 HashMap<TestDesc, Instant, BuildHasherDefault<collections::hash_map::DefaultHasher>>;
1029 let tests_len = tests.len();
1031 let mut filtered_tests = filter_tests(opts, tests);
1032 if !opts.bench_benchmarks {
1033 filtered_tests = convert_benchmarks_to_tests(filtered_tests);
1036 let filtered_tests = {
1037 let mut filtered_tests = filtered_tests;
1038 for test in filtered_tests.iter_mut() {
1039 test.desc.name = test.desc.name.with_padding(test.testfn.padding());
1045 let filtered_out = tests_len - filtered_tests.len();
1046 callback(TeFilteredOut(filtered_out))?;
1048 let filtered_descs = filtered_tests.iter().map(|t| t.desc.clone()).collect();
1050 callback(TeFiltered(filtered_descs))?;
1052 let (filtered_tests, filtered_benchs): (Vec<_>, _) =
1053 filtered_tests.into_iter().partition(|e| match e.testfn {
1054 StaticTestFn(_) | DynTestFn(_) => true,
1058 let concurrency = opts.test_threads.unwrap_or_else(get_concurrency);
1060 let mut remaining = filtered_tests;
1061 remaining.reverse();
1062 let mut pending = 0;
1064 let (tx, rx) = channel::<MonitorMsg>();
1066 let mut running_tests: TestMap = HashMap::default();
1068 fn get_timed_out_tests(running_tests: &mut TestMap) -> Vec<TestDesc> {
1069 let now = Instant::now();
1070 let timed_out = running_tests
1072 .filter_map(|(desc, timeout)| {
1073 if &now >= timeout {
1080 for test in &timed_out {
1081 running_tests.remove(test);
1086 fn calc_timeout(running_tests: &TestMap) -> Option<Duration> {
1087 running_tests.values().min().map(|next_timeout| {
1088 let now = Instant::now();
1089 if *next_timeout >= now {
1097 if concurrency == 1 {
1098 while !remaining.is_empty() {
1099 let test = remaining.pop().unwrap();
1100 callback(TeWait(test.desc.clone()))?;
1101 run_test(opts, !opts.run_tests, test, tx.clone(), Concurrent::No);
1102 let (test, result, stdout) = rx.recv().unwrap();
1103 callback(TeResult(test, result, stdout))?;
1106 while pending > 0 || !remaining.is_empty() {
1107 while pending < concurrency && !remaining.is_empty() {
1108 let test = remaining.pop().unwrap();
1109 let timeout = Instant::now() + Duration::from_secs(TEST_WARN_TIMEOUT_S);
1110 running_tests.insert(test.desc.clone(), timeout);
1111 callback(TeWait(test.desc.clone()))?; //here no pad
1112 run_test(opts, !opts.run_tests, test, tx.clone(), Concurrent::Yes);
1118 if let Some(timeout) = calc_timeout(&running_tests) {
1119 res = rx.recv_timeout(timeout);
1120 for test in get_timed_out_tests(&mut running_tests) {
1121 callback(TeTimeout(test))?;
1123 if res != Err(RecvTimeoutError::Timeout) {
1127 res = rx.recv().map_err(|_| RecvTimeoutError::Disconnected);
1132 let (desc, result, stdout) = res.unwrap();
1133 running_tests.remove(&desc);
1135 callback(TeResult(desc, result, stdout))?;
1140 if opts.bench_benchmarks {
1141 // All benchmarks run at the end, in serial.
1142 for b in filtered_benchs {
1143 callback(TeWait(b.desc.clone()))?;
1144 run_test(opts, false, b, tx.clone(), Concurrent::No);
1145 let (test, result, stdout) = rx.recv().unwrap();
1146 callback(TeResult(test, result, stdout))?;
1152 #[allow(deprecated)]
1153 fn get_concurrency() -> usize {
1154 return match env::var("RUST_TEST_THREADS") {
1156 let opt_n: Option<usize> = s.parse().ok();
1158 Some(n) if n > 0 => n,
1160 "RUST_TEST_THREADS is `{}`, should be a positive integer.",
1165 Err(..) => num_cpus(),
1169 #[allow(nonstandard_style)]
1170 fn num_cpus() -> usize {
1172 struct SYSTEM_INFO {
1173 wProcessorArchitecture: u16,
1176 lpMinimumApplicationAddress: *mut u8,
1177 lpMaximumApplicationAddress: *mut u8,
1178 dwActiveProcessorMask: *mut u8,
1179 dwNumberOfProcessors: u32,
1180 dwProcessorType: u32,
1181 dwAllocationGranularity: u32,
1182 wProcessorLevel: u16,
1183 wProcessorRevision: u16,
1186 fn GetSystemInfo(info: *mut SYSTEM_INFO) -> i32;
1189 let mut sysinfo = std::mem::zeroed();
1190 GetSystemInfo(&mut sysinfo);
1191 sysinfo.dwNumberOfProcessors as usize
1195 #[cfg(target_os = "vxworks")]
1196 fn num_cpus() -> usize {
1197 // FIXME: Implement num_cpus on vxWorks
1201 #[cfg(target_os = "redox")]
1202 fn num_cpus() -> usize {
1203 // FIXME: Implement num_cpus on Redox
1208 all(target_arch = "wasm32", not(target_os = "emscripten")),
1209 all(target_vendor = "fortanix", target_env = "sgx")
1211 fn num_cpus() -> usize {
1216 target_os = "android",
1217 target_os = "cloudabi",
1218 target_os = "emscripten",
1219 target_os = "fuchsia",
1221 target_os = "linux",
1222 target_os = "macos",
1223 target_os = "solaris",
1225 fn num_cpus() -> usize {
1226 unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as usize }
1230 target_os = "freebsd",
1231 target_os = "dragonfly",
1232 target_os = "netbsd"
1234 fn num_cpus() -> usize {
1237 let mut cpus: libc::c_uint = 0;
1238 let mut cpus_size = std::mem::size_of_val(&cpus);
1241 cpus = libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as libc::c_uint;
1244 let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
1249 &mut cpus as *mut _ as *mut _,
1250 &mut cpus_size as *mut _ as *mut _,
1262 #[cfg(target_os = "openbsd")]
1263 fn num_cpus() -> usize {
1266 let mut cpus: libc::c_uint = 0;
1267 let mut cpus_size = std::mem::size_of_val(&cpus);
1268 let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
1274 &mut cpus as *mut _ as *mut _,
1275 &mut cpus_size as *mut _ as *mut _,
1286 #[cfg(target_os = "haiku")]
1287 fn num_cpus() -> usize {
1292 #[cfg(target_os = "l4re")]
1293 fn num_cpus() -> usize {
1299 pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
1300 let mut filtered = tests;
1301 let matches_filter = |test: &TestDescAndFn, filter: &str| {
1302 let test_name = test.desc.name.as_slice();
1304 match opts.filter_exact {
1305 true => test_name == filter,
1306 false => test_name.contains(filter),
1310 // Remove tests that don't match the test filter
1311 if let Some(ref filter) = opts.filter {
1312 filtered.retain(|test| matches_filter(test, filter));
1315 // Skip tests that match any of the skip filters
1316 filtered.retain(|test| !opts.skip.iter().any(|sf| matches_filter(test, sf)));
1318 // Excludes #[should_panic] tests
1319 if opts.exclude_should_panic {
1320 filtered.retain(|test| test.desc.should_panic == ShouldPanic::No);
1323 // maybe unignore tests
1324 match opts.run_ignored {
1325 RunIgnored::Yes => {
1328 .for_each(|test| test.desc.ignore = false);
1330 RunIgnored::Only => {
1331 filtered.retain(|test| test.desc.ignore);
1334 .for_each(|test| test.desc.ignore = false);
1336 RunIgnored::No => {}
1339 // Sort the tests alphabetically
1340 filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));
1345 pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
1346 // convert benchmarks to tests, if we're not benchmarking them
1350 let testfn = match x.testfn {
1351 DynBenchFn(bench) => DynTestFn(Box::new(move || {
1352 bench::run_once(|b| __rust_begin_short_backtrace(|| bench.run(b)))
1354 StaticBenchFn(benchfn) => DynTestFn(Box::new(move || {
1355 bench::run_once(|b| __rust_begin_short_backtrace(|| benchfn(b)))
1370 test: TestDescAndFn,
1371 monitor_ch: Sender<MonitorMsg>,
1372 concurrency: Concurrent,
1374 let TestDescAndFn { desc, testfn } = test;
1376 let ignore_because_panic_abort = cfg!(target_arch = "wasm32")
1377 && !cfg!(target_os = "emscripten")
1378 && desc.should_panic != ShouldPanic::No;
1380 if force_ignore || desc.ignore || ignore_because_panic_abort {
1381 monitor_ch.send((desc, TrIgnored, Vec::new())).unwrap();
1387 monitor_ch: Sender<MonitorMsg>,
1389 testfn: Box<dyn FnOnce() + Send>,
1390 concurrency: Concurrent,
1392 // Buffer for capturing standard I/O
1393 let data = Arc::new(Mutex::new(Vec::new()));
1394 let data2 = data.clone();
1396 let name = desc.name.clone();
1397 let runtest = move || {
1398 let oldio = if !nocapture {
1400 io::set_print(Some(Box::new(Sink(data2.clone())))),
1401 io::set_panic(Some(Box::new(Sink(data2)))),
1407 let result = catch_unwind(AssertUnwindSafe(testfn));
1409 if let Some((printio, panicio)) = oldio {
1410 io::set_print(printio);
1411 io::set_panic(panicio);
1414 let test_result = calc_result(&desc, result);
1415 let stdout = data.lock().unwrap().to_vec();
1417 .send((desc.clone(), test_result, stdout))
1421 // If the platform is single-threaded we're just going to run
1422 // the test synchronously, regardless of the concurrency
1424 let supports_threads = !cfg!(target_os = "emscripten") && !cfg!(target_arch = "wasm32");
1425 if concurrency == Concurrent::Yes && supports_threads {
1426 let cfg = thread::Builder::new().name(name.as_slice().to_owned());
1427 cfg.spawn(runtest).unwrap();
1434 DynBenchFn(bencher) => {
1435 crate::bench::benchmark(desc, monitor_ch, opts.nocapture, |harness| {
1436 bencher.run(harness)
1439 StaticBenchFn(benchfn) => {
1440 crate::bench::benchmark(desc, monitor_ch, opts.nocapture, |harness| {
1441 (benchfn.clone())(harness)
1445 let cb = move || __rust_begin_short_backtrace(f);
1446 run_test_inner(desc, monitor_ch, opts.nocapture, Box::new(cb), concurrency)
1448 StaticTestFn(f) => run_test_inner(
1452 Box::new(move || __rust_begin_short_backtrace(f)),
1458 /// Fixed frame used to clean the backtrace with `RUST_BACKTRACE=1`.
1460 fn __rust_begin_short_backtrace<F: FnOnce()>(f: F) {
1464 fn calc_result(desc: &TestDesc, task_result: Result<(), Box<dyn Any + Send>>) -> TestResult {
1465 match (&desc.should_panic, task_result) {
1466 (&ShouldPanic::No, Ok(())) | (&ShouldPanic::Yes, Err(_)) => TrOk,
1467 (&ShouldPanic::YesWithMessage(msg), Err(ref err)) => {
1469 .downcast_ref::<String>()
1471 .or_else(|| err.downcast_ref::<&'static str>().map(|e| *e))
1472 .map(|e| e.contains(msg))
1477 if desc.allow_fail {
1480 TrFailedMsg(format!("panic did not include expected string '{}'", msg))
1484 _ if desc.allow_fail => TrAllowedFail,
1489 #[derive(Clone, PartialEq)]
1490 pub struct MetricMap(BTreeMap<String, Metric>);
1493 pub fn new() -> MetricMap {
1494 MetricMap(BTreeMap::new())
1497 /// Insert a named `value` (+/- `noise`) metric into the map. The value
1498 /// must be non-negative. The `noise` indicates the uncertainty of the
1499 /// metric, which doubles as the "noise range" of acceptable
1500 /// pairwise-regressions on this named value, when comparing from one
1501 /// metric to the next using `compare_to_old`.
1503 /// If `noise` is positive, then it means this metric is of a value
1504 /// you want to see grow smaller, so a change larger than `noise` in the
1505 /// positive direction represents a regression.
1507 /// If `noise` is negative, then it means this metric is of a value
1508 /// you want to see grow larger, so a change larger than `noise` in the
1509 /// negative direction represents a regression.
1510 pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
1511 let m = Metric { value, noise };
// `BTreeMap::insert` replaces any prior entry, so re-recording a metric under
// the same name silently overwrites the earlier measurement.
1512 self.0.insert(name.to_owned(), m);
// Renders every metric as a "name: value (+/- noise)" line, in name order
// (the `BTreeMap` iterates sorted by key). NOTE(review): the joining of the
// collected strings into the final `String` is on lines elided from this extract.
1515 pub fn fmt_metrics(&self) -> String {
1519 .map(|(k, v)| format!("{}: {} (+/- {})", *k, v.value, v.noise))
1520 .collect::<Vec<_>>();
// Re-export the standard library's optimization barrier so benchmark authors can
// keep computed values "live" (preventing the optimizer from const-folding the
// benchmarked work away) without depending on `std::hint` directly.
1527 pub use std::hint::black_box;
1530 /// Callback for benchmark functions to run in their body.
1531 pub fn iter<T, F>(&mut self, mut inner: F)
// In `Single` mode the closure is timed for exactly one pass and the result is
// discarded (no summary recorded); otherwise fall through to the adaptive
// sampling loop below.
1535 if self.mode == BenchMode::Single {
1536 ns_iter_inner(&mut inner, 1);
// Auto mode: run the full adaptive measurement (free fn `iter`) and stash the
// statistical summary for `bench` to return.
1540 self.summary = Some(iter(&mut inner));
// Runs a whole benchmark function `f` against this `Bencher` and returns the
// summary that `f`'s call to `Bencher::iter` recorded — `None` if `f` never
// called `iter` (see the FIXME in `benchmark` below for how that is handled).
1543 pub fn bench<F>(&mut self, mut f: F) -> Option<stats::Summary>
1545 F: FnMut(&mut Bencher),
1548 return self.summary;
// Converts a `Duration` to total nanoseconds as a `u64`.
// (Would overflow only for durations beyond ~584 years, which cannot occur for
// the bounded benchmark runs in this module.)
1552 fn ns_from_dur(dur: Duration) -> u64 {
1553 dur.as_secs() * 1_000_000_000 + (dur.subsec_nanos() as u64)
// Times `k` back-to-back invocations of `inner`, returning the total elapsed
// wall-clock time in nanoseconds. NOTE(review): the invocation loop itself
// (presumably `k` calls wrapped in `black_box`) is on lines elided from this
// extract — confirm against the full source.
1556 fn ns_iter_inner<T, F>(inner: &mut F, k: u64) -> u64
1560 let start = Instant::now();
1564 return ns_from_dur(start.elapsed());
/// Adaptive benchmark driver: repeatedly times batches of `inner`, growing the
/// batch size `n` each round, until the per-iteration timings converge (low
/// median absolute deviation and agreement between the `n` and `5*n` batch
/// estimates) or a hard 3-second budget is exhausted. Returns the winsorized
/// statistical summary of the last sample set.
1567 pub fn iter<T, F>(inner: &mut F) -> stats::Summary
1571 // Initial bench run to get ballpark figure.
1572 let ns_single = ns_iter_inner(inner, 1);
1574 // Try to estimate iter count for 1ms falling back to 1m
1575 // iterations if first run took < 1ns.
1576 let ns_target_total = 1_000_000; // 1ms
// `max(1, ns_single)` guards against division by zero when the single run
// measured as 0ns.
1577 let mut n = ns_target_total / cmp::max(1, ns_single);
1579 // if the first run took more than 1ms we don't want to just
1580 // be left doing 0 iterations on every loop. The unfortunate
1581 // side effect of not being able to do as many runs is
1582 // automatically handled by the statistical analysis below
1583 // (i.e., larger error bars).
// NOTE(review): the clamp of `n` to at least 1 implied by the comment above is
// on a line elided from this extract.
1586 let mut total_run = Duration::new(0, 0);
// Fixed pool of 50 per-round samples, reused (overwritten) every round.
1587 let samples: &mut [f64] = &mut [0.0_f64; 50];
1589 let loop_start = Instant::now();
// First sample set: 50 measurements of per-iteration cost at batch size `n`.
1591 for p in &mut *samples {
1592 *p = ns_iter_inner(inner, n) as f64 / n as f64;
// Winsorize (clamp the extreme 5% tails) to tame outliers before summarizing.
1595 stats::winsorize(samples, 5.0);
1596 let summ = stats::Summary::new(samples);
// Second sample set at batch size `5*n`, used as an independent estimate to
// cross-check convergence.
1598 for p in &mut *samples {
1599 let ns = ns_iter_inner(inner, 5 * n);
1600 *p = ns as f64 / (5 * n) as f64;
1603 stats::winsorize(samples, 5.0);
1604 let summ5 = stats::Summary::new(samples);
1606 let loop_run = loop_start.elapsed();
1608 // If we've run for 100ms and seem to have converged to a
// Converged when the spread is under 1% and the two batch-size estimates agree
// to within the larger batch's median absolute deviation.
1610 if loop_run > Duration::from_millis(100)
1611 && summ.median_abs_dev_pct < 1.0
1612 && summ.median - summ5.median < summ5.median_abs_dev
1617 total_run = total_run + loop_run;
1618 // Longest we ever run for is 3s.
1619 if total_run > Duration::from_secs(3) {
1623 // If we overflow here just return the results so far. We check a
1624 // multiplier of 10 because we're about to multiply by 2 and the
1625 // next iteration of the loop will also multiply by 5 (to calculate
1626 // the summ5 result)
1627 n = match n.checked_mul(10) {
1637 use super::{BenchMode, BenchSamples, Bencher, MonitorMsg, Sender, Sink, TestDesc, TestResult};
1641 use std::panic::{catch_unwind, AssertUnwindSafe};
1642 use std::sync::{Arc, Mutex};
// Runs one benchmark function end-to-end: builds an auto-mode `Bencher`,
// optionally redirects stdout/panic output into an in-memory capture buffer,
// executes the benchmark under `catch_unwind`, converts the outcome into a
// `TestResult`, and reports (desc, result, captured output) on `monitor_ch`.
1644 pub fn benchmark<F>(desc: TestDesc, monitor_ch: Sender<MonitorMsg>, nocapture: bool, f: F)
1646 F: FnMut(&mut Bencher),
1648 let mut bs = Bencher {
1649 mode: BenchMode::Auto,
// Shared capture buffer: `data2` is moved into the `Sink` writers installed
// below, while `data` is retained to read the captured bytes back afterwards.
1654 let data = Arc::new(Mutex::new(Vec::new()));
1655 let data2 = data.clone();
// Unless --nocapture was passed, swap the thread-local print/panic writers for
// sinks into the capture buffer, remembering the old writers for restoration.
1657 let oldio = if !nocapture {
1659 io::set_print(Some(Box::new(Sink(data2.clone())))),
1660 io::set_panic(Some(Box::new(Sink(data2)))),
// AssertUnwindSafe is needed because `bs`/`f` are captured by reference;
// a benchmark panic is recovered and reported rather than aborting the runner.
1666 let result = catch_unwind(AssertUnwindSafe(|| bs.bench(f)));
// Restore the original print/panic writers before touching the results.
1668 if let Some((printio, panicio)) = oldio {
1669 io::set_print(printio);
1670 io::set_panic(panicio);
1673 let test_result = match result {
// Normal case: `Bencher::iter` was called, so a summary exists.
1675 Ok(Some(ns_iter_summ)) => {
// Clamp to >= 1ns so the MB/s division below can't divide by zero.
1676 let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
1677 let mb_s = bs.bytes * 1000 / ns_iter;
1679 let bs = BenchSamples {
1681 mb_s: mb_s as usize,
1683 TestResult::TrBench(bs)
1686 // iter not called, so no data.
1687 // FIXME: error in this case?
// Synthesize a single zero sample so downstream reporting still has a summary.
1688 let samples: &mut [f64] = &mut [0.0_f64; 1];
1689 let bs = BenchSamples {
1690 ns_iter_summ: stats::Summary::new(samples),
1693 TestResult::TrBench(bs)
// The benchmark panicked: report it as a plain failure.
1695 Err(_) => TestResult::TrFailed,
1698 let stdout = data.lock().unwrap().to_vec();
// `unwrap` here follows libtest's convention: the monitor receiver outlives
// every runner, so a send failure would be an internal invariant violation.
1699 monitor_ch.send((desc, test_result, stdout)).unwrap();
1702 pub fn run_once<F>(f: F)
1704 F: FnMut(&mut Bencher),
1706 let mut bs = Bencher {
1707 mode: BenchMode::Single,