1 //! Support code for rustc's built in unit-test and micro-benchmarking
4 //! Almost all user code will only be interested in `Bencher` and
5 //! `black_box`. All other interactions (such as writing tests and
6 //! benchmarks themselves) should be done via the `#[test]` and
7 //! `#[bench]` attributes.
9 //! See the [Testing Chapter](../book/ch11-00-testing.html) of the book for more details.
11 // Currently, not much of this is meant for users. It is intended to
12 // support the simplest interface possible for representing and
13 // running tests while providing a base that other test frameworks may
16 // N.B., this is also specified in this crate's Cargo.toml, but libsyntax contains logic specific to
17 // this crate, which relies on this attribute (rather than the value of `--crate-name` passed by
18 // cargo) to detect this crate.
20 #![crate_name = "test"]
21 #![unstable(feature = "test", issue = "27812")]
22 #![doc(html_root_url = "https://doc.rust-lang.org/nightly/", test(attr(deny(warnings))))]
24 #![cfg_attr(any(unix, target_os = "cloudabi"), feature(libc, rustc_private))]
26 #![feature(set_stdio)]
27 #![feature(panic_unwind)]
28 #![feature(staged_api)]
29 #![feature(termination_trait_lib)]
33 #[cfg(any(unix, target_os = "cloudabi"))]
37 // FIXME(#54291): rustc and/or LLVM don't yet support building with panic-unwind
38 // on aarch64-pc-windows-msvc, or thumbv7a-pc-windows-msvc
39 // so we don't link libtest against libunwind (for the time being)
40 // even though it means that libtest won't be fully functional on
43 // See also: https://github.com/rust-lang/rust/issues/54190#issuecomment-422904437
44 #[cfg(not(all(windows, any(target_arch = "aarch64", target_arch = "arm"))))]
45 extern crate panic_unwind;
47 pub use self::ColorConfig::*;
48 use self::NamePadding::*;
49 use self::OutputLocation::*;
50 use self::TestEvent::*;
51 pub use self::TestFn::*;
52 pub use self::TestName::*;
53 pub use self::TestResult::*;
58 use std::collections::BTreeMap;
63 use std::io::prelude::*;
64 use std::panic::{catch_unwind, AssertUnwindSafe};
65 use std::path::PathBuf;
67 use std::process::Termination;
68 use std::sync::mpsc::{channel, Sender};
69 use std::sync::{Arc, Mutex};
71 use std::time::{Duration, Instant};
73 const TEST_WARN_TIMEOUT_S: u64 = 60;
74 const QUIET_MODE_MAX_COLUMN: usize = 100; // insert a '\n' after 100 tests in quiet mode
76 // to be used by rustc to compile tests in libtest
79 assert_test_result, filter_tests, parse_opts, run_test, test_main, test_main_static,
80 Bencher, DynTestFn, DynTestName, Metric, MetricMap, Options, RunIgnored, ShouldPanic,
81 StaticBenchFn, StaticTestFn, StaticTestName, TestDesc, TestDescAndFn, TestName, TestOpts,
82 TestResult, TrFailed, TrFailedMsg, TrIgnored, TrOk,
89 use crate::formatters::{JsonFormatter, OutputFormatter, PrettyFormatter, TerseFormatter};
91 /// Whether to execute tests concurrently or not
92 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
98 // The name of a test. By convention this follows the rules for rust
99 // paths; i.e., it should be a series of identifiers separated by double
100 // colons. This way if some test runner wants to arrange the tests
101 // hierarchically it may.
103 #[derive(Clone, PartialEq, Eq, Hash, Debug)]
105 StaticTestName(&'static str),
107 AlignedTestName(Cow<'static, str>, NamePadding),
// ---- TestName helpers (fragment: several original lines, e.g. the match
// openers and closing braces, are missing from this dump) ----

// Returns the test name as a string slice regardless of which variant
// holds it.
110 fn as_slice(&self) -> &str {
112 StaticTestName(s) => s,
113 DynTestName(ref s) => s,
114 AlignedTestName(ref s, _) => &*s,
// Returns the padding recorded in an AlignedTestName; the arms for the
// other variants are not visible in this dump.
118 fn padding(&self) -> NamePadding {
120 &AlignedTestName(_, p) => p,
// Converts any TestName variant into an AlignedTestName carrying the
// requested padding, moving the name into a Cow (borrowed for static
// names, owned/cloned otherwise).
125 fn with_padding(&self, padding: NamePadding) -> TestName {
126 let name = match self {
127 &TestName::StaticTestName(name) => Cow::Borrowed(name),
128 &TestName::DynTestName(ref name) => Cow::Owned(name.clone()),
129 &TestName::AlignedTestName(ref name, _) => name.clone(),
132 TestName::AlignedTestName(name, padding)
// Display delegates to the underlying string slice.
135 impl fmt::Display for TestName {
136 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
137 fmt::Display::fmt(self.as_slice(), f)
141 #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
142 pub enum NamePadding {
// Pads self.name with spaces out to column_count; saturating_sub avoids
// underflow when the name is already wider than the column.
148 fn padded_name(&self, column_count: usize, align: NamePadding) -> String {
149 let mut name = String::from(self.name.as_slice());
150 let fill = column_count.saturating_sub(name.len());
151 let pad = " ".repeat(fill);
162 /// Represents a benchmark function.
163 pub trait TDynBenchFn: Send {
164 fn run(&self, harness: &mut Bencher);
167 // A function that runs a test. If the function returns successfully,
168 // the test succeeds; if the function panics then the test fails. We
169 // may need to come up with a more clever definition of test in order
170 // to support isolation of tests into threads.
// Variants cover static fn pointers and boxed dynamic closures, for both
// tests and benchmarks (fragment: the enum header and at least one
// variant are missing from this dump).
173 StaticBenchFn(fn(&mut Bencher)),
174 DynTestFn(Box<dyn FnOnce() + Send>),
175 DynBenchFn(Box<dyn TDynBenchFn + 'static>),
// Chooses the padding style per test kind: benchmark names are padded on
// the right so their timing columns line up; plain tests get no padding.
179 fn padding(&self) -> NamePadding {
181 StaticTestFn(..) => PadNone,
182 StaticBenchFn(..) => PadOnRight,
183 DynTestFn(..) => PadNone,
184 DynBenchFn(..) => PadOnRight,
// Manual Debug impl: the function payloads are not Debug themselves, so
// only the variant name is printed.
189 impl fmt::Debug for TestFn {
190 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
191 f.write_str(match *self {
192 StaticTestFn(..) => "StaticTestFn(..)",
193 StaticBenchFn(..) => "StaticBenchFn(..)",
194 DynTestFn(..) => "DynTestFn(..)",
195 DynBenchFn(..) => "DynBenchFn(..)",
200 /// Manager of the benchmarking runs.
202 /// This is fed into functions marked with `#[bench]` to allow for
203 /// set-up & tear-down before running a piece of code repeatedly via a
// Bencher field (fragment): accumulated summary statistics of the timed
// iterations, filled in once a benchmark has run.
208 summary: Option<stats::Summary>,
212 #[derive(Clone, PartialEq, Eq)]
// Whether a test is expected to panic, optionally with a required
// message substring (checked in calc_result).
218 #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
219 pub enum ShouldPanic {
222 YesWithMessage(&'static str),
225 // The definition of a single test. A test runner will run a list of
// TestDesc (fragment): per-test metadata; name/ignore fields are missing
// from this dump.
227 #[derive(Clone, Debug, PartialEq, Eq, Hash)]
228 pub struct TestDesc {
231 pub should_panic: ShouldPanic,
232 pub allow_fail: bool,
// Pairs the metadata with the runnable test function.
236 pub struct TestDescAndFn {
241 #[derive(Clone, PartialEq, Debug, Copy)]
// Constructs a Metric from a measured value and its noise (uncertainty)
// band.
248 pub fn new(value: f64, noise: f64) -> Metric {
249 Metric { value, noise }
253 /// In case we want to add other options as well, just add them in this struct.
254 #[derive(Copy, Clone, Debug)]
256 display_output: bool,
// Default options: captured test output is not displayed.
260 pub fn new() -> Options {
262 display_output: false,
// Builder-style setter; consumes and returns self.
266 pub fn display_output(mut self, display_output: bool) -> Options {
267 self.display_output = display_output;
272 // The default console test runner. It accepts the command line
273 // arguments and a vector of test_descs.
// Parses CLI options, then either lists tests or runs them; exits the
// process with code 101 when any test fails (fragment: the success arms
// and several closing braces are missing from this dump).
274 pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>, options: Options) {
275 let mut opts = match parse_opts(args) {
278 eprintln!("error: {}", msg);
284 opts.options = options;
286 if let Err(e) = list_tests_console(&opts, tests) {
287 eprintln!("error: io error when listing tests: {:?}", e);
// NOTE(review): this error message says "listing" but this is the run
// path — presumably a copy/paste slip in the original; confirm upstream.
291 match run_tests_console(&opts, tests) {
293 Ok(false) => process::exit(101),
295 eprintln!("error: io error when listing tests: {:?}", e);
302 // A variant optimized for invocation with a static test vector.
303 // This will panic (intentionally) when fed any dynamic tests, because
304 // it is copying the static values out into a dynamic vector and cannot
305 // copy dynamic values. It is doing this because from this point on
306 // a Vec<TestDescAndFn> is used in order to effect ownership-transfer
307 // semantics into parallel test runners, which in turn requires a Vec<>
308 // rather than a &[].
309 pub fn test_main_static(tests: &[&TestDescAndFn]) {
310 let args = env::args().collect::<Vec<_>>();
// Only the static variants can be copied out of the shared slice; the
// dynamic variants cannot be cloned, hence the panic arm below.
311 let owned_tests = tests
313 .map(|t| match t.testfn {
314 StaticTestFn(f) => TestDescAndFn {
315 testfn: StaticTestFn(f),
316 desc: t.desc.clone(),
318 StaticBenchFn(f) => TestDescAndFn {
319 testfn: StaticBenchFn(f),
320 desc: t.desc.clone(),
322 _ => panic!("non-static tests passed to test::test_main_static"),
// Delegate to the general runner with default Options.
325 test_main(&args, owned_tests, Options::new())
328 /// Invoked when unit tests terminate. Should panic if the unit
329 /// Tests is considered a failure. By default, invokes `report()`
330 /// and checks for a `0` result.
331 pub fn assert_test_result<T: Termination>(result: T) {
332 let code = result.report();
// The assertion message below belongs to an assert! whose opening line
// is missing from this dump; it fires when `code` is nonzero.
335 "the test returned a termination value with a non-zero status code ({}) \
336 which indicates a failure",
// When to colorize console output (variants missing from this dump:
// Auto/Always/Never per the use_color match below).
341 #[derive(Copy, Clone, Debug)]
342 pub enum ColorConfig {
// Output style: Pretty / Terse / Json (variant lines missing here).
348 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
349 pub enum OutputFormat {
// Policy for #[ignore] tests: No / Yes (also run ignored) / Only.
355 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
356 pub enum RunIgnored {
// Fully parsed command-line configuration for one test run (fragment:
// several fields, e.g. run_tests/nocapture, are missing from this dump).
363 pub struct TestOpts {
365 pub filter: Option<String>,
366 pub filter_exact: bool,
367 pub exclude_should_panic: bool,
368 pub run_ignored: RunIgnored,
370 pub bench_benchmarks: bool,
371 pub logfile: Option<PathBuf>,
373 pub color: ColorConfig,
374 pub format: OutputFormat,
375 pub test_threads: Option<usize>,
376 pub skip: Vec<String>,
377 pub options: Options,
// Default option set: no filters, don't bench, pretty output (fragment:
// several field initializers are missing from this dump).
382 fn new() -> TestOpts {
387 exclude_should_panic: false,
388 run_ignored: RunIgnored::No,
390 bench_benchmarks: false,
394 format: OutputFormat::Pretty,
397 options: Options::new(),
402 /// Result of parsing the options.
403 pub type OptRes = Result<TestOpts, String>;
// Declares every CLI flag/option the harness understands; `usage` turns
// these into the --help text and parse_opts consumes the matches.
// (Fragment: some .optopt/.optflag opener lines are missing, and several
// of the lines below are the interiors of multi-line help strings, so no
// comments are inserted into the body.)
405 fn optgroups() -> getopts::Options {
406 let mut opts = getopts::Options::new();
407 opts.optflag("", "include-ignored", "Run ignored and not ignored tests")
408 .optflag("", "ignored", "Run only ignored tests")
409 .optflag("", "exclude-should-panic", "Excludes tests marked as should_panic")
410 .optflag("", "test", "Run tests and not benchmarks")
411 .optflag("", "bench", "Run benchmarks instead of tests")
412 .optflag("", "list", "List all tests and benchmarks")
413 .optflag("h", "help", "Display this message (longer with --help)")
417 "Write logs to the specified file instead \
424 "don't capture stdout/stderr of each \
425 task, allow printing directly",
430 "Number of threads used for running tests \
437 "Skip tests whose names contain FILTER (this flag can \
438 be used multiple times)",
444 "Display one character per test instead of one line. \
445 Alias to --format=terse",
450 "Exactly match filters rather than by substring",
455 "Configure coloring of output:
456 auto = colorize if stdout is a tty and tests are run on serially (default);
457 always = always colorize output;
458 never = never colorize output;",
464 "Configure formatting of output:
465 pretty = Print verbose output;
466 terse = Display one character per test;
467 json = Output a json document",
473 "Enable nightly-only flags:
474 unstable-options = Allow use of experimental features",
// Prints the --help text: a usage header, a long explanation, and the
// getopts-generated option list. Nearly the whole body is one string
// literal, so no comments are inserted inside it.
480 fn usage(binary: &str, options: &getopts::Options) {
481 let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
485 The FILTER string is tested against the name of all tests, and only those
486 tests whose names contain the filter are run.
488 By default, all tests are run in parallel. This can be altered with the
489 --test-threads flag or the RUST_TEST_THREADS environment variable when running
492 All tests have their standard output and standard error captured by default.
493 This can be overridden with the --nocapture flag or setting RUST_TEST_NOCAPTURE
494 environment variable to a value other than "0". Logging is not captured by default.
498 #[test] - Indicates a function is a test to be run. This function
500 #[bench] - Indicates a function is a benchmark to be run. This
501 function takes one argument (test::Bencher).
502 #[should_panic] - This function (also labeled with #[test]) will only pass if
503 the code causes a panic (an assertion failure or panic!)
504 A message may be provided, which the failure string must
505 contain: #[should_panic(expected = "foo")].
506 #[ignore] - When applied to a function which is already attributed as a
507 test, then the test runner will ignore these tests during
508 normal test runs. Running with --ignored or --include-ignored will run
510 usage = options.usage(&message)
514 // FIXME: Copied from libsyntax until linkage errors are resolved. Issue #47566
// True on nightly/dev builds, or when RUSTC_BOOTSTRAP is set; gates
// unstable flags such as -Z and --format=json.
515 fn is_nightly() -> bool {
516 // Whether this is a feature-staged build, i.e., on the beta or stable channel
517 let disable_unstable_features = option_env!("CFG_DISABLE_UNSTABLE_FEATURES").is_some();
518 // Whether we should enable unstable features for bootstrapping
519 let bootstrap = env::var("RUSTC_BOOTSTRAP").is_ok();
521 bootstrap || !disable_unstable_features
524 // Parses command line arguments into test options
// Returns None when --help was printed, Some(Err(msg)) for invalid
// options, and Some(Ok(TestOpts)) otherwise. (Fragment: many lines are
// missing from this dump, including match openers and closing braces.)
525 pub fn parse_opts(args: &[String]) -> Option<OptRes> {
526 let mut allow_unstable = false;
527 let opts = optgroups();
// Skip argv[0] (the binary name) when present.
528 let args = args.get(1..).unwrap_or(args);
529 let matches = match opts.parse(args) {
531 Err(f) => return Some(Err(f.to_string())),
// -Z flags gate unstable behavior to nightly builds only.
534 if let Some(opt) = matches.opt_str("Z") {
537 "the option `Z` is only accepted on the nightly compiler".into(),
542 "unstable-options" => {
543 allow_unstable = true;
546 return Some(Err("Unrecognized option to `Z`".into()));
551 if matches.opt_present("h") {
552 usage(&args[0], &opts);
// The first free (positional) argument is the test-name filter.
556 let filter = if !matches.free.is_empty() {
557 Some(matches.free[0].clone())
562 let exclude_should_panic = matches.opt_present("exclude-should-panic");
563 if !allow_unstable && exclude_should_panic {
565 "The \"exclude-should-panic\" flag is only accepted on the nightly compiler".into(),
569 let include_ignored = matches.opt_present("include-ignored");
570 if !allow_unstable && include_ignored {
572 "The \"include-ignored\" flag is only accepted on the nightly compiler".into(),
// --include-ignored and --ignored are mutually exclusive.
576 let run_ignored = match (include_ignored, matches.opt_present("ignored")) {
579 "the options --include-ignored and --ignored are mutually exclusive".into(),
582 (true, false) => RunIgnored::Yes,
583 (false, true) => RunIgnored::Only,
584 (false, false) => RunIgnored::No,
586 let quiet = matches.opt_present("quiet");
587 let exact = matches.opt_present("exact");
588 let list = matches.opt_present("list");
590 let logfile = matches.opt_str("logfile");
591 let logfile = logfile.map(|s| PathBuf::from(&s));
593 let bench_benchmarks = matches.opt_present("bench");
594 let run_tests = !bench_benchmarks || matches.opt_present("test");
// The RUST_TEST_NOCAPTURE env var overrides the flag unless it is "0".
596 let mut nocapture = matches.opt_present("nocapture");
598 nocapture = match env::var("RUST_TEST_NOCAPTURE") {
599 Ok(val) => &val != "0",
// --test-threads must parse as a positive integer; 0 is rejected.
604 let test_threads = match matches.opt_str("test-threads") {
605 Some(n_str) => match n_str.parse::<usize>() {
606 Ok(0) => return Some(Err("argument for --test-threads must not be 0".to_string())),
609 return Some(Err(format!(
610 "argument for --test-threads must be a number > 0 \
619 let color = match matches.opt_str("color").as_ref().map(|s| &**s) {
620 Some("auto") | None => AutoColor,
621 Some("always") => AlwaysColor,
622 Some("never") => NeverColor,
625 return Some(Err(format!(
626 "argument for --color must be auto, always, or never (was \
633 let format = match matches.opt_str("format").as_ref().map(|s| &**s) {
634 None if quiet => OutputFormat::Terse,
635 Some("pretty") | None => OutputFormat::Pretty,
636 Some("terse") => OutputFormat::Terse,
640 "The \"json\" format is only accepted on the nightly compiler".into(),
647 return Some(Err(format!(
648 "argument for --format must be pretty, terse, or json (was \
655 let test_opts = TestOpts {
659 exclude_should_panic,
668 skip: matches.opt_strs("skip"),
669 options: Options::new(),
// Raw result samples of a single benchmark run (fragment: the mb_s field
// used by fmt_bench_samples is missing from this dump).
675 #[derive(Clone, PartialEq)]
676 pub struct BenchSamples {
677 ns_iter_summ: stats::Summary,
// Outcome of one test (fragment: TrOk/TrFailed/... variants missing).
681 #[derive(Clone, PartialEq)]
682 pub enum TestResult {
688 TrBench(BenchSamples),
// NOTE(review): unsafe Send with no SAFETY comment in the original.
// Presumably required because some payload is not auto-Send — confirm
// the invariant before relying on it.
691 unsafe impl Send for TestResult {}
// Console sink: a color-capable terminal ("Pretty") or a raw writer.
693 enum OutputLocation<T> {
694 Pretty(Box<term::StdoutTerminal>),
// Write is forwarded verbatim to whichever sink is active.
698 impl<T: Write> Write for OutputLocation<T> {
699 fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
701 Pretty(ref mut term) => term.write(buf),
702 Raw(ref mut stdout) => stdout.write(buf),
706 fn flush(&mut self) -> io::Result<()> {
708 Pretty(ref mut term) => term.flush(),
709 Raw(ref mut stdout) => stdout.flush(),
// Mutable state accumulated while running tests on the console
// (fragment: counter fields like passed/failed/ignored are missing from
// this dump but are summed in current_test_count below).
714 struct ConsoleTestState {
715 log_out: Option<File>,
724 failures: Vec<(TestDesc, Vec<u8>)>,
725 not_failures: Vec<(TestDesc, Vec<u8>)>,
729 impl ConsoleTestState {
// Opens the logfile when one is configured and zero-initializes the
// counters (fragment: most field initializers are missing here).
730 pub fn new(opts: &TestOpts) -> io::Result<ConsoleTestState> {
731 let log_out = match opts.logfile {
732 Some(ref path) => Some(File::create(path)?),
736 Ok(ConsoleTestState {
745 metrics: MetricMap::new(),
746 failures: Vec::new(),
747 not_failures: Vec::new(),
748 options: opts.options,
// Appends msg to the logfile; a no-op when no logfile is configured.
752 pub fn write_log<S: AsRef<str>>(&mut self, msg: S) -> io::Result<()> {
753 let msg = msg.as_ref();
756 Some(ref mut o) => o.write_all(msg.as_bytes()),
// Writes one "<result> <name>" line per finished test to the logfile.
760 pub fn write_log_result(&mut self, test: &TestDesc, result: &TestResult) -> io::Result<()> {
761 self.write_log(format!(
764 TrOk => "ok".to_owned(),
765 TrFailed => "failed".to_owned(),
766 TrFailedMsg(ref msg) => format!("failed: {}", msg),
767 TrIgnored => "ignored".to_owned(),
768 TrAllowedFail => "failed (allowed)".to_owned(),
769 TrBench(ref bs) => fmt_bench_samples(bs),
// Number of tests that have produced any outcome so far.
775 fn current_test_count(&self) -> usize {
776 self.passed + self.failed + self.ignored + self.measured + self.allowed_fail
780 // Format a number with thousands separators
// Walks the 10^9 / 10^6 / 10^3 / 10^0 digit groups; `trailing` forces
// zero-padded 3-digit groups once the first nonzero group is printed
// (fragment: the lines updating n/trailing and writing `sep` are missing
// from this dump).
781 fn fmt_thousands_sep(mut n: usize, sep: char) -> String {
783 let mut output = String::new();
784 let mut trailing = false;
785 for &pow in &[9, 6, 3, 0] {
786 let base = 10_usize.pow(pow);
787 if pow == 0 || trailing || n / base != 0 {
789 output.write_fmt(format_args!("{}", n / base)).unwrap();
791 output.write_fmt(format_args!("{:03}", n / base)).unwrap();
// Renders "<median> ns/iter (+/- <spread>)", plus MB/s throughput when
// nonzero.
804 pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
806 let mut output = String::new();
// NOTE(review): f64 -> usize casts truncate; acceptable for display.
808 let median = bs.ns_iter_summ.median as usize;
809 let deviation = (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize;
812 .write_fmt(format_args!(
813 "{:>11} ns/iter (+/- {})",
814 fmt_thousands_sep(median, ','),
815 fmt_thousands_sep(deviation, ',')
820 .write_fmt(format_args!(" = {} MB/s", bs.mb_s))
826 // List the tests to console, and optionally to logfile. Filters are honored.
827 pub fn list_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<()> {
// Use the color-capable terminal when available, raw stdout otherwise.
828 let mut output = match term::stdout() {
829 None => Raw(io::stdout()),
830 Some(t) => Pretty(t),
833 let quiet = opts.format == OutputFormat::Terse;
834 let mut st = ConsoleTestState::new(opts)?;
// Print "name: kind" for every test surviving the filters, and mirror
// each line into the logfile when one is configured.
839 for test in filter_tests(&opts, tests) {
840 use crate::TestFn::*;
843 desc: TestDesc { name, .. },
847 let fntype = match testfn {
848 StaticTestFn(..) | DynTestFn(..) => {
852 StaticBenchFn(..) | DynBenchFn(..) => {
858 writeln!(output, "{}: {}", name, fntype)?;
859 st.write_log(format!("{} {}\n", fntype, name))?;
// Local helper for "1 test" vs "n tests" pluralization.
862 fn plural(count: u32, s: &str) -> String {
864 1 => format!("{} {}", 1, s),
865 n => format!("{} {}s", n, s),
// In non-quiet mode, print a trailing summary line with the counts.
870 if ntest != 0 || nbench != 0 {
871 writeln!(output, "")?;
877 plural(ntest, "test"),
878 plural(nbench, "benchmark")
885 // A simple console test runner
// Runs all tests and streams each TestEvent through a formatter; returns
// Ok(true) when everything passed. (Fragment: the callback fn signature
// opener and several match/brace lines are missing from this dump.)
886 pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<bool> {
// Inner callback: folds each TestEvent into the shared ConsoleTestState
// and forwards it to the active OutputFormatter.
889 st: &mut ConsoleTestState,
890 out: &mut dyn OutputFormatter,
891 ) -> io::Result<()> {
892 match (*event).clone() {
893 TeFiltered(ref filtered_tests) => {
894 st.total = filtered_tests.len();
895 out.write_run_start(filtered_tests.len())
897 TeFilteredOut(filtered_out) => Ok(st.filtered_out = filtered_out),
898 TeWait(ref test) => out.write_test_start(test),
899 TeTimeout(ref test) => out.write_timeout(test),
900 TeResult(test, result, stdout) => {
901 st.write_log_result(&test, &result)?;
902 out.write_result(&test, &result, &*stdout)?;
906 st.not_failures.push((test, stdout));
908 TrIgnored => st.ignored += 1,
909 TrAllowedFail => st.allowed_fail += 1,
// Bench results are folded into the metric map as median +/- spread.
911 st.metrics.insert_metric(
912 test.name.as_slice(),
913 bs.ns_iter_summ.median,
914 bs.ns_iter_summ.max - bs.ns_iter_summ.min,
920 st.failures.push((test, stdout));
// A failure message is appended to the captured output as a note.
922 TrFailedMsg(msg) => {
924 let mut stdout = stdout;
925 stdout.extend_from_slice(format!("note: {}", msg).as_bytes());
926 st.failures.push((test, stdout));
934 let output = match term::stdout() {
935 None => Raw(io::stdout()),
936 Some(t) => Pretty(t),
// Widest padded test name, used to align formatter output columns.
939 let max_name_len = tests
941 .max_by_key(|t| len_if_padded(*t))
942 .map(|t| t.desc.name.as_slice().len())
945 let is_multithreaded = opts.test_threads.unwrap_or_else(get_concurrency) > 1;
947 let mut out: Box<dyn OutputFormatter> = match opts.format {
948 OutputFormat::Pretty => Box::new(PrettyFormatter::new(
954 OutputFormat::Terse => Box::new(TerseFormatter::new(
960 OutputFormat::Json => Box::new(JsonFormatter::new(output)),
962 let mut st = ConsoleTestState::new(opts)?;
// Only tests padded on the right (benchmarks) count toward alignment.
963 fn len_if_padded(t: &TestDescAndFn) -> usize {
964 match t.testfn.padding() {
966 PadOnRight => t.desc.name.as_slice().len(),
970 run_tests(opts, tests, |x| callback(&x, &mut st, &mut *out))?;
// Sanity check: every filtered-in test must have reported an outcome.
972 assert!(st.current_test_count() == st.total);
974 return out.write_run_finish(&st);
// Regression test: failures are inserted out of order (b before a) and
// write_failures must print them sorted by name.
978 fn should_sort_failures_before_printing_them() {
979 let test_a = TestDesc {
980 name: StaticTestName("a"),
982 should_panic: ShouldPanic::No,
986 let test_b = TestDesc {
987 name: StaticTestName("b"),
989 should_panic: ShouldPanic::No,
// Write into an in-memory Raw sink so the output can be inspected.
993 let mut out = PrettyFormatter::new(Raw(Vec::new()), false, 10, false);
995 let st = ConsoleTestState {
1004 metrics: MetricMap::new(),
1005 failures: vec![(test_b, Vec::new()), (test_a, Vec::new())],
1006 options: Options::new(),
1007 not_failures: Vec::new(),
1010 out.write_failures(&st).unwrap();
1011 let s = match out.output_location() {
1012 &Raw(ref m) => String::from_utf8_lossy(&m[..]),
1013 &Pretty(_) => unreachable!(),
// "a" must appear before "b" in the rendered failure list.
1016 let apos = s.find("a").unwrap();
1017 let bpos = s.find("b").unwrap();
1018 assert!(apos < bpos);
// Decides whether ANSI colors should be used for console output.
1021 fn use_color(opts: &TestOpts) -> bool {
// Auto only colorizes when output is uncaptured and stdout is a tty.
1023 AutoColor => !opts.nocapture && stdout_isatty(),
1024 AlwaysColor => true,
1025 NeverColor => false,
// Platform-specific isatty: stubbed on targets without a tty concept,
// libc-based on unix-likes, GetConsoleMode-based on Windows.
1030 target_os = "cloudabi",
1031 target_os = "redox",
1032 all(target_arch = "wasm32", not(target_os = "emscripten")),
1033 all(target_vendor = "fortanix", target_env = "sgx")
1035 fn stdout_isatty() -> bool {
1036 // FIXME: Implement isatty on Redox and SGX
1040 fn stdout_isatty() -> bool {
1041 unsafe { libc::isatty(libc::STDOUT_FILENO) != 0 }
1044 fn stdout_isatty() -> bool {
1047 type HANDLE = *mut u8;
1048 type LPDWORD = *mut u32;
1049 const STD_OUTPUT_HANDLE: DWORD = -11i32 as DWORD;
1051 fn GetStdHandle(which: DWORD) -> HANDLE;
1052 fn GetConsoleMode(hConsoleHandle: HANDLE, lpMode: LPDWORD) -> BOOL;
// A real console handle supports GetConsoleMode; redirected output
// does not, so a zero return means "not a tty".
1055 let handle = GetStdHandle(STD_OUTPUT_HANDLE);
1057 GetConsoleMode(handle, &mut out) != 0
// Events streamed from run_tests to the console callback, in run order.
1062 pub enum TestEvent {
1063 TeFiltered(Vec<TestDesc>),
1065 TeResult(TestDesc, TestResult, Vec<u8>),
1066 TeTimeout(TestDesc),
1067 TeFilteredOut(usize),
// (descriptor, result, captured stdout bytes) sent back from each test.
1070 pub type MonitorMsg = (TestDesc, TestResult, Vec<u8>);
// Shared in-memory buffer used to capture a test's stdout/stderr while
// it runs on another thread.
1072 struct Sink(Arc<Mutex<Vec<u8>>>);
1073 impl Write for Sink {
1074 fn write(&mut self, data: &[u8]) -> io::Result<usize> {
1075 Write::write(&mut *self.0.lock().unwrap(), data)
1077 fn flush(&mut self) -> io::Result<()> {
// Core scheduler: filters tests, runs them (serially or with up to
// `concurrency` in flight), emits TestEvents via `callback`, and runs
// benchmarks serially at the end. (Fragment: many lines are missing from
// this dump, including closing braces and some loop bodies.)
1082 pub fn run_tests<F>(opts: &TestOpts, tests: Vec<TestDescAndFn>, mut callback: F) -> io::Result<()>
1084 F: FnMut(TestEvent) -> io::Result<()>,
1086 use std::collections::{self, HashMap};
1087 use std::hash::BuildHasherDefault;
1088 use std::sync::mpsc::RecvTimeoutError;
1089 // Use a deterministic hasher
1091 HashMap<TestDesc, Instant, BuildHasherDefault<collections::hash_map::DefaultHasher>>;
1093 let tests_len = tests.len();
// Apply name/skip/ignore filters; benchmarks become plain tests unless
// benchmarking was requested.
1095 let mut filtered_tests = filter_tests(opts, tests);
1096 if !opts.bench_benchmarks {
1097 filtered_tests = convert_benchmarks_to_tests(filtered_tests);
// Stamp each surviving test's name with its padding style.
1100 let filtered_tests = {
1101 let mut filtered_tests = filtered_tests;
1102 for test in filtered_tests.iter_mut() {
1103 test.desc.name = test.desc.name.with_padding(test.testfn.padding());
// Report how many tests were filtered out, then the surviving set.
1109 let filtered_out = tests_len - filtered_tests.len();
1110 callback(TeFilteredOut(filtered_out))?;
1112 let filtered_descs = filtered_tests.iter().map(|t| t.desc.clone()).collect();
1114 callback(TeFiltered(filtered_descs))?;
// Split tests from benchmarks: benchmarks always run serially, last.
1116 let (filtered_tests, filtered_benchs): (Vec<_>, _) =
1117 filtered_tests.into_iter().partition(|e| match e.testfn {
1118 StaticTestFn(_) | DynTestFn(_) => true,
1122 let concurrency = opts.test_threads.unwrap_or_else(get_concurrency);
// Reversed so pop() takes tests in their original (sorted) order.
1124 let mut remaining = filtered_tests;
1125 remaining.reverse();
1126 let mut pending = 0;
1128 let (tx, rx) = channel::<MonitorMsg>();
1130 let mut running_tests: TestMap = HashMap::default();
// Collects (and removes from the map) tests whose warn-timeout deadline
// has passed.
1132 fn get_timed_out_tests(running_tests: &mut TestMap) -> Vec<TestDesc> {
1133 let now = Instant::now();
1134 let timed_out = running_tests
1136 .filter_map(|(desc, timeout)| {
1137 if &now >= timeout {
1144 for test in &timed_out {
1145 running_tests.remove(test);
// Duration until the earliest running test hits its deadline, if any.
1150 fn calc_timeout(running_tests: &TestMap) -> Option<Duration> {
1151 running_tests.values().min().map(|next_timeout| {
1152 let now = Instant::now();
1153 if *next_timeout >= now {
// Serial path: run one test at a time, waiting for each result.
1161 if concurrency == 1 {
1162 while !remaining.is_empty() {
1163 let test = remaining.pop().unwrap();
1164 callback(TeWait(test.desc.clone()))?;
1165 run_test(opts, !opts.run_tests, test, tx.clone(), Concurrent::No);
1166 let (test, result, stdout) = rx.recv().unwrap();
1167 callback(TeResult(test, result, stdout))?;
// Concurrent path: keep up to `concurrency` tests in flight, emitting
// TeTimeout warnings while waiting for results.
1170 while pending > 0 || !remaining.is_empty() {
1171 while pending < concurrency && !remaining.is_empty() {
1172 let test = remaining.pop().unwrap();
1173 let timeout = Instant::now() + Duration::from_secs(TEST_WARN_TIMEOUT_S);
1174 running_tests.insert(test.desc.clone(), timeout);
1175 callback(TeWait(test.desc.clone()))?; //here no pad
1176 run_test(opts, !opts.run_tests, test, tx.clone(), Concurrent::Yes);
// Wait for a result, periodically waking at the next deadline so
// timed-out tests can be reported without blocking forever.
1182 if let Some(timeout) = calc_timeout(&running_tests) {
1183 res = rx.recv_timeout(timeout);
1184 for test in get_timed_out_tests(&mut running_tests) {
1185 callback(TeTimeout(test))?;
1187 if res != Err(RecvTimeoutError::Timeout) {
1191 res = rx.recv().map_err(|_| RecvTimeoutError::Disconnected);
1196 let (desc, result, stdout) = res.unwrap();
1197 running_tests.remove(&desc);
1199 callback(TeResult(desc, result, stdout))?;
// Benchmarks run last and serially so timings aren't perturbed.
1204 if opts.bench_benchmarks {
1205 // All benchmarks run at the end, in serial.
1206 for b in filtered_benchs {
1207 callback(TeWait(b.desc.clone()))?;
1208 run_test(opts, false, b, tx.clone(), Concurrent::No);
1209 let (test, result, stdout) = rx.recv().unwrap();
1210 callback(TeResult(test, result, stdout))?;
1216 #[allow(deprecated)]
// Default test-thread count: honors RUST_TEST_THREADS when it parses as
// a positive integer, otherwise falls back to the detected CPU count.
1217 fn get_concurrency() -> usize {
1218 return match env::var("RUST_TEST_THREADS") {
1220 let opt_n: Option<usize> = s.parse().ok();
1222 Some(n) if n > 0 => n,
// Message for the panic arm (opening line missing from this dump).
1224 "RUST_TEST_THREADS is `{}`, should be a positive integer.",
1229 Err(..) => num_cpus(),
1233 #[allow(nonstandard_style)]
// Windows variant: reads dwNumberOfProcessors from GetSystemInfo.
1234 fn num_cpus() -> usize {
1236 struct SYSTEM_INFO {
1237 wProcessorArchitecture: u16,
1240 lpMinimumApplicationAddress: *mut u8,
1241 lpMaximumApplicationAddress: *mut u8,
1242 dwActiveProcessorMask: *mut u8,
1243 dwNumberOfProcessors: u32,
1244 dwProcessorType: u32,
1245 dwAllocationGranularity: u32,
1246 wProcessorLevel: u16,
1247 wProcessorRevision: u16,
1250 fn GetSystemInfo(info: *mut SYSTEM_INFO) -> i32;
1253 let mut sysinfo = std::mem::zeroed();
1254 GetSystemInfo(&mut sysinfo);
1255 sysinfo.dwNumberOfProcessors as usize
// Platforms without a CPU-count API return a fixed stub value
// (the literal itself is missing from this dump).
1259 #[cfg(target_os = "redox")]
1260 fn num_cpus() -> usize {
1261 // FIXME: Implement num_cpus on Redox
1265 #[cfg(target_os = "vxworks")]
1266 fn num_cpus() -> usize {
1267 // FIXME: Implement num_cpus on vxWorks
1272 all(target_arch = "wasm32", not(target_os = "emscripten")),
1273 all(target_vendor = "fortanix", target_env = "sgx")
1275 fn num_cpus() -> usize {
// Unix-likes with sysconf support query _SC_NPROCESSORS_ONLN.
1280 target_os = "android",
1281 target_os = "cloudabi",
1282 target_os = "emscripten",
1283 target_os = "fuchsia",
1285 target_os = "linux",
1286 target_os = "macos",
1287 target_os = "solaris"
1289 fn num_cpus() -> usize {
1290 unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as usize }
// BSDs use the CTL_HW/HW_NCPU sysctl (with a sysconf path as well).
1294 target_os = "freebsd",
1295 target_os = "dragonfly",
1296 target_os = "netbsd"
1298 fn num_cpus() -> usize {
1301 let mut cpus: libc::c_uint = 0;
1302 let mut cpus_size = std::mem::size_of_val(&cpus);
1305 cpus = libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as libc::c_uint;
1308 let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
1313 &mut cpus as *mut _ as *mut _,
1314 &mut cpus_size as *mut _ as *mut _,
// OpenBSD: sysctl only (no _SC_NPROCESSORS_ONLN fallback here).
1326 #[cfg(target_os = "openbsd")]
1327 fn num_cpus() -> usize {
1330 let mut cpus: libc::c_uint = 0;
1331 let mut cpus_size = std::mem::size_of_val(&cpus);
1332 let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
1338 &mut cpus as *mut _ as *mut _,
1339 &mut cpus_size as *mut _ as *mut _,
1350 #[cfg(target_os = "haiku")]
1351 fn num_cpus() -> usize {
1356 #[cfg(target_os = "l4re")]
1357 fn num_cpus() -> usize {
// Applies, in order: the name filter, the skip filters, the
// should_panic exclusion, and the ignore policy; finally sorts the
// surviving tests alphabetically by name.
1363 pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
1364 let mut filtered = tests;
// Exact match or substring match, depending on --exact.
1365 let matches_filter = |test: &TestDescAndFn, filter: &str| {
1366 let test_name = test.desc.name.as_slice();
1368 match opts.filter_exact {
1369 true => test_name == filter,
1370 false => test_name.contains(filter),
1374 // Remove tests that don't match the test filter
1375 if let Some(ref filter) = opts.filter {
1376 filtered.retain(|test| matches_filter(test, filter));
1379 // Skip tests that match any of the skip filters
1380 filtered.retain(|test| !opts.skip.iter().any(|sf| matches_filter(test, sf)));
1382 // Excludes #[should_panic] tests
1383 if opts.exclude_should_panic {
1384 filtered.retain(|test| test.desc.should_panic == ShouldPanic::No);
1387 // maybe unignore tests
1388 match opts.run_ignored {
// Yes: run everything, clearing the ignore flag on each test.
1389 RunIgnored::Yes => {
1392 .for_each(|test| test.desc.ignore = false);
// Only: keep just the ignored tests, then clear their ignore flag.
1394 RunIgnored::Only => {
1395 filtered.retain(|test| test.desc.ignore);
1398 .for_each(|test| test.desc.ignore = false);
1400 RunIgnored::No => {}
1403 // Sort the tests alphabetically
1404 filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));
1409 pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
1410 // convert benchmarks to tests, if we're not benchmarking them
// Each bench fn is wrapped in a closure that runs it exactly once via
// bench::run_once, so it behaves like an ordinary test.
1414 let testfn = match x.testfn {
1415 DynBenchFn(bench) => DynTestFn(Box::new(move || {
1416 bench::run_once(|b| __rust_begin_short_backtrace(|| bench.run(b)))
1418 StaticBenchFn(benchfn) => DynTestFn(Box::new(move || {
1419 bench::run_once(|b| __rust_begin_short_backtrace(|| benchfn(b)))
// run_test (fragment: the signature's opening line with the opts and
// force_ignore parameters is missing from this dump). Decides whether a
// single test is ignored or dispatched to a runner.
1434 test: TestDescAndFn,
1435 monitor_ch: Sender<MonitorMsg>,
1436 concurrency: Concurrent,
1438 let TestDescAndFn { desc, testfn } = test;
// wasm32 (non-emscripten) has no unwinding, so should_panic tests are
// force-ignored there.
1440 let ignore_because_panic_abort = cfg!(target_arch = "wasm32")
1441 && !cfg!(target_os = "emscripten")
1442 && desc.should_panic != ShouldPanic::No;
// Ignored tests report TrIgnored immediately without running.
1444 if force_ignore || desc.ignore || ignore_because_panic_abort {
1445 monitor_ch.send((desc, TrIgnored, Vec::new())).unwrap();
// run_test_inner (fragment: the signature opener and several lines are
// missing from this dump). Executes the test closure, capturing its
// stdout/stderr, and reports the outcome over the monitor channel.
1451 monitor_ch: Sender<MonitorMsg>,
1453 testfn: Box<dyn FnOnce() + Send>,
1454 concurrency: Concurrent,
1456 // Buffer for capturing standard I/O
1457 let data = Arc::new(Mutex::new(Vec::new()));
1458 let data2 = data.clone();
1460 let name = desc.name.clone();
1461 let runtest = move || {
// Redirect print/panic output into the shared buffer unless --nocapture.
1462 let oldio = if !nocapture {
1464 io::set_print(Some(Box::new(Sink(data2.clone())))),
1465 io::set_panic(Some(Box::new(Sink(data2)))),
// Run the test body, catching panics so failures can be reported
// instead of aborting the harness.
1471 let result = catch_unwind(AssertUnwindSafe(testfn));
// Restore the previous stdout/panic sinks before reporting.
1473 if let Some((printio, panicio)) = oldio {
1474 io::set_print(printio);
1475 io::set_panic(panicio);
1478 let test_result = calc_result(&desc, result);
1479 let stdout = data.lock().unwrap().to_vec();
1481 .send((desc.clone(), test_result, stdout))
1485 // If the platform is single-threaded we're just going to run
1486 // the test synchronously, regardless of the concurrency
1488 let supports_threads = !cfg!(target_os = "emscripten") && !cfg!(target_arch = "wasm32");
// Spawn a named thread per test only when requested and supported.
1489 if concurrency == Concurrent::Yes && supports_threads {
1490 let cfg = thread::Builder::new().name(name.as_slice().to_owned());
1491 cfg.spawn(runtest).unwrap();
// Dispatch arms from run_test (fragment): benchmarks go through
// bench::benchmark; test fns are wrapped for short backtraces.
1498 DynBenchFn(bencher) => {
1499 crate::bench::benchmark(desc, monitor_ch, opts.nocapture, |harness| {
1500 bencher.run(harness)
1503 StaticBenchFn(benchfn) => {
1504 crate::bench::benchmark(desc, monitor_ch, opts.nocapture, |harness| {
1505 (benchfn.clone())(harness)
1509 let cb = move || __rust_begin_short_backtrace(f);
1510 run_test_inner(desc, monitor_ch, opts.nocapture, Box::new(cb), concurrency)
1512 StaticTestFn(f) => run_test_inner(
1516 Box::new(move || __rust_begin_short_backtrace(f)),
1522 /// Fixed frame used to clean the backtrace with `RUST_BACKTRACE=1`.
// The well-known symbol name is what the panic backtrace printer looks for
// when trimming frames below the test body; the function itself just
// invokes `f`.
1524 fn __rust_begin_short_backtrace<F: FnOnce()>(f: F) {
// Combine the test's ShouldPanic expectation with the actual unwind outcome
// to produce a TestResult.
1528 fn calc_result(desc: &TestDesc, task_result: Result<(), Box<dyn Any + Send>>) -> TestResult {
1529 match (&desc.should_panic, task_result) {
// No panic expected and none occurred, or a panic was expected and one
// occurred: the test passes.
1530 (&ShouldPanic::No, Ok(())) | (&ShouldPanic::Yes, Err(_)) => TrOk,
// A panic with a specific message was expected: inspect the panic payload,
// which may be a String (panic! with formatting) or a &'static str.
1531 (&ShouldPanic::YesWithMessage(msg), Err(ref err)) => {
1533 .downcast_ref::<String>()
1535 .or_else(|| err.downcast_ref::<&'static str>().map(|e| *e))
1536 .map(|e| e.contains(msg))
// Message did not contain the expected string: allow_fail tests are
// downgraded (branch body elided in this view), others fail with a
// message naming the expected substring.
1541 if desc.allow_fail {
1544 TrFailedMsg(format!("panic did not include expected string '{}'", msg))
// Any other expectation/outcome mismatch: allow_fail downgrades it to
// TrAllowedFail; the remaining cases (elided here) report failure.
1548 _ if desc.allow_fail => TrAllowedFail,
// Mapping from metric name to its measured value plus noise bound, kept in
// a BTreeMap so iteration (e.g. formatting) is in sorted name order.
1553 #[derive(Clone, PartialEq)]
1554 pub struct MetricMap(BTreeMap<String, Metric>);
// Create an empty metric map.
1557 pub fn new() -> MetricMap {
1558 MetricMap(BTreeMap::new())
1561 /// Insert a named `value` (+/- `noise`) metric into the map. The value
1562 /// must be non-negative. The `noise` indicates the uncertainty of the
1563 /// metric, which doubles as the "noise range" of acceptable
1564 /// pairwise-regressions on this named value, when comparing from one
1565 /// metric to the next using `compare_to_old`.
1567 /// If `noise` is positive, then it means this metric is of a value
1568 /// you want to see grow smaller, so a change larger than `noise` in the
1569 /// positive direction represents a regression.
1571 /// If `noise` is negative, then it means this metric is of a value
1572 /// you want to see grow larger, so a change larger than `noise` in the
1573 /// negative direction represents a regression.
1574 pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
1575 let m = Metric { value, noise };
// BTreeMap::insert silently replaces any existing metric with the same name.
1576 self.0.insert(name.to_owned(), m);
// Render every metric as a "name: value (+/- noise)" entry for display;
// entries come out name-sorted because the backing map is a BTreeMap.
1579 pub fn fmt_metrics(&self) -> String {
1583 .map(|(k, v)| format!("{}: {} (+/- {})", *k, v.value, v.noise))
1584 .collect::<Vec<_>>();
1591 pub use std::hint::black_box;
1594 /// Callback for benchmark functions to run in their body.
1595 pub fn iter<T, F>(&mut self, mut inner: F)
// Single mode (bench::run_once): execute the closure exactly once and
// record no statistics; Auto mode runs the full sampling driver below.
1599 if self.mode == BenchMode::Single {
1600 ns_iter_inner(&mut inner, 1);
1604 self.summary = Some(iter(&mut inner));
// Run the benchmark body `f` and return the gathered summary.
// NOTE(review): returns self.summary, which is presumably None when the
// body never called `iter` -- confirm against the elided lines.
1607 pub fn bench<F>(&mut self, mut f: F) -> Option<stats::Summary>
1609 F: FnMut(&mut Bencher),
1612 return self.summary;
// Convert a Duration to whole nanoseconds as u64. The multiplication can
// overflow only for durations of hundreds of years, which cannot occur for
// benchmark timings.
1616 fn ns_from_dur(dur: Duration) -> u64 {
1617 dur.as_secs() * 1_000_000_000 + (dur.subsec_nanos() as u64)
// Time `k` consecutive invocations of `inner` (loop body elided in this
// view) and return the total elapsed wall-clock time in nanoseconds.
1620 fn ns_iter_inner<T, F>(inner: &mut F, k: u64) -> u64
1624 let start = Instant::now();
1628 return ns_from_dur(start.elapsed());
// Auto-tuning benchmark driver: estimate the per-iteration cost, then
// repeatedly take batches of 50 timing samples (at batch sizes n and 5n),
// winsorize outliers, and stop once the two summaries agree within the
// noise -- or once a hard 3-second budget is spent.
1631 pub fn iter<T, F>(inner: &mut F) -> stats::Summary
1635 // Initial bench run to get ballpark figure.
1636 let ns_single = ns_iter_inner(inner, 1);
1638 // Try to estimate iter count for 1ms falling back to 1m
1639 // iterations if first run took < 1ns.
1640 let ns_target_total = 1_000_000; // 1ms
1641 let mut n = ns_target_total / cmp::max(1, ns_single);
1643 // if the first run took more than 1ms we don't want to just
1644 // be left doing 0 iterations on every loop. The unfortunate
1645 // side effect of not being able to do as many runs is
1646 // automatically handled by the statistical analysis below
1647 // (i.e., larger error bars).
1650 let mut total_run = Duration::new(0, 0);
// Fixed 50-slot sample buffer, reused for both sample sets each round.
1651 let samples: &mut [f64] = &mut [0.0_f64; 50];
1653 let loop_start = Instant::now();
// First sample set: per-iteration time measured at batch size n.
1655 for p in &mut *samples {
1656 *p = ns_iter_inner(inner, n) as f64 / n as f64;
// Clamp the extreme 5% tails before summarizing.
1659 stats::winsorize(samples, 5.0);
1660 let summ = stats::Summary::new(samples);
// Second sample set at 5x the batch size, used as a convergence check
// against the first.
1662 for p in &mut *samples {
1663 let ns = ns_iter_inner(inner, 5 * n);
1664 *p = ns as f64 / (5 * n) as f64;
1667 stats::winsorize(samples, 5.0);
1668 let summ5 = stats::Summary::new(samples);
1670 let loop_run = loop_start.elapsed();
1672 // If we've run for 100ms and seem to have converged to a
// low-noise estimate (the two medians agree within summ5's MAD), accept.
1674 if loop_run > Duration::from_millis(100)
1675 && summ.median_abs_dev_pct < 1.0
1676 && summ.median - summ5.median < summ5.median_abs_dev
1681 total_run = total_run + loop_run;
1682 // Longest we ever run for is 3s.
1683 if total_run > Duration::from_secs(3) {
1687 // If we overflow here just return the results so far. We check a
1688 // multiplier of 10 because we're about to multiply by 2 and the
1689 // next iteration of the loop will also multiply by 5 (to calculate
1690 // the summ5 result)
1691 n = match n.checked_mul(10) {
1701 use super::{BenchMode, BenchSamples, Bencher, MonitorMsg, Sender, Sink, TestDesc, TestResult};
1705 use std::panic::{catch_unwind, AssertUnwindSafe};
1706 use std::sync::{Arc, Mutex};
// Run a single benchmark to completion: capture its stdio, execute the
// harness body (catching panics), compute throughput, and report a
// TrBench (or TrFailed) result plus captured output on monitor_ch.
1708 pub fn benchmark<F>(desc: TestDesc, monitor_ch: Sender<MonitorMsg>, nocapture: bool, f: F)
1710 F: FnMut(&mut Bencher),
1712 let mut bs = Bencher {
1713 mode: BenchMode::Auto,
// Shared buffer receiving everything printed while the benchmark runs.
1718 let data = Arc::new(Mutex::new(Vec::new()));
1719 let data2 = data.clone();
// Redirect print/panic output into the buffer unless --nocapture; keep the
// previous handlers so they can be restored afterwards.
1721 let oldio = if !nocapture {
1723 io::set_print(Some(Box::new(Sink(data2.clone())))),
1724 io::set_panic(Some(Box::new(Sink(data2)))),
// A panicking benchmark body unwinds here and is mapped to TrFailed below.
1730 let result = catch_unwind(AssertUnwindSafe(|| bs.bench(f)));
// Restore the stdio handles saved above.
1732 if let Some((printio, panicio)) = oldio {
1733 io::set_print(printio);
1734 io::set_panic(panicio);
1737 let test_result = match result {
// Normal case: the body called iter() and a summary was produced.
1739 Ok(Some(ns_iter_summ)) => {
// Clamp median ns/iter to >= 1 to avoid a divide-by-zero in the
// throughput calculation.
1740 let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
1741 let mb_s = bs.bytes * 1000 / ns_iter;
1743 let bs = BenchSamples {
1745 mb_s: mb_s as usize,
1747 TestResult::TrBench(bs)
1750 // iter not called, so no data.
1751 // FIXME: error in this case?
// Fabricate a single zero sample so downstream formatting still works.
1752 let samples: &mut [f64] = &mut [0.0_f64; 1];
1753 let bs = BenchSamples {
1754 ns_iter_summ: stats::Summary::new(samples),
1757 TestResult::TrBench(bs)
// A panic inside the benchmark body counts as a failure.
1759 Err(_) => TestResult::TrFailed,
// Report the result together with whatever output was captured.
1762 let stdout = data.lock().unwrap().to_vec();
1763 monitor_ch.send((desc, test_result, stdout)).unwrap();
1766 pub fn run_once<F>(f: F)
1768 F: FnMut(&mut Bencher),
1770 let mut bs = Bencher {
1771 mode: BenchMode::Single,