1 //! Support code for rustc's built in unit-test and micro-benchmarking
4 //! Almost all user code will only be interested in `Bencher` and
5 //! `black_box`. All other interactions (such as writing tests and
6 //! benchmarks themselves) should be done via the `#[test]` and
7 //! `#[bench]` attributes.
9 //! See the [Testing Chapter](../book/ch11-00-testing.html) of the book for more details.
11 // Currently, not much of this is meant for users. It is intended to
12 // support the simplest interface possible for representing and
13 // running tests while providing a base that other test frameworks may
16 // N.B., this is also specified in this crate's Cargo.toml, but libsyntax contains logic specific to
17 // this crate, which relies on this attribute (rather than the value of `--crate-name` passed by
18 // cargo) to detect this crate.
20 #![deny(rust_2018_idioms)]
21 #![crate_name = "test"]
22 #![unstable(feature = "test", issue = "27812")]
23 #![doc(html_root_url = "https://doc.rust-lang.org/nightly/", test(attr(deny(warnings))))]
25 #![cfg_attr(any(unix, target_os = "cloudabi"), feature(libc, rustc_private))]
27 #![feature(set_stdio)]
28 #![feature(panic_unwind)]
29 #![feature(staged_api)]
30 #![feature(termination_trait_lib)]
34 #[cfg(any(unix, target_os = "cloudabi"))]
38 // FIXME(#54291): rustc and/or LLVM don't yet support building with panic-unwind
39 // on aarch64-pc-windows-msvc, or thumbv7a-pc-windows-msvc
40 // so we don't link libtest against libunwind (for the time being)
41 // even though it means that libtest won't be fully functional on
44 // See also: https://github.com/rust-lang/rust/issues/54190#issuecomment-422904437
45 #[cfg(not(all(windows, any(target_arch = "aarch64", target_arch = "arm"))))]
46 extern crate panic_unwind;
48 pub use self::ColorConfig::*;
49 use self::NamePadding::*;
50 use self::OutputLocation::*;
51 use self::TestEvent::*;
52 pub use self::TestFn::*;
53 pub use self::TestName::*;
54 pub use self::TestResult::*;
59 use std::collections::BTreeMap;
64 use std::io::prelude::*;
65 use std::panic::{catch_unwind, AssertUnwindSafe};
66 use std::path::PathBuf;
68 use std::process::Termination;
69 use std::sync::mpsc::{channel, Sender};
70 use std::sync::{Arc, Mutex};
72 use std::time::{Duration, Instant};
74 const TEST_WARN_TIMEOUT_S: u64 = 60;
75 const QUIET_MODE_MAX_COLUMN: usize = 100; // insert a '\n' after 100 tests in quiet mode
77 // to be used by rustc to compile tests in libtest
80 assert_test_result, filter_tests, parse_opts, run_test, test_main, test_main_static,
81 Bencher, DynTestFn, DynTestName, Metric, MetricMap, Options, RunIgnored, ShouldPanic,
82 StaticBenchFn, StaticTestFn, StaticTestName, TestDesc, TestDescAndFn, TestName, TestOpts,
83 TestResult, TrFailed, TrFailedMsg, TrIgnored, TrOk,
90 use crate::formatters::{JsonFormatter, OutputFormatter, PrettyFormatter, TerseFormatter};
92 /// Whether to execute tests concurrently or not
93 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
99 // The name of a test. By convention this follows the rules for rust
100 // paths; i.e., it should be a series of identifiers separated by double
101 // colons. This way if some test runner wants to arrange the tests
102 // hierarchically it may.
104 #[derive(Clone, PartialEq, Eq, Hash, Debug)]
106 StaticTestName(&'static str),
108 AlignedTestName(Cow<'static, str>, NamePadding),
111 fn as_slice(&self) -> &str {
113 StaticTestName(s) => s,
114 DynTestName(ref s) => s,
115 AlignedTestName(ref s, _) => &*s,
119 fn padding(&self) -> NamePadding {
121 &AlignedTestName(_, p) => p,
126 fn with_padding(&self, padding: NamePadding) -> TestName {
127 let name = match self {
128 &TestName::StaticTestName(name) => Cow::Borrowed(name),
129 &TestName::DynTestName(ref name) => Cow::Owned(name.clone()),
130 &TestName::AlignedTestName(ref name, _) => name.clone(),
133 TestName::AlignedTestName(name, padding)
136 impl fmt::Display for TestName {
137 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
138 fmt::Display::fmt(self.as_slice(), f)
142 #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
143 pub enum NamePadding {
149 fn padded_name(&self, column_count: usize, align: NamePadding) -> String {
150 let mut name = String::from(self.name.as_slice());
151 let fill = column_count.saturating_sub(name.len());
152 let pad = " ".repeat(fill);
163 /// Represents a benchmark function.
164 pub trait TDynBenchFn: Send {
165 fn run(&self, harness: &mut Bencher);
168 // A function that runs a test. If the function returns successfully,
169 // the test succeeds; if the function panics then the test fails. We
170 // may need to come up with a more clever definition of test in order
171 // to support isolation of tests into threads.
174 StaticBenchFn(fn(&mut Bencher)),
175 DynTestFn(Box<dyn FnOnce() + Send>),
176 DynBenchFn(Box<dyn TDynBenchFn + 'static>),
180 fn padding(&self) -> NamePadding {
182 StaticTestFn(..) => PadNone,
183 StaticBenchFn(..) => PadOnRight,
184 DynTestFn(..) => PadNone,
185 DynBenchFn(..) => PadOnRight,
190 impl fmt::Debug for TestFn {
191 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
192 f.write_str(match *self {
193 StaticTestFn(..) => "StaticTestFn(..)",
194 StaticBenchFn(..) => "StaticBenchFn(..)",
195 DynTestFn(..) => "DynTestFn(..)",
196 DynBenchFn(..) => "DynBenchFn(..)",
201 /// Manager of the benchmarking runs.
203 /// This is fed into functions marked with `#[bench]` to allow for
204 /// set-up & tear-down before running a piece of code repeatedly via a
209 summary: Option<stats::Summary>,
213 #[derive(Clone, PartialEq, Eq)]
219 #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
220 pub enum ShouldPanic {
223 YesWithMessage(&'static str),
226 // The definition of a single test. A test runner will run a list of
228 #[derive(Clone, Debug, PartialEq, Eq, Hash)]
229 pub struct TestDesc {
232 pub should_panic: ShouldPanic,
233 pub allow_fail: bool,
237 pub struct TestDescAndFn {
242 #[derive(Clone, PartialEq, Debug, Copy)]
249 pub fn new(value: f64, noise: f64) -> Metric {
250 Metric { value, noise }
254 /// In case we want to add other options as well, just add them in this struct.
255 #[derive(Copy, Clone, Debug)]
257 display_output: bool,
261 pub fn new() -> Options {
263 display_output: false,
267 pub fn display_output(mut self, display_output: bool) -> Options {
268 self.display_output = display_output;
273 // The default console test runner. It accepts the command line
274 // arguments and a vector of test_descs.
275 pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>, options: Options) {
276 let mut opts = match parse_opts(args) {
279 eprintln!("error: {}", msg);
285 opts.options = options;
287 if let Err(e) = list_tests_console(&opts, tests) {
288 eprintln!("error: io error when listing tests: {:?}", e);
292 match run_tests_console(&opts, tests) {
294 Ok(false) => process::exit(101),
296 eprintln!("error: io error when listing tests: {:?}", e);
303 // A variant optimized for invocation with a static test vector.
304 // This will panic (intentionally) when fed any dynamic tests, because
305 // it is copying the static values out into a dynamic vector and cannot
306 // copy dynamic values. It is doing this because from this point on
307 // a Vec<TestDescAndFn> is used in order to effect ownership-transfer
308 // semantics into parallel test runners, which in turn requires a Vec<>
309 // rather than a &[].
310 pub fn test_main_static(tests: &[&TestDescAndFn]) {
311 let args = env::args().collect::<Vec<_>>();
312 let owned_tests = tests
314 .map(|t| match t.testfn {
315 StaticTestFn(f) => TestDescAndFn {
316 testfn: StaticTestFn(f),
317 desc: t.desc.clone(),
319 StaticBenchFn(f) => TestDescAndFn {
320 testfn: StaticBenchFn(f),
321 desc: t.desc.clone(),
323 _ => panic!("non-static tests passed to test::test_main_static"),
326 test_main(&args, owned_tests, Options::new())
329 /// Invoked when unit tests terminate. Should panic if the unit
330 /// Tests is considered a failure. By default, invokes `report()`
331 /// and checks for a `0` result.
332 pub fn assert_test_result<T: Termination>(result: T) {
333 let code = result.report();
336 "the test returned a termination value with a non-zero status code ({}) \
337 which indicates a failure",
342 #[derive(Copy, Clone, Debug)]
343 pub enum ColorConfig {
349 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
350 pub enum OutputFormat {
356 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
357 pub enum RunIgnored {
364 pub struct TestOpts {
366 pub filter: Option<String>,
367 pub filter_exact: bool,
368 pub exclude_should_panic: bool,
369 pub run_ignored: RunIgnored,
371 pub bench_benchmarks: bool,
372 pub logfile: Option<PathBuf>,
374 pub color: ColorConfig,
375 pub format: OutputFormat,
376 pub test_threads: Option<usize>,
377 pub skip: Vec<String>,
378 pub options: Options,
383 fn new() -> TestOpts {
388 exclude_should_panic: false,
389 run_ignored: RunIgnored::No,
391 bench_benchmarks: false,
395 format: OutputFormat::Pretty,
398 options: Options::new(),
403 /// Result of parsing the options.
404 pub type OptRes = Result<TestOpts, String>;
406 fn optgroups() -> getopts::Options {
407 let mut opts = getopts::Options::new();
408 opts.optflag("", "include-ignored", "Run ignored and not ignored tests")
409 .optflag("", "ignored", "Run only ignored tests")
410 .optflag("", "exclude-should-panic", "Excludes tests marked as should_panic")
411 .optflag("", "test", "Run tests and not benchmarks")
412 .optflag("", "bench", "Run benchmarks instead of tests")
413 .optflag("", "list", "List all tests and benchmarks")
414 .optflag("h", "help", "Display this message (longer with --help)")
418 "Write logs to the specified file instead \
425 "don't capture stdout/stderr of each \
426 task, allow printing directly",
431 "Number of threads used for running tests \
438 "Skip tests whose names contain FILTER (this flag can \
439 be used multiple times)",
445 "Display one character per test instead of one line. \
446 Alias to --format=terse",
451 "Exactly match filters rather than by substring",
456 "Configure coloring of output:
457 auto = colorize if stdout is a tty and tests are run on serially (default);
458 always = always colorize output;
459 never = never colorize output;",
465 "Configure formatting of output:
466 pretty = Print verbose output;
467 terse = Display one character per test;
468 json = Output a json document",
474 "Enable nightly-only flags:
475 unstable-options = Allow use of experimental features",
481 fn usage(binary: &str, options: &getopts::Options) {
482 let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
486 The FILTER string is tested against the name of all tests, and only those
487 tests whose names contain the filter are run.
489 By default, all tests are run in parallel. This can be altered with the
490 --test-threads flag or the RUST_TEST_THREADS environment variable when running
493 All tests have their standard output and standard error captured by default.
494 This can be overridden with the --nocapture flag or setting RUST_TEST_NOCAPTURE
495 environment variable to a value other than "0". Logging is not captured by default.
499 #[test] - Indicates a function is a test to be run. This function
501 #[bench] - Indicates a function is a benchmark to be run. This
502 function takes one argument (test::Bencher).
503 #[should_panic] - This function (also labeled with #[test]) will only pass if
504 the code causes a panic (an assertion failure or panic!)
505 A message may be provided, which the failure string must
506 contain: #[should_panic(expected = "foo")].
507 #[ignore] - When applied to a function which is already attributed as a
508 test, then the test runner will ignore these tests during
509 normal test runs. Running with --ignored or --include-ignored will run
511 usage = options.usage(&message)
515 // FIXME: Copied from libsyntax until linkage errors are resolved. Issue #47566
fn is_nightly() -> bool {
    // Whether this is a feature-staged build, i.e., on the beta or stable channel
    let disable_unstable_features = option_env!("CFG_DISABLE_UNSTABLE_FEATURES").is_some();
    // Whether we should enable unstable features for bootstrapping
    let bootstrap = env::var("RUSTC_BOOTSTRAP").is_ok();

    bootstrap || !disable_unstable_features
}
525 // Parses command line arguments into test options
526 pub fn parse_opts(args: &[String]) -> Option<OptRes> {
527 let mut allow_unstable = false;
528 let opts = optgroups();
529 let args = args.get(1..).unwrap_or(args);
530 let matches = match opts.parse(args) {
532 Err(f) => return Some(Err(f.to_string())),
535 if let Some(opt) = matches.opt_str("Z") {
538 "the option `Z` is only accepted on the nightly compiler".into(),
543 "unstable-options" => {
544 allow_unstable = true;
547 return Some(Err("Unrecognized option to `Z`".into()));
552 if matches.opt_present("h") {
553 usage(&args[0], &opts);
557 let filter = if !matches.free.is_empty() {
558 Some(matches.free[0].clone())
563 let exclude_should_panic = matches.opt_present("exclude-should-panic");
564 if !allow_unstable && exclude_should_panic {
566 "The \"exclude-should-panic\" flag is only accepted on the nightly compiler".into(),
570 let include_ignored = matches.opt_present("include-ignored");
571 if !allow_unstable && include_ignored {
573 "The \"include-ignored\" flag is only accepted on the nightly compiler".into(),
577 let run_ignored = match (include_ignored, matches.opt_present("ignored")) {
580 "the options --include-ignored and --ignored are mutually exclusive".into(),
583 (true, false) => RunIgnored::Yes,
584 (false, true) => RunIgnored::Only,
585 (false, false) => RunIgnored::No,
587 let quiet = matches.opt_present("quiet");
588 let exact = matches.opt_present("exact");
589 let list = matches.opt_present("list");
591 let logfile = matches.opt_str("logfile");
592 let logfile = logfile.map(|s| PathBuf::from(&s));
594 let bench_benchmarks = matches.opt_present("bench");
595 let run_tests = !bench_benchmarks || matches.opt_present("test");
597 let mut nocapture = matches.opt_present("nocapture");
599 nocapture = match env::var("RUST_TEST_NOCAPTURE") {
600 Ok(val) => &val != "0",
605 let test_threads = match matches.opt_str("test-threads") {
606 Some(n_str) => match n_str.parse::<usize>() {
607 Ok(0) => return Some(Err("argument for --test-threads must not be 0".to_string())),
610 return Some(Err(format!(
611 "argument for --test-threads must be a number > 0 \
620 let color = match matches.opt_str("color").as_ref().map(|s| &**s) {
621 Some("auto") | None => AutoColor,
622 Some("always") => AlwaysColor,
623 Some("never") => NeverColor,
626 return Some(Err(format!(
627 "argument for --color must be auto, always, or never (was \
634 let format = match matches.opt_str("format").as_ref().map(|s| &**s) {
635 None if quiet => OutputFormat::Terse,
636 Some("pretty") | None => OutputFormat::Pretty,
637 Some("terse") => OutputFormat::Terse,
641 "The \"json\" format is only accepted on the nightly compiler".into(),
648 return Some(Err(format!(
649 "argument for --format must be pretty, terse, or json (was \
656 let test_opts = TestOpts {
660 exclude_should_panic,
669 skip: matches.opt_strs("skip"),
670 options: Options::new(),
676 #[derive(Clone, PartialEq)]
677 pub struct BenchSamples {
678 ns_iter_summ: stats::Summary,
682 #[derive(Clone, PartialEq)]
683 pub enum TestResult {
689 TrBench(BenchSamples),
692 unsafe impl Send for TestResult {}
694 enum OutputLocation<T> {
695 Pretty(Box<term::StdoutTerminal>),
699 impl<T: Write> Write for OutputLocation<T> {
700 fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
702 Pretty(ref mut term) => term.write(buf),
703 Raw(ref mut stdout) => stdout.write(buf),
707 fn flush(&mut self) -> io::Result<()> {
709 Pretty(ref mut term) => term.flush(),
710 Raw(ref mut stdout) => stdout.flush(),
715 struct ConsoleTestState {
716 log_out: Option<File>,
725 failures: Vec<(TestDesc, Vec<u8>)>,
726 not_failures: Vec<(TestDesc, Vec<u8>)>,
730 impl ConsoleTestState {
731 pub fn new(opts: &TestOpts) -> io::Result<ConsoleTestState> {
732 let log_out = match opts.logfile {
733 Some(ref path) => Some(File::create(path)?),
737 Ok(ConsoleTestState {
746 metrics: MetricMap::new(),
747 failures: Vec::new(),
748 not_failures: Vec::new(),
749 options: opts.options,
753 pub fn write_log<S: AsRef<str>>(&mut self, msg: S) -> io::Result<()> {
754 let msg = msg.as_ref();
757 Some(ref mut o) => o.write_all(msg.as_bytes()),
761 pub fn write_log_result(&mut self, test: &TestDesc, result: &TestResult) -> io::Result<()> {
762 self.write_log(format!(
765 TrOk => "ok".to_owned(),
766 TrFailed => "failed".to_owned(),
767 TrFailedMsg(ref msg) => format!("failed: {}", msg),
768 TrIgnored => "ignored".to_owned(),
769 TrAllowedFail => "failed (allowed)".to_owned(),
770 TrBench(ref bs) => fmt_bench_samples(bs),
776 fn current_test_count(&self) -> usize {
777 self.passed + self.failed + self.ignored + self.measured + self.allowed_fail
781 // Format a number with thousands separators
// Format a number with thousands separators (e.g. 1234567 -> "1,234,567").
fn fmt_thousands_sep(mut n: usize, sep: char) -> String {
    use std::fmt::Write;
    let mut output = String::new();
    let mut trailing = false;
    for &pow in &[9, 6, 3, 0] {
        let base = 10_usize.pow(pow);
        // Emit a group once the first non-zero group has been seen, or at
        // the final group regardless (so that `0` still prints). Groups
        // after the first are zero-padded to three digits.
        if pow == 0 || trailing || n / base != 0 {
            if !trailing {
                write!(output, "{}", n / base).unwrap();
            } else {
                write!(output, "{:03}", n / base).unwrap();
            }
            if pow != 0 {
                output.push(sep);
            }
            trailing = true;
        }
        n %= base;
    }

    output
}
805 pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
807 let mut output = String::new();
809 let median = bs.ns_iter_summ.median as usize;
810 let deviation = (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize;
813 .write_fmt(format_args!(
814 "{:>11} ns/iter (+/- {})",
815 fmt_thousands_sep(median, ','),
816 fmt_thousands_sep(deviation, ',')
821 .write_fmt(format_args!(" = {} MB/s", bs.mb_s))
827 // List the tests to console, and optionally to logfile. Filters are honored.
828 pub fn list_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<()> {
829 let mut output = match term::stdout() {
830 None => Raw(io::stdout()),
831 Some(t) => Pretty(t),
834 let quiet = opts.format == OutputFormat::Terse;
835 let mut st = ConsoleTestState::new(opts)?;
840 for test in filter_tests(&opts, tests) {
841 use crate::TestFn::*;
844 desc: TestDesc { name, .. },
848 let fntype = match testfn {
849 StaticTestFn(..) | DynTestFn(..) => {
853 StaticBenchFn(..) | DynBenchFn(..) => {
859 writeln!(output, "{}: {}", name, fntype)?;
860 st.write_log(format!("{} {}\n", fntype, name))?;
/// Pluralizes a noun for a count: `1 test`, `0 tests`, `2 tests`.
fn plural(count: u32, s: &str) -> String {
    match count {
        1 => format!("{} {}", 1, s),
        n => format!("{} {}s", n, s),
    }
}
871 if ntest != 0 || nbench != 0 {
872 writeln!(output, "")?;
878 plural(ntest, "test"),
879 plural(nbench, "benchmark")
886 // A simple console test runner
887 pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<bool> {
890 st: &mut ConsoleTestState,
891 out: &mut dyn OutputFormatter,
892 ) -> io::Result<()> {
893 match (*event).clone() {
894 TeFiltered(ref filtered_tests) => {
895 st.total = filtered_tests.len();
896 out.write_run_start(filtered_tests.len())
898 TeFilteredOut(filtered_out) => Ok(st.filtered_out = filtered_out),
899 TeWait(ref test) => out.write_test_start(test),
900 TeTimeout(ref test) => out.write_timeout(test),
901 TeResult(test, result, stdout) => {
902 st.write_log_result(&test, &result)?;
903 out.write_result(&test, &result, &*stdout)?;
907 st.not_failures.push((test, stdout));
909 TrIgnored => st.ignored += 1,
910 TrAllowedFail => st.allowed_fail += 1,
912 st.metrics.insert_metric(
913 test.name.as_slice(),
914 bs.ns_iter_summ.median,
915 bs.ns_iter_summ.max - bs.ns_iter_summ.min,
921 st.failures.push((test, stdout));
923 TrFailedMsg(msg) => {
925 let mut stdout = stdout;
926 stdout.extend_from_slice(format!("note: {}", msg).as_bytes());
927 st.failures.push((test, stdout));
935 let output = match term::stdout() {
936 None => Raw(io::stdout()),
937 Some(t) => Pretty(t),
940 let max_name_len = tests
942 .max_by_key(|t| len_if_padded(*t))
943 .map(|t| t.desc.name.as_slice().len())
946 let is_multithreaded = opts.test_threads.unwrap_or_else(get_concurrency) > 1;
948 let mut out: Box<dyn OutputFormatter> = match opts.format {
949 OutputFormat::Pretty => Box::new(PrettyFormatter::new(
955 OutputFormat::Terse => Box::new(TerseFormatter::new(
961 OutputFormat::Json => Box::new(JsonFormatter::new(output)),
963 let mut st = ConsoleTestState::new(opts)?;
964 fn len_if_padded(t: &TestDescAndFn) -> usize {
965 match t.testfn.padding() {
967 PadOnRight => t.desc.name.as_slice().len(),
971 run_tests(opts, tests, |x| callback(&x, &mut st, &mut *out))?;
973 assert!(st.current_test_count() == st.total);
975 return out.write_run_finish(&st);
979 fn should_sort_failures_before_printing_them() {
980 let test_a = TestDesc {
981 name: StaticTestName("a"),
983 should_panic: ShouldPanic::No,
987 let test_b = TestDesc {
988 name: StaticTestName("b"),
990 should_panic: ShouldPanic::No,
994 let mut out = PrettyFormatter::new(Raw(Vec::new()), false, 10, false);
996 let st = ConsoleTestState {
1005 metrics: MetricMap::new(),
1006 failures: vec![(test_b, Vec::new()), (test_a, Vec::new())],
1007 options: Options::new(),
1008 not_failures: Vec::new(),
1011 out.write_failures(&st).unwrap();
1012 let s = match out.output_location() {
1013 &Raw(ref m) => String::from_utf8_lossy(&m[..]),
1014 &Pretty(_) => unreachable!(),
1017 let apos = s.find("a").unwrap();
1018 let bpos = s.find("b").unwrap();
1019 assert!(apos < bpos);
1022 fn use_color(opts: &TestOpts) -> bool {
1024 AutoColor => !opts.nocapture && stdout_isatty(),
1025 AlwaysColor => true,
1026 NeverColor => false,
1031 target_os = "cloudabi",
1032 target_os = "redox",
1033 all(target_arch = "wasm32", not(target_os = "emscripten")),
1034 all(target_vendor = "fortanix", target_env = "sgx")
1036 fn stdout_isatty() -> bool {
1037 // FIXME: Implement isatty on Redox and SGX
1041 fn stdout_isatty() -> bool {
1042 unsafe { libc::isatty(libc::STDOUT_FILENO) != 0 }
1045 fn stdout_isatty() -> bool {
1048 type HANDLE = *mut u8;
1049 type LPDWORD = *mut u32;
1050 const STD_OUTPUT_HANDLE: DWORD = -11i32 as DWORD;
1052 fn GetStdHandle(which: DWORD) -> HANDLE;
1053 fn GetConsoleMode(hConsoleHandle: HANDLE, lpMode: LPDWORD) -> BOOL;
1056 let handle = GetStdHandle(STD_OUTPUT_HANDLE);
1058 GetConsoleMode(handle, &mut out) != 0
1063 pub enum TestEvent {
1064 TeFiltered(Vec<TestDesc>),
1066 TeResult(TestDesc, TestResult, Vec<u8>),
1067 TeTimeout(TestDesc),
1068 TeFilteredOut(usize),
1071 pub type MonitorMsg = (TestDesc, TestResult, Vec<u8>);
/// A `Write` sink that appends everything into a shared, mutex-guarded
/// byte buffer; used to capture a test's printed output.
struct Sink(Arc<Mutex<Vec<u8>>>);
impl Write for Sink {
    fn write(&mut self, data: &[u8]) -> io::Result<usize> {
        Write::write(&mut *self.0.lock().unwrap(), data)
    }
    fn flush(&mut self) -> io::Result<()> {
        // The underlying Vec needs no flushing.
        Ok(())
    }
}
1083 pub fn run_tests<F>(opts: &TestOpts, tests: Vec<TestDescAndFn>, mut callback: F) -> io::Result<()>
1085 F: FnMut(TestEvent) -> io::Result<()>,
1087 use std::collections::{self, HashMap};
1088 use std::hash::BuildHasherDefault;
1089 use std::sync::mpsc::RecvTimeoutError;
1090 // Use a deterministic hasher
1092 HashMap<TestDesc, Instant, BuildHasherDefault<collections::hash_map::DefaultHasher>>;
1094 let tests_len = tests.len();
1096 let mut filtered_tests = filter_tests(opts, tests);
1097 if !opts.bench_benchmarks {
1098 filtered_tests = convert_benchmarks_to_tests(filtered_tests);
1101 let filtered_tests = {
1102 let mut filtered_tests = filtered_tests;
1103 for test in filtered_tests.iter_mut() {
1104 test.desc.name = test.desc.name.with_padding(test.testfn.padding());
1110 let filtered_out = tests_len - filtered_tests.len();
1111 callback(TeFilteredOut(filtered_out))?;
1113 let filtered_descs = filtered_tests.iter().map(|t| t.desc.clone()).collect();
1115 callback(TeFiltered(filtered_descs))?;
1117 let (filtered_tests, filtered_benchs): (Vec<_>, _) =
1118 filtered_tests.into_iter().partition(|e| match e.testfn {
1119 StaticTestFn(_) | DynTestFn(_) => true,
1123 let concurrency = opts.test_threads.unwrap_or_else(get_concurrency);
1125 let mut remaining = filtered_tests;
1126 remaining.reverse();
1127 let mut pending = 0;
1129 let (tx, rx) = channel::<MonitorMsg>();
1131 let mut running_tests: TestMap = HashMap::default();
1133 fn get_timed_out_tests(running_tests: &mut TestMap) -> Vec<TestDesc> {
1134 let now = Instant::now();
1135 let timed_out = running_tests
1137 .filter_map(|(desc, timeout)| {
1138 if &now >= timeout {
1145 for test in &timed_out {
1146 running_tests.remove(test);
1151 fn calc_timeout(running_tests: &TestMap) -> Option<Duration> {
1152 running_tests.values().min().map(|next_timeout| {
1153 let now = Instant::now();
1154 if *next_timeout >= now {
1162 if concurrency == 1 {
1163 while !remaining.is_empty() {
1164 let test = remaining.pop().unwrap();
1165 callback(TeWait(test.desc.clone()))?;
1166 run_test(opts, !opts.run_tests, test, tx.clone(), Concurrent::No);
1167 let (test, result, stdout) = rx.recv().unwrap();
1168 callback(TeResult(test, result, stdout))?;
1171 while pending > 0 || !remaining.is_empty() {
1172 while pending < concurrency && !remaining.is_empty() {
1173 let test = remaining.pop().unwrap();
1174 let timeout = Instant::now() + Duration::from_secs(TEST_WARN_TIMEOUT_S);
1175 running_tests.insert(test.desc.clone(), timeout);
1176 callback(TeWait(test.desc.clone()))?; //here no pad
1177 run_test(opts, !opts.run_tests, test, tx.clone(), Concurrent::Yes);
1183 if let Some(timeout) = calc_timeout(&running_tests) {
1184 res = rx.recv_timeout(timeout);
1185 for test in get_timed_out_tests(&mut running_tests) {
1186 callback(TeTimeout(test))?;
1188 if res != Err(RecvTimeoutError::Timeout) {
1192 res = rx.recv().map_err(|_| RecvTimeoutError::Disconnected);
1197 let (desc, result, stdout) = res.unwrap();
1198 running_tests.remove(&desc);
1200 callback(TeResult(desc, result, stdout))?;
1205 if opts.bench_benchmarks {
1206 // All benchmarks run at the end, in serial.
1207 for b in filtered_benchs {
1208 callback(TeWait(b.desc.clone()))?;
1209 run_test(opts, false, b, tx.clone(), Concurrent::No);
1210 let (test, result, stdout) = rx.recv().unwrap();
1211 callback(TeResult(test, result, stdout))?;
1217 #[allow(deprecated)]
1218 fn get_concurrency() -> usize {
1219 return match env::var("RUST_TEST_THREADS") {
1221 let opt_n: Option<usize> = s.parse().ok();
1223 Some(n) if n > 0 => n,
1225 "RUST_TEST_THREADS is `{}`, should be a positive integer.",
1230 Err(..) => num_cpus(),
1234 #[allow(nonstandard_style)]
1235 fn num_cpus() -> usize {
1237 struct SYSTEM_INFO {
1238 wProcessorArchitecture: u16,
1241 lpMinimumApplicationAddress: *mut u8,
1242 lpMaximumApplicationAddress: *mut u8,
1243 dwActiveProcessorMask: *mut u8,
1244 dwNumberOfProcessors: u32,
1245 dwProcessorType: u32,
1246 dwAllocationGranularity: u32,
1247 wProcessorLevel: u16,
1248 wProcessorRevision: u16,
1251 fn GetSystemInfo(info: *mut SYSTEM_INFO) -> i32;
1254 let mut sysinfo = std::mem::zeroed();
1255 GetSystemInfo(&mut sysinfo);
1256 sysinfo.dwNumberOfProcessors as usize
1260 #[cfg(target_os = "redox")]
1261 fn num_cpus() -> usize {
1262 // FIXME: Implement num_cpus on Redox
1266 #[cfg(target_os = "vxworks")]
1267 fn num_cpus() -> usize {
1268 // FIXME: Implement num_cpus on vxWorks
1273 all(target_arch = "wasm32", not(target_os = "emscripten")),
1274 all(target_vendor = "fortanix", target_env = "sgx")
1276 fn num_cpus() -> usize {
1281 target_os = "android",
1282 target_os = "cloudabi",
1283 target_os = "emscripten",
1284 target_os = "fuchsia",
1286 target_os = "linux",
1287 target_os = "macos",
1288 target_os = "solaris"
1290 fn num_cpus() -> usize {
1291 unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as usize }
1295 target_os = "freebsd",
1296 target_os = "dragonfly",
1297 target_os = "netbsd"
1299 fn num_cpus() -> usize {
1302 let mut cpus: libc::c_uint = 0;
1303 let mut cpus_size = std::mem::size_of_val(&cpus);
1306 cpus = libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as libc::c_uint;
1309 let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
1314 &mut cpus as *mut _ as *mut _,
1315 &mut cpus_size as *mut _ as *mut _,
1327 #[cfg(target_os = "openbsd")]
1328 fn num_cpus() -> usize {
1331 let mut cpus: libc::c_uint = 0;
1332 let mut cpus_size = std::mem::size_of_val(&cpus);
1333 let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
1339 &mut cpus as *mut _ as *mut _,
1340 &mut cpus_size as *mut _ as *mut _,
1351 #[cfg(target_os = "haiku")]
1352 fn num_cpus() -> usize {
1357 #[cfg(target_os = "l4re")]
1358 fn num_cpus() -> usize {
1364 pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
1365 let mut filtered = tests;
1366 let matches_filter = |test: &TestDescAndFn, filter: &str| {
1367 let test_name = test.desc.name.as_slice();
1369 match opts.filter_exact {
1370 true => test_name == filter,
1371 false => test_name.contains(filter),
1375 // Remove tests that don't match the test filter
1376 if let Some(ref filter) = opts.filter {
1377 filtered.retain(|test| matches_filter(test, filter));
1380 // Skip tests that match any of the skip filters
1381 filtered.retain(|test| !opts.skip.iter().any(|sf| matches_filter(test, sf)));
1383 // Excludes #[should_panic] tests
1384 if opts.exclude_should_panic {
1385 filtered.retain(|test| test.desc.should_panic == ShouldPanic::No);
1388 // maybe unignore tests
1389 match opts.run_ignored {
1390 RunIgnored::Yes => {
1393 .for_each(|test| test.desc.ignore = false);
1395 RunIgnored::Only => {
1396 filtered.retain(|test| test.desc.ignore);
1399 .for_each(|test| test.desc.ignore = false);
1401 RunIgnored::No => {}
1404 // Sort the tests alphabetically
1405 filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));
1410 pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
1411 // convert benchmarks to tests, if we're not benchmarking them
1415 let testfn = match x.testfn {
1416 DynBenchFn(bench) => DynTestFn(Box::new(move || {
1417 bench::run_once(|b| __rust_begin_short_backtrace(|| bench.run(b)))
1419 StaticBenchFn(benchfn) => DynTestFn(Box::new(move || {
1420 bench::run_once(|b| __rust_begin_short_backtrace(|| benchfn(b)))
1435 test: TestDescAndFn,
1436 monitor_ch: Sender<MonitorMsg>,
1437 concurrency: Concurrent,
1439 let TestDescAndFn { desc, testfn } = test;
1441 let ignore_because_panic_abort = cfg!(target_arch = "wasm32")
1442 && !cfg!(target_os = "emscripten")
1443 && desc.should_panic != ShouldPanic::No;
1445 if force_ignore || desc.ignore || ignore_because_panic_abort {
1446 monitor_ch.send((desc, TrIgnored, Vec::new())).unwrap();
1452 monitor_ch: Sender<MonitorMsg>,
1454 testfn: Box<dyn FnOnce() + Send>,
1455 concurrency: Concurrent,
1457 // Buffer for capturing standard I/O
1458 let data = Arc::new(Mutex::new(Vec::new()));
1459 let data2 = data.clone();
1461 let name = desc.name.clone();
1462 let runtest = move || {
1463 let oldio = if !nocapture {
1465 io::set_print(Some(Box::new(Sink(data2.clone())))),
1466 io::set_panic(Some(Box::new(Sink(data2)))),
1472 let result = catch_unwind(AssertUnwindSafe(testfn));
1474 if let Some((printio, panicio)) = oldio {
1475 io::set_print(printio);
1476 io::set_panic(panicio);
1479 let test_result = calc_result(&desc, result);
1480 let stdout = data.lock().unwrap().to_vec();
1482 .send((desc.clone(), test_result, stdout))
1486 // If the platform is single-threaded we're just going to run
1487 // the test synchronously, regardless of the concurrency
1489 let supports_threads = !cfg!(target_os = "emscripten") && !cfg!(target_arch = "wasm32");
1490 if concurrency == Concurrent::Yes && supports_threads {
1491 let cfg = thread::Builder::new().name(name.as_slice().to_owned());
1492 cfg.spawn(runtest).unwrap();
// NOTE(review): fragment — arms of the `match testfn` dispatch in `run_test`;
// the enclosing match and some closing braces are outside this view.
        DynBenchFn(bencher) => {
            // Benchmarks bypass `run_test_inner` and go through the bench harness.
            crate::bench::benchmark(desc, monitor_ch, opts.nocapture, |harness| {
                bencher.run(harness)
        StaticBenchFn(benchfn) => {
            crate::bench::benchmark(desc, monitor_ch, opts.nocapture, |harness| {
                (benchfn.clone())(harness)
            // Wrap the test closure so backtraces are trimmed at the
            // `__rust_begin_short_backtrace` frame.
            let cb = move || __rust_begin_short_backtrace(f);
            run_test_inner(desc, monitor_ch, opts.nocapture, Box::new(cb), concurrency)
        StaticTestFn(f) => run_test_inner(
            Box::new(move || __rust_begin_short_backtrace(f)),
/// Fixed frame used to clean the backtrace with `RUST_BACKTRACE=1`.
// NOTE(review): fragment — the body (presumably just invoking `f`) is outside
// this view.
fn __rust_begin_short_backtrace<F: FnOnce()>(f: F) {
/// Maps a finished test's panic outcome to a `TestResult`, honoring the
/// test's `should_panic` expectation and its `allow_fail` flag.
// NOTE(review): fragment — several match arms and braces are outside this view.
fn calc_result(desc: &TestDesc, task_result: Result<(), Box<dyn Any + Send>>) -> TestResult {
    match (&desc.should_panic, task_result) {
        // Expected outcomes: no panic when none expected, or any panic when
        // one is expected.
        (&ShouldPanic::No, Ok(())) | (&ShouldPanic::Yes, Err(_)) => TrOk,
        (&ShouldPanic::YesWithMessage(msg), Err(ref err)) => {
            // Panic payloads arrive as `String` or `&'static str`; check that
            // the message contains the expected substring.
                .downcast_ref::<String>()
                .or_else(|| err.downcast_ref::<&'static str>().map(|e| *e))
                .map(|e| e.contains(msg))
            if desc.allow_fail {
            TrFailedMsg(format!("panic did not include expected string '{}'", msg))
        // Any other mismatch counts as an allowed failure only if the test
        // opted in via `allow_fail`.
        _ if desc.allow_fail => TrAllowedFail,
/// A collection of named `Metric`s, kept sorted by name (BTreeMap order).
#[derive(Clone, PartialEq)]
pub struct MetricMap(BTreeMap<String, Metric>);
    /// Creates an empty `MetricMap`.
    pub fn new() -> MetricMap {
        MetricMap(BTreeMap::new())
    /// Insert a named `value` (+/- `noise`) metric into the map. The value
    /// must be non-negative. The `noise` indicates the uncertainty of the
    /// metric, which doubles as the "noise range" of acceptable
    /// pairwise-regressions on this named value, when comparing from one
    /// metric to the next using `compare_to_old`.
    /// If `noise` is positive, then it means this metric is of a value
    /// you want to see grow smaller, so a change larger than `noise` in the
    /// positive direction represents a regression.
    /// If `noise` is negative, then it means this metric is of a value
    /// you want to see grow larger, so a change larger than `noise` in the
    /// negative direction represents a regression.
    pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
        let m = Metric { value, noise };
        // Overwrites any metric previously recorded under the same name.
        self.0.insert(name.to_owned(), m);
    /// Renders the metrics as human-readable "name: value (+/- noise)" entries.
    // NOTE(review): fragment — the iterator source and the final join/return
    // are outside this view.
    pub fn fmt_metrics(&self) -> String {
            .map(|(k, v)| format!("{}: {} (+/- {})", *k, v.value, v.noise))
            .collect::<Vec<_>>();
1592 pub use std::hint::black_box;
    /// Callback for benchmark functions to run in their body.
    // NOTE(review): fragment — the where-clause and parts of the body are
    // outside this view.
    pub fn iter<T, F>(&mut self, mut inner: F)
        // In single-iteration mode, time exactly one pass of the closure.
        if self.mode == BenchMode::Single {
            ns_iter_inner(&mut inner, 1);
        // Otherwise run the adaptive sampling driver and keep its summary.
        self.summary = Some(iter(&mut inner));
    /// Runs the benchmark closure `f` against this `Bencher` and returns the
    /// summary recorded by `iter`, if `iter` was called.
    // NOTE(review): fragment — lines between the signature and the return are
    // outside this view.
    pub fn bench<F>(&mut self, mut f: F) -> Option<stats::Summary>
        F: FnMut(&mut Bencher),
        return self.summary;
/// Converts a `Duration` to a whole number of nanoseconds.
///
/// The seconds-to-nanoseconds multiplication can overflow `u64` for durations
/// longer than roughly 584 years; benchmark timings are far below that.
fn ns_from_dur(dur: Duration) -> u64 {
    let whole_secs_as_ns = dur.as_secs() * 1_000_000_000;
    whole_secs_as_ns + u64::from(dur.subsec_nanos())
}
// Times `k` consecutive calls of `inner` and returns the total elapsed
// nanoseconds.
// NOTE(review): fragment — the where-clause and the repetition loop are
// outside this view.
fn ns_iter_inner<T, F>(inner: &mut F, k: u64) -> u64
    let start = Instant::now();
    return ns_from_dur(start.elapsed());
// Adaptive benchmark driver: repeatedly samples `inner`, growing the
// iteration count until the timing statistics converge or a time budget is
// exhausted, and returns the statistical summary of the samples.
// NOTE(review): fragment — the where-clause, the enclosing sampling loop, and
// several closing braces are outside this view.
pub fn iter<T, F>(inner: &mut F) -> stats::Summary
    // Initial bench run to get ballpark figure.
    let ns_single = ns_iter_inner(inner, 1);

    // Try to estimate iter count for 1ms falling back to 1m
    // iterations if first run took < 1ns.
    let ns_target_total = 1_000_000; // 1ms
    let mut n = ns_target_total / cmp::max(1, ns_single);

    // if the first run took more than 1ms we don't want to just
    // be left doing 0 iterations on every loop. The unfortunate
    // side effect of not being able to do as many runs is
    // automatically handled by the statistical analysis below
    // (i.e., larger error bars).

    let mut total_run = Duration::new(0, 0);
    let samples: &mut [f64] = &mut [0.0_f64; 50];
        let loop_start = Instant::now();

        // First pass: 50 samples of `n` iterations each, recorded as
        // nanoseconds-per-iteration.
        for p in &mut *samples {
            *p = ns_iter_inner(inner, n) as f64 / n as f64;

        // Clamp the most extreme 5% at each end before summarizing.
        stats::winsorize(samples, 5.0);
        let summ = stats::Summary::new(samples);

        // Second pass at 5x the iteration count, used to test convergence
        // against the first pass.
        for p in &mut *samples {
            let ns = ns_iter_inner(inner, 5 * n);
            *p = ns as f64 / (5 * n) as f64;

        stats::winsorize(samples, 5.0);
        let summ5 = stats::Summary::new(samples);

        let loop_run = loop_start.elapsed();

        // If we've run for 100ms and seem to have converged to a
        if loop_run > Duration::from_millis(100)
            && summ.median_abs_dev_pct < 1.0
            && summ.median - summ5.median < summ5.median_abs_dev

        total_run = total_run + loop_run;
        // Longest we ever run for is 3s.
        if total_run > Duration::from_secs(3) {

        // If we overflow here just return the results so far. We check a
        // multiplier of 10 because we're about to multiply by 2 and the
        // next iteration of the loop will also multiply by 5 (to calculate
        // the summ5 result)
        n = match n.checked_mul(10) {
1702 use super::{BenchMode, BenchSamples, Bencher, MonitorMsg, Sender, Sink, TestDesc, TestResult};
1706 use std::panic::{catch_unwind, AssertUnwindSafe};
1707 use std::sync::{Arc, Mutex};
// Runs a benchmark body `f` under stdio capture and reports its outcome
// (a `TestResult::TrBench`, or `TrFailed` on panic) on `monitor_ch`.
// NOTE(review): fragment — parts of struct initializers, some match arms, and
// closing braces are outside this view.
pub fn benchmark<F>(desc: TestDesc, monitor_ch: Sender<MonitorMsg>, nocapture: bool, f: F)
    F: FnMut(&mut Bencher),
    let mut bs = Bencher {
        mode: BenchMode::Auto,

    // Shared buffer that captures everything printed while the bench runs.
    let data = Arc::new(Mutex::new(Vec::new()));
    let data2 = data.clone();

    // Unless capture is disabled, redirect print/panic output into the buffer,
    // remembering the previous sinks so they can be restored afterwards.
    let oldio = if !nocapture {
            io::set_print(Some(Box::new(Sink(data2.clone())))),
            io::set_panic(Some(Box::new(Sink(data2)))),

    // A panicking benchmark is caught here rather than unwinding out.
    let result = catch_unwind(AssertUnwindSafe(|| bs.bench(f)));

    if let Some((printio, panicio)) = oldio {
        io::set_print(printio);
        io::set_panic(panicio);

    let test_result = match result {
        Ok(Some(ns_iter_summ)) => {
            // Median ns/iter, clamped to at least 1 so the throughput
            // division below cannot divide by zero.
            let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
            // bytes-per-iteration over ns-per-iteration, scaled to MB/s.
            let mb_s = bs.bytes * 1000 / ns_iter;

            let bs = BenchSamples {
                mb_s: mb_s as usize,
            TestResult::TrBench(bs)

            // iter not called, so no data.
            // FIXME: error in this case?
            let samples: &mut [f64] = &mut [0.0_f64; 1];
            let bs = BenchSamples {
                ns_iter_summ: stats::Summary::new(samples),
            TestResult::TrBench(bs)
        Err(_) => TestResult::TrFailed,

    // Report the result along with whatever was printed during the run.
    let stdout = data.lock().unwrap().to_vec();
    monitor_ch.send((desc, test_result, stdout)).unwrap();
// Runs `f` once against a single-iteration (`BenchMode::Single`) `Bencher`;
// used above when benchmarks are converted into plain tests.
// NOTE(review): fragment — the remainder of this function runs past the end
// of this view.
pub fn run_once<F>(f: F)
    F: FnMut(&mut Bencher),
    let mut bs = Bencher {
        mode: BenchMode::Single,