1 //! Support code for rustc's built in unit-test and micro-benchmarking
4 //! Almost all user code will only be interested in `Bencher` and
5 //! `black_box`. All other interactions (such as writing tests and
6 //! benchmarks themselves) should be done via the `#[test]` and
7 //! `#[bench]` attributes.
9 //! See the [Testing Chapter](../book/ch11-00-testing.html) of the book for more details.
11 // Currently, not much of this is meant for users. It is intended to
12 // support the simplest interface possible for representing and
13 // running tests while providing a base that other test frameworks may
16 // N.B., this is also specified in this crate's Cargo.toml, but libsyntax contains logic specific to
17 // this crate, which relies on this attribute (rather than the value of `--crate-name` passed by
18 // cargo) to detect this crate.
20 #![deny(rust_2018_idioms)]
21 #![crate_name = "test"]
22 #![unstable(feature = "test", issue = "27812")]
23 #![doc(html_root_url = "https://doc.rust-lang.org/nightly/", test(attr(deny(warnings))))]
25 #![cfg_attr(any(unix, target_os = "cloudabi"), feature(libc, rustc_private))]
27 #![feature(set_stdio)]
28 #![feature(panic_unwind)]
29 #![feature(staged_api)]
30 #![feature(termination_trait_lib)]
34 #[cfg(any(unix, target_os = "cloudabi"))]
38 // FIXME(#54291): rustc and/or LLVM don't yet support building with panic-unwind
39 // on aarch64-pc-windows-msvc, or thumbv7a-pc-windows-msvc
40 // so we don't link libtest against libunwind (for the time being)
41 // even though it means that libtest won't be fully functional on
44 // See also: https://github.com/rust-lang/rust/issues/54190#issuecomment-422904437
45 #[cfg(not(all(windows, any(target_arch = "aarch64", target_arch = "arm"))))]
46 extern crate panic_unwind;
48 pub use self::ColorConfig::*;
49 use self::NamePadding::*;
50 use self::OutputLocation::*;
51 use self::TestEvent::*;
52 pub use self::TestFn::*;
53 pub use self::TestName::*;
54 pub use self::TestResult::*;
59 use std::collections::BTreeMap;
64 use std::io::prelude::*;
65 use std::panic::{catch_unwind, AssertUnwindSafe};
66 use std::path::PathBuf;
68 use std::process::Termination;
69 use std::sync::mpsc::{channel, Sender};
70 use std::sync::{Arc, Mutex};
72 use std::time::{Duration, Instant};
74 const TEST_WARN_TIMEOUT_S: u64 = 60;
75 const QUIET_MODE_MAX_COLUMN: usize = 100; // insert a '\n' after 100 tests in quiet mode
77 // to be used by rustc to compile tests in libtest
80 assert_test_result, filter_tests, parse_opts, run_test, test_main, test_main_static,
81 Bencher, DynTestFn, DynTestName, Metric, MetricMap, Options, RunIgnored, ShouldPanic,
82 StaticBenchFn, StaticTestFn, StaticTestName, TestDesc, TestDescAndFn, TestName, TestOpts,
83 TestResult, TrFailed, TrFailedMsg, TrIgnored, TrOk,
90 use crate::formatters::{JsonFormatter, OutputFormatter, PrettyFormatter, TerseFormatter};
92 /// Whether to execute tests concurrently or not
93 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
99 // The name of a test. By convention this follows the rules for rust
100 // paths; i.e., it should be a series of identifiers separated by double
101 // colons. This way if some test runner wants to arrange the tests
102 // hierarchically it may.
104 #[derive(Clone, PartialEq, Eq, Hash, Debug)]
106 StaticTestName(&'static str),
108 AlignedTestName(Cow<'static, str>, NamePadding),
111 fn as_slice(&self) -> &str {
113 StaticTestName(s) => s,
114 DynTestName(ref s) => s,
115 AlignedTestName(ref s, _) => &*s,
119 fn padding(&self) -> NamePadding {
121 &AlignedTestName(_, p) => p,
126 fn with_padding(&self, padding: NamePadding) -> TestName {
127 let name = match self {
128 &TestName::StaticTestName(name) => Cow::Borrowed(name),
129 &TestName::DynTestName(ref name) => Cow::Owned(name.clone()),
130 &TestName::AlignedTestName(ref name, _) => name.clone(),
133 TestName::AlignedTestName(name, padding)
136 impl fmt::Display for TestName {
137 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
138 fmt::Display::fmt(self.as_slice(), f)
142 #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
143 pub enum NamePadding {
149 fn padded_name(&self, column_count: usize, align: NamePadding) -> String {
150 let mut name = String::from(self.name.as_slice());
151 let fill = column_count.saturating_sub(name.len());
152 let pad = " ".repeat(fill);
163 /// Represents a benchmark function.
164 pub trait TDynBenchFn: Send {
165 fn run(&self, harness: &mut Bencher);
168 // A function that runs a test. If the function returns successfully,
169 // the test succeeds; if the function panics then the test fails. We
170 // may need to come up with a more clever definition of test in order
171 // to support isolation of tests into threads.
174 StaticBenchFn(fn(&mut Bencher)),
175 DynTestFn(Box<dyn FnOnce() + Send>),
176 DynBenchFn(Box<dyn TDynBenchFn + 'static>),
180 fn padding(&self) -> NamePadding {
182 StaticTestFn(..) => PadNone,
183 StaticBenchFn(..) => PadOnRight,
184 DynTestFn(..) => PadNone,
185 DynBenchFn(..) => PadOnRight,
190 impl fmt::Debug for TestFn {
191 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
192 f.write_str(match *self {
193 StaticTestFn(..) => "StaticTestFn(..)",
194 StaticBenchFn(..) => "StaticBenchFn(..)",
195 DynTestFn(..) => "DynTestFn(..)",
196 DynBenchFn(..) => "DynBenchFn(..)",
201 /// Manager of the benchmarking runs.
203 /// This is fed into functions marked with `#[bench]` to allow for
204 /// set-up & tear-down before running a piece of code repeatedly via a
209 summary: Option<stats::Summary>,
213 #[derive(Clone, PartialEq, Eq)]
219 #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
220 pub enum ShouldPanic {
223 YesWithMessage(&'static str),
226 // The definition of a single test. A test runner will run a list of
228 #[derive(Clone, Debug, PartialEq, Eq, Hash)]
229 pub struct TestDesc {
232 pub should_panic: ShouldPanic,
233 pub allow_fail: bool,
237 pub struct TestDescAndFn {
242 #[derive(Clone, PartialEq, Debug, Copy)]
249 pub fn new(value: f64, noise: f64) -> Metric {
250 Metric { value, noise }
254 /// In case we want to add other options as well, just add them in this struct.
255 #[derive(Copy, Clone, Debug)]
257 display_output: bool,
261 pub fn new() -> Options {
263 display_output: false,
267 pub fn display_output(mut self, display_output: bool) -> Options {
268 self.display_output = display_output;
273 // The default console test runner. It accepts the command line
274 // arguments and a vector of test_descs.
275 pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>, options: Options) {
276 let mut opts = match parse_opts(args) {
279 eprintln!("error: {}", msg);
285 opts.options = options;
287 if let Err(e) = list_tests_console(&opts, tests) {
288 eprintln!("error: io error when listing tests: {:?}", e);
292 match run_tests_console(&opts, tests) {
294 Ok(false) => process::exit(101),
296 eprintln!("error: io error when listing tests: {:?}", e);
303 // A variant optimized for invocation with a static test vector.
304 // This will panic (intentionally) when fed any dynamic tests, because
305 // it is copying the static values out into a dynamic vector and cannot
306 // copy dynamic values. It is doing this because from this point on
307 // a Vec<TestDescAndFn> is used in order to effect ownership-transfer
308 // semantics into parallel test runners, which in turn requires a Vec<>
309 // rather than a &[].
310 pub fn test_main_static(tests: &[&TestDescAndFn]) {
311 let args = env::args().collect::<Vec<_>>();
312 let owned_tests = tests
314 .map(|t| match t.testfn {
315 StaticTestFn(f) => TestDescAndFn {
316 testfn: StaticTestFn(f),
317 desc: t.desc.clone(),
319 StaticBenchFn(f) => TestDescAndFn {
320 testfn: StaticBenchFn(f),
321 desc: t.desc.clone(),
323 _ => panic!("non-static tests passed to test::test_main_static"),
326 test_main(&args, owned_tests, Options::new())
329 /// Invoked when unit tests terminate. Should panic if the unit
330 /// test is considered a failure. By default, invokes `report()`
331 /// and checks for a `0` result.
332 pub fn assert_test_result<T: Termination>(result: T) {
333 let code = result.report();
336 "the test returned a termination value with a non-zero status code ({}) \
337 which indicates a failure",
342 #[derive(Copy, Clone, Debug)]
343 pub enum ColorConfig {
349 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
350 pub enum OutputFormat {
356 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
357 pub enum RunIgnored {
364 pub struct TestOpts {
366 pub filter: Option<String>,
367 pub filter_exact: bool,
368 pub exclude_should_panic: bool,
369 pub run_ignored: RunIgnored,
371 pub bench_benchmarks: bool,
372 pub logfile: Option<PathBuf>,
374 pub color: ColorConfig,
375 pub format: OutputFormat,
376 pub test_threads: Option<usize>,
377 pub skip: Vec<String>,
378 pub options: Options,
383 fn new() -> TestOpts {
388 exclude_should_panic: false,
389 run_ignored: RunIgnored::No,
391 bench_benchmarks: false,
395 format: OutputFormat::Pretty,
398 options: Options::new(),
403 /// Result of parsing the options.
404 pub type OptRes = Result<TestOpts, String>;
406 fn optgroups() -> getopts::Options {
407 let mut opts = getopts::Options::new();
408 opts.optflag("", "include-ignored", "Run ignored and not ignored tests")
409 .optflag("", "ignored", "Run only ignored tests")
410 .optflag("", "exclude-should-panic", "Excludes tests marked as should_panic")
411 .optflag("", "test", "Run tests and not benchmarks")
412 .optflag("", "bench", "Run benchmarks instead of tests")
413 .optflag("", "list", "List all tests and benchmarks")
414 .optflag("h", "help", "Display this message (longer with --help)")
418 "Write logs to the specified file instead \
425 "don't capture stdout/stderr of each \
426 task, allow printing directly",
431 "Number of threads used for running tests \
438 "Skip tests whose names contain FILTER (this flag can \
439 be used multiple times)",
445 "Display one character per test instead of one line. \
446 Alias to --format=terse",
451 "Exactly match filters rather than by substring",
456 "Configure coloring of output:
457 auto = colorize if stdout is a tty and tests are run on serially (default);
458 always = always colorize output;
459 never = never colorize output;",
465 "Configure formatting of output:
466 pretty = Print verbose output;
467 terse = Display one character per test;
468 json = Output a json document",
474 "Enable nightly-only flags:
475 unstable-options = Allow use of experimental features",
481 fn usage(binary: &str, options: &getopts::Options) {
482 let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
486 The FILTER string is tested against the name of all tests, and only those
487 tests whose names contain the filter are run.
489 By default, all tests are run in parallel. This can be altered with the
490 --test-threads flag or the RUST_TEST_THREADS environment variable when running
493 All tests have their standard output and standard error captured by default.
494 This can be overridden with the --nocapture flag or setting RUST_TEST_NOCAPTURE
495 environment variable to a value other than "0". Logging is not captured by default.
499 #[test] - Indicates a function is a test to be run. This function
501 #[bench] - Indicates a function is a benchmark to be run. This
502 function takes one argument (test::Bencher).
503 #[should_panic] - This function (also labeled with #[test]) will only pass if
504 the code causes a panic (an assertion failure or panic!)
505 A message may be provided, which the failure string must
506 contain: #[should_panic(expected = "foo")].
507 #[ignore] - When applied to a function which is already attributed as a
508 test, then the test runner will ignore these tests during
509 normal test runs. Running with --ignored or --include-ignored will run
511 usage = options.usage(&message)
515 // FIXME: Copied from libsyntax until linkage errors are resolved. Issue #47566
516 fn is_nightly() -> bool {
517 // Whether this is a feature-staged build, i.e., on the beta or stable channel
518 let disable_unstable_features = option_env!("CFG_DISABLE_UNSTABLE_FEATURES").is_some();
519 // Whether we should enable unstable features for bootstrapping
520 let bootstrap = env::var("RUSTC_BOOTSTRAP").is_ok();
522 bootstrap || !disable_unstable_features
525 // Parses command line arguments into test options
526 pub fn parse_opts(args: &[String]) -> Option<OptRes> {
527 let mut allow_unstable = false;
528 let opts = optgroups();
529 let args = args.get(1..).unwrap_or(args);
530 let matches = match opts.parse(args) {
532 Err(f) => return Some(Err(f.to_string())),
535 if let Some(opt) = matches.opt_str("Z") {
538 "the option `Z` is only accepted on the nightly compiler".into(),
543 "unstable-options" => {
544 allow_unstable = true;
547 return Some(Err("Unrecognized option to `Z`".into()));
552 if matches.opt_present("h") {
553 usage(&args[0], &opts);
557 let filter = if !matches.free.is_empty() {
558 Some(matches.free[0].clone())
563 let exclude_should_panic = matches.opt_present("exclude-should-panic");
564 if !allow_unstable && exclude_should_panic {
566 "The \"exclude-should-panic\" flag is only accepted on the nightly compiler".into(),
570 let include_ignored = matches.opt_present("include-ignored");
571 if !allow_unstable && include_ignored {
573 "The \"include-ignored\" flag is only accepted on the nightly compiler".into(),
577 let run_ignored = match (include_ignored, matches.opt_present("ignored")) {
580 "the options --include-ignored and --ignored are mutually exclusive".into(),
583 (true, false) => RunIgnored::Yes,
584 (false, true) => RunIgnored::Only,
585 (false, false) => RunIgnored::No,
587 let quiet = matches.opt_present("quiet");
588 let exact = matches.opt_present("exact");
589 let list = matches.opt_present("list");
591 let logfile = matches.opt_str("logfile");
592 let logfile = logfile.map(|s| PathBuf::from(&s));
594 let bench_benchmarks = matches.opt_present("bench");
595 let run_tests = !bench_benchmarks || matches.opt_present("test");
597 let mut nocapture = matches.opt_present("nocapture");
599 nocapture = match env::var("RUST_TEST_NOCAPTURE") {
600 Ok(val) => &val != "0",
605 let test_threads = match matches.opt_str("test-threads") {
606 Some(n_str) => match n_str.parse::<usize>() {
607 Ok(0) => return Some(Err("argument for --test-threads must not be 0".to_string())),
610 return Some(Err(format!(
611 "argument for --test-threads must be a number > 0 \
620 let color = match matches.opt_str("color").as_ref().map(|s| &**s) {
621 Some("auto") | None => AutoColor,
622 Some("always") => AlwaysColor,
623 Some("never") => NeverColor,
626 return Some(Err(format!(
627 "argument for --color must be auto, always, or never (was \
634 let format = match matches.opt_str("format").as_ref().map(|s| &**s) {
635 None if quiet => OutputFormat::Terse,
636 Some("pretty") | None => OutputFormat::Pretty,
637 Some("terse") => OutputFormat::Terse,
641 "The \"json\" format is only accepted on the nightly compiler".into(),
648 return Some(Err(format!(
649 "argument for --format must be pretty, terse, or json (was \
656 let test_opts = TestOpts {
660 exclude_should_panic,
669 skip: matches.opt_strs("skip"),
670 options: Options::new(),
676 #[derive(Clone, PartialEq)]
677 pub struct BenchSamples {
678 ns_iter_summ: stats::Summary,
682 #[derive(Clone, PartialEq)]
683 pub enum TestResult {
689 TrBench(BenchSamples),
692 unsafe impl Send for TestResult {}
694 enum OutputLocation<T> {
695 Pretty(Box<term::StdoutTerminal>),
699 impl<T: Write> Write for OutputLocation<T> {
700 fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
702 Pretty(ref mut term) => term.write(buf),
703 Raw(ref mut stdout) => stdout.write(buf),
707 fn flush(&mut self) -> io::Result<()> {
709 Pretty(ref mut term) => term.flush(),
710 Raw(ref mut stdout) => stdout.flush(),
715 struct ConsoleTestState {
716 log_out: Option<File>,
725 failures: Vec<(TestDesc, Vec<u8>)>,
726 not_failures: Vec<(TestDesc, Vec<u8>)>,
730 impl ConsoleTestState {
731 pub fn new(opts: &TestOpts) -> io::Result<ConsoleTestState> {
732 let log_out = match opts.logfile {
733 Some(ref path) => Some(File::create(path)?),
737 Ok(ConsoleTestState {
746 metrics: MetricMap::new(),
747 failures: Vec::new(),
748 not_failures: Vec::new(),
749 options: opts.options,
753 pub fn write_log<S: AsRef<str>>(&mut self, msg: S) -> io::Result<()> {
754 let msg = msg.as_ref();
757 Some(ref mut o) => o.write_all(msg.as_bytes()),
761 pub fn write_log_result(&mut self, test: &TestDesc, result: &TestResult) -> io::Result<()> {
762 self.write_log(format!(
765 TrOk => "ok".to_owned(),
766 TrFailed => "failed".to_owned(),
767 TrFailedMsg(ref msg) => format!("failed: {}", msg),
768 TrIgnored => "ignored".to_owned(),
769 TrAllowedFail => "failed (allowed)".to_owned(),
770 TrBench(ref bs) => fmt_bench_samples(bs),
776 fn current_test_count(&self) -> usize {
777 self.passed + self.failed + self.ignored + self.measured + self.allowed_fail
781 // Format a number with thousands separators
782 fn fmt_thousands_sep(mut n: usize, sep: char) -> String {
784 let mut output = String::new();
785 let mut trailing = false;
786 for &pow in &[9, 6, 3, 0] {
787 let base = 10_usize.pow(pow);
788 if pow == 0 || trailing || n / base != 0 {
790 output.write_fmt(format_args!("{}", n / base)).unwrap();
792 output.write_fmt(format_args!("{:03}", n / base)).unwrap();
805 pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
807 let mut output = String::new();
809 let median = bs.ns_iter_summ.median as usize;
810 let deviation = (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize;
813 .write_fmt(format_args!(
814 "{:>11} ns/iter (+/- {})",
815 fmt_thousands_sep(median, ','),
816 fmt_thousands_sep(deviation, ',')
821 .write_fmt(format_args!(" = {} MB/s", bs.mb_s))
827 // List the tests to console, and optionally to logfile. Filters are honored.
828 pub fn list_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<()> {
829 let mut output = match term::stdout() {
830 None => Raw(io::stdout()),
831 Some(t) => Pretty(t),
834 let quiet = opts.format == OutputFormat::Terse;
835 let mut st = ConsoleTestState::new(opts)?;
840 for test in filter_tests(&opts, tests) {
841 use crate::TestFn::*;
844 desc: TestDesc { name, .. },
848 let fntype = match testfn {
849 StaticTestFn(..) | DynTestFn(..) => {
853 StaticBenchFn(..) | DynBenchFn(..) => {
859 writeln!(output, "{}: {}", name, fntype)?;
860 st.write_log(format!("{} {}\n", fntype, name))?;
863 fn plural(count: u32, s: &str) -> String {
865 1 => format!("{} {}", 1, s),
866 n => format!("{} {}s", n, s),
871 if ntest != 0 || nbench != 0 {
872 writeln!(output, "")?;
878 plural(ntest, "test"),
879 plural(nbench, "benchmark")
886 // A simple console test runner
887 pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<bool> {
890 st: &mut ConsoleTestState,
891 out: &mut dyn OutputFormatter,
892 ) -> io::Result<()> {
893 match (*event).clone() {
894 TeFiltered(ref filtered_tests) => {
895 st.total = filtered_tests.len();
896 out.write_run_start(filtered_tests.len())
898 TeFilteredOut(filtered_out) => Ok(st.filtered_out = filtered_out),
899 TeWait(ref test) => out.write_test_start(test),
900 TeTimeout(ref test) => out.write_timeout(test),
901 TeResult(test, result, stdout) => {
902 st.write_log_result(&test, &result)?;
903 out.write_result(&test, &result, &*stdout)?;
907 st.not_failures.push((test, stdout));
909 TrIgnored => st.ignored += 1,
910 TrAllowedFail => st.allowed_fail += 1,
912 st.metrics.insert_metric(
913 test.name.as_slice(),
914 bs.ns_iter_summ.median,
915 bs.ns_iter_summ.max - bs.ns_iter_summ.min,
921 st.failures.push((test, stdout));
923 TrFailedMsg(msg) => {
925 let mut stdout = stdout;
926 stdout.extend_from_slice(format!("note: {}", msg).as_bytes());
927 st.failures.push((test, stdout));
935 let output = match term::stdout() {
936 None => Raw(io::stdout()),
937 Some(t) => Pretty(t),
940 let max_name_len = tests
942 .max_by_key(|t| len_if_padded(*t))
943 .map(|t| t.desc.name.as_slice().len())
946 let is_multithreaded = opts.test_threads.unwrap_or_else(get_concurrency) > 1;
948 let mut out: Box<dyn OutputFormatter> = match opts.format {
949 OutputFormat::Pretty => Box::new(PrettyFormatter::new(
955 OutputFormat::Terse => Box::new(TerseFormatter::new(
961 OutputFormat::Json => Box::new(JsonFormatter::new(output)),
963 let mut st = ConsoleTestState::new(opts)?;
964 fn len_if_padded(t: &TestDescAndFn) -> usize {
965 match t.testfn.padding() {
967 PadOnRight => t.desc.name.as_slice().len(),
971 run_tests(opts, tests, |x| callback(&x, &mut st, &mut *out))?;
973 assert!(st.current_test_count() == st.total);
975 return out.write_run_finish(&st);
979 fn should_sort_failures_before_printing_them() {
980 let test_a = TestDesc {
981 name: StaticTestName("a"),
983 should_panic: ShouldPanic::No,
987 let test_b = TestDesc {
988 name: StaticTestName("b"),
990 should_panic: ShouldPanic::No,
994 let mut out = PrettyFormatter::new(Raw(Vec::new()), false, 10, false);
996 let st = ConsoleTestState {
1005 metrics: MetricMap::new(),
1006 failures: vec![(test_b, Vec::new()), (test_a, Vec::new())],
1007 options: Options::new(),
1008 not_failures: Vec::new(),
1011 out.write_failures(&st).unwrap();
1012 let s = match out.output_location() {
1013 &Raw(ref m) => String::from_utf8_lossy(&m[..]),
1014 &Pretty(_) => unreachable!(),
1017 let apos = s.find("a").unwrap();
1018 let bpos = s.find("b").unwrap();
1019 assert!(apos < bpos);
1022 fn use_color(opts: &TestOpts) -> bool {
1024 AutoColor => !opts.nocapture && stdout_isatty(),
1025 AlwaysColor => true,
1026 NeverColor => false,
1031 target_os = "cloudabi",
1032 target_os = "redox",
1033 all(target_arch = "wasm32", not(target_os = "emscripten")),
1034 all(target_vendor = "fortanix", target_env = "sgx")
1036 fn stdout_isatty() -> bool {
1037 // FIXME: Implement isatty on Redox and SGX
1041 fn stdout_isatty() -> bool {
1042 unsafe { libc::isatty(libc::STDOUT_FILENO) != 0 }
1045 fn stdout_isatty() -> bool {
1048 type HANDLE = *mut u8;
1049 type LPDWORD = *mut u32;
1050 const STD_OUTPUT_HANDLE: DWORD = -11i32 as DWORD;
1052 fn GetStdHandle(which: DWORD) -> HANDLE;
1053 fn GetConsoleMode(hConsoleHandle: HANDLE, lpMode: LPDWORD) -> BOOL;
1056 let handle = GetStdHandle(STD_OUTPUT_HANDLE);
1058 GetConsoleMode(handle, &mut out) != 0
1063 pub enum TestEvent {
1064 TeFiltered(Vec<TestDesc>),
1066 TeResult(TestDesc, TestResult, Vec<u8>),
1067 TeTimeout(TestDesc),
1068 TeFilteredOut(usize),
1071 pub type MonitorMsg = (TestDesc, TestResult, Vec<u8>);
1073 struct Sink(Arc<Mutex<Vec<u8>>>);
1074 impl Write for Sink {
1075 fn write(&mut self, data: &[u8]) -> io::Result<usize> {
1076 Write::write(&mut *self.0.lock().unwrap(), data)
1078 fn flush(&mut self) -> io::Result<()> {
1083 pub fn run_tests<F>(opts: &TestOpts, tests: Vec<TestDescAndFn>, mut callback: F) -> io::Result<()>
1085 F: FnMut(TestEvent) -> io::Result<()>,
1087 use std::collections::{self, HashMap};
1088 use std::hash::BuildHasherDefault;
1089 use std::sync::mpsc::RecvTimeoutError;
1090 // Use a deterministic hasher
1092 HashMap<TestDesc, Instant, BuildHasherDefault<collections::hash_map::DefaultHasher>>;
1094 let tests_len = tests.len();
1096 let mut filtered_tests = filter_tests(opts, tests);
1097 if !opts.bench_benchmarks {
1098 filtered_tests = convert_benchmarks_to_tests(filtered_tests);
1101 let filtered_tests = {
1102 let mut filtered_tests = filtered_tests;
1103 for test in filtered_tests.iter_mut() {
1104 test.desc.name = test.desc.name.with_padding(test.testfn.padding());
1110 let filtered_out = tests_len - filtered_tests.len();
1111 callback(TeFilteredOut(filtered_out))?;
1113 let filtered_descs = filtered_tests.iter().map(|t| t.desc.clone()).collect();
1115 callback(TeFiltered(filtered_descs))?;
1117 let (filtered_tests, filtered_benchs): (Vec<_>, _) =
1118 filtered_tests.into_iter().partition(|e| match e.testfn {
1119 StaticTestFn(_) | DynTestFn(_) => true,
1123 let concurrency = opts.test_threads.unwrap_or_else(get_concurrency);
1125 let mut remaining = filtered_tests;
1126 remaining.reverse();
1127 let mut pending = 0;
1129 let (tx, rx) = channel::<MonitorMsg>();
1131 let mut running_tests: TestMap = HashMap::default();
1133 fn get_timed_out_tests(running_tests: &mut TestMap) -> Vec<TestDesc> {
1134 let now = Instant::now();
1135 let timed_out = running_tests
1137 .filter_map(|(desc, timeout)| {
1138 if &now >= timeout {
1145 for test in &timed_out {
1146 running_tests.remove(test);
1151 fn calc_timeout(running_tests: &TestMap) -> Option<Duration> {
1152 running_tests.values().min().map(|next_timeout| {
1153 let now = Instant::now();
1154 if *next_timeout >= now {
1162 if concurrency == 1 {
1163 while !remaining.is_empty() {
1164 let test = remaining.pop().unwrap();
1165 callback(TeWait(test.desc.clone()))?;
1166 run_test(opts, !opts.run_tests, test, tx.clone(), Concurrent::No);
1167 let (test, result, stdout) = rx.recv().unwrap();
1168 callback(TeResult(test, result, stdout))?;
1171 while pending > 0 || !remaining.is_empty() {
1172 while pending < concurrency && !remaining.is_empty() {
1173 let test = remaining.pop().unwrap();
1174 let timeout = Instant::now() + Duration::from_secs(TEST_WARN_TIMEOUT_S);
1175 running_tests.insert(test.desc.clone(), timeout);
1176 callback(TeWait(test.desc.clone()))?; //here no pad
1177 run_test(opts, !opts.run_tests, test, tx.clone(), Concurrent::Yes);
1183 if let Some(timeout) = calc_timeout(&running_tests) {
1184 res = rx.recv_timeout(timeout);
1185 for test in get_timed_out_tests(&mut running_tests) {
1186 callback(TeTimeout(test))?;
1188 if res != Err(RecvTimeoutError::Timeout) {
1192 res = rx.recv().map_err(|_| RecvTimeoutError::Disconnected);
1197 let (desc, result, stdout) = res.unwrap();
1198 running_tests.remove(&desc);
1200 callback(TeResult(desc, result, stdout))?;
1205 if opts.bench_benchmarks {
1206 // All benchmarks run at the end, in serial.
1207 for b in filtered_benchs {
1208 callback(TeWait(b.desc.clone()))?;
1209 run_test(opts, false, b, tx.clone(), Concurrent::No);
1210 let (test, result, stdout) = rx.recv().unwrap();
1211 callback(TeResult(test, result, stdout))?;
1217 #[allow(deprecated)]
1218 fn get_concurrency() -> usize {
1219 return match env::var("RUST_TEST_THREADS") {
1221 let opt_n: Option<usize> = s.parse().ok();
1223 Some(n) if n > 0 => n,
1225 "RUST_TEST_THREADS is `{}`, should be a positive integer.",
1230 Err(..) => num_cpus(),
1234 #[allow(nonstandard_style)]
1235 fn num_cpus() -> usize {
1237 struct SYSTEM_INFO {
1238 wProcessorArchitecture: u16,
1241 lpMinimumApplicationAddress: *mut u8,
1242 lpMaximumApplicationAddress: *mut u8,
1243 dwActiveProcessorMask: *mut u8,
1244 dwNumberOfProcessors: u32,
1245 dwProcessorType: u32,
1246 dwAllocationGranularity: u32,
1247 wProcessorLevel: u16,
1248 wProcessorRevision: u16,
1251 fn GetSystemInfo(info: *mut SYSTEM_INFO) -> i32;
1254 let mut sysinfo = std::mem::zeroed();
1255 GetSystemInfo(&mut sysinfo);
1256 sysinfo.dwNumberOfProcessors as usize
1260 #[cfg(target_os = "redox")]
1261 fn num_cpus() -> usize {
1262 // FIXME: Implement num_cpus on Redox
1267 all(target_arch = "wasm32", not(target_os = "emscripten")),
1268 all(target_vendor = "fortanix", target_env = "sgx")
1270 fn num_cpus() -> usize {
1275 target_os = "android",
1276 target_os = "cloudabi",
1277 target_os = "emscripten",
1278 target_os = "fuchsia",
1280 target_os = "linux",
1281 target_os = "macos",
1282 target_os = "solaris"
1284 fn num_cpus() -> usize {
1285 unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as usize }
1289 target_os = "freebsd",
1290 target_os = "dragonfly",
1291 target_os = "netbsd"
1293 fn num_cpus() -> usize {
1296 let mut cpus: libc::c_uint = 0;
1297 let mut cpus_size = std::mem::size_of_val(&cpus);
1300 cpus = libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as libc::c_uint;
1303 let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
1308 &mut cpus as *mut _ as *mut _,
1309 &mut cpus_size as *mut _ as *mut _,
1321 #[cfg(target_os = "openbsd")]
1322 fn num_cpus() -> usize {
1325 let mut cpus: libc::c_uint = 0;
1326 let mut cpus_size = std::mem::size_of_val(&cpus);
1327 let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
1333 &mut cpus as *mut _ as *mut _,
1334 &mut cpus_size as *mut _ as *mut _,
1345 #[cfg(target_os = "haiku")]
1346 fn num_cpus() -> usize {
1351 #[cfg(target_os = "l4re")]
1352 fn num_cpus() -> usize {
1358 pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
1359 let mut filtered = tests;
1360 let matches_filter = |test: &TestDescAndFn, filter: &str| {
1361 let test_name = test.desc.name.as_slice();
1363 match opts.filter_exact {
1364 true => test_name == filter,
1365 false => test_name.contains(filter),
1369 // Remove tests that don't match the test filter
1370 if let Some(ref filter) = opts.filter {
1371 filtered.retain(|test| matches_filter(test, filter));
1374 // Skip tests that match any of the skip filters
1375 filtered.retain(|test| !opts.skip.iter().any(|sf| matches_filter(test, sf)));
1377 // Excludes #[should_panic] tests
1378 if opts.exclude_should_panic {
1379 filtered.retain(|test| test.desc.should_panic == ShouldPanic::No);
1382 // maybe unignore tests
1383 match opts.run_ignored {
1384 RunIgnored::Yes => {
1387 .for_each(|test| test.desc.ignore = false);
1389 RunIgnored::Only => {
1390 filtered.retain(|test| test.desc.ignore);
1393 .for_each(|test| test.desc.ignore = false);
1395 RunIgnored::No => {}
1398 // Sort the tests alphabetically
1399 filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));
1404 pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
1405 // convert benchmarks to tests, if we're not benchmarking them
1409 let testfn = match x.testfn {
1410 DynBenchFn(bench) => DynTestFn(Box::new(move || {
1411 bench::run_once(|b| __rust_begin_short_backtrace(|| bench.run(b)))
1413 StaticBenchFn(benchfn) => DynTestFn(Box::new(move || {
1414 bench::run_once(|b| __rust_begin_short_backtrace(|| benchfn(b)))
1429 test: TestDescAndFn,
1430 monitor_ch: Sender<MonitorMsg>,
1431 concurrency: Concurrent,
1433 let TestDescAndFn { desc, testfn } = test;
1435 let ignore_because_panic_abort = cfg!(target_arch = "wasm32")
1436 && !cfg!(target_os = "emscripten")
1437 && desc.should_panic != ShouldPanic::No;
1439 if force_ignore || desc.ignore || ignore_because_panic_abort {
1440 monitor_ch.send((desc, TrIgnored, Vec::new())).unwrap();
1446 monitor_ch: Sender<MonitorMsg>,
1448 testfn: Box<dyn FnOnce() + Send>,
1449 concurrency: Concurrent,
1451 // Buffer for capturing standard I/O
1452 let data = Arc::new(Mutex::new(Vec::new()));
1453 let data2 = data.clone();
1455 let name = desc.name.clone();
1456 let runtest = move || {
1457 let oldio = if !nocapture {
1459 io::set_print(Some(Box::new(Sink(data2.clone())))),
1460 io::set_panic(Some(Box::new(Sink(data2)))),
1466 let result = catch_unwind(AssertUnwindSafe(testfn));
1468 if let Some((printio, panicio)) = oldio {
1469 io::set_print(printio);
1470 io::set_panic(panicio);
1473 let test_result = calc_result(&desc, result);
1474 let stdout = data.lock().unwrap().to_vec();
1476 .send((desc.clone(), test_result, stdout))
1480 // If the platform is single-threaded we're just going to run
1481 // the test synchronously, regardless of the concurrency
1483 let supports_threads = !cfg!(target_os = "emscripten") && !cfg!(target_arch = "wasm32");
1484 if concurrency == Concurrent::Yes && supports_threads {
1485 let cfg = thread::Builder::new().name(name.as_slice().to_owned());
1486 cfg.spawn(runtest).unwrap();
// Fragment of `run_test`'s dispatch over the TestFn variants (the enclosing
// `match` line is not visible). Bench variants go through `bench::benchmark`;
// test variants go through `run_test_inner` wrapped in the short-backtrace
// frame.
1493 DynBenchFn(bencher) => {
1494 crate::bench::benchmark(desc, monitor_ch, opts.nocapture, |harness| {
1495 bencher.run(harness)
1498 StaticBenchFn(benchfn) => {
1499 crate::bench::benchmark(desc, monitor_ch, opts.nocapture, |harness| {
// `benchfn` is a fn pointer; the clone is cheap (Copy-like) — presumably kept
// for uniformity with the dynamic case. TODO confirm against full source.
1500 (benchfn.clone())(harness)
1504 let cb = move || __rust_begin_short_backtrace(f);
1505 run_test_inner(desc, monitor_ch, opts.nocapture, Box::new(cb), concurrency)
1507 StaticTestFn(f) => run_test_inner(
1511 Box::new(move || __rust_begin_short_backtrace(f)),
1517 /// Fixed frame used to clean the backtrace with `RUST_BACKTRACE=1`.
// Body not visible in this chunk; presumably it just invokes `f()` so the
// named frame appears on the stack for backtrace trimming — TODO confirm.
1519 fn __rust_begin_short_backtrace<F: FnOnce()>(f: F) {
// Translate a caught panic result plus the test's should-panic expectation
// into a TestResult. Several match arms / closing lines are missing from this
// sampled listing.
1523 fn calc_result(desc: &TestDesc, task_result: Result<(), Box<dyn Any + Send>>) -> TestResult {
1524 match (&desc.should_panic, task_result) {
// Success: either the test returned cleanly and no panic was expected, or it
// panicked and a panic (with no required message) was expected.
1525 (&ShouldPanic::No, Ok(())) | (&ShouldPanic::Yes, Err(_)) => TrOk,
// Expected panic with a specific message: inspect the panic payload, which
// may be a String or a &'static str, and check it contains `msg`.
1526 (&ShouldPanic::YesWithMessage(msg), Err(ref err)) => {
1528 .downcast_ref::<String>()
1530 .or_else(|| err.downcast_ref::<&'static str>().map(|e| *e))
1531 .map(|e| e.contains(msg))
// Message mismatch path (surrounding control flow not fully visible):
// allow_fail downgrades the failure, otherwise report the expected string.
1536 if desc.allow_fail {
1539 TrFailedMsg(format!("Panic did not include expected string '{}'", msg))
// Any other mismatch (e.g. unexpected panic or missing expected panic) is
// tolerated only when the test opted into allow_fail.
1543 _ if desc.allow_fail => TrAllowedFail,
// Named metrics keyed by string; BTreeMap keeps `fmt_metrics` output in a
// stable, sorted order. (The `impl MetricMap` header line is not visible in
// this sampled listing.)
1548 #[derive(Clone, PartialEq)]
1549 pub struct MetricMap(BTreeMap<String, Metric>);
// Construct an empty metric map.
1552 pub fn new() -> MetricMap {
1553 MetricMap(BTreeMap::new())
1556 /// Insert a named `value` (+/- `noise`) metric into the map. The value
1557 /// must be non-negative. The `noise` indicates the uncertainty of the
1558 /// metric, which doubles as the "noise range" of acceptable
1559 /// pairwise-regressions on this named value, when comparing from one
1560 /// metric to the next using `compare_to_old`.
1562 /// If `noise` is positive, then it means this metric is of a value
1563 /// you want to see grow smaller, so a change larger than `noise` in the
1564 /// positive direction represents a regression.
1566 /// If `noise` is negative, then it means this metric is of a value
1567 /// you want to see grow larger, so a change larger than `noise` in the
1568 /// negative direction represents a regression.
1569 pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
1570 let m = Metric { value, noise };
// Inserting an existing name overwrites the previous metric (BTreeMap insert).
1571 self.0.insert(name.to_owned(), m);
// Render all metrics as "name: value (+/- noise)" entries (joining of the
// collected Vec happens on a line not visible here).
1574 pub fn fmt_metrics(&self) -> String {
1578 .map(|(k, v)| format!("{}: {} (+/- {})", *k, v.value, v.noise))
1579 .collect::<Vec<_>>();
1586 pub use std::hint::black_box;
// Fragment of `impl Bencher` (the impl header is not visible here).
1589 /// Callback for benchmark functions to run in their body.
// Time the closure: in Single mode run it exactly once; otherwise hand it to
// the adaptive sampling loop `iter` and record the statistical summary.
1590 pub fn iter<T, F>(&mut self, mut inner: F)
1594 if self.mode == BenchMode::Single {
1595 ns_iter_inner(&mut inner, 1);
1599 self.summary = Some(iter(&mut inner));
// Run a benchmark function against this Bencher and return the summary it
// produced (None if the benchmark never called `iter`).
1602 pub fn bench<F>(&mut self, mut f: F) -> Option<stats::Summary>
1604 F: FnMut(&mut Bencher),
1607 return self.summary;
// Convert a Duration to whole nanoseconds. Note: can overflow u64 for
// durations beyond ~584 years — irrelevant for benchmark timings.
1611 fn ns_from_dur(dur: Duration) -> u64 {
1612 dur.as_secs() * 1_000_000_000 + (dur.subsec_nanos() as u64)
// Run `inner` `k` times (the loop itself falls in a gap of this listing) and
// return the total elapsed wall-clock time in nanoseconds.
1615 fn ns_iter_inner<T, F>(inner: &mut F, k: u64) -> u64
1619 let start = Instant::now();
1623 return ns_from_dur(start.elapsed());
// Adaptive benchmark driver: estimate a per-iteration cost, then repeatedly
// take 50-sample batches (at n and 5n iterations each), winsorizing outliers,
// until the measurements converge or a time budget is exhausted. Returns the
// statistical summary of ns-per-iteration. Several loop/brace lines are
// missing from this sampled listing.
1626 pub fn iter<T, F>(inner: &mut F) -> stats::Summary
1630 // Initial bench run to get ballpark figure.
1631 let ns_single = ns_iter_inner(inner, 1);
1633 // Try to estimate iter count for 1ms falling back to 1m
1634 // iterations if first run took < 1ns.
1635 let ns_target_total = 1_000_000; // 1ms
// max(1, ..) guards against division by zero when the single run measured 0ns.
1636 let mut n = ns_target_total / cmp::max(1, ns_single);
1638 // if the first run took more than 1ms we don't want to just
1639 // be left doing 0 iterations on every loop. The unfortunate
1640 // side effect of not being able to do as many runs is
1641 // automatically handled by the statistical analysis below
1642 // (i.e., larger error bars).
1645 let mut total_run = Duration::new(0, 0);
// Fixed sample buffer, reused for both the n-batch and the 5n-batch.
1646 let samples: &mut [f64] = &mut [0.0_f64; 50];
1648 let loop_start = Instant::now();
// First batch: 50 samples of average ns/iteration at n iterations each.
1650 for p in &mut *samples {
1651 *p = ns_iter_inner(inner, n) as f64 / n as f64;
// Clamp the extreme 5% at each tail to reduce outlier influence.
1654 stats::winsorize(samples, 5.0);
1655 let summ = stats::Summary::new(samples);
// Second batch at 5x the iteration count, used as a convergence cross-check.
1657 for p in &mut *samples {
1658 let ns = ns_iter_inner(inner, 5 * n);
1659 *p = ns as f64 / (5 * n) as f64;
1662 stats::winsorize(samples, 5.0);
1663 let summ5 = stats::Summary::new(samples);
1665 let loop_run = loop_start.elapsed();
1667 // If we've run for 100ms and seem to have converged to a
// Converged: low dispersion and the two batch medians agree within noise.
1669 if loop_run > Duration::from_millis(100)
1670 && summ.median_abs_dev_pct < 1.0
1671 && summ.median - summ5.median < summ5.median_abs_dev
1676 total_run = total_run + loop_run;
1677 // Longest we ever run for is 3s.
1678 if total_run > Duration::from_secs(3) {
1682 // If we overflow here just return the results so far. We check a
1683 // multiplier of 10 because we're about to multiply by 2 and the
1684 // next iteration of the loop will also multiply by 5 (to calculate
1685 // the summ5 result)
1686 n = match n.checked_mul(10) {
1696 use super::{BenchMode, BenchSamples, Bencher, MonitorMsg, Sender, Sink, TestDesc, TestResult};
1700 use std::panic::{catch_unwind, AssertUnwindSafe};
1701 use std::sync::{Arc, Mutex};
// Run one benchmark function to completion: capture its I/O (unless
// `nocapture`), catch panics, convert the timing summary into a TrBench
// result, and report (desc, result, captured-output) over `monitor_ch`.
1703 pub fn benchmark<F>(desc: TestDesc, monitor_ch: Sender<MonitorMsg>, nocapture: bool, f: F)
1705 F: FnMut(&mut Bencher),
1707 let mut bs = Bencher {
1708 mode: BenchMode::Auto,
// Shared buffer that the Sink handlers below write captured output into.
1713 let data = Arc::new(Mutex::new(Vec::new()));
1714 let data2 = data.clone();
// Swap in capturing print/panic handlers, saving the old ones for restore.
1716 let oldio = if !nocapture {
1718 io::set_print(Some(Box::new(Sink(data2.clone())))),
1719 io::set_panic(Some(Box::new(Sink(data2)))),
// A panicking benchmark becomes Err(_) -> TrFailed below.
1725 let result = catch_unwind(AssertUnwindSafe(|| bs.bench(f)));
1727 if let Some((printio, panicio)) = oldio {
1728 io::set_print(printio);
1729 io::set_panic(panicio);
1732 let test_result = match result {
1734 Ok(Some(ns_iter_summ)) => {
// Guard against a 0ns median so the MB/s division below can't divide by zero.
1735 let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
// Throughput: bytes processed per iteration scaled to MB/s (ns -> s factor).
1736 let mb_s = bs.bytes * 1000 / ns_iter;
1738 let bs = BenchSamples {
1740 mb_s: mb_s as usize,
1742 TestResult::TrBench(bs)
1745 // iter not called, so no data.
1746 // FIXME: error in this case?
// Report a degenerate single-zero-sample summary rather than failing.
1747 let samples: &mut [f64] = &mut [0.0_f64; 1];
1748 let bs = BenchSamples {
1749 ns_iter_summ: stats::Summary::new(samples),
1752 TestResult::TrBench(bs)
1754 Err(_) => TestResult::TrFailed,
1757 let stdout = data.lock().unwrap().to_vec();
1758 monitor_ch.send((desc, test_result, stdout)).unwrap();
// Run a benchmark body exactly once (Single mode) without sampling or
// reporting — used for `--bench` in test mode. The rest of the body falls in
// a gap of this listing.
1761 pub fn run_once<F>(f: F)
1763 F: FnMut(&mut Bencher),
1765 let mut bs = Bencher {
1766 mode: BenchMode::Single,
1778 filter_tests, parse_opts, run_test, DynTestFn, DynTestName, MetricMap, RunIgnored,
1779 ShouldPanic, StaticTestName, TestDesc, TestDescAndFn, TestOpts, TrFailed, TrFailedMsg,
1783 use crate::Concurrent;
1784 use std::sync::mpsc::channel;
// Test fixture: two no-op tests named "1" and "2"; presumably one is marked
// `ignore: true` on a line not visible here — used by the filter tests below.
1786 fn one_ignored_one_unignored_test() -> Vec<TestDescAndFn> {
1790 name: StaticTestName("1"),
1792 should_panic: ShouldPanic::No,
1795 testfn: DynTestFn(Box::new(move || {})),
1799 name: StaticTestName("2"),
1801 should_panic: ShouldPanic::No,
1804 testfn: DynTestFn(Box::new(move || {})),
// Test: running an ignored test (without force_ignore) must not yield TrOk.
1810 pub fn do_not_run_ignored_tests() {
1814 let desc = TestDescAndFn {
1816 name: StaticTestName("whatever"),
1818 should_panic: ShouldPanic::No,
1821 testfn: DynTestFn(Box::new(f)),
1823 let (tx, rx) = channel();
1824 run_test(&TestOpts::new(), false, desc, tx, Concurrent::No);
1825 let (_, res, _) = rx.recv().unwrap();
1826 assert!(res != TrOk);
// Test: an ignored test reports TrIgnored through the monitor channel.
1830 pub fn ignored_tests_result_in_ignored() {
1832 let desc = TestDescAndFn {
1834 name: StaticTestName("whatever"),
1836 should_panic: ShouldPanic::No,
1839 testfn: DynTestFn(Box::new(f)),
1841 let (tx, rx) = channel();
1842 run_test(&TestOpts::new(), false, desc, tx, Concurrent::No);
1843 let (_, res, _) = rx.recv().unwrap();
1844 assert!(res == TrIgnored);
// Test: a panicking test marked ShouldPanic::Yes counts as TrOk.
1848 fn test_should_panic() {
1852 let desc = TestDescAndFn {
1854 name: StaticTestName("whatever"),
1856 should_panic: ShouldPanic::Yes,
1859 testfn: DynTestFn(Box::new(f)),
1861 let (tx, rx) = channel();
1862 run_test(&TestOpts::new(), false, desc, tx, Concurrent::No);
1863 let (_, res, _) = rx.recv().unwrap();
1864 assert!(res == TrOk);
// Test: should_panic with an expected substring that the panic message
// contains ("error message" in "an error message") yields TrOk.
1868 fn test_should_panic_good_message() {
1870 panic!("an error message");
1872 let desc = TestDescAndFn {
1874 name: StaticTestName("whatever"),
1876 should_panic: ShouldPanic::YesWithMessage("error message"),
1879 testfn: DynTestFn(Box::new(f)),
1881 let (tx, rx) = channel();
1882 run_test(&TestOpts::new(), false, desc, tx, Concurrent::No);
1883 let (_, res, _) = rx.recv().unwrap();
1884 assert!(res == TrOk);
// Test: should_panic with an expected substring the panic message does NOT
// contain yields the TrFailedMsg produced by `calc_result`.
1888 fn test_should_panic_bad_message() {
1890 panic!("an error message");
1892 let expected = "foobar";
// Must stay in sync with the literal in `calc_result`'s TrFailedMsg format.
1893 let failed_msg = "Panic did not include expected string";
1894 let desc = TestDescAndFn {
1896 name: StaticTestName("whatever"),
1898 should_panic: ShouldPanic::YesWithMessage(expected),
1901 testfn: DynTestFn(Box::new(f)),
1903 let (tx, rx) = channel();
1904 run_test(&TestOpts::new(), false, desc, tx, Concurrent::No);
1905 let (_, res, _) = rx.recv().unwrap();
1906 assert!(res == TrFailedMsg(format!("{} '{}'", failed_msg, expected)));
// Test: a test marked should_panic that returns normally is TrFailed.
1910 fn test_should_panic_but_succeeds() {
1912 let desc = TestDescAndFn {
1914 name: StaticTestName("whatever"),
1916 should_panic: ShouldPanic::Yes,
1919 testfn: DynTestFn(Box::new(f)),
1921 let (tx, rx) = channel();
1922 run_test(&TestOpts::new(), false, desc, tx, Concurrent::No);
1923 let (_, res, _) = rx.recv().unwrap();
1924 assert!(res == TrFailed);
// Test: `--ignored` on the CLI parses to RunIgnored::Only.
1928 fn parse_ignored_flag() {
1930 "progname".to_string(),
1931 "filter".to_string(),
1932 "--ignored".to_string(),
1934 let opts = parse_opts(&args).unwrap().unwrap();
1935 assert_eq!(opts.run_ignored, RunIgnored::Only);
// Test: `--include-ignored` (gated behind -Zunstable-options) parses to
// RunIgnored::Yes.
1939 fn parse_include_ignored_flag() {
1941 "progname".to_string(),
1942 "filter".to_string(),
1943 "-Zunstable-options".to_string(),
1944 "--include-ignored".to_string(),
1946 let opts = parse_opts(&args).unwrap().unwrap();
1947 assert_eq!(opts.run_ignored, RunIgnored::Yes);
// Test: with RunIgnored::Only, filtering keeps only the ignored test and
// clears its ignore flag so it will actually run.
1951 pub fn filter_for_ignored_option() {
1952 // When we run ignored tests the test filter should filter out all the
1953 // unignored tests and flip the ignore flag on the rest to false
1955 let mut opts = TestOpts::new();
1956 opts.run_tests = true;
1957 opts.run_ignored = RunIgnored::Only;
1959 let tests = one_ignored_one_unignored_test();
1960 let filtered = filter_tests(&opts, tests);
1962 assert_eq!(filtered.len(), 1);
// The fixture's ignored test is the one named "1".
1963 assert_eq!(filtered[0].desc.name.to_string(), "1");
1964 assert!(!filtered[0].desc.ignore);
// Test: with RunIgnored::Yes, nothing is filtered out and all ignore flags
// are cleared.
1968 pub fn run_include_ignored_option() {
1969 // When we "--include-ignored" tests, the ignore flag should be set to false on
1970 // all tests and no test filtered out
1972 let mut opts = TestOpts::new();
1973 opts.run_tests = true;
1974 opts.run_ignored = RunIgnored::Yes;
1976 let tests = one_ignored_one_unignored_test();
1977 let filtered = filter_tests(&opts, tests);
1979 assert_eq!(filtered.len(), 2);
1980 assert!(!filtered[0].desc.ignore);
1981 assert!(!filtered[1].desc.ignore);
// Test: `exclude_should_panic` removes should-panic tests from the run list,
// leaving only the two ShouldPanic::No fixture tests.
1985 pub fn exclude_should_panic_option() {
1986 let mut opts = TestOpts::new();
1987 opts.run_tests = true;
1988 opts.exclude_should_panic = true;
1990 let mut tests = one_ignored_one_unignored_test();
// Add a third, should-panic test that the filter must drop.
1991 tests.push(TestDescAndFn {
1993 name: StaticTestName("3"),
1995 should_panic: ShouldPanic::Yes,
1998 testfn: DynTestFn(Box::new(move || {})),
2001 let filtered = filter_tests(&opts, tests);
2003 assert_eq!(filtered.len(), 2);
2004 assert!(filtered.iter().all(|test| test.desc.should_panic == ShouldPanic::No));
// Test: contrast substring filtering (default) with exact-name filtering
// (presumably `filter_exact: true` on the lines not visible here — TODO
// confirm) over four hierarchically named tests.
2008 pub fn exact_filter_match() {
2009 fn tests() -> Vec<TestDescAndFn> {
2010 vec!["base", "base::test", "base::test1", "base::test2"]
2012 .map(|name| TestDescAndFn {
2014 name: StaticTestName(name),
2016 should_panic: ShouldPanic::No,
2019 testfn: DynTestFn(Box::new(move || {})),
// Substring mode: "base" and "bas" match all 4 names.
2024 let substr = filter_tests(
2026 filter: Some("base".into()),
2031 assert_eq!(substr.len(), 4);
2033 let substr = filter_tests(
2035 filter: Some("bas".into()),
2040 assert_eq!(substr.len(), 4);
// "::test" and "base::test" match the 3 namespaced names.
2042 let substr = filter_tests(
2044 filter: Some("::test".into()),
2049 assert_eq!(substr.len(), 3);
2051 let substr = filter_tests(
2053 filter: Some("base::test".into()),
2058 assert_eq!(substr.len(), 3);
// Exact mode: only a whole-name match survives.
2060 let exact = filter_tests(
2062 filter: Some("base".into()),
2068 assert_eq!(exact.len(), 1);
2070 let exact = filter_tests(
2072 filter: Some("bas".into()),
2078 assert_eq!(exact.len(), 0);
2080 let exact = filter_tests(
2082 filter: Some("::test".into()),
2088 assert_eq!(exact.len(), 0);
2090 let exact = filter_tests(
2092 filter: Some("base::test".into()),
2098 assert_eq!(exact.len(), 1);
// Test: filter_tests returns tests sorted by name — feed names in arbitrary
// order and compare against the lexicographically sorted expectation.
2102 pub fn sort_tests() {
2103 let mut opts = TestOpts::new();
2104 opts.run_tests = true;
// Input names, deliberately unsorted.
2107 "sha1::test".to_string(),
2108 "isize::test_to_str".to_string(),
2109 "isize::test_pow".to_string(),
2110 "test::do_not_run_ignored_tests".to_string(),
2111 "test::ignored_tests_result_in_ignored".to_string(),
2112 "test::first_free_arg_should_be_a_filter".to_string(),
2113 "test::parse_ignored_flag".to_string(),
2114 "test::parse_include_ignored_flag".to_string(),
2115 "test::filter_for_ignored_option".to_string(),
2116 "test::run_include_ignored_option".to_string(),
2117 "test::sort_tests".to_string(),
// Wrap each name in a runnable no-op TestDescAndFn.
2121 let mut tests = Vec::new();
2122 for name in &names {
2123 let test = TestDescAndFn {
2125 name: DynTestName((*name).clone()),
2127 should_panic: ShouldPanic::No,
2130 testfn: DynTestFn(Box::new(testfn)),
2136 let filtered = filter_tests(&opts, tests);
// Same names in sorted order.
2138 let expected = vec![
2139 "isize::test_pow".to_string(),
2140 "isize::test_to_str".to_string(),
2141 "sha1::test".to_string(),
2142 "test::do_not_run_ignored_tests".to_string(),
2143 "test::filter_for_ignored_option".to_string(),
2144 "test::first_free_arg_should_be_a_filter".to_string(),
2145 "test::ignored_tests_result_in_ignored".to_string(),
2146 "test::parse_ignored_flag".to_string(),
2147 "test::parse_include_ignored_flag".to_string(),
2148 "test::run_include_ignored_option".to_string(),
2149 "test::sort_tests".to_string(),
// Pairwise comparison of expected vs filtered order.
2152 for (a, b) in expected.iter().zip(filtered) {
2153 assert!(*a == b.desc.name.to_string());
// Test fixture for metric comparison: builds two MetricMaps covering the
// noise/improved/regressed cases (the assertions, if any, fall outside the
// visible lines of this listing).
2158 pub fn test_metricmap_compare() {
2159 let mut m1 = MetricMap::new();
2160 let mut m2 = MetricMap::new();
// Change within noise range: not a regression.
2161 m1.insert_metric("in-both-noise", 1000.0, 200.0);
2162 m2.insert_metric("in-both-noise", 1100.0, 200.0);
// Metrics present in only one of the two maps.
2164 m1.insert_metric("in-first-noise", 1000.0, 2.0);
2165 m2.insert_metric("in-second-noise", 1000.0, 2.0);
// Positive noise => smaller is better (see insert_metric docs).
2167 m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
2168 m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);
2170 m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
2171 m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);
// Negative noise => larger is better.
2173 m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
2174 m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);
2176 m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
2177 m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);
// Test: run_once with a body that never calls iter (body continues past the
// visible lines).
2181 pub fn test_bench_once_no_iter() {
2182 fn f(_: &mut Bencher) {}
// Test: run_once with a body that does call iter (body continues past the
// visible lines).
2187 pub fn test_bench_once_iter() {
2188 fn f(b: &mut Bencher) {
// Test: benchmark() on a body that never calls iter still reports a result
// over the channel (the degenerate-summary path in `benchmark`).
2195 pub fn test_bench_no_iter() {
2196 fn f(_: &mut Bencher) {}
2198 let (tx, rx) = channel();
2200 let desc = TestDesc {
2201 name: StaticTestName("f"),
2203 should_panic: ShouldPanic::No,
// nocapture = true: don't redirect stdio during this self-test.
2207 crate::bench::benchmark(desc, tx, true, f);
// Test: benchmark() on a body that calls iter reports a timed result over the
// channel (the receive/assert lines fall outside the visible listing).
2212 pub fn test_bench_iter() {
2213 fn f(b: &mut Bencher) {
2217 let (tx, rx) = channel();
2219 let desc = TestDesc {
2220 name: StaticTestName("f"),
2222 should_panic: ShouldPanic::No,
// nocapture = true: don't redirect stdio during this self-test.
2226 crate::bench::benchmark(desc, tx, true, f);