1 //! Support code for rustc's built in unit-test and micro-benchmarking
4 //! Almost all user code will only be interested in `Bencher` and
5 //! `black_box`. All other interactions (such as writing tests and
6 //! benchmarks themselves) should be done via the `#[test]` and
7 //! `#[bench]` attributes.
9 //! See the [Testing Chapter](../book/ch11-00-testing.html) of the book for more details.
11 // Currently, not much of this is meant for users. It is intended to
12 // support the simplest interface possible for representing and
13 // running tests while providing a base that other test frameworks may
16 // N.B., this is also specified in this crate's Cargo.toml, but libsyntax contains logic specific to
17 // this crate, which relies on this attribute (rather than the value of `--crate-name` passed by
18 // cargo) to detect this crate.
20 #![deny(rust_2018_idioms)]
21 #![crate_name = "test"]
22 #![unstable(feature = "test", issue = "27812")]
23 #![doc(html_root_url = "https://doc.rust-lang.org/nightly/", test(attr(deny(warnings))))]
26 #![cfg_attr(any(unix, target_os = "cloudabi"), feature(libc, rustc_private))]
28 #![feature(set_stdio)]
29 #![feature(panic_unwind)]
30 #![feature(staged_api)]
31 #![feature(termination_trait_lib)]
35 #[cfg(any(unix, target_os = "cloudabi"))]
39 // FIXME(#54291): rustc and/or LLVM don't yet support building with panic-unwind
40 // on aarch64-pc-windows-msvc, so we don't link libtest against
41 // libunwind (for the time being), even though it means that
42 // libtest won't be fully functional on this platform.
44 // See also: https://github.com/rust-lang/rust/issues/54190#issuecomment-422904437
45 #[cfg(not(all(windows, target_arch = "aarch64")))]
46 extern crate panic_unwind;
48 pub use self::ColorConfig::*;
49 use self::NamePadding::*;
50 use self::OutputLocation::*;
51 use self::TestEvent::*;
52 pub use self::TestFn::*;
53 pub use self::TestName::*;
54 pub use self::TestResult::*;
58 use std::boxed::FnBox;
60 use std::collections::BTreeMap;
65 use std::io::prelude::*;
66 use std::panic::{catch_unwind, AssertUnwindSafe};
67 use std::path::PathBuf;
69 use std::process::Termination;
70 use std::sync::mpsc::{channel, Sender};
71 use std::sync::{Arc, Mutex};
73 use std::time::{Duration, Instant};
75 const TEST_WARN_TIMEOUT_S: u64 = 60;
76 const QUIET_MODE_MAX_COLUMN: usize = 100; // insert a '\n' after 100 tests in quiet mode
78 // to be used by rustc to compile tests in libtest
81 assert_test_result, filter_tests, parse_opts, run_test, test_main, test_main_static,
82 Bencher, DynTestFn, DynTestName, Metric, MetricMap, Options, RunIgnored, ShouldPanic,
83 StaticBenchFn, StaticTestFn, StaticTestName, TestDesc, TestDescAndFn, TestName, TestOpts,
84 TestResult, TrFailed, TrFailedMsg, TrIgnored, TrOk,
91 use crate::formatters::{JsonFormatter, OutputFormatter, PrettyFormatter, TerseFormatter};
93 /// Whether to execute tests concurrently or not
94 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
100 // The name of a test. By convention this follows the rules for rust
101 // paths; i.e., it should be a series of identifiers separated by double
102 // colons. This way if some test runner wants to arrange the tests
103 // hierarchically it may.
105 #[derive(Clone, PartialEq, Eq, Hash, Debug)]
107 StaticTestName(&'static str),
109 AlignedTestName(Cow<'static, str>, NamePadding),
112 fn as_slice(&self) -> &str {
114 StaticTestName(s) => s,
115 DynTestName(ref s) => s,
116 AlignedTestName(ref s, _) => &*s,
120 fn padding(&self) -> NamePadding {
122 &AlignedTestName(_, p) => p,
127 fn with_padding(&self, padding: NamePadding) -> TestName {
128 let name = match self {
129 &TestName::StaticTestName(name) => Cow::Borrowed(name),
130 &TestName::DynTestName(ref name) => Cow::Owned(name.clone()),
131 &TestName::AlignedTestName(ref name, _) => name.clone(),
134 TestName::AlignedTestName(name, padding)
137 impl fmt::Display for TestName {
138 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
139 fmt::Display::fmt(self.as_slice(), f)
143 #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
144 pub enum NamePadding {
150 fn padded_name(&self, column_count: usize, align: NamePadding) -> String {
151 let mut name = String::from(self.name.as_slice());
152 let fill = column_count.saturating_sub(name.len());
153 let pad = " ".repeat(fill);
164 /// Represents a benchmark function.
165 pub trait TDynBenchFn: Send {
166 fn run(&self, harness: &mut Bencher);
169 // A function that runs a test. If the function returns successfully,
170 // the test succeeds; if the function panics then the test fails. We
171 // may need to come up with a more clever definition of test in order
172 // to support isolation of tests into threads.
175 StaticBenchFn(fn(&mut Bencher)),
176 DynTestFn(Box<dyn FnBox() + Send>),
177 DynBenchFn(Box<dyn TDynBenchFn + 'static>),
181 fn padding(&self) -> NamePadding {
183 StaticTestFn(..) => PadNone,
184 StaticBenchFn(..) => PadOnRight,
185 DynTestFn(..) => PadNone,
186 DynBenchFn(..) => PadOnRight,
191 impl fmt::Debug for TestFn {
192 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
193 f.write_str(match *self {
194 StaticTestFn(..) => "StaticTestFn(..)",
195 StaticBenchFn(..) => "StaticBenchFn(..)",
196 DynTestFn(..) => "DynTestFn(..)",
197 DynBenchFn(..) => "DynBenchFn(..)",
202 /// Manager of the benchmarking runs.
204 /// This is fed into functions marked with `#[bench]` to allow for
205 /// set-up & tear-down before running a piece of code repeatedly via a
210 summary: Option<stats::Summary>,
214 #[derive(Clone, PartialEq, Eq)]
220 #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
221 pub enum ShouldPanic {
224 YesWithMessage(&'static str),
227 // The definition of a single test. A test runner will run a list of
229 #[derive(Clone, Debug, PartialEq, Eq, Hash)]
230 pub struct TestDesc {
233 pub should_panic: ShouldPanic,
234 pub allow_fail: bool,
238 pub struct TestDescAndFn {
243 #[derive(Clone, PartialEq, Debug, Copy)]
250 pub fn new(value: f64, noise: f64) -> Metric {
251 Metric { value, noise }
255 /// In case we want to add other options as well, just add them in this struct.
256 #[derive(Copy, Clone, Debug)]
258 display_output: bool,
262 pub fn new() -> Options {
264 display_output: false,
268 pub fn display_output(mut self, display_output: bool) -> Options {
269 self.display_output = display_output;
274 // The default console test runner. It accepts the command line
275 // arguments and a vector of test_descs.
276 pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>, options: Options) {
277 let mut opts = match parse_opts(args) {
280 eprintln!("error: {}", msg);
286 opts.options = options;
288 if let Err(e) = list_tests_console(&opts, tests) {
289 eprintln!("error: io error when listing tests: {:?}", e);
293 match run_tests_console(&opts, tests) {
295 Ok(false) => process::exit(101),
297 eprintln!("error: io error when listing tests: {:?}", e);
304 // A variant optimized for invocation with a static test vector.
305 // This will panic (intentionally) when fed any dynamic tests, because
306 // it is copying the static values out into a dynamic vector and cannot
307 // copy dynamic values. It is doing this because from this point on
308 // a Vec<TestDescAndFn> is used in order to effect ownership-transfer
309 // semantics into parallel test runners, which in turn requires a Vec<>
310 // rather than a &[].
311 pub fn test_main_static(tests: &[&TestDescAndFn]) {
312 let args = env::args().collect::<Vec<_>>();
313 let owned_tests = tests
315 .map(|t| match t.testfn {
316 StaticTestFn(f) => TestDescAndFn {
317 testfn: StaticTestFn(f),
318 desc: t.desc.clone(),
320 StaticBenchFn(f) => TestDescAndFn {
321 testfn: StaticBenchFn(f),
322 desc: t.desc.clone(),
324 _ => panic!("non-static tests passed to test::test_main_static"),
327 test_main(&args, owned_tests, Options::new())
330 /// Invoked when unit tests terminate. Should panic if the unit
331 /// test is considered a failure. By default, invokes `report()`
332 /// and checks for a `0` result.
333 pub fn assert_test_result<T: Termination>(result: T) {
334 let code = result.report();
337 "the test returned a termination value with a non-zero status code ({}) \
338 which indicates a failure",
343 #[derive(Copy, Clone, Debug)]
344 pub enum ColorConfig {
350 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
351 pub enum OutputFormat {
357 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
358 pub enum RunIgnored {
365 pub struct TestOpts {
367 pub filter: Option<String>,
368 pub filter_exact: bool,
369 pub exclude_should_panic: bool,
370 pub run_ignored: RunIgnored,
372 pub bench_benchmarks: bool,
373 pub logfile: Option<PathBuf>,
375 pub color: ColorConfig,
376 pub format: OutputFormat,
377 pub test_threads: Option<usize>,
378 pub skip: Vec<String>,
379 pub options: Options,
384 fn new() -> TestOpts {
389 exclude_should_panic: false,
390 run_ignored: RunIgnored::No,
392 bench_benchmarks: false,
396 format: OutputFormat::Pretty,
399 options: Options::new(),
404 /// Result of parsing the options.
405 pub type OptRes = Result<TestOpts, String>;
407 fn optgroups() -> getopts::Options {
408 let mut opts = getopts::Options::new();
409 opts.optflag("", "include-ignored", "Run ignored and not ignored tests")
410 .optflag("", "ignored", "Run only ignored tests")
411 .optflag("", "exclude-should-panic", "Excludes tests marked as should_panic")
412 .optflag("", "test", "Run tests and not benchmarks")
413 .optflag("", "bench", "Run benchmarks instead of tests")
414 .optflag("", "list", "List all tests and benchmarks")
415 .optflag("h", "help", "Display this message (longer with --help)")
419 "Write logs to the specified file instead \
426 "don't capture stdout/stderr of each \
427 task, allow printing directly",
432 "Number of threads used for running tests \
439 "Skip tests whose names contain FILTER (this flag can \
440 be used multiple times)",
446 "Display one character per test instead of one line. \
447 Alias to --format=terse",
452 "Exactly match filters rather than by substring",
457 "Configure coloring of output:
458 auto = colorize if stdout is a tty and tests are run on serially (default);
459 always = always colorize output;
460 never = never colorize output;",
466 "Configure formatting of output:
467 pretty = Print verbose output;
468 terse = Display one character per test;
469 json = Output a json document",
475 "Enable nightly-only flags:
476 unstable-options = Allow use of experimental features",
482 fn usage(binary: &str, options: &getopts::Options) {
483 let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
487 The FILTER string is tested against the name of all tests, and only those
488 tests whose names contain the filter are run.
490 By default, all tests are run in parallel. This can be altered with the
491 --test-threads flag or the RUST_TEST_THREADS environment variable when running
494 All tests have their standard output and standard error captured by default.
495 This can be overridden with the --nocapture flag or setting RUST_TEST_NOCAPTURE
496 environment variable to a value other than "0". Logging is not captured by default.
500 #[test] - Indicates a function is a test to be run. This function
502 #[bench] - Indicates a function is a benchmark to be run. This
503 function takes one argument (test::Bencher).
504 #[should_panic] - This function (also labeled with #[test]) will only pass if
505 the code causes a panic (an assertion failure or panic!)
506 A message may be provided, which the failure string must
507 contain: #[should_panic(expected = "foo")].
508 #[ignore] - When applied to a function which is already attributed as a
509 test, then the test runner will ignore these tests during
510 normal test runs. Running with --ignored or --include-ignored will run
512 usage = options.usage(&message)
516 // FIXME: Copied from libsyntax until linkage errors are resolved. Issue #47566
517 fn is_nightly() -> bool {
518 // Whether this is a feature-staged build, i.e., on the beta or stable channel
519 let disable_unstable_features = option_env!("CFG_DISABLE_UNSTABLE_FEATURES").is_some();
520 // Whether we should enable unstable features for bootstrapping
521 let bootstrap = env::var("RUSTC_BOOTSTRAP").is_ok();
523 bootstrap || !disable_unstable_features
526 // Parses command line arguments into test options
527 pub fn parse_opts(args: &[String]) -> Option<OptRes> {
528 let mut allow_unstable = false;
529 let opts = optgroups();
530 let args = args.get(1..).unwrap_or(args);
531 let matches = match opts.parse(args) {
533 Err(f) => return Some(Err(f.to_string())),
536 if let Some(opt) = matches.opt_str("Z") {
539 "the option `Z` is only accepted on the nightly compiler".into(),
544 "unstable-options" => {
545 allow_unstable = true;
548 return Some(Err("Unrecognized option to `Z`".into()));
553 if matches.opt_present("h") {
554 usage(&args[0], &opts);
558 let filter = if !matches.free.is_empty() {
559 Some(matches.free[0].clone())
564 let exclude_should_panic = matches.opt_present("exclude-should-panic");
565 if !allow_unstable && exclude_should_panic {
567 "The \"exclude-should-panic\" flag is only accepted on the nightly compiler".into(),
571 let include_ignored = matches.opt_present("include-ignored");
572 if !allow_unstable && include_ignored {
574 "The \"include-ignored\" flag is only accepted on the nightly compiler".into(),
578 let run_ignored = match (include_ignored, matches.opt_present("ignored")) {
581 "the options --include-ignored and --ignored are mutually exclusive".into(),
584 (true, false) => RunIgnored::Yes,
585 (false, true) => RunIgnored::Only,
586 (false, false) => RunIgnored::No,
588 let quiet = matches.opt_present("quiet");
589 let exact = matches.opt_present("exact");
590 let list = matches.opt_present("list");
592 let logfile = matches.opt_str("logfile");
593 let logfile = logfile.map(|s| PathBuf::from(&s));
595 let bench_benchmarks = matches.opt_present("bench");
596 let run_tests = !bench_benchmarks || matches.opt_present("test");
598 let mut nocapture = matches.opt_present("nocapture");
600 nocapture = match env::var("RUST_TEST_NOCAPTURE") {
601 Ok(val) => &val != "0",
606 let test_threads = match matches.opt_str("test-threads") {
607 Some(n_str) => match n_str.parse::<usize>() {
608 Ok(0) => return Some(Err("argument for --test-threads must not be 0".to_string())),
611 return Some(Err(format!(
612 "argument for --test-threads must be a number > 0 \
621 let color = match matches.opt_str("color").as_ref().map(|s| &**s) {
622 Some("auto") | None => AutoColor,
623 Some("always") => AlwaysColor,
624 Some("never") => NeverColor,
627 return Some(Err(format!(
628 "argument for --color must be auto, always, or never (was \
635 let format = match matches.opt_str("format").as_ref().map(|s| &**s) {
636 None if quiet => OutputFormat::Terse,
637 Some("pretty") | None => OutputFormat::Pretty,
638 Some("terse") => OutputFormat::Terse,
642 "The \"json\" format is only accepted on the nightly compiler".into(),
649 return Some(Err(format!(
650 "argument for --format must be pretty, terse, or json (was \
657 let test_opts = TestOpts {
661 exclude_should_panic,
670 skip: matches.opt_strs("skip"),
671 options: Options::new(),
677 #[derive(Clone, PartialEq)]
678 pub struct BenchSamples {
679 ns_iter_summ: stats::Summary,
683 #[derive(Clone, PartialEq)]
684 pub enum TestResult {
690 TrBench(BenchSamples),
693 unsafe impl Send for TestResult {}
695 enum OutputLocation<T> {
696 Pretty(Box<term::StdoutTerminal>),
700 impl<T: Write> Write for OutputLocation<T> {
701 fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
703 Pretty(ref mut term) => term.write(buf),
704 Raw(ref mut stdout) => stdout.write(buf),
708 fn flush(&mut self) -> io::Result<()> {
710 Pretty(ref mut term) => term.flush(),
711 Raw(ref mut stdout) => stdout.flush(),
716 struct ConsoleTestState {
717 log_out: Option<File>,
726 failures: Vec<(TestDesc, Vec<u8>)>,
727 not_failures: Vec<(TestDesc, Vec<u8>)>,
731 impl ConsoleTestState {
732 pub fn new(opts: &TestOpts) -> io::Result<ConsoleTestState> {
733 let log_out = match opts.logfile {
734 Some(ref path) => Some(File::create(path)?),
738 Ok(ConsoleTestState {
747 metrics: MetricMap::new(),
748 failures: Vec::new(),
749 not_failures: Vec::new(),
750 options: opts.options,
754 pub fn write_log<S: AsRef<str>>(&mut self, msg: S) -> io::Result<()> {
755 let msg = msg.as_ref();
758 Some(ref mut o) => o.write_all(msg.as_bytes()),
762 pub fn write_log_result(&mut self, test: &TestDesc, result: &TestResult) -> io::Result<()> {
763 self.write_log(format!(
766 TrOk => "ok".to_owned(),
767 TrFailed => "failed".to_owned(),
768 TrFailedMsg(ref msg) => format!("failed: {}", msg),
769 TrIgnored => "ignored".to_owned(),
770 TrAllowedFail => "failed (allowed)".to_owned(),
771 TrBench(ref bs) => fmt_bench_samples(bs),
777 fn current_test_count(&self) -> usize {
778 self.passed + self.failed + self.ignored + self.measured + self.allowed_fail
782 // Format a number with thousands separators
783 fn fmt_thousands_sep(mut n: usize, sep: char) -> String {
785 let mut output = String::new();
786 let mut trailing = false;
787 for &pow in &[9, 6, 3, 0] {
788 let base = 10_usize.pow(pow);
789 if pow == 0 || trailing || n / base != 0 {
791 output.write_fmt(format_args!("{}", n / base)).unwrap();
793 output.write_fmt(format_args!("{:03}", n / base)).unwrap();
806 pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
808 let mut output = String::new();
810 let median = bs.ns_iter_summ.median as usize;
811 let deviation = (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize;
814 .write_fmt(format_args!(
815 "{:>11} ns/iter (+/- {})",
816 fmt_thousands_sep(median, ','),
817 fmt_thousands_sep(deviation, ',')
822 .write_fmt(format_args!(" = {} MB/s", bs.mb_s))
828 // List the tests to console, and optionally to logfile. Filters are honored.
829 pub fn list_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<()> {
830 let mut output = match term::stdout() {
831 None => Raw(io::stdout()),
832 Some(t) => Pretty(t),
835 let quiet = opts.format == OutputFormat::Terse;
836 let mut st = ConsoleTestState::new(opts)?;
841 for test in filter_tests(&opts, tests) {
842 use crate::TestFn::*;
845 desc: TestDesc { name, .. },
849 let fntype = match testfn {
850 StaticTestFn(..) | DynTestFn(..) => {
854 StaticBenchFn(..) | DynBenchFn(..) => {
860 writeln!(output, "{}: {}", name, fntype)?;
861 st.write_log(format!("{} {}\n", fntype, name))?;
864 fn plural(count: u32, s: &str) -> String {
866 1 => format!("{} {}", 1, s),
867 n => format!("{} {}s", n, s),
872 if ntest != 0 || nbench != 0 {
873 writeln!(output, "")?;
879 plural(ntest, "test"),
880 plural(nbench, "benchmark")
887 // A simple console test runner
888 pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<bool> {
891 st: &mut ConsoleTestState,
892 out: &mut dyn OutputFormatter,
893 ) -> io::Result<()> {
894 match (*event).clone() {
895 TeFiltered(ref filtered_tests) => {
896 st.total = filtered_tests.len();
897 out.write_run_start(filtered_tests.len())
899 TeFilteredOut(filtered_out) => Ok(st.filtered_out = filtered_out),
900 TeWait(ref test) => out.write_test_start(test),
901 TeTimeout(ref test) => out.write_timeout(test),
902 TeResult(test, result, stdout) => {
903 st.write_log_result(&test, &result)?;
904 out.write_result(&test, &result, &*stdout)?;
908 st.not_failures.push((test, stdout));
910 TrIgnored => st.ignored += 1,
911 TrAllowedFail => st.allowed_fail += 1,
913 st.metrics.insert_metric(
914 test.name.as_slice(),
915 bs.ns_iter_summ.median,
916 bs.ns_iter_summ.max - bs.ns_iter_summ.min,
922 st.failures.push((test, stdout));
924 TrFailedMsg(msg) => {
926 let mut stdout = stdout;
927 stdout.extend_from_slice(format!("note: {}", msg).as_bytes());
928 st.failures.push((test, stdout));
936 let output = match term::stdout() {
937 None => Raw(io::stdout()),
938 Some(t) => Pretty(t),
941 let max_name_len = tests
943 .max_by_key(|t| len_if_padded(*t))
944 .map(|t| t.desc.name.as_slice().len())
947 let is_multithreaded = opts.test_threads.unwrap_or_else(get_concurrency) > 1;
949 let mut out: Box<dyn OutputFormatter> = match opts.format {
950 OutputFormat::Pretty => Box::new(PrettyFormatter::new(
956 OutputFormat::Terse => Box::new(TerseFormatter::new(
962 OutputFormat::Json => Box::new(JsonFormatter::new(output)),
964 let mut st = ConsoleTestState::new(opts)?;
965 fn len_if_padded(t: &TestDescAndFn) -> usize {
966 match t.testfn.padding() {
968 PadOnRight => t.desc.name.as_slice().len(),
972 run_tests(opts, tests, |x| callback(&x, &mut st, &mut *out))?;
974 assert!(st.current_test_count() == st.total);
976 return out.write_run_finish(&st);
980 fn should_sort_failures_before_printing_them() {
981 let test_a = TestDesc {
982 name: StaticTestName("a"),
984 should_panic: ShouldPanic::No,
988 let test_b = TestDesc {
989 name: StaticTestName("b"),
991 should_panic: ShouldPanic::No,
995 let mut out = PrettyFormatter::new(Raw(Vec::new()), false, 10, false);
997 let st = ConsoleTestState {
1006 metrics: MetricMap::new(),
1007 failures: vec![(test_b, Vec::new()), (test_a, Vec::new())],
1008 options: Options::new(),
1009 not_failures: Vec::new(),
1012 out.write_failures(&st).unwrap();
1013 let s = match out.output_location() {
1014 &Raw(ref m) => String::from_utf8_lossy(&m[..]),
1015 &Pretty(_) => unreachable!(),
1018 let apos = s.find("a").unwrap();
1019 let bpos = s.find("b").unwrap();
1020 assert!(apos < bpos);
1023 fn use_color(opts: &TestOpts) -> bool {
1025 AutoColor => !opts.nocapture && stdout_isatty(),
1026 AlwaysColor => true,
1027 NeverColor => false,
1032 target_os = "cloudabi",
1033 target_os = "redox",
1034 all(target_arch = "wasm32", not(target_os = "emscripten")),
1035 all(target_vendor = "fortanix", target_env = "sgx")
1037 fn stdout_isatty() -> bool {
1038 // FIXME: Implement isatty on Redox and SGX
1042 fn stdout_isatty() -> bool {
1043 unsafe { libc::isatty(libc::STDOUT_FILENO) != 0 }
1046 fn stdout_isatty() -> bool {
1049 type HANDLE = *mut u8;
1050 type LPDWORD = *mut u32;
1051 const STD_OUTPUT_HANDLE: DWORD = -11i32 as DWORD;
1053 fn GetStdHandle(which: DWORD) -> HANDLE;
1054 fn GetConsoleMode(hConsoleHandle: HANDLE, lpMode: LPDWORD) -> BOOL;
1057 let handle = GetStdHandle(STD_OUTPUT_HANDLE);
1059 GetConsoleMode(handle, &mut out) != 0
1064 pub enum TestEvent {
1065 TeFiltered(Vec<TestDesc>),
1067 TeResult(TestDesc, TestResult, Vec<u8>),
1068 TeTimeout(TestDesc),
1069 TeFilteredOut(usize),
1072 pub type MonitorMsg = (TestDesc, TestResult, Vec<u8>);
1074 struct Sink(Arc<Mutex<Vec<u8>>>);
1075 impl Write for Sink {
1076 fn write(&mut self, data: &[u8]) -> io::Result<usize> {
1077 Write::write(&mut *self.0.lock().unwrap(), data)
1079 fn flush(&mut self) -> io::Result<()> {
1084 pub fn run_tests<F>(opts: &TestOpts, tests: Vec<TestDescAndFn>, mut callback: F) -> io::Result<()>
1086 F: FnMut(TestEvent) -> io::Result<()>,
1088 use std::collections::{self, HashMap};
1089 use std::hash::BuildHasherDefault;
1090 use std::sync::mpsc::RecvTimeoutError;
1091 // Use a deterministic hasher
1093 HashMap<TestDesc, Instant, BuildHasherDefault<collections::hash_map::DefaultHasher>>;
1095 let tests_len = tests.len();
1097 let mut filtered_tests = filter_tests(opts, tests);
1098 if !opts.bench_benchmarks {
1099 filtered_tests = convert_benchmarks_to_tests(filtered_tests);
1102 let filtered_tests = {
1103 let mut filtered_tests = filtered_tests;
1104 for test in filtered_tests.iter_mut() {
1105 test.desc.name = test.desc.name.with_padding(test.testfn.padding());
1111 let filtered_out = tests_len - filtered_tests.len();
1112 callback(TeFilteredOut(filtered_out))?;
1114 let filtered_descs = filtered_tests.iter().map(|t| t.desc.clone()).collect();
1116 callback(TeFiltered(filtered_descs))?;
1118 let (filtered_tests, filtered_benchs): (Vec<_>, _) =
1119 filtered_tests.into_iter().partition(|e| match e.testfn {
1120 StaticTestFn(_) | DynTestFn(_) => true,
1124 let concurrency = opts.test_threads.unwrap_or_else(get_concurrency);
1126 let mut remaining = filtered_tests;
1127 remaining.reverse();
1128 let mut pending = 0;
1130 let (tx, rx) = channel::<MonitorMsg>();
1132 let mut running_tests: TestMap = HashMap::default();
1134 fn get_timed_out_tests(running_tests: &mut TestMap) -> Vec<TestDesc> {
1135 let now = Instant::now();
1136 let timed_out = running_tests
1138 .filter_map(|(desc, timeout)| {
1139 if &now >= timeout {
1146 for test in &timed_out {
1147 running_tests.remove(test);
1152 fn calc_timeout(running_tests: &TestMap) -> Option<Duration> {
1153 running_tests.values().min().map(|next_timeout| {
1154 let now = Instant::now();
1155 if *next_timeout >= now {
1163 if concurrency == 1 {
1164 while !remaining.is_empty() {
1165 let test = remaining.pop().unwrap();
1166 callback(TeWait(test.desc.clone()))?;
1167 run_test(opts, !opts.run_tests, test, tx.clone(), Concurrent::No);
1168 let (test, result, stdout) = rx.recv().unwrap();
1169 callback(TeResult(test, result, stdout))?;
1172 while pending > 0 || !remaining.is_empty() {
1173 while pending < concurrency && !remaining.is_empty() {
1174 let test = remaining.pop().unwrap();
1175 let timeout = Instant::now() + Duration::from_secs(TEST_WARN_TIMEOUT_S);
1176 running_tests.insert(test.desc.clone(), timeout);
1177 callback(TeWait(test.desc.clone()))?; //here no pad
1178 run_test(opts, !opts.run_tests, test, tx.clone(), Concurrent::Yes);
1184 if let Some(timeout) = calc_timeout(&running_tests) {
1185 res = rx.recv_timeout(timeout);
1186 for test in get_timed_out_tests(&mut running_tests) {
1187 callback(TeTimeout(test))?;
1189 if res != Err(RecvTimeoutError::Timeout) {
1193 res = rx.recv().map_err(|_| RecvTimeoutError::Disconnected);
1198 let (desc, result, stdout) = res.unwrap();
1199 running_tests.remove(&desc);
1201 callback(TeResult(desc, result, stdout))?;
1206 if opts.bench_benchmarks {
1207 // All benchmarks run at the end, in serial.
1208 for b in filtered_benchs {
1209 callback(TeWait(b.desc.clone()))?;
1210 run_test(opts, false, b, tx.clone(), Concurrent::No);
1211 let (test, result, stdout) = rx.recv().unwrap();
1212 callback(TeResult(test, result, stdout))?;
1218 #[allow(deprecated)]
1219 fn get_concurrency() -> usize {
1220 return match env::var("RUST_TEST_THREADS") {
1222 let opt_n: Option<usize> = s.parse().ok();
1224 Some(n) if n > 0 => n,
1226 "RUST_TEST_THREADS is `{}`, should be a positive integer.",
1231 Err(..) => num_cpus(),
1235 #[allow(nonstandard_style)]
1236 fn num_cpus() -> usize {
1238 struct SYSTEM_INFO {
1239 wProcessorArchitecture: u16,
1242 lpMinimumApplicationAddress: *mut u8,
1243 lpMaximumApplicationAddress: *mut u8,
1244 dwActiveProcessorMask: *mut u8,
1245 dwNumberOfProcessors: u32,
1246 dwProcessorType: u32,
1247 dwAllocationGranularity: u32,
1248 wProcessorLevel: u16,
1249 wProcessorRevision: u16,
1252 fn GetSystemInfo(info: *mut SYSTEM_INFO) -> i32;
1255 let mut sysinfo = std::mem::zeroed();
1256 GetSystemInfo(&mut sysinfo);
1257 sysinfo.dwNumberOfProcessors as usize
1261 #[cfg(target_os = "redox")]
1262 fn num_cpus() -> usize {
1263 // FIXME: Implement num_cpus on Redox
1268 all(target_arch = "wasm32", not(target_os = "emscripten")),
1269 all(target_vendor = "fortanix", target_env = "sgx")
1271 fn num_cpus() -> usize {
1276 target_os = "android",
1277 target_os = "cloudabi",
1278 target_os = "emscripten",
1279 target_os = "fuchsia",
1281 target_os = "linux",
1282 target_os = "macos",
1283 target_os = "solaris"
1285 fn num_cpus() -> usize {
1286 unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as usize }
1290 target_os = "freebsd",
1291 target_os = "dragonfly",
1292 target_os = "netbsd"
1294 fn num_cpus() -> usize {
1297 let mut cpus: libc::c_uint = 0;
1298 let mut cpus_size = std::mem::size_of_val(&cpus);
1301 cpus = libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as libc::c_uint;
1304 let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
1309 &mut cpus as *mut _ as *mut _,
1310 &mut cpus_size as *mut _ as *mut _,
1322 #[cfg(target_os = "openbsd")]
1323 fn num_cpus() -> usize {
1326 let mut cpus: libc::c_uint = 0;
1327 let mut cpus_size = std::mem::size_of_val(&cpus);
1328 let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
1334 &mut cpus as *mut _ as *mut _,
1335 &mut cpus_size as *mut _ as *mut _,
1346 #[cfg(target_os = "haiku")]
1347 fn num_cpus() -> usize {
1352 #[cfg(target_os = "l4re")]
1353 fn num_cpus() -> usize {
1359 pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
1360 let mut filtered = tests;
1361 let matches_filter = |test: &TestDescAndFn, filter: &str| {
1362 let test_name = test.desc.name.as_slice();
1364 match opts.filter_exact {
1365 true => test_name == filter,
1366 false => test_name.contains(filter),
1370 // Remove tests that don't match the test filter
1371 if let Some(ref filter) = opts.filter {
1372 filtered.retain(|test| matches_filter(test, filter));
1375 // Skip tests that match any of the skip filters
1376 filtered.retain(|test| !opts.skip.iter().any(|sf| matches_filter(test, sf)));
1378 // Excludes #[should_panic] tests
1379 if opts.exclude_should_panic {
1380 filtered.retain(|test| test.desc.should_panic == ShouldPanic::No);
1383 // maybe unignore tests
1384 match opts.run_ignored {
1385 RunIgnored::Yes => {
1388 .for_each(|test| test.desc.ignore = false);
1390 RunIgnored::Only => {
1391 filtered.retain(|test| test.desc.ignore);
1394 .for_each(|test| test.desc.ignore = false);
1396 RunIgnored::No => {}
1399 // Sort the tests alphabetically
1400 filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));
1405 pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
1406 // convert benchmarks to tests, if we're not benchmarking them
1410 let testfn = match x.testfn {
1411 DynBenchFn(bench) => DynTestFn(Box::new(move || {
1412 bench::run_once(|b| __rust_begin_short_backtrace(|| bench.run(b)))
1414 StaticBenchFn(benchfn) => DynTestFn(Box::new(move || {
1415 bench::run_once(|b| __rust_begin_short_backtrace(|| benchfn(b)))
// `run_test` (fragment): executes one test and reports its outcome over
// `monitor_ch`. NOTE(review): this excerpt is elided — intermediate lines
// are missing — so the comments below describe only what is visible.
// Visible tail of the parameter list:
1430 test: TestDescAndFn,
1431 monitor_ch: Sender<MonitorMsg>,
1432 concurrency: Concurrent,
1434 let TestDescAndFn { desc, testfn } = test;
// On wasm32 (outside emscripten) panics abort rather than unwind, so
// should-panic tests cannot be run there and are treated as ignored.
1436 let ignore_because_panic_abort = cfg!(target_arch = "wasm32")
1437 && !cfg!(target_os = "emscripten")
1438 && desc.should_panic != ShouldPanic::No;
// Ignored tests are reported immediately, without executing the body.
1440 if force_ignore || desc.ignore || ignore_because_panic_abort {
1441 monitor_ch.send((desc, TrIgnored, Vec::new())).unwrap();
// Nested helper (presumably `run_test_inner` — see the calls below) that
// executes a boxed test closure. Visible tail of its parameter list:
1447 monitor_ch: Sender<MonitorMsg>,
1449 testfn: Box<dyn FnBox() + Send>,
1450 concurrency: Concurrent,
1452 // Buffer for capturing standard I/O
1453 let data = Arc::new(Mutex::new(Vec::new()));
1454 let data2 = data.clone();
1456 let name = desc.name.clone();
1457 let runtest = move || {
// Unless capture is disabled, redirect print/panic output into the shared
// `Sink` buffer, remembering the previous handlers for restoration.
1458 let oldio = if !nocapture {
1460 io::set_print(Some(Box::new(Sink(data2.clone())))),
1461 io::set_panic(Some(Box::new(Sink(data2)))),
// Run the test body, trapping any panic so it becomes a result value
// instead of tearing down the harness.
1467 let result = catch_unwind(AssertUnwindSafe(testfn));
// Restore the original stdio handlers once the test has finished.
1469 if let Some((printio, panicio)) = oldio {
1470 io::set_print(printio);
1471 io::set_panic(panicio);
1474 let test_result = calc_result(&desc, result);
1475 let stdout = data.lock().unwrap().to_vec();
// Report (descriptor, result, captured output) back to the monitor.
1477 .send((desc.clone(), test_result, stdout))
1481 // If the platform is single-threaded we're just going to run
1482 // the test synchronously, regardless of the concurrency
1484 let supports_threads = !cfg!(target_os = "emscripten") && !cfg!(target_arch = "wasm32");
// Spawn a thread named after the test when concurrency is requested and
// supported; the synchronous fallback path is elided from this excerpt.
1485 if concurrency == Concurrent::Yes && supports_threads {
1486 let cfg = thread::Builder::new().name(name.as_slice().to_owned());
1487 cfg.spawn(runtest).unwrap();
// Dispatch on the test-function variant (visible match arms):
// benches are forwarded to `crate::bench::benchmark`.
1494 DynBenchFn(bencher) => {
1495 crate::bench::benchmark(desc, monitor_ch, opts.nocapture, |harness| {
1496 bencher.run(harness)
1499 StaticBenchFn(benchfn) => {
1500 crate::bench::benchmark(desc, monitor_ch, opts.nocapture, |harness| {
1501 (benchfn.clone())(harness)
// Plain test functions get the short-backtrace wrapper before boxing.
1505 let cb = move || __rust_begin_short_backtrace(f);
1506 run_test_inner(desc, monitor_ch, opts.nocapture, Box::new(cb), concurrency)
1508 StaticTestFn(f) => run_test_inner(
1512 Box::new(move || __rust_begin_short_backtrace(f)),
1518 /// Fixed frame used to clean the backtrace with `RUST_BACKTRACE=1`.
// NOTE(review): the function body is elided here; presumably it just
// invokes `f()` so this frame's symbol marks the backtrace cut point —
// confirm against the full source.
1520 fn __rust_begin_short_backtrace<F: FnOnce()>(f: F) {
// Translates a test's panic/Ok outcome into a `TestResult`, honoring the
// test's `should_panic` expectation. (Some match arms are elided.)
1524 fn calc_result(desc: &TestDesc, task_result: Result<(), Box<dyn Any + Send>>) -> TestResult {
1525 match (&desc.should_panic, task_result) {
// Pass: didn't panic and wasn't supposed to, or panicked as expected.
1526 (&ShouldPanic::No, Ok(())) | (&ShouldPanic::Yes, Err(_)) => TrOk,
// Expected a panic containing a specific message: the payload may be a
// `String` (from a formatted `panic!`) or a `&'static str` literal.
1527 (&ShouldPanic::YesWithMessage(msg), Err(ref err)) => {
1529 .downcast_ref::<String>()
1531 .or_else(|| err.downcast_ref::<&'static str>().map(|e| *e))
1532 .map(|e| e.contains(msg))
// (elided branch) When the panic message does not contain `msg`:
// allow-fail tests presumably get `TrAllowedFail`, others the message
// mismatch failure below — confirm against the full source.
1537 if desc.allow_fail {
1540 TrFailedMsg(format!("Panic did not include expected string '{}'", msg))
// Any other mismatch is allowed to fail only when the test opts in.
1544 _ if desc.allow_fail => TrAllowedFail,
// Map from metric name to `Metric { value, noise }`. Backed by a
// `BTreeMap`, so iteration (and thus `fmt_metrics` output) is ordered by
// name and deterministic.
1549 #[derive(Clone, PartialEq)]
1550 pub struct MetricMap(BTreeMap<String, Metric>);
// Creates an empty metric map. (The enclosing `impl MetricMap` header is
// elided from this excerpt.)
1553 pub fn new() -> MetricMap {
1554 MetricMap(BTreeMap::new())
1557 /// Insert a named `value` (+/- `noise`) metric into the map. The value
1558 /// must be non-negative. The `noise` indicates the uncertainty of the
1559 /// metric, which doubles as the "noise range" of acceptable
1560 /// pairwise-regressions on this named value, when comparing from one
1561 /// metric to the next using `compare_to_old`.
1563 /// If `noise` is positive, then it means this metric is of a value
1564 /// you want to see grow smaller, so a change larger than `noise` in the
1565 /// positive direction represents a regression.
1567 /// If `noise` is negative, then it means this metric is of a value
1568 /// you want to see grow larger, so a change larger than `noise` in the
1569 /// negative direction represents a regression.
1570 pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
1571 let m = Metric { value, noise };
// A later insert with the same name silently replaces the earlier metric.
1572 self.0.insert(name.to_owned(), m);
// Renders all metrics as "name: value (+/- noise)" entries; the final
// joining of the collected strings is elided from this excerpt.
1575 pub fn fmt_metrics(&self) -> String {
1579 .map(|(k, v)| format!("{}: {} (+/- {})", *k, v.value, v.noise))
1580 .collect::<Vec<_>>();
// Re-export: `black_box` now lives in the standard library's `std::hint`.
1587 pub use std::hint::black_box;
1590 /// Callback for benchmark functions to run in their body.
// Runs `inner` under measurement. In `Single` mode it is executed exactly
// once (result discarded); otherwise the adaptive sampler `iter` below is
// used and its statistical summary recorded on `self`. (Parts elided.)
1591 pub fn iter<T, F>(&mut self, mut inner: F)
1595 if self.mode == BenchMode::Single {
1596 ns_iter_inner(&mut inner, 1);
1600 self.summary = Some(iter(&mut inner));
// Runs a whole benchmark function against this `Bencher` and returns the
// summary recorded by its `iter` call, or `None` if `iter` was never
// called inside `f`.
1603 pub fn bench<F>(&mut self, mut f: F) -> Option<stats::Summary>
1605 F: FnMut(&mut Bencher),
1608 return self.summary;
/// Converts a `Duration` into a total count of nanoseconds.
///
/// The multiplication can overflow `u64` for durations longer than
/// roughly 584 years, which is irrelevant for benchmark timing.
fn ns_from_dur(dur: Duration) -> u64 {
    // Whole seconds scaled to nanoseconds, plus the sub-second remainder.
    let whole_secs_ns = dur.as_secs() * 1_000_000_000;
    whole_secs_ns + u64::from(dur.subsec_nanos())
}
// Times `k` consecutive invocations of `inner`, returning the total
// elapsed time in nanoseconds. NOTE(review): the loop over `k` (and the
// `black_box` treatment of results, if any) is elided from this excerpt.
1616 fn ns_iter_inner<T, F>(inner: &mut F, k: u64) -> u64
1620 let start = Instant::now();
1624 return ns_from_dur(start.elapsed());
// Adaptive benchmark driver: repeatedly samples `inner`, growing the
// per-sample iteration count until the timings converge or a time budget
// is exhausted, and returns a winsorized statistical summary (ns/iter).
// NOTE(review): the enclosing loop structure is partially elided here.
1627 pub fn iter<T, F>(inner: &mut F) -> stats::Summary
1631 // Initial bench run to get ballpark figure.
1632 let ns_single = ns_iter_inner(inner, 1);
1634 // Try to estimate iter count for 1ms falling back to 1m
1635 // iterations if first run took < 1ns.
1636 let ns_target_total = 1_000_000; // 1ms
1637 let mut n = ns_target_total / cmp::max(1, ns_single);
1639 // if the first run took more than 1ms we don't want to just
1640 // be left doing 0 iterations on every loop. The unfortunate
1641 // side effect of not being able to do as many runs is
1642 // automatically handled by the statistical analysis below
1643 // (i.e., larger error bars).
1646 let mut total_run = Duration::new(0, 0);
// Fixed-size sample buffer: 50 measurements per pass, reused in place.
1647 let samples: &mut [f64] = &mut [0.0_f64; 50];
1649 let loop_start = Instant::now();
// First pass: 50 samples of `n` iterations each, normalized to ns/iter.
1651 for p in &mut *samples {
1652 *p = ns_iter_inner(inner, n) as f64 / n as f64;
// Clamp the most extreme 5% of samples before summarizing, to damp
// outliers from scheduler noise.
1655 stats::winsorize(samples, 5.0);
1656 let summ = stats::Summary::new(samples);
// Second pass at 5x the iteration count, used as a convergence check.
1658 for p in &mut *samples {
1659 let ns = ns_iter_inner(inner, 5 * n);
1660 *p = ns as f64 / (5 * n) as f64;
1663 stats::winsorize(samples, 5.0);
1664 let summ5 = stats::Summary::new(samples);
1666 let loop_run = loop_start.elapsed();
1668 // If we've run for 100ms and seem to have converged to a
// Converged: low relative deviation and the two passes' medians agree
// within the larger pass's median absolute deviation. (The early return
// is elided from this excerpt.)
1670 if loop_run > Duration::from_millis(100)
1671 && summ.median_abs_dev_pct < 1.0
1672 && summ.median - summ5.median < summ5.median_abs_dev
1677 total_run = total_run + loop_run;
1678 // Longest we ever run for is 3s.
1679 if total_run > Duration::from_secs(3) {
1683 // If we overflow here just return the results so far. We check a
1684 // multiplier of 10 because we're about to multiply by 2 and the
1685 // next iteration of the loop will also multiply by 5 (to calculate
1686 // the summ5 result)
1687 n = match n.checked_mul(10) {
1697 use super::{BenchMode, BenchSamples, Bencher, MonitorMsg, Sender, Sink, TestDesc, TestResult};
1701 use std::panic::{catch_unwind, AssertUnwindSafe};
1702 use std::sync::{Arc, Mutex};
// Runs one benchmark function end-to-end: captures stdio, executes the
// bench body under `catch_unwind`, converts the outcome to a
// `TestResult`, and reports it with the captured output over
// `monitor_ch`. NOTE(review): some lines are elided from this excerpt.
1704 pub fn benchmark<F>(desc: TestDesc, monitor_ch: Sender<MonitorMsg>, nocapture: bool, f: F)
1706 F: FnMut(&mut Bencher),
// Fresh Bencher in adaptive (`Auto`) mode; remaining fields elided.
1708 let mut bs = Bencher {
1709 mode: BenchMode::Auto,
// Shared buffer receiving everything the benchmark prints or panics.
1714 let data = Arc::new(Mutex::new(Vec::new()));
1715 let data2 = data.clone();
// Unless capture is disabled, redirect print/panic output into the Sink,
// remembering the previous handlers.
1717 let oldio = if !nocapture {
1719 io::set_print(Some(Box::new(Sink(data2.clone())))),
1720 io::set_panic(Some(Box::new(Sink(data2)))),
// Run the benchmark, trapping panics so they become `TrFailed` below.
1726 let result = catch_unwind(AssertUnwindSafe(|| bs.bench(f)));
// Restore the previous stdio handlers, if they were replaced.
1728 if let Some((printio, panicio)) = oldio {
1729 io::set_print(printio);
1730 io::set_panic(panicio);
1733 let test_result = match result {
1735 Ok(Some(ns_iter_summ)) => {
// `iter` produced a summary: derive MB/s throughput from the median
// ns/iter (clamped to at least 1 to avoid division by zero).
1736 let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
1737 let mb_s = bs.bytes * 1000 / ns_iter;
1739 let bs = BenchSamples {
1741 mb_s: mb_s as usize,
1743 TestResult::TrBench(bs)
1746 // iter not called, so no data.
1747 // FIXME: error in this case?
// Fabricate a single zero sample so downstream reporting still works.
1748 let samples: &mut [f64] = &mut [0.0_f64; 1];
1749 let bs = BenchSamples {
1750 ns_iter_summ: stats::Summary::new(samples),
1753 TestResult::TrBench(bs)
// A panic inside the benchmark body is a plain failure.
1755 Err(_) => TestResult::TrFailed,
1758 let stdout = data.lock().unwrap().to_vec();
1759 monitor_ch.send((desc, test_result, stdout)).unwrap();
// Runs a benchmark function exactly once, via a `Bencher` in `Single`
// mode (used when benches are executed as plain tests). The remainder of
// the body is elided from this excerpt.
1762 pub fn run_once<F>(f: F)
1764 F: FnMut(&mut Bencher),
1766 let mut bs = Bencher {
1767 mode: BenchMode::Single,
1779 filter_tests, parse_opts, run_test, DynTestFn, DynTestName, MetricMap, RunIgnored,
1780 ShouldPanic, StaticTestName, TestDesc, TestDescAndFn, TestOpts, TrFailed, TrFailedMsg,
1784 use crate::Concurrent;
1785 use std::sync::mpsc::channel;
// --- libtest's own unit tests (fragmentary excerpt; interior lines of
// --- each test are elided, so comments describe visible intent only).

// Fixture: builds two dynamic tests, "1" (presumably ignored) and "2"
// (presumably not) — the `ignore:` fields are elided; confirm in source.
1787 fn one_ignored_one_unignored_test() -> Vec<TestDescAndFn> {
1791 name: StaticTestName("1"),
1793 should_panic: ShouldPanic::No,
1796 testfn: DynTestFn(Box::new(move || {})),
1800 name: StaticTestName("2"),
1802 should_panic: ShouldPanic::No,
1805 testfn: DynTestFn(Box::new(move || {})),
// An ignored test run with force_ignore=false must not report TrOk.
1811 pub fn do_not_run_ignored_tests() {
1815 let desc = TestDescAndFn {
1817 name: StaticTestName("whatever"),
1819 should_panic: ShouldPanic::No,
1822 testfn: DynTestFn(Box::new(f)),
1824 let (tx, rx) = channel();
1825 run_test(&TestOpts::new(), false, desc, tx, Concurrent::No);
1826 let (_, res, _) = rx.recv().unwrap();
1827 assert!(res != TrOk);
// An ignored test must report exactly TrIgnored.
1831 pub fn ignored_tests_result_in_ignored() {
1833 let desc = TestDescAndFn {
1835 name: StaticTestName("whatever"),
1837 should_panic: ShouldPanic::No,
1840 testfn: DynTestFn(Box::new(f)),
1842 let (tx, rx) = channel();
1843 run_test(&TestOpts::new(), false, desc, tx, Concurrent::No);
1844 let (_, res, _) = rx.recv().unwrap();
1845 assert!(res == TrIgnored);
// should_panic=Yes + a panicking body => TrOk.
1849 fn test_should_panic() {
1853 let desc = TestDescAndFn {
1855 name: StaticTestName("whatever"),
1857 should_panic: ShouldPanic::Yes,
1860 testfn: DynTestFn(Box::new(f)),
1862 let (tx, rx) = channel();
1863 run_test(&TestOpts::new(), false, desc, tx, Concurrent::No);
1864 let (_, res, _) = rx.recv().unwrap();
1865 assert!(res == TrOk);
// YesWithMessage matching a substring of the panic message => TrOk.
1869 fn test_should_panic_good_message() {
1871 panic!("an error message");
1873 let desc = TestDescAndFn {
1875 name: StaticTestName("whatever"),
1877 should_panic: ShouldPanic::YesWithMessage("error message"),
1880 testfn: DynTestFn(Box::new(f)),
1882 let (tx, rx) = channel();
1883 run_test(&TestOpts::new(), false, desc, tx, Concurrent::No);
1884 let (_, res, _) = rx.recv().unwrap();
1885 assert!(res == TrOk);
// YesWithMessage NOT contained in the panic message => TrFailedMsg with
// the exact wording produced by calc_result.
1889 fn test_should_panic_bad_message() {
1891 panic!("an error message");
1893 let expected = "foobar";
1894 let failed_msg = "Panic did not include expected string";
1895 let desc = TestDescAndFn {
1897 name: StaticTestName("whatever"),
1899 should_panic: ShouldPanic::YesWithMessage(expected),
1902 testfn: DynTestFn(Box::new(f)),
1904 let (tx, rx) = channel();
1905 run_test(&TestOpts::new(), false, desc, tx, Concurrent::No);
1906 let (_, res, _) = rx.recv().unwrap();
1907 assert!(res == TrFailedMsg(format!("{} '{}'", failed_msg, expected)));
// should_panic=Yes but the body returns normally => TrFailed.
1911 fn test_should_panic_but_succeeds() {
1913 let desc = TestDescAndFn {
1915 name: StaticTestName("whatever"),
1917 should_panic: ShouldPanic::Yes,
1920 testfn: DynTestFn(Box::new(f)),
1922 let (tx, rx) = channel();
1923 run_test(&TestOpts::new(), false, desc, tx, Concurrent::No);
1924 let (_, res, _) = rx.recv().unwrap();
1925 assert!(res == TrFailed);
// `--ignored` on the command line sets run_ignored to Only.
1929 fn parse_ignored_flag() {
1931 "progname".to_string(),
1932 "filter".to_string(),
1933 "--ignored".to_string(),
1935 let opts = parse_opts(&args).unwrap().unwrap();
1936 assert_eq!(opts.run_ignored, RunIgnored::Only);
// `--include-ignored` (behind -Zunstable-options) sets run_ignored=Yes.
1940 fn parse_include_ignored_flag() {
1942 "progname".to_string(),
1943 "filter".to_string(),
1944 "-Zunstable-options".to_string(),
1945 "--include-ignored".to_string(),
1947 let opts = parse_opts(&args).unwrap().unwrap();
1948 assert_eq!(opts.run_ignored, RunIgnored::Yes);
1952 pub fn filter_for_ignored_option() {
1953 // When we run ignored tests the test filter should filter out all the
1954 // unignored tests and flip the ignore flag on the rest to false
1956 let mut opts = TestOpts::new();
1957 opts.run_tests = true;
1958 opts.run_ignored = RunIgnored::Only;
1960 let tests = one_ignored_one_unignored_test();
1961 let filtered = filter_tests(&opts, tests);
1963 assert_eq!(filtered.len(), 1);
1964 assert_eq!(filtered[0].desc.name.to_string(), "1");
1965 assert!(!filtered[0].desc.ignore);
1969 pub fn run_include_ignored_option() {
1970 // When we "--include-ignored" tests, the ignore flag should be set to false on
1971 // all tests and no test filtered out
1973 let mut opts = TestOpts::new();
1974 opts.run_tests = true;
1975 opts.run_ignored = RunIgnored::Yes;
1977 let tests = one_ignored_one_unignored_test();
1978 let filtered = filter_tests(&opts, tests);
1980 assert_eq!(filtered.len(), 2);
1981 assert!(!filtered[0].desc.ignore);
1982 assert!(!filtered[1].desc.ignore);
// exclude_should_panic drops every test whose should_panic != No.
1986 pub fn exclude_should_panic_option() {
1987 let mut opts = TestOpts::new();
1988 opts.run_tests = true;
1989 opts.exclude_should_panic = true;
1991 let mut tests = one_ignored_one_unignored_test();
1992 tests.push(TestDescAndFn {
1994 name: StaticTestName("3"),
1996 should_panic: ShouldPanic::Yes,
1999 testfn: DynTestFn(Box::new(move || {})),
2002 let filtered = filter_tests(&opts, tests);
2004 assert_eq!(filtered.len(), 2);
2005 assert!(filtered.iter().all(|test| test.desc.should_panic == ShouldPanic::No));
// Substring filtering vs. exact matching (`filter_exact`, elided in the
// TestOpts literals) over a fixed set of four hierarchical test names.
2009 pub fn exact_filter_match() {
2010 fn tests() -> Vec<TestDescAndFn> {
2011 vec!["base", "base::test", "base::test1", "base::test2"]
2013 .map(|name| TestDescAndFn {
2015 name: StaticTestName(name),
2017 should_panic: ShouldPanic::No,
2020 testfn: DynTestFn(Box::new(move || {})),
// Substring mode: every name containing the filter matches.
2025 let substr = filter_tests(
2027 filter: Some("base".into()),
2032 assert_eq!(substr.len(), 4);
2034 let substr = filter_tests(
2036 filter: Some("bas".into()),
2041 assert_eq!(substr.len(), 4);
2043 let substr = filter_tests(
2045 filter: Some("::test".into()),
2050 assert_eq!(substr.len(), 3);
2052 let substr = filter_tests(
2054 filter: Some("base::test".into()),
2059 assert_eq!(substr.len(), 3);
// Exact mode: only a name equal to the filter matches.
2061 let exact = filter_tests(
2063 filter: Some("base".into()),
2069 assert_eq!(exact.len(), 1);
2071 let exact = filter_tests(
2073 filter: Some("bas".into()),
2079 assert_eq!(exact.len(), 0);
2081 let exact = filter_tests(
2083 filter: Some("::test".into()),
2089 assert_eq!(exact.len(), 0);
2091 let exact = filter_tests(
2093 filter: Some("base::test".into()),
2099 assert_eq!(exact.len(), 1);
// filter_tests must return tests sorted by name, regardless of input
// order (expected list below is the lexicographically sorted input).
2103 pub fn sort_tests() {
2104 let mut opts = TestOpts::new();
2105 opts.run_tests = true;
2108 "sha1::test".to_string(),
2109 "isize::test_to_str".to_string(),
2110 "isize::test_pow".to_string(),
2111 "test::do_not_run_ignored_tests".to_string(),
2112 "test::ignored_tests_result_in_ignored".to_string(),
2113 "test::first_free_arg_should_be_a_filter".to_string(),
2114 "test::parse_ignored_flag".to_string(),
2115 "test::parse_include_ignored_flag".to_string(),
2116 "test::filter_for_ignored_option".to_string(),
2117 "test::run_include_ignored_option".to_string(),
2118 "test::sort_tests".to_string(),
2122 let mut tests = Vec::new();
2123 for name in &names {
2124 let test = TestDescAndFn {
2126 name: DynTestName((*name).clone()),
2128 should_panic: ShouldPanic::No,
2131 testfn: DynTestFn(Box::new(testfn)),
2137 let filtered = filter_tests(&opts, tests);
2139 let expected = vec![
2140 "isize::test_pow".to_string(),
2141 "isize::test_to_str".to_string(),
2142 "sha1::test".to_string(),
2143 "test::do_not_run_ignored_tests".to_string(),
2144 "test::filter_for_ignored_option".to_string(),
2145 "test::first_free_arg_should_be_a_filter".to_string(),
2146 "test::ignored_tests_result_in_ignored".to_string(),
2147 "test::parse_ignored_flag".to_string(),
2148 "test::parse_include_ignored_flag".to_string(),
2149 "test::run_include_ignored_option".to_string(),
2150 "test::sort_tests".to_string(),
2153 for (a, b) in expected.iter().zip(filtered) {
2154 assert!(*a == b.desc.name.to_string());
// Exercises MetricMap comparison across noise/regression scenarios; the
// actual comparison assertions are elided from this excerpt.
2159 pub fn test_metricmap_compare() {
2160 let mut m1 = MetricMap::new();
2161 let mut m2 = MetricMap::new();
2162 m1.insert_metric("in-both-noise", 1000.0, 200.0);
2163 m2.insert_metric("in-both-noise", 1100.0, 200.0);
2165 m1.insert_metric("in-first-noise", 1000.0, 2.0);
2166 m2.insert_metric("in-second-noise", 1000.0, 2.0);
2168 m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
2169 m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);
2171 m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
2172 m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);
2174 m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
2175 m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);
2177 m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
2178 m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);
// run_once with a bench that never calls iter (bodies elided).
2182 pub fn test_bench_once_no_iter() {
2183 fn f(_: &mut Bencher) {}
// run_once with a bench that does call iter (bodies elided).
2188 pub fn test_bench_once_iter() {
2189 fn f(b: &mut Bencher) {
// Full benchmark() path with a bench that never calls iter.
2196 pub fn test_bench_no_iter() {
2197 fn f(_: &mut Bencher) {}
2199 let (tx, rx) = channel();
2201 let desc = TestDesc {
2202 name: StaticTestName("f"),
2204 should_panic: ShouldPanic::No,
2208 crate::bench::benchmark(desc, tx, true, f);
// Full benchmark() path with a bench that does call iter.
2213 pub fn test_bench_iter() {
2214 fn f(b: &mut Bencher) {
2218 let (tx, rx) = channel();
2220 let desc = TestDesc {
2221 name: StaticTestName("f"),
2223 should_panic: ShouldPanic::No,
2227 crate::bench::benchmark(desc, tx, true, f);