1 //! Support code for rustc's built in unit-test and micro-benchmarking
4 //! Almost all user code will only be interested in `Bencher` and
5 //! `black_box`. All other interactions (such as writing tests and
6 //! benchmarks themselves) should be done via the `#[test]` and
7 //! `#[bench]` attributes.
9 //! See the [Testing Chapter](../book/ch11-00-testing.html) of the book for more details.
11 // Currently, not much of this is meant for users. It is intended to
12 // support the simplest interface possible for representing and
13 // running tests while providing a base that other test frameworks may
16 // N.B., this is also specified in this crate's Cargo.toml, but libsyntax contains logic specific to
17 // this crate, which relies on this attribute (rather than the value of `--crate-name` passed by
18 // cargo) to detect this crate.
20 #![deny(rust_2018_idioms)]
21 #![crate_name = "test"]
22 #![unstable(feature = "test", issue = "27812")]
23 #![doc(html_root_url = "https://doc.rust-lang.org/nightly/", test(attr(deny(warnings))))]
26 #![cfg_attr(any(unix, target_os = "cloudabi"), feature(libc, rustc_private))]
28 #![feature(set_stdio)]
29 #![feature(panic_unwind)]
30 #![feature(staged_api)]
31 #![feature(termination_trait_lib)]
35 #[cfg(any(unix, target_os = "cloudabi"))]
39 // FIXME(#54291): rustc and/or LLVM don't yet support building with panic-unwind
40 // on aarch64-pc-windows-msvc, or thumbv7a-pc-windows-msvc, so we don't link libtest against
41 // libunwind (for the time being), even though it means that
42 // libtest won't be fully functional on these platforms.
44 // See also: https://github.com/rust-lang/rust/issues/54190#issuecomment-422904437
45 #[cfg(not(any(all(windows, target_arch = "aarch64"), all(windows, target_arch = "arm"))))]
46 extern crate panic_unwind;
48 pub use self::ColorConfig::*;
49 use self::NamePadding::*;
50 use self::OutputLocation::*;
51 use self::TestEvent::*;
52 pub use self::TestFn::*;
53 pub use self::TestName::*;
54 pub use self::TestResult::*;
58 use std::boxed::FnBox;
60 use std::collections::BTreeMap;
65 use std::io::prelude::*;
66 use std::panic::{catch_unwind, AssertUnwindSafe};
67 use std::path::PathBuf;
69 use std::process::Termination;
70 use std::sync::mpsc::{channel, Sender};
71 use std::sync::{Arc, Mutex};
73 use std::time::{Duration, Instant};
75 const TEST_WARN_TIMEOUT_S: u64 = 60;
76 const QUIET_MODE_MAX_COLUMN: usize = 100; // insert a '\n' after 100 tests in quiet mode
78 // to be used by rustc to compile tests in libtest
81 assert_test_result, filter_tests, parse_opts, run_test, test_main, test_main_static,
82 Bencher, DynTestFn, DynTestName, Metric, MetricMap, Options, RunIgnored, ShouldPanic,
83 StaticBenchFn, StaticTestFn, StaticTestName, TestDesc, TestDescAndFn, TestName, TestOpts,
84 TestResult, TrFailed, TrFailedMsg, TrIgnored, TrOk,
91 use crate::formatters::{JsonFormatter, OutputFormatter, PrettyFormatter, TerseFormatter};
// NOTE(review): this extraction is missing interior lines (e.g. the
// `Concurrent` enum body, the `enum TestName {` line, and several
// `match`/closing-brace lines). Code is kept byte-identical; only annotated.
93 /// Whether to execute tests concurrently or not
94 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
100 // The name of a test. By convention this follows the rules for rust
101 // paths; i.e., it should be a series of identifiers separated by double
102 // colons. This way if some test runner wants to arrange the tests
103 // hierarchically it may.
105 #[derive(Clone, PartialEq, Eq, Hash, Debug)]
107 StaticTestName(&'static str),
109 AlignedTestName(Cow<'static, str>, NamePadding),
// Borrow the test name as a plain `&str`, whichever variant stores it.
112 fn as_slice(&self) -> &str {
114 StaticTestName(s) => s,
115 DynTestName(ref s) => s,
116 AlignedTestName(ref s, _) => &*s,
// Padding is carried only by `AlignedTestName`; the fallback arm for the
// other variants is not visible in this extract — TODO confirm.
120 fn padding(&self) -> NamePadding {
122 &AlignedTestName(_, p) => p,
// Convert any variant into an `AlignedTestName` with the given padding,
// moving the underlying string into a `Cow` (cloning only when owned).
127 fn with_padding(&self, padding: NamePadding) -> TestName {
128 let name = match self {
129 &TestName::StaticTestName(name) => Cow::Borrowed(name),
130 &TestName::DynTestName(ref name) => Cow::Owned(name.clone()),
131 &TestName::AlignedTestName(ref name, _) => name.clone(),
134 TestName::AlignedTestName(name, padding)
// Display a test name exactly as its underlying string.
137 impl fmt::Display for TestName {
138 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
139 fmt::Display::fmt(self.as_slice(), f)
// Alignment choice for printing a test name (variants not all visible here).
143 #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
144 pub enum NamePadding {
// Pad the name with spaces out to `column_count` characters;
// `saturating_sub` avoids underflow when the name is already wider.
150 fn padded_name(&self, column_count: usize, align: NamePadding) -> String {
151 let mut name = String::from(self.name.as_slice());
152 let fill = column_count.saturating_sub(name.len());
153 let pad = " ".repeat(fill);
164 /// Represents a benchmark function.
165 pub trait TDynBenchFn: Send {
166 fn run(&self, harness: &mut Bencher);
169 // A function that runs a test. If the function returns successfully,
170 // the test succeeds; if the function panics then the test fails. We
171 // may need to come up with a more clever definition of test in order
172 // to support isolation of tests into threads.
175 StaticBenchFn(fn(&mut Bencher)),
176 DynTestFn(Box<dyn FnBox() + Send>),
177 DynBenchFn(Box<dyn TDynBenchFn + 'static>),
// Benchmarks are padded on the right so their timing columns line up;
// plain tests get no padding.
181 fn padding(&self) -> NamePadding {
183 StaticTestFn(..) => PadNone,
184 StaticBenchFn(..) => PadOnRight,
185 DynTestFn(..) => PadNone,
186 DynBenchFn(..) => PadOnRight,
// Manual Debug impl: the wrapped closures/trait objects are not Debug,
// so only the variant name is printed.
191 impl fmt::Debug for TestFn {
192 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
193 f.write_str(match *self {
194 StaticTestFn(..) => "StaticTestFn(..)",
195 StaticBenchFn(..) => "StaticBenchFn(..)",
196 DynTestFn(..) => "DynTestFn(..)",
197 DynBenchFn(..) => "DynBenchFn(..)",
202 /// Manager of the benchmarking runs.
204 /// This is fed into functions marked with `#[bench]` to allow for
205 /// set-up & tear-down before running a piece of code repeatedly via a
// Summary statistics of the most recent benchmark loop, if any.
210 summary: Option<stats::Summary>,
214 #[derive(Clone, PartialEq, Eq)]
// Whether a test is expected to panic, optionally requiring the panic
// message to contain a given substring.
220 #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
221 pub enum ShouldPanic {
224 YesWithMessage(&'static str),
227 // The definition of a single test. A test runner will run a list of
229 #[derive(Clone, Debug, PartialEq, Eq, Hash)]
230 pub struct TestDesc {
233 pub should_panic: ShouldPanic,
234 pub allow_fail: bool,
// A test description paired with the function that runs it.
238 pub struct TestDescAndFn {
243 #[derive(Clone, PartialEq, Debug, Copy)]
// Construct a metric from its value and noise (uncertainty) band.
250 pub fn new(value: f64, noise: f64) -> Metric {
251 Metric { value, noise }
255 /// In case we want to add other options as well, just add them in this struct.
256 #[derive(Copy, Clone, Debug)]
258 display_output: bool,
262 pub fn new() -> Options {
264 display_output: false,
// Builder-style setter: consumes and returns `self`.
268 pub fn display_output(mut self, display_output: bool) -> Options {
269 self.display_output = display_output;
274 // The default console test runner. It accepts the command line
275 // arguments and a vector of test_descs.
276 pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>, options: Options) {
277 let mut opts = match parse_opts(args) {
280 eprintln!("error: {}", msg);
286 opts.options = options;
// Listing and running both exit the process directly on I/O error.
288 if let Err(e) = list_tests_console(&opts, tests) {
289 eprintln!("error: io error when listing tests: {:?}", e);
293 match run_tests_console(&opts, tests) {
// A clean run that had failing tests exits with code 101.
295 Ok(false) => process::exit(101),
297 eprintln!("error: io error when listing tests: {:?}", e);
304 // A variant optimized for invocation with a static test vector.
305 // This will panic (intentionally) when fed any dynamic tests, because
306 // it is copying the static values out into a dynamic vector and cannot
307 // copy dynamic values. It is doing this because from this point on
308 // a Vec<TestDescAndFn> is used in order to effect ownership-transfer
309 // semantics into parallel test runners, which in turn requires a Vec<>
310 // rather than a &[].
311 pub fn test_main_static(tests: &[&TestDescAndFn]) {
312 let args = env::args().collect::<Vec<_>>();
// Re-own each static test by copying its fn pointer and cloning its desc.
313 let owned_tests = tests
315 .map(|t| match t.testfn {
316 StaticTestFn(f) => TestDescAndFn {
317 testfn: StaticTestFn(f),
318 desc: t.desc.clone(),
320 StaticBenchFn(f) => TestDescAndFn {
321 testfn: StaticBenchFn(f),
322 desc: t.desc.clone(),
324 _ => panic!("non-static tests passed to test::test_main_static"),
327 test_main(&args, owned_tests, Options::new())
330 /// Invoked when unit tests terminate. Should panic if the unit
331 /// Tests is considered a failure. By default, invokes `report()`
332 /// and checks for a `0` result.
333 pub fn assert_test_result<T: Termination>(result: T) {
334 let code = result.report();
// The message below is emitted when `report()` returns non-zero; the
// surrounding assert line is not visible in this extract.
337 "the test returned a termination value with a non-zero status code ({}) \
338 which indicates a failure",
// Color, output-format, and ignored-test policies selected on the CLI
// (variant lists are not fully visible in this extract).
343 #[derive(Copy, Clone, Debug)]
344 pub enum ColorConfig {
350 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
351 pub enum OutputFormat {
357 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
358 pub enum RunIgnored {
// All knobs parsed from the command line; populated by `parse_opts`.
365 pub struct TestOpts {
367 pub filter: Option<String>,
368 pub filter_exact: bool,
369 pub exclude_should_panic: bool,
370 pub run_ignored: RunIgnored,
372 pub bench_benchmarks: bool,
373 pub logfile: Option<PathBuf>,
375 pub color: ColorConfig,
376 pub format: OutputFormat,
377 pub test_threads: Option<usize>,
378 pub skip: Vec<String>,
379 pub options: Options,
// Default options, used by libtest's own unit tests.
384 fn new() -> TestOpts {
389 exclude_should_panic: false,
390 run_ignored: RunIgnored::No,
392 bench_benchmarks: false,
396 format: OutputFormat::Pretty,
399 options: Options::new(),
407 fn optgroups() -> getopts::Options {
408 let mut opts = getopts::Options::new();
409 opts.optflag("", "include-ignored", "Run ignored and not ignored tests")
410 .optflag("", "ignored", "Run only ignored tests")
411 .optflag("", "exclude-should-panic", "Excludes tests marked as should_panic")
412 .optflag("", "test", "Run tests and not benchmarks")
413 .optflag("", "bench", "Run benchmarks instead of tests")
414 .optflag("", "list", "List all tests and benchmarks")
415 .optflag("h", "help", "Display this message (longer with --help)")
419 "Write logs to the specified file instead \
426 "don't capture stdout/stderr of each \
427 task, allow printing directly",
432 "Number of threads used for running tests \
439 "Skip tests whose names contain FILTER (this flag can \
440 be used multiple times)",
446 "Display one character per test instead of one line. \
447 Alias to --format=terse",
452 "Exactly match filters rather than by substring",
457 "Configure coloring of output:
458 auto = colorize if stdout is a tty and tests are run on serially (default);
459 always = always colorize output;
460 never = never colorize output;",
466 "Configure formatting of output:
467 pretty = Print verbose output;
468 terse = Display one character per test;
469 json = Output a json document",
475 "Enable nightly-only flags:
476 unstable-options = Allow use of experimental features",
// Print the full usage/help text built from the getopts table. The body
// below is dominated by one long string literal, so no comments are
// inserted inside it.
482 fn usage(binary: &str, options: &getopts::Options) {
483 let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
487 The FILTER string is tested against the name of all tests, and only those
488 tests whose names contain the filter are run.
490 By default, all tests are run in parallel. This can be altered with the
491 --test-threads flag or the RUST_TEST_THREADS environment variable when running
494 All tests have their standard output and standard error captured by default.
495 This can be overridden with the --nocapture flag or setting RUST_TEST_NOCAPTURE
496 environment variable to a value other than "0". Logging is not captured by default.
500 #[test] - Indicates a function is a test to be run. This function
502 #[bench] - Indicates a function is a benchmark to be run. This
503 function takes one argument (test::Bencher).
504 #[should_panic] - This function (also labeled with #[test]) will only pass if
505 the code causes a panic (an assertion failure or panic!)
506 A message may be provided, which the failure string must
507 contain: #[should_panic(expected = "foo")].
508 #[ignore] - When applied to a function which is already attributed as a
509 test, then the test runner will ignore these tests during
510 normal test runs. Running with --ignored or --include-ignored will run
512 usage = options.usage(&message)
516 // FIXME: Copied from libsyntax until linkage errors are resolved. Issue #47566
517 fn is_nightly() -> bool {
518 // Whether this is a feature-staged build, i.e., on the beta or stable channel
519 let disable_unstable_features = option_env!("CFG_DISABLE_UNSTABLE_FEATURES").is_some();
520 // Whether we should enable unstable features for bootstrapping
521 let bootstrap = env::var("RUSTC_BOOTSTRAP").is_ok();
// Nightly-only flags are allowed when bootstrapping, or when unstable
// features were not disabled at compiler build time.
523 bootstrap || !disable_unstable_features
526 // Parses command line arguments into test options
527 pub fn parse_opts(args: &[String]) -> Option<OptRes> {
528 let mut allow_unstable = false;
529 let opts = optgroups();
// Skip argv[0] (the binary name) when present.
530 let args = args.get(1..).unwrap_or(args);
531 let matches = match opts.parse(args) {
533 Err(f) => return Some(Err(f.to_string())),
// `-Z` flags are gated on a nightly build (see `is_nightly`).
536 if let Some(opt) = matches.opt_str("Z") {
539 "the option `Z` is only accepted on the nightly compiler".into(),
544 "unstable-options" => {
545 allow_unstable = true;
548 return Some(Err("Unrecognized option to `Z`".into()));
553 if matches.opt_present("h") {
554 usage(&args[0], &opts);
// The first free (positional) argument, if any, is the test-name filter.
558 let filter = if !matches.free.is_empty() {
559 Some(matches.free[0].clone())
564 let exclude_should_panic = matches.opt_present("exclude-should-panic");
565 if !allow_unstable && exclude_should_panic {
567 "The \"exclude-should-panic\" flag is only accepted on the nightly compiler".into(),
571 let include_ignored = matches.opt_present("include-ignored");
572 if !allow_unstable && include_ignored {
574 "The \"include-ignored\" flag is only accepted on the nightly compiler".into(),
// --include-ignored and --ignored are mutually exclusive.
578 let run_ignored = match (include_ignored, matches.opt_present("ignored")) {
581 "the options --include-ignored and --ignored are mutually exclusive".into(),
584 (true, false) => RunIgnored::Yes,
585 (false, true) => RunIgnored::Only,
586 (false, false) => RunIgnored::No,
588 let quiet = matches.opt_present("quiet");
589 let exact = matches.opt_present("exact");
590 let list = matches.opt_present("list");
592 let logfile = matches.opt_str("logfile");
593 let logfile = logfile.map(|s| PathBuf::from(&s));
595 let bench_benchmarks = matches.opt_present("bench");
596 let run_tests = !bench_benchmarks || matches.opt_present("test");
// RUST_TEST_NOCAPTURE can force capture off; the literal value "0"
// explicitly keeps capture on.
598 let mut nocapture = matches.opt_present("nocapture");
600 nocapture = match env::var("RUST_TEST_NOCAPTURE") {
601 Ok(val) => &val != "0",
// --test-threads must parse as a positive integer.
606 let test_threads = match matches.opt_str("test-threads") {
607 Some(n_str) => match n_str.parse::<usize>() {
608 Ok(0) => return Some(Err("argument for --test-threads must not be 0".to_string())),
611 return Some(Err(format!(
612 "argument for --test-threads must be a number > 0 \
621 let color = match matches.opt_str("color").as_ref().map(|s| &**s) {
622 Some("auto") | None => AutoColor,
623 Some("always") => AlwaysColor,
624 Some("never") => NeverColor,
627 return Some(Err(format!(
628 "argument for --color must be auto, always, or never (was \
// --quiet defaults the format to terse; the json format is nightly-only.
635 let format = match matches.opt_str("format").as_ref().map(|s| &**s) {
636 None if quiet => OutputFormat::Terse,
637 Some("pretty") | None => OutputFormat::Pretty,
638 Some("terse") => OutputFormat::Terse,
642 "The \"json\" format is only accepted on the nightly compiler".into(),
649 return Some(Err(format!(
650 "argument for --format must be pretty, terse, or json (was \
657 let test_opts = TestOpts {
661 exclude_should_panic,
670 skip: matches.opt_strs("skip"),
671 options: Options::new(),
677 #[derive(Clone, PartialEq)]
678 pub struct BenchSamples {
// Summary statistics (median, min, max, ...) of the ns-per-iteration samples.
679 ns_iter_summ: stats::Summary,
683 #[derive(Clone, PartialEq)]
684 pub enum TestResult {
690 TrBench(BenchSamples),
// NOTE(review): asserts Send for TestResult; the safety justification is
// not visible in this extract — treat with care before modifying.
693 unsafe impl Send for TestResult {}
// Output sink: either a terminfo-aware terminal or a raw writer.
695 enum OutputLocation<T> {
696 Pretty(Box<term::StdoutTerminal>),
// Forward Write calls to whichever sink is active.
700 impl<T: Write> Write for OutputLocation<T> {
701 fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
703 Pretty(ref mut term) => term.write(buf),
704 Raw(ref mut stdout) => stdout.write(buf),
708 fn flush(&mut self) -> io::Result<()> {
710 Pretty(ref mut term) => term.flush(),
711 Raw(ref mut stdout) => stdout.flush(),
// Mutable tally of a run: counters, collected failures, and the optional
// logfile (most counter fields are not visible in this extract).
716 struct ConsoleTestState {
717 log_out: Option<File>,
726 failures: Vec<(TestDesc, Vec<u8>)>,
727 not_failures: Vec<(TestDesc, Vec<u8>)>,
731 impl ConsoleTestState {
// Open the logfile (if requested) up front so creation errors surface early.
732 pub fn new(opts: &TestOpts) -> io::Result<ConsoleTestState> {
733 let log_out = match opts.logfile {
734 Some(ref path) => Some(File::create(path)?),
738 Ok(ConsoleTestState {
747 metrics: MetricMap::new(),
748 failures: Vec::new(),
749 not_failures: Vec::new(),
750 options: opts.options,
// Append a message to the logfile; presumably a no-op when no logfile was
// requested — the `None` arm is not visible in this extract, TODO confirm.
754 pub fn write_log<S: AsRef<str>>(&mut self, msg: S) -> io::Result<()> {
755 let msg = msg.as_ref();
758 Some(ref mut o) => o.write_all(msg.as_bytes()),
// Log one line per finished test: a human-readable result plus its name.
762 pub fn write_log_result(&mut self, test: &TestDesc, result: &TestResult) -> io::Result<()> {
763 self.write_log(format!(
766 TrOk => "ok".to_owned(),
767 TrFailed => "failed".to_owned(),
768 TrFailedMsg(ref msg) => format!("failed: {}", msg),
769 TrIgnored => "ignored".to_owned(),
770 TrAllowedFail => "failed (allowed)".to_owned(),
771 TrBench(ref bs) => fmt_bench_samples(bs),
// Total of all finished tests, whatever their outcome.
777 fn current_test_count(&self) -> usize {
778 self.passed + self.failed + self.ignored + self.measured + self.allowed_fail
782 // Format a number with thousands separators
783 fn fmt_thousands_sep(mut n: usize, sep: char) -> String {
785 let mut output = String::new();
786 let mut trailing = false;
// Walk digit groups from most significant (10^9) down to 10^0; once a
// non-zero group has been emitted, later groups are zero-padded to width 3.
787 for &pow in &[9, 6, 3, 0] {
788 let base = 10_usize.pow(pow);
789 if pow == 0 || trailing || n / base != 0 {
791 output.write_fmt(format_args!("{}", n / base)).unwrap();
793 output.write_fmt(format_args!("{:03}", n / base)).unwrap();
// Render one benchmark's results: "NNN ns/iter (+/- DDD)" plus optional MB/s.
806 pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
808 let mut output = String::new();
// Median ns/iter and the max-min spread, both with thousands separators.
810 let median = bs.ns_iter_summ.median as usize;
811 let deviation = (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize;
814 .write_fmt(format_args!(
815 "{:>11} ns/iter (+/- {})",
816 fmt_thousands_sep(median, ','),
817 fmt_thousands_sep(deviation, ',')
// Throughput is presumably appended only when a byte count was provided —
// the guard condition is not visible in this extract; TODO confirm.
822 .write_fmt(format_args!(" = {} MB/s", bs.mb_s))
828 // List the tests to console, and optionally to logfile. Filters are honored.
829 pub fn list_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<()> {
830 let mut output = match term::stdout() {
831 None => Raw(io::stdout()),
832 Some(t) => Pretty(t),
835 let quiet = opts.format == OutputFormat::Terse;
836 let mut st = ConsoleTestState::new(opts)?;
// Print each filtered test as "name: <kind>" and mirror it to the logfile.
841 for test in filter_tests(&opts, tests) {
842 use crate::TestFn::*;
845 desc: TestDesc { name, .. },
849 let fntype = match testfn {
850 StaticTestFn(..) | DynTestFn(..) => {
854 StaticBenchFn(..) | DynBenchFn(..) => {
860 writeln!(output, "{}: {}", name, fntype)?;
861 st.write_log(format!("{} {}\n", fntype, name))?;
// Pluralize a unit word: "1 test" vs "n tests".
864 fn plural(count: u32, s: &str) -> String {
866 1 => format!("{} {}", 1, s),
867 n => format!("{} {}s", n, s),
// When anything was listed, print a blank line and a totals summary.
872 if ntest != 0 || nbench != 0 {
873 writeln!(output, "")?;
879 plural(ntest, "test"),
880 plural(nbench, "benchmark")
887 // A simple console test runner
888 pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<bool> {
// Per-event handler: updates the tally in `st` and forwards to the
// formatter (its opening `fn callback(...)` line is not visible here).
891 st: &mut ConsoleTestState,
892 out: &mut dyn OutputFormatter,
893 ) -> io::Result<()> {
894 match (*event).clone() {
895 TeFiltered(ref filtered_tests) => {
896 st.total = filtered_tests.len();
897 out.write_run_start(filtered_tests.len())
899 TeFilteredOut(filtered_out) => Ok(st.filtered_out = filtered_out),
900 TeWait(ref test) => out.write_test_start(test),
901 TeTimeout(ref test) => out.write_timeout(test),
902 TeResult(test, result, stdout) => {
903 st.write_log_result(&test, &result)?;
904 out.write_result(&test, &result, &*stdout)?;
908 st.not_failures.push((test, stdout));
910 TrIgnored => st.ignored += 1,
911 TrAllowedFail => st.allowed_fail += 1,
// Benchmarks feed their median and spread into the metric map.
913 st.metrics.insert_metric(
914 test.name.as_slice(),
915 bs.ns_iter_summ.median,
916 bs.ns_iter_summ.max - bs.ns_iter_summ.min,
922 st.failures.push((test, stdout));
924 TrFailedMsg(msg) => {
// The failure note is appended to the captured stdout so it is shown
// alongside the test's own output.
926 let mut stdout = stdout;
927 stdout.extend_from_slice(format!("note: {}", msg).as_bytes());
928 st.failures.push((test, stdout));
936 let output = match term::stdout() {
937 None => Raw(io::stdout()),
938 Some(t) => Pretty(t),
// Width of the widest right-padded name, used to align pretty output.
941 let max_name_len = tests
943 .max_by_key(|t| len_if_padded(*t))
944 .map(|t| t.desc.name.as_slice().len())
947 let is_multithreaded = opts.test_threads.unwrap_or_else(get_concurrency) > 1;
949 let mut out: Box<dyn OutputFormatter> = match opts.format {
950 OutputFormat::Pretty => Box::new(PrettyFormatter::new(
956 OutputFormat::Terse => Box::new(TerseFormatter::new(
962 OutputFormat::Json => Box::new(JsonFormatter::new(output)),
964 let mut st = ConsoleTestState::new(opts)?;
// Only right-padded (benchmark) names count toward the column width.
965 fn len_if_padded(t: &TestDescAndFn) -> usize {
966 match t.testfn.padding() {
968 PadOnRight => t.desc.name.as_slice().len(),
972 run_tests(opts, tests, |x| callback(&x, &mut st, &mut *out))?;
// Sanity check: every filtered-in test must have reported a result.
974 assert!(st.current_test_count() == st.total);
976 return out.write_run_finish(&st);
// Regression test: failures must be printed sorted by test name.
980 fn should_sort_failures_before_printing_them() {
981 let test_a = TestDesc {
982 name: StaticTestName("a"),
984 should_panic: ShouldPanic::No,
988 let test_b = TestDesc {
989 name: StaticTestName("b"),
991 should_panic: ShouldPanic::No,
995 let mut out = PrettyFormatter::new(Raw(Vec::new()), false, 10, false);
997 let st = ConsoleTestState {
1006 metrics: MetricMap::new(),
// Deliberately inserted out of order; the formatter must sort them.
1007 failures: vec![(test_b, Vec::new()), (test_a, Vec::new())],
1008 options: Options::new(),
1009 not_failures: Vec::new(),
1012 out.write_failures(&st).unwrap();
// Inspect the captured raw output; "a" must appear before "b".
1013 let s = match out.output_location() {
1014 &Raw(ref m) => String::from_utf8_lossy(&m[..]),
1015 &Pretty(_) => unreachable!(),
1018 let apos = s.find("a").unwrap();
1019 let bpos = s.find("b").unwrap();
1020 assert!(apos < bpos);
// Color is used when forced on, or in auto mode when stdout is a tty and
// output capture is enabled (i.e. not --nocapture).
1023 fn use_color(opts: &TestOpts) -> bool {
1025 AutoColor => !opts.nocapture && stdout_isatty(),
1026 AlwaysColor => true,
1027 NeverColor => false,
// Platform-specific isatty: stubbed on targets with no tty concept,
// libc-based on unix-likes, and a raw winapi call on Windows.
1032 target_os = "cloudabi",
1033 target_os = "redox",
1034 all(target_arch = "wasm32", not(target_os = "emscripten")),
1035 all(target_vendor = "fortanix", target_env = "sgx")
1037 fn stdout_isatty() -> bool {
1038 // FIXME: Implement isatty on Redox and SGX
1042 fn stdout_isatty() -> bool {
1043 unsafe { libc::isatty(libc::STDOUT_FILENO) != 0 }
1046 fn stdout_isatty() -> bool {
1049 type HANDLE = *mut u8;
1050 type LPDWORD = *mut u32;
1051 const STD_OUTPUT_HANDLE: DWORD = -11i32 as DWORD;
1053 fn GetStdHandle(which: DWORD) -> HANDLE;
1054 fn GetConsoleMode(hConsoleHandle: HANDLE, lpMode: LPDWORD) -> BOOL;
// A real console handle accepts GetConsoleMode; redirected output does not.
1057 let handle = GetStdHandle(STD_OUTPUT_HANDLE);
1059 GetConsoleMode(handle, &mut out) != 0
// Events emitted by `run_tests` and consumed by the console callback.
1064 pub enum TestEvent {
1065 TeFiltered(Vec<TestDesc>),
1067 TeResult(TestDesc, TestResult, Vec<u8>),
1068 TeTimeout(TestDesc),
1069 TeFilteredOut(usize),
1072 pub type MonitorMsg = (TestDesc, TestResult, Vec<u8>);
// Shared in-memory buffer used to capture a test's print/panic output;
// every write appends under the mutex.
1074 struct Sink(Arc<Mutex<Vec<u8>>>);
1075 impl Write for Sink {
1076 fn write(&mut self, data: &[u8]) -> io::Result<usize> {
1077 Write::write(&mut *self.0.lock().unwrap(), data)
1079 fn flush(&mut self) -> io::Result<()> {
// Core scheduler: filters tests, runs them (serially or with up to
// `concurrency` in flight), then runs benchmarks serially, reporting
// progress through `callback`.
1084 pub fn run_tests<F>(opts: &TestOpts, tests: Vec<TestDescAndFn>, mut callback: F) -> io::Result<()>
1086 F: FnMut(TestEvent) -> io::Result<()>,
1088 use std::collections::{self, HashMap};
1089 use std::hash::BuildHasherDefault;
1090 use std::sync::mpsc::RecvTimeoutError;
1091 // Use a deterministic hasher
1093 HashMap<TestDesc, Instant, BuildHasherDefault<collections::hash_map::DefaultHasher>>;
1095 let tests_len = tests.len();
1097 let mut filtered_tests = filter_tests(opts, tests);
// Unless benchmarking, benchmarks are demoted to one-shot tests.
1098 if !opts.bench_benchmarks {
1099 filtered_tests = convert_benchmarks_to_tests(filtered_tests);
// Bake each test's padding into its name before reporting.
1102 let filtered_tests = {
1103 let mut filtered_tests = filtered_tests;
1104 for test in filtered_tests.iter_mut() {
1105 test.desc.name = test.desc.name.with_padding(test.testfn.padding());
1111 let filtered_out = tests_len - filtered_tests.len();
1112 callback(TeFilteredOut(filtered_out))?;
1114 let filtered_descs = filtered_tests.iter().map(|t| t.desc.clone()).collect();
1116 callback(TeFiltered(filtered_descs))?;
// Partition tests from benchmarks: benchmarks always run serially, last.
1118 let (filtered_tests, filtered_benchs): (Vec<_>, _) =
1119 filtered_tests.into_iter().partition(|e| match e.testfn {
1120 StaticTestFn(_) | DynTestFn(_) => true,
1124 let concurrency = opts.test_threads.unwrap_or_else(get_concurrency);
// Reversed so `pop()` takes tests in the original (sorted) order.
1126 let mut remaining = filtered_tests;
1127 remaining.reverse();
1128 let mut pending = 0;
1130 let (tx, rx) = channel::<MonitorMsg>();
1132 let mut running_tests: TestMap = HashMap::default();
// Collect (and deregister) tests whose warn-timeout deadline has passed.
1134 fn get_timed_out_tests(running_tests: &mut TestMap) -> Vec<TestDesc> {
1135 let now = Instant::now();
1136 let timed_out = running_tests
1138 .filter_map(|(desc, timeout)| {
1139 if &now >= timeout {
1146 for test in &timed_out {
1147 running_tests.remove(test);
// Time until the earliest outstanding deadline, or None when nothing runs.
1152 fn calc_timeout(running_tests: &TestMap) -> Option<Duration> {
1153 running_tests.values().min().map(|next_timeout| {
1154 let now = Instant::now();
1155 if *next_timeout >= now {
// Serial path: run and collect one test at a time, no timeout tracking.
1163 if concurrency == 1 {
1164 while !remaining.is_empty() {
1165 let test = remaining.pop().unwrap();
1166 callback(TeWait(test.desc.clone()))?;
1167 run_test(opts, !opts.run_tests, test, tx.clone(), Concurrent::No);
1168 let (test, result, stdout) = rx.recv().unwrap();
1169 callback(TeResult(test, result, stdout))?;
// Parallel path: keep up to `concurrency` tests in flight, surfacing
// warn-timeouts while waiting on the result channel.
1172 while pending > 0 || !remaining.is_empty() {
1173 while pending < concurrency && !remaining.is_empty() {
1174 let test = remaining.pop().unwrap();
1175 let timeout = Instant::now() + Duration::from_secs(TEST_WARN_TIMEOUT_S);
1176 running_tests.insert(test.desc.clone(), timeout);
1177 callback(TeWait(test.desc.clone()))?; //here no pad
1178 run_test(opts, !opts.run_tests, test, tx.clone(), Concurrent::Yes);
1184 if let Some(timeout) = calc_timeout(&running_tests) {
1185 res = rx.recv_timeout(timeout);
1186 for test in get_timed_out_tests(&mut running_tests) {
1187 callback(TeTimeout(test))?;
// Anything other than a timeout means a result (or disconnect) arrived.
1189 if res != Err(RecvTimeoutError::Timeout) {
1193 res = rx.recv().map_err(|_| RecvTimeoutError::Disconnected);
1198 let (desc, result, stdout) = res.unwrap();
1199 running_tests.remove(&desc);
1201 callback(TeResult(desc, result, stdout))?;
1206 if opts.bench_benchmarks {
1207 // All benchmarks run at the end, in serial.
1208 for b in filtered_benchs {
1209 callback(TeWait(b.desc.clone()))?;
1210 run_test(opts, false, b, tx.clone(), Concurrent::No);
1211 let (test, result, stdout) = rx.recv().unwrap();
1212 callback(TeResult(test, result, stdout))?;
1218 #[allow(deprecated)]
// Thread count: RUST_TEST_THREADS overrides the detected CPU count; a
// non-positive or unparsable value is rejected (error arm partly visible).
1219 fn get_concurrency() -> usize {
1220 return match env::var("RUST_TEST_THREADS") {
1222 let opt_n: Option<usize> = s.parse().ok();
1224 Some(n) if n > 0 => n,
1226 "RUST_TEST_THREADS is `{}`, should be a positive integer.",
1231 Err(..) => num_cpus(),
// Windows: read the processor count out of GetSystemInfo.
1235 #[allow(nonstandard_style)]
1236 fn num_cpus() -> usize {
1238 struct SYSTEM_INFO {
1239 wProcessorArchitecture: u16,
1242 lpMinimumApplicationAddress: *mut u8,
1243 lpMaximumApplicationAddress: *mut u8,
1244 dwActiveProcessorMask: *mut u8,
1245 dwNumberOfProcessors: u32,
1246 dwProcessorType: u32,
1247 dwAllocationGranularity: u32,
1248 wProcessorLevel: u16,
1249 wProcessorRevision: u16,
1252 fn GetSystemInfo(info: *mut SYSTEM_INFO) -> i32;
1255 let mut sysinfo = std::mem::zeroed();
1256 GetSystemInfo(&mut sysinfo);
1257 sysinfo.dwNumberOfProcessors as usize
1261 #[cfg(target_os = "redox")]
1262 fn num_cpus() -> usize {
1263 // FIXME: Implement num_cpus on Redox
1268 all(target_arch = "wasm32", not(target_os = "emscripten")),
1269 all(target_vendor = "fortanix", target_env = "sgx")
1271 fn num_cpus() -> usize {
// Most unix-likes: sysconf(_SC_NPROCESSORS_ONLN).
1276 target_os = "android",
1277 target_os = "cloudabi",
1278 target_os = "emscripten",
1279 target_os = "fuchsia",
1281 target_os = "linux",
1282 target_os = "macos",
1283 target_os = "solaris"
1285 fn num_cpus() -> usize {
1286 unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as usize }
// BSDs: sysconf as a starting value, then sysctl(CTL_HW, HW_NCPU).
1290 target_os = "freebsd",
1291 target_os = "dragonfly",
1292 target_os = "bitrig",
1293 target_os = "netbsd"
1295 fn num_cpus() -> usize {
1298 let mut cpus: libc::c_uint = 0;
1299 let mut cpus_size = std::mem::size_of_val(&cpus);
1302 cpus = libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as libc::c_uint;
1305 let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
1310 &mut cpus as *mut _ as *mut _,
1311 &mut cpus_size as *mut _ as *mut _,
// OpenBSD: sysctl(CTL_HW, HW_NCPU) only.
1323 #[cfg(target_os = "openbsd")]
1324 fn num_cpus() -> usize {
1327 let mut cpus: libc::c_uint = 0;
1328 let mut cpus_size = std::mem::size_of_val(&cpus);
1329 let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
1335 &mut cpus as *mut _ as *mut _,
1336 &mut cpus_size as *mut _ as *mut _,
// Remaining targets: bodies are not visible in this extract.
1347 #[cfg(target_os = "haiku")]
1348 fn num_cpus() -> usize {
1353 #[cfg(target_os = "l4re")]
1354 fn num_cpus() -> usize {
// Apply the name filter, skip filters, should_panic exclusion, and the
// ignored-test policy; the surviving tests are returned sorted by name.
1360 pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
1361 let mut filtered = tests;
// Exact match or substring match, depending on --exact.
1362 let matches_filter = |test: &TestDescAndFn, filter: &str| {
1363 let test_name = test.desc.name.as_slice();
1365 match opts.filter_exact {
1366 true => test_name == filter,
1367 false => test_name.contains(filter),
1371 // Remove tests that don't match the test filter
1372 if let Some(ref filter) = opts.filter {
1373 filtered.retain(|test| matches_filter(test, filter));
1376 // Skip tests that match any of the skip filters
1377 filtered.retain(|test| !opts.skip.iter().any(|sf| matches_filter(test, sf)));
1379 // Excludes #[should_panic] tests
1380 if opts.exclude_should_panic {
1381 filtered.retain(|test| test.desc.should_panic == ShouldPanic::No);
1384 // maybe unignore tests
1385 match opts.run_ignored {
// Yes: run everything, clearing the ignore flag.
1386 RunIgnored::Yes => {
1389 .for_each(|test| test.desc.ignore = false);
// Only: keep just the ignored tests, then clear their ignore flag.
1391 RunIgnored::Only => {
1392 filtered.retain(|test| test.desc.ignore);
1395 .for_each(|test| test.desc.ignore = false);
1397 RunIgnored::No => {}
1400 // Sort the tests alphabetically
1401 filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));
1406 pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
1407 // convert benchmarks to tests, if we're not benchmarking them
// Each benchmark becomes a dynamic test that runs the bench body exactly
// once (via `bench::run_once`), inside the short-backtrace frame.
1411 let testfn = match x.testfn {
1412 DynBenchFn(bench) => DynTestFn(Box::new(move || {
1413 bench::run_once(|b| __rust_begin_short_backtrace(|| bench.run(b)))
1415 StaticBenchFn(benchfn) => DynTestFn(Box::new(move || {
1416 bench::run_once(|b| __rust_begin_short_backtrace(|| benchfn(b)))
// Run one test, reporting its outcome over `monitor_ch`. (The `fn run_test`
// opening line is not visible in this extract — only its trailing params.)
1431 test: TestDescAndFn,
1432 monitor_ch: Sender<MonitorMsg>,
1433 concurrency: Concurrent,
1435 let TestDescAndFn { desc, testfn } = test;
// On wasm (except emscripten) panics abort, so should_panic tests are
// force-ignored rather than taking down the whole runner.
1437 let ignore_because_panic_abort = cfg!(target_arch = "wasm32")
1438 && !cfg!(target_os = "emscripten")
1439 && desc.should_panic != ShouldPanic::No;
1441 if force_ignore || desc.ignore || ignore_because_panic_abort {
1442 monitor_ch.send((desc, TrIgnored, Vec::new())).unwrap();
// Inner runner: captures stdio, invokes the test under catch_unwind, and
// sends the computed result plus captured output back on the channel.
1448 monitor_ch: Sender<MonitorMsg>,
1450 testfn: Box<dyn FnBox() + Send>,
1451 concurrency: Concurrent,
1453 // Buffer for capturing standard I/O
1454 let data = Arc::new(Mutex::new(Vec::new()));
1455 let data2 = data.clone();
1457 let name = desc.name.clone();
1458 let runtest = move || {
// Redirect print/panic output into the shared buffer unless --nocapture.
1459 let oldio = if !nocapture {
1461 io::set_print(Some(Box::new(Sink(data2.clone())))),
1462 io::set_panic(Some(Box::new(Sink(data2)))),
1468 let result = catch_unwind(AssertUnwindSafe(testfn));
// Restore the original stdio hooks before reporting.
1470 if let Some((printio, panicio)) = oldio {
1471 io::set_print(printio);
1472 io::set_panic(panicio);
1475 let test_result = calc_result(&desc, result);
1476 let stdout = data.lock().unwrap().to_vec();
1478 .send((desc.clone(), test_result, stdout))
1482 // If the platform is single-threaded we're just going to run
1483 // the test synchronously, regardless of the concurrency
1485 let supports_threads = !cfg!(target_os = "emscripten") && !cfg!(target_arch = "wasm32");
// Spawn a named thread per test when running concurrently; otherwise run
// inline on this thread.
1486 if concurrency == Concurrent::Yes && supports_threads {
1487 let cfg = thread::Builder::new().name(name.as_slice().to_owned());
1488 cfg.spawn(runtest).unwrap();
// Dispatch on test kind: benchmarks go through bench::benchmark, plain
// tests through run_test_inner.
1495 DynBenchFn(bencher) => {
1496 crate::bench::benchmark(desc, monitor_ch, opts.nocapture, |harness| {
1497 bencher.run(harness)
1500 StaticBenchFn(benchfn) => {
1501 crate::bench::benchmark(desc, monitor_ch, opts.nocapture, |harness| {
1502 (benchfn.clone())(harness)
1506 let cb = move || __rust_begin_short_backtrace(f);
1507 run_test_inner(desc, monitor_ch, opts.nocapture, Box::new(cb), concurrency)
1509 StaticTestFn(f) => run_test_inner(
1513 Box::new(move || __rust_begin_short_backtrace(f)),
1519 /// Fixed frame used to clean the backtrace with `RUST_BACKTRACE=1`.
// NOTE(review): the body is elided in this excerpt — presumably it just
// invokes `f()`; confirm against the full file.
1521 fn __rust_begin_short_backtrace<F: FnOnce()>(f: F) {
// Maps a test's `should_panic` expectation plus the `catch_unwind` outcome to
// a `TestResult`. (Fragment: several match arms and closing lines are elided
// from this excerpt.)
1525 fn calc_result(desc: &TestDesc, task_result: Result<(), Box<dyn Any + Send>>) -> TestResult {
1526 match (&desc.should_panic, task_result) {
// Passing cases: completed without panic when none expected, or panicked
// when a panic (with any message) was expected.
1527 (&ShouldPanic::No, Ok(())) | (&ShouldPanic::Yes, Err(_)) => TrOk,
// Expected panic with a specific message: the payload may be a `String` or a
// `&'static str`; check that it contains the expected substring `msg`.
1528 (&ShouldPanic::YesWithMessage(msg), Err(ref err)) => {
1530 .downcast_ref::<String>()
1532 .or_else(|| err.downcast_ref::<&'static str>().map(|e| *e))
1533 .map(|e| e.contains(msg))
1538 if desc.allow_fail {
1541 TrFailedMsg(format!("Panic did not include expected string '{}'", msg))
// Remaining outcomes count as failures unless the test opted into allow_fail.
1545 _ if desc.allow_fail => TrAllowedFail,
// Map from metric name to its measured `Metric { value, noise }`, ordered by
// name (BTreeMap) so formatted output is deterministic.
1550 #[derive(Clone, PartialEq)]
1551 pub struct MetricMap(BTreeMap<String, Metric>);
// Creates an empty metric map.
1554 pub fn new() -> MetricMap {
1555 MetricMap(BTreeMap::new())
1558 /// Insert a named `value` (+/- `noise`) metric into the map. The value
1559 /// must be non-negative. The `noise` indicates the uncertainty of the
1560 /// metric, which doubles as the "noise range" of acceptable
1561 /// pairwise-regressions on this named value, when comparing from one
1562 /// metric to the next using `compare_to_old`.
1564 /// If `noise` is positive, then it means this metric is of a value
1565 /// you want to see grow smaller, so a change larger than `noise` in the
1566 /// positive direction represents a regression.
1568 /// If `noise` is negative, then it means this metric is of a value
1569 /// you want to see grow larger, so a change larger than `noise` in the
1570 /// negative direction represents a regression.
1571 pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
1572 let m = Metric { value, noise };
1573 self.0.insert(name.to_owned(), m);
// Renders every metric as a "name: value (+/- noise)" string. (Fragment: the
// final joining of the collected Vec into one String is elided here.)
1576 pub fn fmt_metrics(&self) -> String {
1580 .map(|(k, v)| format!("{}: {} (+/- {})", *k, v.value, v.noise))
1581 .collect::<Vec<_>>();
1588 pub use std::hint::black_box;
1591 /// Callback for benchmark functions to run in their body.
// In `Single` mode the body runs exactly once; otherwise the auto-benchmark
// driver `iter` produces a statistical summary that is stored on `self`.
1592 pub fn iter<T, F>(&mut self, mut inner: F)
1596 if self.mode == BenchMode::Single {
1597 ns_iter_inner(&mut inner, 1);
1601 self.summary = Some(iter(&mut inner));
// Runs the benchmark closure against `self` and returns the summary that the
// closure's `b.iter(...)` call recorded (None if `iter` was never called).
1604 pub fn bench<F>(&mut self, mut f: F) -> Option<stats::Summary>
1606 F: FnMut(&mut Bencher),
1609 return self.summary;
/// Converts a `Duration` into a total number of nanoseconds.
fn ns_from_dur(dur: Duration) -> u64 {
    let whole_secs_as_ns = dur.as_secs() * 1_000_000_000;
    whole_secs_as_ns + u64::from(dur.subsec_nanos())
}
// Times `k` invocations of `inner` and returns the elapsed wall-clock time in
// nanoseconds. (Fragment: the `0..k` loop body is elided in this excerpt.)
1617 fn ns_iter_inner<T, F>(inner: &mut F, k: u64) -> u64
1621 let start = Instant::now();
1625 return ns_from_dur(start.elapsed());
// Auto-benchmark driver (fragment): repeatedly times `inner`, growing the
// iteration count until the timing distribution converges or the 3s budget
// is exhausted, and returns a statistical summary. Several structural lines
// (loop header, closing braces) are elided from this excerpt.
1628 pub fn iter<T, F>(inner: &mut F) -> stats::Summary
1632 // Initial bench run to get ballpark figure.
1633 let ns_single = ns_iter_inner(inner, 1);
1635 // Try to estimate iter count for 1ms falling back to 1m
1636 // iterations if first run took < 1ns.
1637 let ns_target_total = 1_000_000; // 1ms
1638 let mut n = ns_target_total / cmp::max(1, ns_single);
1640 // if the first run took more than 1ms we don't want to just
1641 // be left doing 0 iterations on every loop. The unfortunate
1642 // side effect of not being able to do as many runs is
1643 // automatically handled by the statistical analysis below
1644 // (i.e., larger error bars).
1647 let mut total_run = Duration::new(0, 0);
1648 let samples: &mut [f64] = &mut [0.0_f64; 50];
1650 let loop_start = Instant::now();
// First pass: 50 samples of per-iteration time at `n` iterations each.
1652 for p in &mut *samples {
1653 *p = ns_iter_inner(inner, n) as f64 / n as f64;
// Winsorize clips the most extreme 5% of samples before summarizing.
1656 stats::winsorize(samples, 5.0);
1657 let summ = stats::Summary::new(samples);
// Second pass at 5*n iterations; used to check convergence against `summ`.
1659 for p in &mut *samples {
1660 let ns = ns_iter_inner(inner, 5 * n);
1661 *p = ns as f64 / (5 * n) as f64;
1664 stats::winsorize(samples, 5.0);
1665 let summ5 = stats::Summary::new(samples);
1667 let loop_run = loop_start.elapsed();
1669 // If we've run for 100ms and seem to have converged to a
// stable answer (low dispersion, both passes agree), accept the result.
1671 if loop_run > Duration::from_millis(100)
1672 && summ.median_abs_dev_pct < 1.0
1673 && summ.median - summ5.median < summ5.median_abs_dev
1678 total_run = total_run + loop_run;
1679 // Longest we ever run for is 3s.
1680 if total_run > Duration::from_secs(3) {
1684 // If we overflow here just return the results so far. We check a
1685 // multiplier of 10 because we're about to multiply by 2 and the
1686 // next iteration of the loop will also multiply by 5 (to calculate
1687 // the summ5 result)
1688 n = match n.checked_mul(10) {
1698 use super::{BenchMode, BenchSamples, Bencher, MonitorMsg, Sender, Sink, TestDesc, TestResult};
1702 use std::panic::{catch_unwind, AssertUnwindSafe};
1703 use std::sync::{Arc, Mutex};
// Runs one benchmark: captures stdio, drives the closure through
// `Bencher::bench`, converts the recorded summary (or panic) into a
// `TestResult`, and reports it together with the captured output over
// `monitor_ch`. (Fragment: some struct-literal fields and match-arm
// delimiters are elided in this excerpt.)
1705 pub fn benchmark<F>(desc: TestDesc, monitor_ch: Sender<MonitorMsg>, nocapture: bool, f: F)
1707 F: FnMut(&mut Bencher),
1709 let mut bs = Bencher {
1710 mode: BenchMode::Auto,
1715 let data = Arc::new(Mutex::new(Vec::new()));
1716 let data2 = data.clone();
// Redirect print/panic output into the shared buffer unless --nocapture.
1718 let oldio = if !nocapture {
1720 io::set_print(Some(Box::new(Sink(data2.clone())))),
1721 io::set_panic(Some(Box::new(Sink(data2)))),
1727 let result = catch_unwind(AssertUnwindSafe(|| bs.bench(f)));
// Restore the previous stdio handlers before reporting the result.
1729 if let Some((printio, panicio)) = oldio {
1730 io::set_print(printio);
1731 io::set_panic(panicio);
1734 let test_result = match result {
1736 Ok(Some(ns_iter_summ)) => {
// Clamp to 1 ns so the MB/s division below cannot divide by zero.
1737 let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
1738 let mb_s = bs.bytes * 1000 / ns_iter;
1740 let bs = BenchSamples {
1742 mb_s: mb_s as usize,
1744 TestResult::TrBench(bs)
1747 // iter not called, so no data.
1748 // FIXME: error in this case?
1749 let samples: &mut [f64] = &mut [0.0_f64; 1];
1750 let bs = BenchSamples {
1751 ns_iter_summ: stats::Summary::new(samples),
1754 TestResult::TrBench(bs)
1756 Err(_) => TestResult::TrFailed,
1759 let stdout = data.lock().unwrap().to_vec();
1760 monitor_ch.send((desc, test_result, stdout)).unwrap();
// Runs a benchmark body exactly once (`Single` mode), discarding any summary;
// used when benches are executed as plain tests. (Fragment: the call into
// `bs.bench(f)` is elided in this excerpt.)
1763 pub fn run_once<F>(f: F)
1765 F: FnMut(&mut Bencher),
1767 let mut bs = Bencher {
1768 mode: BenchMode::Single,
1780 filter_tests, parse_opts, run_test, DynTestFn, DynTestName, MetricMap, RunIgnored,
1781 ShouldPanic, StaticTestName, TestDesc, TestDescAndFn, TestOpts, TrFailed, TrFailedMsg,
1785 use crate::Concurrent;
1786 use std::sync::mpsc::channel;
// Test fixture: builds two no-op `DynTestFn` tests named "1" and "2".
// NOTE(review): the `ignore:` fields themselves are on lines elided from this
// excerpt — the name implies "1" is ignored and "2" is not; confirm.
1788 fn one_ignored_one_unignored_test() -> Vec<TestDescAndFn> {
1792 name: StaticTestName("1"),
1794 should_panic: ShouldPanic::No,
1797 testfn: DynTestFn(Box::new(move || {})),
1801 name: StaticTestName("2"),
1803 should_panic: ShouldPanic::No,
1806 testfn: DynTestFn(Box::new(move || {})),
// Runs a single test through `run_test` and asserts the reported result is
// not TrOk. (Fragment: the `ignore` flag and the body of `f` are elided.)
1812 pub fn do_not_run_ignored_tests() {
1816 let desc = TestDescAndFn {
1818 name: StaticTestName("whatever"),
1820 should_panic: ShouldPanic::No,
1823 testfn: DynTestFn(Box::new(f)),
1825 let (tx, rx) = channel();
1826 run_test(&TestOpts::new(), false, desc, tx, Concurrent::No);
1827 let (_, res, _) = rx.recv().unwrap();
1828 assert!(res != TrOk);
// An ignored test run through `run_test` must report TrIgnored.
// (Fragment: the `ignore: true` field is on a line elided from this excerpt.)
1832 pub fn ignored_tests_result_in_ignored() {
1834 let desc = TestDescAndFn {
1836 name: StaticTestName("whatever"),
1838 should_panic: ShouldPanic::No,
1841 testfn: DynTestFn(Box::new(f)),
1843 let (tx, rx) = channel();
1844 run_test(&TestOpts::new(), false, desc, tx, Concurrent::No);
1845 let (_, res, _) = rx.recv().unwrap();
1846 assert!(res == TrIgnored);
// A panicking test marked ShouldPanic::Yes must report TrOk.
// (Fragment: the panicking body of `f` is elided from this excerpt.)
1850 fn test_should_panic() {
1854 let desc = TestDescAndFn {
1856 name: StaticTestName("whatever"),
1858 should_panic: ShouldPanic::Yes,
1861 testfn: DynTestFn(Box::new(f)),
1863 let (tx, rx) = channel();
1864 run_test(&TestOpts::new(), false, desc, tx, Concurrent::No);
1865 let (_, res, _) = rx.recv().unwrap();
1866 assert!(res == TrOk);
// A panic whose message contains the expected substring ("error message" is
// a substring of "an error message") must report TrOk.
1870 fn test_should_panic_good_message() {
1872 panic!("an error message");
1874 let desc = TestDescAndFn {
1876 name: StaticTestName("whatever"),
1878 should_panic: ShouldPanic::YesWithMessage("error message"),
1881 testfn: DynTestFn(Box::new(f)),
1883 let (tx, rx) = channel();
1884 run_test(&TestOpts::new(), false, desc, tx, Concurrent::No);
1885 let (_, res, _) = rx.recv().unwrap();
1886 assert!(res == TrOk);
// A panic whose message lacks the expected substring must fail with the
// exact "Panic did not include expected string '…'" message that
// `calc_result` produces.
1890 fn test_should_panic_bad_message() {
1892 panic!("an error message");
1894 let expected = "foobar";
1895 let failed_msg = "Panic did not include expected string";
1896 let desc = TestDescAndFn {
1898 name: StaticTestName("whatever"),
1900 should_panic: ShouldPanic::YesWithMessage(expected),
1903 testfn: DynTestFn(Box::new(f)),
1905 let (tx, rx) = channel();
1906 run_test(&TestOpts::new(), false, desc, tx, Concurrent::No);
1907 let (_, res, _) = rx.recv().unwrap();
1908 assert!(res == TrFailedMsg(format!("{} '{}'", failed_msg, expected)));
// A test that returns normally despite ShouldPanic::Yes must report TrFailed.
// (Fragment: the non-panicking body of `f` is elided from this excerpt.)
1912 fn test_should_panic_but_succeeds() {
1914 let desc = TestDescAndFn {
1916 name: StaticTestName("whatever"),
1918 should_panic: ShouldPanic::Yes,
1921 testfn: DynTestFn(Box::new(f)),
1923 let (tx, rx) = channel();
1924 run_test(&TestOpts::new(), false, desc, tx, Concurrent::No);
1925 let (_, res, _) = rx.recv().unwrap();
1926 assert!(res == TrFailed);
// `--ignored` on the command line must set run_ignored to RunIgnored::Only.
1930 fn parse_ignored_flag() {
1932 "progname".to_string(),
1933 "filter".to_string(),
1934 "--ignored".to_string(),
// Double unwrap: parse_opts returns Option<Result<TestOpts, _>> here.
1936 let opts = parse_opts(&args).unwrap().unwrap();
1937 assert_eq!(opts.run_ignored, RunIgnored::Only);
// `--include-ignored` (gated behind -Zunstable-options) must set run_ignored
// to RunIgnored::Yes.
1941 fn parse_include_ignored_flag() {
1943 "progname".to_string(),
1944 "filter".to_string(),
1945 "-Zunstable-options".to_string(),
1946 "--include-ignored".to_string(),
1948 let opts = parse_opts(&args).unwrap().unwrap();
1949 assert_eq!(opts.run_ignored, RunIgnored::Yes);
// With RunIgnored::Only, filter_tests keeps only the previously-ignored test
// ("1") and clears its ignore flag so it will actually run.
1953 pub fn filter_for_ignored_option() {
1954 // When we run ignored tests the test filter should filter out all the
1955 // unignored tests and flip the ignore flag on the rest to false
1957 let mut opts = TestOpts::new();
1958 opts.run_tests = true;
1959 opts.run_ignored = RunIgnored::Only;
1961 let tests = one_ignored_one_unignored_test();
1962 let filtered = filter_tests(&opts, tests);
1964 assert_eq!(filtered.len(), 1);
1965 assert_eq!(filtered[0].desc.name.to_string(), "1");
1966 assert!(!filtered[0].desc.ignore);
// With RunIgnored::Yes, filter_tests keeps every test and clears the ignore
// flag on all of them.
1970 pub fn run_include_ignored_option() {
1971 // When we "--include-ignored" tests, the ignore flag should be set to false on
1972 // all tests and no test filtered out
1974 let mut opts = TestOpts::new();
1975 opts.run_tests = true;
1976 opts.run_ignored = RunIgnored::Yes;
1978 let tests = one_ignored_one_unignored_test();
1979 let filtered = filter_tests(&opts, tests);
1981 assert_eq!(filtered.len(), 2);
1982 assert!(!filtered[0].desc.ignore);
1983 assert!(!filtered[1].desc.ignore);
// With exclude_should_panic set, filter_tests must drop every test whose
// descriptor has should_panic != ShouldPanic::No (here: the added test "3").
1987 pub fn exclude_should_panic_option() {
1988 let mut opts = TestOpts::new();
1989 opts.run_tests = true;
1990 opts.exclude_should_panic = true;
1992 let mut tests = one_ignored_one_unignored_test();
1993 tests.push(TestDescAndFn {
1995 name: StaticTestName("3"),
1997 should_panic: ShouldPanic::Yes,
2000 testfn: DynTestFn(Box::new(move || {})),
2003 let filtered = filter_tests(&opts, tests);
2005 assert_eq!(filtered.len(), 2);
2006 assert!(filtered.iter().all(|test| test.desc.should_panic == ShouldPanic::No));
// Substring filtering vs exact filtering over the names "base", "base::test",
// "base::test1", "base::test2". (Fragment: the TestOpts literals that set the
// `filter_exact` flag are on lines elided from this excerpt.)
2010 pub fn exact_filter_match() {
2011 fn tests() -> Vec<TestDescAndFn> {
2012 vec!["base", "base::test", "base::test1", "base::test2"]
2014 .map(|name| TestDescAndFn {
2016 name: StaticTestName(name),
2018 should_panic: ShouldPanic::No,
2021 testfn: DynTestFn(Box::new(move || {})),
// Substring mode: every name containing the pattern matches.
2026 let substr = filter_tests(
2028 filter: Some("base".into()),
2033 assert_eq!(substr.len(), 4);
2035 let substr = filter_tests(
2037 filter: Some("bas".into()),
2042 assert_eq!(substr.len(), 4);
2044 let substr = filter_tests(
2046 filter: Some("::test".into()),
2051 assert_eq!(substr.len(), 3);
2053 let substr = filter_tests(
2055 filter: Some("base::test".into()),
2060 assert_eq!(substr.len(), 3);
// Exact mode: only a name equal to the whole pattern matches.
2062 let exact = filter_tests(
2064 filter: Some("base".into()),
2070 assert_eq!(exact.len(), 1);
2072 let exact = filter_tests(
2074 filter: Some("bas".into()),
2080 assert_eq!(exact.len(), 0);
2082 let exact = filter_tests(
2084 filter: Some("::test".into()),
2090 assert_eq!(exact.len(), 0);
2092 let exact = filter_tests(
2094 filter: Some("base::test".into()),
2100 assert_eq!(exact.len(), 1);
// filter_tests must return the tests sorted lexicographically by name: the
// shuffled `names` list must come back in the `expected` order.
2104 pub fn sort_tests() {
2105 let mut opts = TestOpts::new();
2106 opts.run_tests = true;
// Deliberately unsorted input names.
2109 "sha1::test".to_string(),
2110 "isize::test_to_str".to_string(),
2111 "isize::test_pow".to_string(),
2112 "test::do_not_run_ignored_tests".to_string(),
2113 "test::ignored_tests_result_in_ignored".to_string(),
2114 "test::first_free_arg_should_be_a_filter".to_string(),
2115 "test::parse_ignored_flag".to_string(),
2116 "test::parse_include_ignored_flag".to_string(),
2117 "test::filter_for_ignored_option".to_string(),
2118 "test::run_include_ignored_option".to_string(),
2119 "test::sort_tests".to_string(),
2123 let mut tests = Vec::new();
2124 for name in &names {
2125 let test = TestDescAndFn {
2127 name: DynTestName((*name).clone()),
2129 should_panic: ShouldPanic::No,
2132 testfn: DynTestFn(Box::new(testfn)),
2138 let filtered = filter_tests(&opts, tests);
// The same names in lexicographic order.
2140 let expected = vec![
2141 "isize::test_pow".to_string(),
2142 "isize::test_to_str".to_string(),
2143 "sha1::test".to_string(),
2144 "test::do_not_run_ignored_tests".to_string(),
2145 "test::filter_for_ignored_option".to_string(),
2146 "test::first_free_arg_should_be_a_filter".to_string(),
2147 "test::ignored_tests_result_in_ignored".to_string(),
2148 "test::parse_ignored_flag".to_string(),
2149 "test::parse_include_ignored_flag".to_string(),
2150 "test::run_include_ignored_option".to_string(),
2151 "test::sort_tests".to_string(),
2154 for (a, b) in expected.iter().zip(filtered) {
2155 assert!(*a == b.desc.name.to_string());
// Exercises MetricMap::insert_metric across two maps: positive noise marks
// want-smaller metrics, negative noise marks want-larger metrics, covering
// regression and improvement in both directions.
// NOTE(review): any comparison calls/assertions are elided from this excerpt.
2160 pub fn test_metricmap_compare() {
2161 let mut m1 = MetricMap::new();
2162 let mut m2 = MetricMap::new();
2163 m1.insert_metric("in-both-noise", 1000.0, 200.0);
2164 m2.insert_metric("in-both-noise", 1100.0, 200.0);
2166 m1.insert_metric("in-first-noise", 1000.0, 2.0);
2167 m2.insert_metric("in-second-noise", 1000.0, 2.0);
2169 m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
2170 m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);
2172 m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
2173 m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);
2175 m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
2176 m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);
2178 m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
2179 m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);
// Bench smoke tests (fragments): `run_once` with and without `b.iter`, and
// `bench::benchmark` reporting a result over a channel with and without
// iteration. Bodies and the `rx.recv()` checks are partially elided here.
2183 pub fn test_bench_once_no_iter() {
2184 fn f(_: &mut Bencher) {}
2189 pub fn test_bench_once_iter() {
2190 fn f(b: &mut Bencher) {
2197 pub fn test_bench_no_iter() {
2198 fn f(_: &mut Bencher) {}
2200 let (tx, rx) = channel();
2202 let desc = TestDesc {
2203 name: StaticTestName("f"),
2205 should_panic: ShouldPanic::No,
// nocapture = true: don't redirect stdio while benchmarking in tests.
2209 crate::bench::benchmark(desc, tx, true, f);
2214 pub fn test_bench_iter() {
2215 fn f(b: &mut Bencher) {
2219 let (tx, rx) = channel();
2221 let desc = TestDesc {
2222 name: StaticTestName("f"),
2224 should_panic: ShouldPanic::No,
2228 crate::bench::benchmark(desc, tx, true, f);