1 //! Support code for rustc's built in unit-test and micro-benchmarking
4 //! Almost all user code will only be interested in `Bencher` and
5 //! `black_box`. All other interactions (such as writing tests and
6 //! benchmarks themselves) should be done via the `#[test]` and
7 //! `#[bench]` attributes.
9 //! See the [Testing Chapter](../book/ch11-00-testing.html) of the book for more details.
11 // Currently, not much of this is meant for users. It is intended to
12 // support the simplest interface possible for representing and
13 // running tests while providing a base that other test frameworks may
16 // N.B., this is also specified in this crate's Cargo.toml, but libsyntax contains logic specific to
17 // this crate, which relies on this attribute (rather than the value of `--crate-name` passed by
18 // cargo) to detect this crate.
20 #![deny(rust_2018_idioms)]
21 #![crate_name = "test"]
22 #![unstable(feature = "test", issue = "27812")]
23 #![doc(html_root_url = "https://doc.rust-lang.org/nightly/", test(attr(deny(warnings))))]
26 #![cfg_attr(any(unix, target_os = "cloudabi"), feature(libc, rustc_private))]
28 #![feature(set_stdio)]
29 #![feature(panic_unwind)]
30 #![feature(staged_api)]
31 #![feature(termination_trait_lib)]
35 #[cfg(any(unix, target_os = "cloudabi"))]
39 // FIXME(#54291): rustc and/or LLVM don't yet support building with panic-unwind
40 // on aarch64-pc-windows-msvc, or thumbv7a-pc-windows-msvc
41 // so we don't link libtest against libunwind (for the time being)
42 // even though it means that libtest won't be fully functional on
45 // See also: https://github.com/rust-lang/rust/issues/54190#issuecomment-422904437
46 #[cfg(not(all(windows, any(target_arch = "aarch64", target_arch = "arm"))))]
47 extern crate panic_unwind;
49 pub use self::ColorConfig::*;
50 use self::NamePadding::*;
51 use self::OutputLocation::*;
52 use self::TestEvent::*;
53 pub use self::TestFn::*;
54 pub use self::TestName::*;
55 pub use self::TestResult::*;
59 use std::boxed::FnBox;
61 use std::collections::BTreeMap;
66 use std::io::prelude::*;
67 use std::panic::{catch_unwind, AssertUnwindSafe};
68 use std::path::PathBuf;
70 use std::process::Termination;
71 use std::sync::mpsc::{channel, Sender};
72 use std::sync::{Arc, Mutex};
74 use std::time::{Duration, Instant};
76 const TEST_WARN_TIMEOUT_S: u64 = 60;
77 const QUIET_MODE_MAX_COLUMN: usize = 100; // insert a '\n' after 100 tests in quiet mode
79 // to be used by rustc to compile tests in libtest
82 assert_test_result, filter_tests, parse_opts, run_test, test_main, test_main_static,
83 Bencher, DynTestFn, DynTestName, Metric, MetricMap, Options, RunIgnored, ShouldPanic,
84 StaticBenchFn, StaticTestFn, StaticTestName, TestDesc, TestDescAndFn, TestName, TestOpts,
85 TestResult, TrFailed, TrFailedMsg, TrIgnored, TrOk,
92 use crate::formatters::{JsonFormatter, OutputFormatter, PrettyFormatter, TerseFormatter};
94 /// Whether to execute tests concurrently or not
95 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
101 // The name of a test. By convention this follows the rules for rust
102 // paths; i.e., it should be a series of identifiers separated by double
103 // colons. This way if some test runner wants to arrange the tests
104 // hierarchically it may.
106 #[derive(Clone, PartialEq, Eq, Hash, Debug)]
108 StaticTestName(&'static str),
110 AlignedTestName(Cow<'static, str>, NamePadding),
113 fn as_slice(&self) -> &str {
115 StaticTestName(s) => s,
116 DynTestName(ref s) => s,
117 AlignedTestName(ref s, _) => &*s,
121 fn padding(&self) -> NamePadding {
123 &AlignedTestName(_, p) => p,
128 fn with_padding(&self, padding: NamePadding) -> TestName {
129 let name = match self {
130 &TestName::StaticTestName(name) => Cow::Borrowed(name),
131 &TestName::DynTestName(ref name) => Cow::Owned(name.clone()),
132 &TestName::AlignedTestName(ref name, _) => name.clone(),
135 TestName::AlignedTestName(name, padding)
138 impl fmt::Display for TestName {
139 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
140 fmt::Display::fmt(self.as_slice(), f)
144 #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
145 pub enum NamePadding {
151 fn padded_name(&self, column_count: usize, align: NamePadding) -> String {
152 let mut name = String::from(self.name.as_slice());
153 let fill = column_count.saturating_sub(name.len());
154 let pad = " ".repeat(fill);
165 /// Represents a benchmark function.
166 pub trait TDynBenchFn: Send {
167 fn run(&self, harness: &mut Bencher);
170 // A function that runs a test. If the function returns successfully,
171 // the test succeeds; if the function panics then the test fails. We
172 // may need to come up with a more clever definition of test in order
173 // to support isolation of tests into threads.
176 StaticBenchFn(fn(&mut Bencher)),
177 DynTestFn(Box<dyn FnBox() + Send>),
178 DynBenchFn(Box<dyn TDynBenchFn + 'static>),
182 fn padding(&self) -> NamePadding {
184 StaticTestFn(..) => PadNone,
185 StaticBenchFn(..) => PadOnRight,
186 DynTestFn(..) => PadNone,
187 DynBenchFn(..) => PadOnRight,
192 impl fmt::Debug for TestFn {
193 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
194 f.write_str(match *self {
195 StaticTestFn(..) => "StaticTestFn(..)",
196 StaticBenchFn(..) => "StaticBenchFn(..)",
197 DynTestFn(..) => "DynTestFn(..)",
198 DynBenchFn(..) => "DynBenchFn(..)",
203 /// Manager of the benchmarking runs.
205 /// This is fed into functions marked with `#[bench]` to allow for
206 /// set-up & tear-down before running a piece of code repeatedly via a
211 summary: Option<stats::Summary>,
215 #[derive(Clone, PartialEq, Eq)]
221 #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
222 pub enum ShouldPanic {
225 YesWithMessage(&'static str),
228 // The definition of a single test. A test runner will run a list of
230 #[derive(Clone, Debug, PartialEq, Eq, Hash)]
231 pub struct TestDesc {
234 pub should_panic: ShouldPanic,
235 pub allow_fail: bool,
239 pub struct TestDescAndFn {
244 #[derive(Clone, PartialEq, Debug, Copy)]
251 pub fn new(value: f64, noise: f64) -> Metric {
252 Metric { value, noise }
256 /// In case we want to add other options as well, just add them in this struct.
257 #[derive(Copy, Clone, Debug)]
259 display_output: bool,
263 pub fn new() -> Options {
265 display_output: false,
269 pub fn display_output(mut self, display_output: bool) -> Options {
270 self.display_output = display_output;
275 // The default console test runner. It accepts the command line
276 // arguments and a vector of test_descs.
277 pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>, options: Options) {
278 let mut opts = match parse_opts(args) {
281 eprintln!("error: {}", msg);
287 opts.options = options;
289 if let Err(e) = list_tests_console(&opts, tests) {
290 eprintln!("error: io error when listing tests: {:?}", e);
294 match run_tests_console(&opts, tests) {
296 Ok(false) => process::exit(101),
298 eprintln!("error: io error when listing tests: {:?}", e);
305 // A variant optimized for invocation with a static test vector.
306 // This will panic (intentionally) when fed any dynamic tests, because
307 // it is copying the static values out into a dynamic vector and cannot
308 // copy dynamic values. It is doing this because from this point on
309 // a Vec<TestDescAndFn> is used in order to effect ownership-transfer
310 // semantics into parallel test runners, which in turn requires a Vec<>
311 // rather than a &[].
312 pub fn test_main_static(tests: &[&TestDescAndFn]) {
313 let args = env::args().collect::<Vec<_>>();
314 let owned_tests = tests
316 .map(|t| match t.testfn {
317 StaticTestFn(f) => TestDescAndFn {
318 testfn: StaticTestFn(f),
319 desc: t.desc.clone(),
321 StaticBenchFn(f) => TestDescAndFn {
322 testfn: StaticBenchFn(f),
323 desc: t.desc.clone(),
325 _ => panic!("non-static tests passed to test::test_main_static"),
328 test_main(&args, owned_tests, Options::new())
331 /// Invoked when unit tests terminate. Should panic if the unit
332 /// Tests is considered a failure. By default, invokes `report()`
333 /// and checks for a `0` result.
334 pub fn assert_test_result<T: Termination>(result: T) {
335 let code = result.report();
338 "the test returned a termination value with a non-zero status code ({}) \
339 which indicates a failure",
344 #[derive(Copy, Clone, Debug)]
345 pub enum ColorConfig {
351 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
352 pub enum OutputFormat {
358 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
359 pub enum RunIgnored {
366 pub struct TestOpts {
368 pub filter: Option<String>,
369 pub filter_exact: bool,
370 pub exclude_should_panic: bool,
371 pub run_ignored: RunIgnored,
373 pub bench_benchmarks: bool,
374 pub logfile: Option<PathBuf>,
376 pub color: ColorConfig,
377 pub format: OutputFormat,
378 pub test_threads: Option<usize>,
379 pub skip: Vec<String>,
380 pub options: Options,
385 fn new() -> TestOpts {
390 exclude_should_panic: false,
391 run_ignored: RunIgnored::No,
393 bench_benchmarks: false,
397 format: OutputFormat::Pretty,
400 options: Options::new(),
405 /// Result of parsing the options.
406 pub type OptRes = Result<TestOpts, String>;
408 fn optgroups() -> getopts::Options {
409 let mut opts = getopts::Options::new();
410 opts.optflag("", "include-ignored", "Run ignored and not ignored tests")
411 .optflag("", "ignored", "Run only ignored tests")
412 .optflag("", "exclude-should-panic", "Excludes tests marked as should_panic")
413 .optflag("", "test", "Run tests and not benchmarks")
414 .optflag("", "bench", "Run benchmarks instead of tests")
415 .optflag("", "list", "List all tests and benchmarks")
416 .optflag("h", "help", "Display this message (longer with --help)")
420 "Write logs to the specified file instead \
427 "don't capture stdout/stderr of each \
428 task, allow printing directly",
433 "Number of threads used for running tests \
440 "Skip tests whose names contain FILTER (this flag can \
441 be used multiple times)",
447 "Display one character per test instead of one line. \
448 Alias to --format=terse",
453 "Exactly match filters rather than by substring",
458 "Configure coloring of output:
459 auto = colorize if stdout is a tty and tests are run on serially (default);
460 always = always colorize output;
461 never = never colorize output;",
467 "Configure formatting of output:
468 pretty = Print verbose output;
469 terse = Display one character per test;
470 json = Output a json document",
476 "Enable nightly-only flags:
477 unstable-options = Allow use of experimental features",
483 fn usage(binary: &str, options: &getopts::Options) {
484 let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
488 The FILTER string is tested against the name of all tests, and only those
489 tests whose names contain the filter are run.
491 By default, all tests are run in parallel. This can be altered with the
492 --test-threads flag or the RUST_TEST_THREADS environment variable when running
495 All tests have their standard output and standard error captured by default.
496 This can be overridden with the --nocapture flag or setting RUST_TEST_NOCAPTURE
497 environment variable to a value other than "0". Logging is not captured by default.
501 #[test] - Indicates a function is a test to be run. This function
503 #[bench] - Indicates a function is a benchmark to be run. This
504 function takes one argument (test::Bencher).
505 #[should_panic] - This function (also labeled with #[test]) will only pass if
506 the code causes a panic (an assertion failure or panic!)
507 A message may be provided, which the failure string must
508 contain: #[should_panic(expected = "foo")].
509 #[ignore] - When applied to a function which is already attributed as a
510 test, then the test runner will ignore these tests during
511 normal test runs. Running with --ignored or --include-ignored will run
513 usage = options.usage(&message)
517 // FIXME: Copied from libsyntax until linkage errors are resolved. Issue #47566
518 fn is_nightly() -> bool {
519 // Whether this is a feature-staged build, i.e., on the beta or stable channel
520 let disable_unstable_features = option_env!("CFG_DISABLE_UNSTABLE_FEATURES").is_some();
521 // Whether we should enable unstable features for bootstrapping
522 let bootstrap = env::var("RUSTC_BOOTSTRAP").is_ok();
524 bootstrap || !disable_unstable_features
527 // Parses command line arguments into test options
528 pub fn parse_opts(args: &[String]) -> Option<OptRes> {
529 let mut allow_unstable = false;
530 let opts = optgroups();
531 let args = args.get(1..).unwrap_or(args);
532 let matches = match opts.parse(args) {
534 Err(f) => return Some(Err(f.to_string())),
537 if let Some(opt) = matches.opt_str("Z") {
540 "the option `Z` is only accepted on the nightly compiler".into(),
545 "unstable-options" => {
546 allow_unstable = true;
549 return Some(Err("Unrecognized option to `Z`".into()));
554 if matches.opt_present("h") {
555 usage(&args[0], &opts);
559 let filter = if !matches.free.is_empty() {
560 Some(matches.free[0].clone())
565 let exclude_should_panic = matches.opt_present("exclude-should-panic");
566 if !allow_unstable && exclude_should_panic {
568 "The \"exclude-should-panic\" flag is only accepted on the nightly compiler".into(),
572 let include_ignored = matches.opt_present("include-ignored");
573 if !allow_unstable && include_ignored {
575 "The \"include-ignored\" flag is only accepted on the nightly compiler".into(),
579 let run_ignored = match (include_ignored, matches.opt_present("ignored")) {
582 "the options --include-ignored and --ignored are mutually exclusive".into(),
585 (true, false) => RunIgnored::Yes,
586 (false, true) => RunIgnored::Only,
587 (false, false) => RunIgnored::No,
589 let quiet = matches.opt_present("quiet");
590 let exact = matches.opt_present("exact");
591 let list = matches.opt_present("list");
593 let logfile = matches.opt_str("logfile");
594 let logfile = logfile.map(|s| PathBuf::from(&s));
596 let bench_benchmarks = matches.opt_present("bench");
597 let run_tests = !bench_benchmarks || matches.opt_present("test");
599 let mut nocapture = matches.opt_present("nocapture");
601 nocapture = match env::var("RUST_TEST_NOCAPTURE") {
602 Ok(val) => &val != "0",
607 let test_threads = match matches.opt_str("test-threads") {
608 Some(n_str) => match n_str.parse::<usize>() {
609 Ok(0) => return Some(Err("argument for --test-threads must not be 0".to_string())),
612 return Some(Err(format!(
613 "argument for --test-threads must be a number > 0 \
622 let color = match matches.opt_str("color").as_ref().map(|s| &**s) {
623 Some("auto") | None => AutoColor,
624 Some("always") => AlwaysColor,
625 Some("never") => NeverColor,
628 return Some(Err(format!(
629 "argument for --color must be auto, always, or never (was \
636 let format = match matches.opt_str("format").as_ref().map(|s| &**s) {
637 None if quiet => OutputFormat::Terse,
638 Some("pretty") | None => OutputFormat::Pretty,
639 Some("terse") => OutputFormat::Terse,
643 "The \"json\" format is only accepted on the nightly compiler".into(),
650 return Some(Err(format!(
651 "argument for --format must be pretty, terse, or json (was \
658 let test_opts = TestOpts {
662 exclude_should_panic,
671 skip: matches.opt_strs("skip"),
672 options: Options::new(),
678 #[derive(Clone, PartialEq)]
679 pub struct BenchSamples {
680 ns_iter_summ: stats::Summary,
684 #[derive(Clone, PartialEq)]
685 pub enum TestResult {
691 TrBench(BenchSamples),
694 unsafe impl Send for TestResult {}
696 enum OutputLocation<T> {
697 Pretty(Box<term::StdoutTerminal>),
701 impl<T: Write> Write for OutputLocation<T> {
702 fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
704 Pretty(ref mut term) => term.write(buf),
705 Raw(ref mut stdout) => stdout.write(buf),
709 fn flush(&mut self) -> io::Result<()> {
711 Pretty(ref mut term) => term.flush(),
712 Raw(ref mut stdout) => stdout.flush(),
717 struct ConsoleTestState {
718 log_out: Option<File>,
727 failures: Vec<(TestDesc, Vec<u8>)>,
728 not_failures: Vec<(TestDesc, Vec<u8>)>,
732 impl ConsoleTestState {
733 pub fn new(opts: &TestOpts) -> io::Result<ConsoleTestState> {
734 let log_out = match opts.logfile {
735 Some(ref path) => Some(File::create(path)?),
739 Ok(ConsoleTestState {
748 metrics: MetricMap::new(),
749 failures: Vec::new(),
750 not_failures: Vec::new(),
751 options: opts.options,
755 pub fn write_log<S: AsRef<str>>(&mut self, msg: S) -> io::Result<()> {
756 let msg = msg.as_ref();
759 Some(ref mut o) => o.write_all(msg.as_bytes()),
763 pub fn write_log_result(&mut self, test: &TestDesc, result: &TestResult) -> io::Result<()> {
764 self.write_log(format!(
767 TrOk => "ok".to_owned(),
768 TrFailed => "failed".to_owned(),
769 TrFailedMsg(ref msg) => format!("failed: {}", msg),
770 TrIgnored => "ignored".to_owned(),
771 TrAllowedFail => "failed (allowed)".to_owned(),
772 TrBench(ref bs) => fmt_bench_samples(bs),
778 fn current_test_count(&self) -> usize {
779 self.passed + self.failed + self.ignored + self.measured + self.allowed_fail
783 // Format a number with thousands separators
784 fn fmt_thousands_sep(mut n: usize, sep: char) -> String {
786 let mut output = String::new();
787 let mut trailing = false;
788 for &pow in &[9, 6, 3, 0] {
789 let base = 10_usize.pow(pow);
790 if pow == 0 || trailing || n / base != 0 {
792 output.write_fmt(format_args!("{}", n / base)).unwrap();
794 output.write_fmt(format_args!("{:03}", n / base)).unwrap();
807 pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
809 let mut output = String::new();
811 let median = bs.ns_iter_summ.median as usize;
812 let deviation = (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize;
815 .write_fmt(format_args!(
816 "{:>11} ns/iter (+/- {})",
817 fmt_thousands_sep(median, ','),
818 fmt_thousands_sep(deviation, ',')
823 .write_fmt(format_args!(" = {} MB/s", bs.mb_s))
829 // List the tests to console, and optionally to logfile. Filters are honored.
830 pub fn list_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<()> {
831 let mut output = match term::stdout() {
832 None => Raw(io::stdout()),
833 Some(t) => Pretty(t),
836 let quiet = opts.format == OutputFormat::Terse;
837 let mut st = ConsoleTestState::new(opts)?;
842 for test in filter_tests(&opts, tests) {
843 use crate::TestFn::*;
846 desc: TestDesc { name, .. },
850 let fntype = match testfn {
851 StaticTestFn(..) | DynTestFn(..) => {
855 StaticBenchFn(..) | DynBenchFn(..) => {
861 writeln!(output, "{}: {}", name, fntype)?;
862 st.write_log(format!("{} {}\n", fntype, name))?;
865 fn plural(count: u32, s: &str) -> String {
867 1 => format!("{} {}", 1, s),
868 n => format!("{} {}s", n, s),
873 if ntest != 0 || nbench != 0 {
874 writeln!(output, "")?;
880 plural(ntest, "test"),
881 plural(nbench, "benchmark")
888 // A simple console test runner
889 pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<bool> {
892 st: &mut ConsoleTestState,
893 out: &mut dyn OutputFormatter,
894 ) -> io::Result<()> {
895 match (*event).clone() {
896 TeFiltered(ref filtered_tests) => {
897 st.total = filtered_tests.len();
898 out.write_run_start(filtered_tests.len())
900 TeFilteredOut(filtered_out) => Ok(st.filtered_out = filtered_out),
901 TeWait(ref test) => out.write_test_start(test),
902 TeTimeout(ref test) => out.write_timeout(test),
903 TeResult(test, result, stdout) => {
904 st.write_log_result(&test, &result)?;
905 out.write_result(&test, &result, &*stdout)?;
909 st.not_failures.push((test, stdout));
911 TrIgnored => st.ignored += 1,
912 TrAllowedFail => st.allowed_fail += 1,
914 st.metrics.insert_metric(
915 test.name.as_slice(),
916 bs.ns_iter_summ.median,
917 bs.ns_iter_summ.max - bs.ns_iter_summ.min,
923 st.failures.push((test, stdout));
925 TrFailedMsg(msg) => {
927 let mut stdout = stdout;
928 stdout.extend_from_slice(format!("note: {}", msg).as_bytes());
929 st.failures.push((test, stdout));
937 let output = match term::stdout() {
938 None => Raw(io::stdout()),
939 Some(t) => Pretty(t),
942 let max_name_len = tests
944 .max_by_key(|t| len_if_padded(*t))
945 .map(|t| t.desc.name.as_slice().len())
948 let is_multithreaded = opts.test_threads.unwrap_or_else(get_concurrency) > 1;
950 let mut out: Box<dyn OutputFormatter> = match opts.format {
951 OutputFormat::Pretty => Box::new(PrettyFormatter::new(
957 OutputFormat::Terse => Box::new(TerseFormatter::new(
963 OutputFormat::Json => Box::new(JsonFormatter::new(output)),
965 let mut st = ConsoleTestState::new(opts)?;
966 fn len_if_padded(t: &TestDescAndFn) -> usize {
967 match t.testfn.padding() {
969 PadOnRight => t.desc.name.as_slice().len(),
973 run_tests(opts, tests, |x| callback(&x, &mut st, &mut *out))?;
975 assert!(st.current_test_count() == st.total);
977 return out.write_run_finish(&st);
981 fn should_sort_failures_before_printing_them() {
982 let test_a = TestDesc {
983 name: StaticTestName("a"),
985 should_panic: ShouldPanic::No,
989 let test_b = TestDesc {
990 name: StaticTestName("b"),
992 should_panic: ShouldPanic::No,
996 let mut out = PrettyFormatter::new(Raw(Vec::new()), false, 10, false);
998 let st = ConsoleTestState {
1007 metrics: MetricMap::new(),
1008 failures: vec![(test_b, Vec::new()), (test_a, Vec::new())],
1009 options: Options::new(),
1010 not_failures: Vec::new(),
1013 out.write_failures(&st).unwrap();
1014 let s = match out.output_location() {
1015 &Raw(ref m) => String::from_utf8_lossy(&m[..]),
1016 &Pretty(_) => unreachable!(),
1019 let apos = s.find("a").unwrap();
1020 let bpos = s.find("b").unwrap();
1021 assert!(apos < bpos);
1024 fn use_color(opts: &TestOpts) -> bool {
1026 AutoColor => !opts.nocapture && stdout_isatty(),
1027 AlwaysColor => true,
1028 NeverColor => false,
1033 target_os = "cloudabi",
1034 target_os = "redox",
1035 all(target_arch = "wasm32", not(target_os = "emscripten")),
1036 all(target_vendor = "fortanix", target_env = "sgx")
1038 fn stdout_isatty() -> bool {
1039 // FIXME: Implement isatty on Redox and SGX
1043 fn stdout_isatty() -> bool {
1044 unsafe { libc::isatty(libc::STDOUT_FILENO) != 0 }
1047 fn stdout_isatty() -> bool {
1050 type HANDLE = *mut u8;
1051 type LPDWORD = *mut u32;
1052 const STD_OUTPUT_HANDLE: DWORD = -11i32 as DWORD;
1054 fn GetStdHandle(which: DWORD) -> HANDLE;
1055 fn GetConsoleMode(hConsoleHandle: HANDLE, lpMode: LPDWORD) -> BOOL;
1058 let handle = GetStdHandle(STD_OUTPUT_HANDLE);
1060 GetConsoleMode(handle, &mut out) != 0
1065 pub enum TestEvent {
1066 TeFiltered(Vec<TestDesc>),
1068 TeResult(TestDesc, TestResult, Vec<u8>),
1069 TeTimeout(TestDesc),
1070 TeFilteredOut(usize),
1073 pub type MonitorMsg = (TestDesc, TestResult, Vec<u8>);
1075 struct Sink(Arc<Mutex<Vec<u8>>>);
1076 impl Write for Sink {
1077 fn write(&mut self, data: &[u8]) -> io::Result<usize> {
1078 Write::write(&mut *self.0.lock().unwrap(), data)
1080 fn flush(&mut self) -> io::Result<()> {
1085 pub fn run_tests<F>(opts: &TestOpts, tests: Vec<TestDescAndFn>, mut callback: F) -> io::Result<()>
1087 F: FnMut(TestEvent) -> io::Result<()>,
1089 use std::collections::{self, HashMap};
1090 use std::hash::BuildHasherDefault;
1091 use std::sync::mpsc::RecvTimeoutError;
1092 // Use a deterministic hasher
1094 HashMap<TestDesc, Instant, BuildHasherDefault<collections::hash_map::DefaultHasher>>;
1096 let tests_len = tests.len();
1098 let mut filtered_tests = filter_tests(opts, tests);
1099 if !opts.bench_benchmarks {
1100 filtered_tests = convert_benchmarks_to_tests(filtered_tests);
1103 let filtered_tests = {
1104 let mut filtered_tests = filtered_tests;
1105 for test in filtered_tests.iter_mut() {
1106 test.desc.name = test.desc.name.with_padding(test.testfn.padding());
1112 let filtered_out = tests_len - filtered_tests.len();
1113 callback(TeFilteredOut(filtered_out))?;
1115 let filtered_descs = filtered_tests.iter().map(|t| t.desc.clone()).collect();
1117 callback(TeFiltered(filtered_descs))?;
1119 let (filtered_tests, filtered_benchs): (Vec<_>, _) =
1120 filtered_tests.into_iter().partition(|e| match e.testfn {
1121 StaticTestFn(_) | DynTestFn(_) => true,
1125 let concurrency = opts.test_threads.unwrap_or_else(get_concurrency);
1127 let mut remaining = filtered_tests;
1128 remaining.reverse();
1129 let mut pending = 0;
1131 let (tx, rx) = channel::<MonitorMsg>();
1133 let mut running_tests: TestMap = HashMap::default();
1135 fn get_timed_out_tests(running_tests: &mut TestMap) -> Vec<TestDesc> {
1136 let now = Instant::now();
1137 let timed_out = running_tests
1139 .filter_map(|(desc, timeout)| {
1140 if &now >= timeout {
1147 for test in &timed_out {
1148 running_tests.remove(test);
1153 fn calc_timeout(running_tests: &TestMap) -> Option<Duration> {
1154 running_tests.values().min().map(|next_timeout| {
1155 let now = Instant::now();
1156 if *next_timeout >= now {
1164 if concurrency == 1 {
1165 while !remaining.is_empty() {
1166 let test = remaining.pop().unwrap();
1167 callback(TeWait(test.desc.clone()))?;
1168 run_test(opts, !opts.run_tests, test, tx.clone(), Concurrent::No);
1169 let (test, result, stdout) = rx.recv().unwrap();
1170 callback(TeResult(test, result, stdout))?;
1173 while pending > 0 || !remaining.is_empty() {
1174 while pending < concurrency && !remaining.is_empty() {
1175 let test = remaining.pop().unwrap();
1176 let timeout = Instant::now() + Duration::from_secs(TEST_WARN_TIMEOUT_S);
1177 running_tests.insert(test.desc.clone(), timeout);
1178 callback(TeWait(test.desc.clone()))?; //here no pad
1179 run_test(opts, !opts.run_tests, test, tx.clone(), Concurrent::Yes);
1185 if let Some(timeout) = calc_timeout(&running_tests) {
1186 res = rx.recv_timeout(timeout);
1187 for test in get_timed_out_tests(&mut running_tests) {
1188 callback(TeTimeout(test))?;
1190 if res != Err(RecvTimeoutError::Timeout) {
1194 res = rx.recv().map_err(|_| RecvTimeoutError::Disconnected);
1199 let (desc, result, stdout) = res.unwrap();
1200 running_tests.remove(&desc);
1202 callback(TeResult(desc, result, stdout))?;
1207 if opts.bench_benchmarks {
1208 // All benchmarks run at the end, in serial.
1209 for b in filtered_benchs {
1210 callback(TeWait(b.desc.clone()))?;
1211 run_test(opts, false, b, tx.clone(), Concurrent::No);
1212 let (test, result, stdout) = rx.recv().unwrap();
1213 callback(TeResult(test, result, stdout))?;
1219 #[allow(deprecated)]
1220 fn get_concurrency() -> usize {
1221 return match env::var("RUST_TEST_THREADS") {
1223 let opt_n: Option<usize> = s.parse().ok();
1225 Some(n) if n > 0 => n,
1227 "RUST_TEST_THREADS is `{}`, should be a positive integer.",
1232 Err(..) => num_cpus(),
1236 #[allow(nonstandard_style)]
1237 fn num_cpus() -> usize {
1239 struct SYSTEM_INFO {
1240 wProcessorArchitecture: u16,
1243 lpMinimumApplicationAddress: *mut u8,
1244 lpMaximumApplicationAddress: *mut u8,
1245 dwActiveProcessorMask: *mut u8,
1246 dwNumberOfProcessors: u32,
1247 dwProcessorType: u32,
1248 dwAllocationGranularity: u32,
1249 wProcessorLevel: u16,
1250 wProcessorRevision: u16,
1253 fn GetSystemInfo(info: *mut SYSTEM_INFO) -> i32;
1256 let mut sysinfo = std::mem::zeroed();
1257 GetSystemInfo(&mut sysinfo);
1258 sysinfo.dwNumberOfProcessors as usize
1262 #[cfg(target_os = "redox")]
1263 fn num_cpus() -> usize {
1264 // FIXME: Implement num_cpus on Redox
1269 all(target_arch = "wasm32", not(target_os = "emscripten")),
1270 all(target_vendor = "fortanix", target_env = "sgx")
1272 fn num_cpus() -> usize {
1277 target_os = "android",
1278 target_os = "cloudabi",
1279 target_os = "emscripten",
1280 target_os = "fuchsia",
1282 target_os = "linux",
1283 target_os = "macos",
1284 target_os = "solaris"
1286 fn num_cpus() -> usize {
1287 unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as usize }
1291 target_os = "freebsd",
1292 target_os = "dragonfly",
1293 target_os = "netbsd"
1295 fn num_cpus() -> usize {
1298 let mut cpus: libc::c_uint = 0;
1299 let mut cpus_size = std::mem::size_of_val(&cpus);
1302 cpus = libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as libc::c_uint;
1305 let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
1310 &mut cpus as *mut _ as *mut _,
1311 &mut cpus_size as *mut _ as *mut _,
1323 #[cfg(target_os = "openbsd")]
1324 fn num_cpus() -> usize {
1327 let mut cpus: libc::c_uint = 0;
1328 let mut cpus_size = std::mem::size_of_val(&cpus);
1329 let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
1335 &mut cpus as *mut _ as *mut _,
1336 &mut cpus_size as *mut _ as *mut _,
1347 #[cfg(target_os = "haiku")]
1348 fn num_cpus() -> usize {
1353 #[cfg(target_os = "l4re")]
1354 fn num_cpus() -> usize {
1360 pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
1361 let mut filtered = tests;
1362 let matches_filter = |test: &TestDescAndFn, filter: &str| {
1363 let test_name = test.desc.name.as_slice();
1365 match opts.filter_exact {
1366 true => test_name == filter,
1367 false => test_name.contains(filter),
1371 // Remove tests that don't match the test filter
1372 if let Some(ref filter) = opts.filter {
1373 filtered.retain(|test| matches_filter(test, filter));
1376 // Skip tests that match any of the skip filters
1377 filtered.retain(|test| !opts.skip.iter().any(|sf| matches_filter(test, sf)));
1379 // Excludes #[should_panic] tests
1380 if opts.exclude_should_panic {
1381 filtered.retain(|test| test.desc.should_panic == ShouldPanic::No);
1384 // maybe unignore tests
1385 match opts.run_ignored {
1386 RunIgnored::Yes => {
1389 .for_each(|test| test.desc.ignore = false);
1391 RunIgnored::Only => {
1392 filtered.retain(|test| test.desc.ignore);
1395 .for_each(|test| test.desc.ignore = false);
1397 RunIgnored::No => {}
1400 // Sort the tests alphabetically
1401 filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));
1406 pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
1407 // convert benchmarks to tests, if we're not benchmarking them
1411 let testfn = match x.testfn {
1412 DynBenchFn(bench) => DynTestFn(Box::new(move || {
1413 bench::run_once(|b| __rust_begin_short_backtrace(|| bench.run(b)))
1415 StaticBenchFn(benchfn) => DynTestFn(Box::new(move || {
1416 bench::run_once(|b| __rust_begin_short_backtrace(|| benchfn(b)))
1431 test: TestDescAndFn,
1432 monitor_ch: Sender<MonitorMsg>,
1433 concurrency: Concurrent,
1435 let TestDescAndFn { desc, testfn } = test;
1437 let ignore_because_panic_abort = cfg!(target_arch = "wasm32")
1438 && !cfg!(target_os = "emscripten")
1439 && desc.should_panic != ShouldPanic::No;
1441 if force_ignore || desc.ignore || ignore_because_panic_abort {
1442 monitor_ch.send((desc, TrIgnored, Vec::new())).unwrap();
1448 monitor_ch: Sender<MonitorMsg>,
1450 testfn: Box<dyn FnBox() + Send>,
1451 concurrency: Concurrent,
1453 // Buffer for capturing standard I/O
1454 let data = Arc::new(Mutex::new(Vec::new()));
1455 let data2 = data.clone();
1457 let name = desc.name.clone();
1458 let runtest = move || {
1459 let oldio = if !nocapture {
1461 io::set_print(Some(Box::new(Sink(data2.clone())))),
1462 io::set_panic(Some(Box::new(Sink(data2)))),
1468 let result = catch_unwind(AssertUnwindSafe(testfn));
1470 if let Some((printio, panicio)) = oldio {
1471 io::set_print(printio);
1472 io::set_panic(panicio);
1475 let test_result = calc_result(&desc, result);
1476 let stdout = data.lock().unwrap().to_vec();
1478 .send((desc.clone(), test_result, stdout))
1482 // If the platform is single-threaded we're just going to run
1483 // the test synchronously, regardless of the concurrency
1485 let supports_threads = !cfg!(target_os = "emscripten") && !cfg!(target_arch = "wasm32");
1486 if concurrency == Concurrent::Yes && supports_threads {
1487 let cfg = thread::Builder::new().name(name.as_slice().to_owned());
1488 cfg.spawn(runtest).unwrap();
1495 DynBenchFn(bencher) => {
1496 crate::bench::benchmark(desc, monitor_ch, opts.nocapture, |harness| {
1497 bencher.run(harness)
1500 StaticBenchFn(benchfn) => {
1501 crate::bench::benchmark(desc, monitor_ch, opts.nocapture, |harness| {
1502 (benchfn.clone())(harness)
1506 let cb = move || __rust_begin_short_backtrace(f);
1507 run_test_inner(desc, monitor_ch, opts.nocapture, Box::new(cb), concurrency)
1509 StaticTestFn(f) => run_test_inner(
1513 Box::new(move || __rust_begin_short_backtrace(f)),
1519 /// Fixed frame used to clean the backtrace with `RUST_BACKTRACE=1`.
// NOTE(review): the symbol name itself appears to be what the backtrace
// printer matches on to trim frames below the test body — do not rename.
// Presumably the (elided) body just invokes `f()` — confirm in full source.
1521 fn __rust_begin_short_backtrace<F: FnOnce()>(f: F) {
// Maps the raw `catch_unwind` outcome to a `TestResult`, honoring the test's
// panic expectation (`should_panic`) and its `allow_fail` escape hatch.
1525 fn calc_result(desc: &TestDesc, task_result: Result<(), Box<dyn Any + Send>>) -> TestResult {
1526 match (&desc.should_panic, task_result) {
// Pass: no panic when none was expected, or any panic when one was expected.
1527 (&ShouldPanic::No, Ok(())) | (&ShouldPanic::Yes, Err(_)) => TrOk,
// Expected panic with a specific message: the payload (either a `String` or
// a `&'static str`) must contain the expected substring `msg`.
1528 (&ShouldPanic::YesWithMessage(msg), Err(ref err)) => {
1530 .downcast_ref::<String>()
1532 .or_else(|| err.downcast_ref::<&'static str>().map(|e| *e))
1533 .map(|e| e.contains(msg))
// A message mismatch is tolerated only for `allow_fail` tests.
1538 if desc.allow_fail {
1541 TrFailedMsg(format!("Panic did not include expected string '{}'", msg))
// Any other expectation/outcome mismatch: allowed-fail tests degrade to
// `TrAllowedFail` instead of a hard failure.
1545 _ if desc.allow_fail => TrAllowedFail,
// Collected benchmark metrics keyed by metric name; `BTreeMap` gives
// deterministic, name-ordered iteration (useful for stable formatted output).
1550 #[derive(Clone, PartialEq)]
1551 pub struct MetricMap(BTreeMap<String, Metric>);
// Constructs an empty metric map.
1554 pub fn new() -> MetricMap {
1555 MetricMap(BTreeMap::new())
1558 /// Insert a named `value` (+/- `noise`) metric into the map. The value
1559 /// must be non-negative. The `noise` indicates the uncertainty of the
1560 /// metric, which doubles as the "noise range" of acceptable
1561 /// pairwise-regressions on this named value, when comparing from one
1562 /// metric to the next using `compare_to_old`.
1564 /// If `noise` is positive, then it means this metric is of a value
1565 /// you want to see grow smaller, so a change larger than `noise` in the
1566 /// positive direction represents a regression.
1568 /// If `noise` is negative, then it means this metric is of a value
1569 /// you want to see grow larger, so a change larger than `noise` in the
1570 /// negative direction represents a regression.
1571 pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
1572 let m = Metric { value, noise };
// `BTreeMap::insert` semantics: re-inserting the same name overwrites the
// previously stored metric.
1573 self.0.insert(name.to_owned(), m);
// Renders one "name: value (+/- noise)" entry per metric — presumably
// iterating `self.0` (the iteration source line is not shown), which yields
// entries in name order.
1576 pub fn fmt_metrics(&self) -> String {
1580 .map(|(k, v)| format!("{}: {} (+/- {})", *k, v.value, v.noise))
1581 .collect::<Vec<_>>();
// Re-exported optimizer barrier: benchmark bodies route values through
// `black_box` so the compiler cannot const-fold the measured work away.
1588 pub use std::hint::black_box;
1591 /// Callback for benchmark functions to run in their body.
1592 pub fn iter<T, F>(&mut self, mut inner: F)
// `Single` mode (set by `bench::run_once`) times exactly one pass via
// `ns_iter_inner`; otherwise the adaptive sampler (the free fn `iter`)
// computes a `stats::Summary` that is stored on the bencher.
1596 if self.mode == BenchMode::Single {
1597 ns_iter_inner(&mut inner, 1);
1601 self.summary = Some(iter(&mut inner));
// Runs the benchmark closure against this bencher and returns whatever
// summary `iter` recorded — `None` if the closure never called `iter`.
// NOTE(review): the actual invocation of `f` is not shown here; confirm it
// is called exactly once with `&mut self`.
1604 pub fn bench<F>(&mut self, mut f: F) -> Option<stats::Summary>
1606 F: FnMut(&mut Bencher),
1609 return self.summary;
// Flattens a `Duration` into a total nanosecond count. Note: the
// `as_secs() * 1_000_000_000` product can overflow `u64` for durations of
// roughly 584+ years — irrelevant for benchmark timings, but worth knowing.
1613 fn ns_from_dur(dur: Duration) -> u64 {
1614 dur.as_secs() * 1_000_000_000 + (dur.subsec_nanos() as u64)
// Times the benchmark closure and returns elapsed wall-clock nanoseconds.
// The signature suggests `inner` is invoked `k` times back to back (the loop
// body is not shown — confirm against the full source).
1617 fn ns_iter_inner<T, F>(inner: &mut F, k: u64) -> u64
1621 let start = Instant::now();
1625 return ns_from_dur(start.elapsed());
// Adaptive measurement loop: grows the per-sample iteration count `n` until
// 50 winsorized samples converge (total run >= 100ms, median absolute
// deviation < 1%, and the n-vs-5n medians agree within noise), or until 3s
// of total runtime has been spent.
1628 pub fn iter<T, F>(inner: &mut F) -> stats::Summary
1632 // Initial bench run to get ballpark figure.
1633 let ns_single = ns_iter_inner(inner, 1);
1635 // Try to estimate iter count for 1ms falling back to 1m
1636 // iterations if first run took < 1ns.
1637 let ns_target_total = 1_000_000; // 1ms
1638 let mut n = ns_target_total / cmp::max(1, ns_single);
1640 // if the first run took more than 1ms we don't want to just
1641 // be left doing 0 iterations on every loop. The unfortunate
1642 // side effect of not being able to do as many runs is
1643 // automatically handled by the statistical analysis below
1644 // (i.e., larger error bars).
1647 let mut total_run = Duration::new(0, 0);
// 50 ns-per-iteration samples per round, winsorized at 5% to clip outliers.
1648 let samples: &mut [f64] = &mut [0.0_f64; 50];
1650 let loop_start = Instant::now();
1652 for p in &mut *samples {
1653 *p = ns_iter_inner(inner, n) as f64 / n as f64;
1656 stats::winsorize(samples, 5.0);
1657 let summ = stats::Summary::new(samples);
// Second pass at 5*n iterations per sample, used to cross-check convergence.
1659 for p in &mut *samples {
1660 let ns = ns_iter_inner(inner, 5 * n);
1661 *p = ns as f64 / (5 * n) as f64;
1664 stats::winsorize(samples, 5.0);
1665 let summ5 = stats::Summary::new(samples);
1667 let loop_run = loop_start.elapsed();
1669 // If we've run for 100ms and seem to have converged to a
1671 if loop_run > Duration::from_millis(100)
1672 && summ.median_abs_dev_pct < 1.0
1673 && summ.median - summ5.median < summ5.median_abs_dev
1678 total_run = total_run + loop_run;
1679 // Longest we ever run for is 3s.
1680 if total_run > Duration::from_secs(3) {
1684 // If we overflow here just return the results so far. We check a
1685 // multiplier of 10 because we're about to multiply by 2 and the
1686 // next iteration of the loop will also multiply by 5 (to calculate
1687 // the summ5 result)
1688 n = match n.checked_mul(10) {
1698 use super::{BenchMode, BenchSamples, Bencher, MonitorMsg, Sender, Sink, TestDesc, TestResult};
1702 use std::panic::{catch_unwind, AssertUnwindSafe};
1703 use std::sync::{Arc, Mutex};
// Drives one benchmark to completion: captures stdio (unless `nocapture`),
// runs the body under `catch_unwind`, converts the timing summary into a
// `TrBench` result (or `TrFailed` on panic), and reports result plus captured
// output on `monitor_ch`.
1705 pub fn benchmark<F>(desc: TestDesc, monitor_ch: Sender<MonitorMsg>, nocapture: bool, f: F)
1707 F: FnMut(&mut Bencher),
1709 let mut bs = Bencher {
1710 mode: BenchMode::Auto,
// Shared in-memory buffer receiving everything printed/panicked while the
// benchmark runs; `data2` is the handle handed to the `Sink` redirections.
1715 let data = Arc::new(Mutex::new(Vec::new()));
1716 let data2 = data.clone();
1718 let oldio = if !nocapture {
1720 io::set_print(Some(Box::new(Sink(data2.clone())))),
1721 io::set_panic(Some(Box::new(Sink(data2)))),
1727 let result = catch_unwind(AssertUnwindSafe(|| bs.bench(f)));
// Restore the original print/panic hooks before inspecting the result.
1729 if let Some((printio, panicio)) = oldio {
1730 io::set_print(printio);
1731 io::set_panic(panicio);
1734 let test_result = match result {
1736 Ok(Some(ns_iter_summ)) => {
// Clamp the median to >= 1ns so the MB/s division below cannot divide by 0.
1737 let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
// bytes-per-iteration * 1000 / ns-per-iteration == MB/s.
1738 let mb_s = bs.bytes * 1000 / ns_iter;
1740 let bs = BenchSamples {
1742 mb_s: mb_s as usize,
1744 TestResult::TrBench(bs)
1747 // iter not called, so no data.
1748 // FIXME: error in this case?
// Fabricate a single zero sample so downstream reporting still works.
1749 let samples: &mut [f64] = &mut [0.0_f64; 1];
1750 let bs = BenchSamples {
1751 ns_iter_summ: stats::Summary::new(samples),
1754 TestResult::TrBench(bs)
1756 Err(_) => TestResult::TrFailed,
// Deliver the captured output alongside the result.
1759 let stdout = data.lock().unwrap().to_vec();
1760 monitor_ch.send((desc, test_result, stdout)).unwrap();
// Runs `f` against a `Single`-mode `Bencher`: `Bencher::iter` then times one
// pass only and records no summary — used to execute a benchmark body as a
// plain test (see the `bench::run_once` call site earlier in this file).
1763 pub fn run_once<F>(f: F)
1765 F: FnMut(&mut Bencher),
1767 let mut bs = Bencher {
1768 mode: BenchMode::Single,
1780 filter_tests, parse_opts, run_test, DynTestFn, DynTestName, MetricMap, RunIgnored,
1781 ShouldPanic, StaticTestName, TestDesc, TestDescAndFn, TestOpts, TrFailed, TrFailedMsg,
1785 use crate::Concurrent;
1786 use std::sync::mpsc::channel;
// Fixture: two trivial `DynTestFn` tests named "1" and "2". Judging from the
// callers (`filter_for_ignored_option`, `run_include_ignored_option`), one is
// marked ignored and the other is not — the `ignore:` fields are on lines not
// shown here; confirm which is which against the full source.
1788 fn one_ignored_one_unignored_test() -> Vec<TestDescAndFn> {
1792 name: StaticTestName("1"),
1794 should_panic: ShouldPanic::No,
1797 testfn: DynTestFn(Box::new(move || {})),
1801 name: StaticTestName("2"),
1803 should_panic: ShouldPanic::No,
1806 testfn: DynTestFn(Box::new(move || {})),
// An ignored test executed with default options must not come back `TrOk`.
1812 pub fn do_not_run_ignored_tests() {
1816 let desc = TestDescAndFn {
1818 name: StaticTestName("whatever"),
1820 should_panic: ShouldPanic::No,
1823 testfn: DynTestFn(Box::new(f)),
1825 let (tx, rx) = channel();
1826 run_test(&TestOpts::new(), false, desc, tx, Concurrent::No);
1827 let (_, res, _) = rx.recv().unwrap();
1828 assert!(res != TrOk);
// Stronger than the previous test: an ignored test must specifically report
// `TrIgnored`, not merely "anything other than ok".
1832 pub fn ignored_tests_result_in_ignored() {
1834 let desc = TestDescAndFn {
1836 name: StaticTestName("whatever"),
1838 should_panic: ShouldPanic::No,
1841 testfn: DynTestFn(Box::new(f)),
1843 let (tx, rx) = channel();
1844 run_test(&TestOpts::new(), false, desc, tx, Concurrent::No);
1845 let (_, res, _) = rx.recv().unwrap();
1846 assert!(res == TrIgnored);
// A `ShouldPanic::Yes` test whose body panics (body not shown) is `TrOk`.
1850 fn test_should_panic() {
1854 let desc = TestDescAndFn {
1856 name: StaticTestName("whatever"),
1858 should_panic: ShouldPanic::Yes,
1861 testfn: DynTestFn(Box::new(f)),
1863 let (tx, rx) = channel();
1864 run_test(&TestOpts::new(), false, desc, tx, Concurrent::No);
1865 let (_, res, _) = rx.recv().unwrap();
1866 assert!(res == TrOk);
// The panic payload "an error message" contains the expected substring
// "error message", so the `YesWithMessage` check must pass with `TrOk`.
1870 fn test_should_panic_good_message() {
1872 panic!("an error message");
1874 let desc = TestDescAndFn {
1876 name: StaticTestName("whatever"),
1878 should_panic: ShouldPanic::YesWithMessage("error message"),
1881 testfn: DynTestFn(Box::new(f)),
1883 let (tx, rx) = channel();
1884 run_test(&TestOpts::new(), false, desc, tx, Concurrent::No);
1885 let (_, res, _) = rx.recv().unwrap();
1886 assert!(res == TrOk);
// The expected substring "foobar" is absent from the panic message, so the
// run must fail with the exact mismatch message `calc_result` produces.
1890 fn test_should_panic_bad_message() {
1892 panic!("an error message");
1894 let expected = "foobar";
1895 let failed_msg = "Panic did not include expected string";
1896 let desc = TestDescAndFn {
1898 name: StaticTestName("whatever"),
1900 should_panic: ShouldPanic::YesWithMessage(expected),
1903 testfn: DynTestFn(Box::new(f)),
1905 let (tx, rx) = channel();
1906 run_test(&TestOpts::new(), false, desc, tx, Concurrent::No);
1907 let (_, res, _) = rx.recv().unwrap();
// Must match the format string used in `calc_result` exactly.
1908 assert!(res == TrFailedMsg(format!("{} '{}'", failed_msg, expected)));
// A `ShouldPanic::Yes` test that returns normally must report `TrFailed`.
1912 fn test_should_panic_but_succeeds() {
1914 let desc = TestDescAndFn {
1916 name: StaticTestName("whatever"),
1918 should_panic: ShouldPanic::Yes,
1921 testfn: DynTestFn(Box::new(f)),
1923 let (tx, rx) = channel();
1924 run_test(&TestOpts::new(), false, desc, tx, Concurrent::No);
1925 let (_, res, _) = rx.recv().unwrap();
1926 assert!(res == TrFailed);
// CLI parsing: `--ignored` must map to `RunIgnored::Only`.
1930 fn parse_ignored_flag() {
1932 "progname".to_string(),
1933 "filter".to_string(),
1934 "--ignored".to_string(),
1936 let opts = parse_opts(&args).unwrap().unwrap();
1937 assert_eq!(opts.run_ignored, RunIgnored::Only);
// CLI parsing: `--include-ignored` (gated behind `-Zunstable-options`) must
// map to `RunIgnored::Yes`.
1941 fn parse_include_ignored_flag() {
1943 "progname".to_string(),
1944 "filter".to_string(),
1945 "-Zunstable-options".to_string(),
1946 "--include-ignored".to_string(),
1948 let opts = parse_opts(&args).unwrap().unwrap();
1949 assert_eq!(opts.run_ignored, RunIgnored::Yes);
// Exercises `filter_tests` with `RunIgnored::Only` against the two-test
// fixture: only the ignored test ("1") survives, with `ignore` cleared.
1953 pub fn filter_for_ignored_option() {
1954 // When we run ignored tests the test filter should filter out all the
1955 // unignored tests and flip the ignore flag on the rest to false
1957 let mut opts = TestOpts::new();
1958 opts.run_tests = true;
1959 opts.run_ignored = RunIgnored::Only;
1961 let tests = one_ignored_one_unignored_test();
1962 let filtered = filter_tests(&opts, tests);
1964 assert_eq!(filtered.len(), 1);
1965 assert_eq!(filtered[0].desc.name.to_string(), "1");
1966 assert!(!filtered[0].desc.ignore);
// Exercises `filter_tests` with `RunIgnored::Yes`: both fixture tests are
// kept and both have their `ignore` flag cleared.
1970 pub fn run_include_ignored_option() {
1971 // When we "--include-ignored" tests, the ignore flag should be set to false on
1972 // all tests and no test filtered out
1974 let mut opts = TestOpts::new();
1975 opts.run_tests = true;
1976 opts.run_ignored = RunIgnored::Yes;
1978 let tests = one_ignored_one_unignored_test();
1979 let filtered = filter_tests(&opts, tests);
1981 assert_eq!(filtered.len(), 2);
1982 assert!(!filtered[0].desc.ignore);
1983 assert!(!filtered[1].desc.ignore);
// With `exclude_should_panic` set, filtering must drop the appended
// `ShouldPanic::Yes` test "3" and keep the two `ShouldPanic::No` fixtures.
1987 pub fn exclude_should_panic_option() {
1988 let mut opts = TestOpts::new();
1989 opts.run_tests = true;
1990 opts.exclude_should_panic = true;
1992 let mut tests = one_ignored_one_unignored_test();
1993 tests.push(TestDescAndFn {
1995 name: StaticTestName("3"),
1997 should_panic: ShouldPanic::Yes,
2000 testfn: DynTestFn(Box::new(move || {})),
2003 let filtered = filter_tests(&opts, tests);
2005 assert_eq!(filtered.len(), 2);
2006 assert!(filtered.iter().all(|test| test.desc.should_panic == ShouldPanic::No));
// Substring vs. exact filtering over four hierarchical test names. The first
// four `filter_tests` calls use substring matching; the last four switch to
// exact matching — the `TestOpts` field distinguishing the modes (presumably
// `filter_exact`) is set on lines not shown here; confirm.
2010 pub fn exact_filter_match() {
2011 fn tests() -> Vec<TestDescAndFn> {
2012 vec!["base", "base::test", "base::test1", "base::test2"]
2014 .map(|name| TestDescAndFn {
2016 name: StaticTestName(name),
2018 should_panic: ShouldPanic::No,
2021 testfn: DynTestFn(Box::new(move || {})),
// Substring mode: "base" and even the prefix "bas" match all four names.
2026 let substr = filter_tests(
2028 filter: Some("base".into()),
2033 assert_eq!(substr.len(), 4);
2035 let substr = filter_tests(
2037 filter: Some("bas".into()),
2042 assert_eq!(substr.len(), 4);
// "::test" and "base::test" appear in three of the four names.
2044 let substr = filter_tests(
2046 filter: Some("::test".into()),
2051 assert_eq!(substr.len(), 3);
2053 let substr = filter_tests(
2055 filter: Some("base::test".into()),
2060 assert_eq!(substr.len(), 3);
// Exact mode: only a name equal to the filter survives.
2062 let exact = filter_tests(
2064 filter: Some("base".into()),
2070 assert_eq!(exact.len(), 1);
2072 let exact = filter_tests(
2074 filter: Some("bas".into()),
2080 assert_eq!(exact.len(), 0);
2082 let exact = filter_tests(
2084 filter: Some("::test".into()),
2090 assert_eq!(exact.len(), 0);
2092 let exact = filter_tests(
2094 filter: Some("base::test".into()),
2100 assert_eq!(exact.len(), 1);
// With no filter set, `filter_tests` must return the tests sorted by name:
// `expected` is the same list of names in lexicographic order.
2104 pub fn sort_tests() {
2105 let mut opts = TestOpts::new();
2106 opts.run_tests = true;
2109 "sha1::test".to_string(),
2110 "isize::test_to_str".to_string(),
2111 "isize::test_pow".to_string(),
2112 "test::do_not_run_ignored_tests".to_string(),
2113 "test::ignored_tests_result_in_ignored".to_string(),
2114 "test::first_free_arg_should_be_a_filter".to_string(),
2115 "test::parse_ignored_flag".to_string(),
2116 "test::parse_include_ignored_flag".to_string(),
2117 "test::filter_for_ignored_option".to_string(),
2118 "test::run_include_ignored_option".to_string(),
2119 "test::sort_tests".to_string(),
2123 let mut tests = Vec::new();
2124 for name in &names {
2125 let test = TestDescAndFn {
2127 name: DynTestName((*name).clone()),
2129 should_panic: ShouldPanic::No,
2132 testfn: DynTestFn(Box::new(testfn)),
2138 let filtered = filter_tests(&opts, tests);
2140 let expected = vec![
2141 "isize::test_pow".to_string(),
2142 "isize::test_to_str".to_string(),
2143 "sha1::test".to_string(),
2144 "test::do_not_run_ignored_tests".to_string(),
2145 "test::filter_for_ignored_option".to_string(),
2146 "test::first_free_arg_should_be_a_filter".to_string(),
2147 "test::ignored_tests_result_in_ignored".to_string(),
2148 "test::parse_ignored_flag".to_string(),
2149 "test::parse_include_ignored_flag".to_string(),
2150 "test::run_include_ignored_option".to_string(),
2151 "test::sort_tests".to_string(),
// Pairwise comparison: filtered order must equal the sorted expectation.
2154 for (a, b) in expected.iter().zip(filtered) {
2155 assert!(*a == b.desc.name.to_string());
// Builds two metric maps covering the interesting comparison cases (within
// noise, present in only one map, regressed/improved in each direction).
// NOTE(review): no assertions are visible in this excerpt — this appears to
// only exercise `insert_metric`; confirm whether a comparison step exists (or
// was removed) in the full source.
2160 pub fn test_metricmap_compare() {
2161 let mut m1 = MetricMap::new();
2162 let mut m2 = MetricMap::new();
2163 m1.insert_metric("in-both-noise", 1000.0, 200.0);
2164 m2.insert_metric("in-both-noise", 1100.0, 200.0);
2166 m1.insert_metric("in-first-noise", 1000.0, 2.0);
2167 m2.insert_metric("in-second-noise", 1000.0, 2.0);
2169 m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
2170 m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);
2172 m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
2173 m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);
// Negative noise flags "bigger is better" (see `insert_metric` docs).
2175 m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
2176 m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);
2178 m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
2179 m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);
// `bench::run_once` must tolerate a body that never calls `iter` (the
// `run_once` invocation is on lines not shown here).
2183 pub fn test_bench_once_no_iter() {
2184 fn f(_: &mut Bencher) {}
// Companion to the test above: `bench::run_once` with a body that does call
// `iter` (closure body and `run_once` invocation not shown here).
2189 pub fn test_bench_once_iter() {
2190 fn f(b: &mut Bencher) {
// Full `bench::benchmark` path with a body that never calls `iter`: a result
// must still be delivered on the channel (see the Ok(None) branch in
// `benchmark`, which fabricates a single zero sample).
2197 pub fn test_bench_no_iter() {
2198 fn f(_: &mut Bencher) {}
2200 let (tx, rx) = channel();
2202 let desc = TestDesc {
2203 name: StaticTestName("f"),
2205 should_panic: ShouldPanic::No,
// nocapture = true: stdio redirection is skipped inside `benchmark`.
2209 crate::bench::benchmark(desc, tx, true, f);
2214 pub fn test_bench_iter() {
2215 fn f(b: &mut Bencher) {
2219 let (tx, rx) = channel();
2221 let desc = TestDesc {
2222 name: StaticTestName("f"),
2224 should_panic: ShouldPanic::No,
2228 crate::bench::benchmark(desc, tx, true, f);