1 //! Support code for rustc's built in unit-test and micro-benchmarking
4 //! Almost all user code will only be interested in `Bencher` and
5 //! `black_box`. All other interactions (such as writing tests and
6 //! benchmarks themselves) should be done via the `#[test]` and
7 //! `#[bench]` attributes.
9 //! See the [Testing Chapter](../book/ch11-00-testing.html) of the book for more details.
11 // Currently, not much of this is meant for users. It is intended to
12 // support the simplest interface possible for representing and
13 // running tests while providing a base that other test frameworks may
16 // N.B., this is also specified in this crate's Cargo.toml, but libsyntax contains logic specific to
17 // this crate, which relies on this attribute (rather than the value of `--crate-name` passed by
18 // cargo) to detect this crate.
20 #![deny(rust_2018_idioms)]
21 #![crate_name = "test"]
22 #![unstable(feature = "test", issue = "27812")]
23 #![doc(html_root_url = "https://doc.rust-lang.org/nightly/", test(attr(deny(warnings))))]
26 #![cfg_attr(any(unix, target_os = "cloudabi"), feature(libc, rustc_private))]
28 #![feature(set_stdio)]
29 #![feature(panic_unwind)]
30 #![feature(staged_api)]
31 #![feature(termination_trait_lib)]
35 #[cfg(any(unix, target_os = "cloudabi"))]
39 // FIXME(#54291): rustc and/or LLVM don't yet support building with panic-unwind
40 // on aarch64-pc-windows-msvc, so we don't link libtest against
41 // libunwind (for the time being), even though it means that
42 // libtest won't be fully functional on this platform.
44 // See also: https://github.com/rust-lang/rust/issues/54190#issuecomment-422904437
45 #[cfg(not(all(windows, target_arch = "aarch64")))]
46 extern crate panic_unwind;
48 pub use self::ColorConfig::*;
49 use self::NamePadding::*;
50 use self::OutputLocation::*;
51 use self::TestEvent::*;
52 pub use self::TestFn::*;
53 pub use self::TestName::*;
54 pub use self::TestResult::*;
58 use std::boxed::FnBox;
60 use std::collections::BTreeMap;
65 use std::io::prelude::*;
66 use std::panic::{catch_unwind, AssertUnwindSafe};
67 use std::path::PathBuf;
69 use std::process::Termination;
70 use std::sync::mpsc::{channel, Sender};
71 use std::sync::{Arc, Mutex};
73 use std::time::{Duration, Instant};
/// Seconds a test may run before the harness warns that it has been
/// running for a long time (drives the `TeTimeout` event).
const TEST_WARN_TIMEOUT_S: u64 = 60;
/// Terse ("quiet") output wraps to a new line after this many
/// one-character test results.
const QUIET_MODE_MAX_COLUMN: usize = 100; // insert a '\n' after 100 tests in quiet mode
78 // to be used by rustc to compile tests in libtest
81 assert_test_result, filter_tests, parse_opts, run_test, test_main, test_main_static,
82 Bencher, DynTestFn, DynTestName, Metric, MetricMap, Options, RunIgnored, ShouldPanic,
83 StaticBenchFn, StaticTestFn, StaticTestName, TestDesc, TestDescAndFn, TestName, TestOpts,
84 TestResult, TrFailed, TrFailedMsg, TrIgnored, TrOk,
91 use crate::formatters::{JsonFormatter, OutputFormatter, PrettyFormatter, TerseFormatter};
93 /// Whether to execute tests concurrently or not
94 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
100 // The name of a test. By convention this follows the rules for rust
101 // paths; i.e., it should be a series of identifiers separated by double
102 // colons. This way if some test runner wants to arrange the tests
103 // hierarchically it may.
105 #[derive(Clone, PartialEq, Eq, Hash, Debug)]
107 StaticTestName(&'static str),
109 AlignedTestName(Cow<'static, str>, NamePadding),
112 fn as_slice(&self) -> &str {
114 StaticTestName(s) => s,
115 DynTestName(ref s) => s,
116 AlignedTestName(ref s, _) => &*s,
120 fn padding(&self) -> NamePadding {
122 &AlignedTestName(_, p) => p,
127 fn with_padding(&self, padding: NamePadding) -> TestName {
128 let name = match self {
129 &TestName::StaticTestName(name) => Cow::Borrowed(name),
130 &TestName::DynTestName(ref name) => Cow::Owned(name.clone()),
131 &TestName::AlignedTestName(ref name, _) => name.clone(),
134 TestName::AlignedTestName(name, padding)
137 impl fmt::Display for TestName {
138 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
139 fmt::Display::fmt(self.as_slice(), f)
/// How a test's name should be padded when results are column-aligned.
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub enum NamePadding {
    /// No padding (ordinary tests).
    PadNone,
    /// Pad on the right so benchmark timings line up.
    PadOnRight,
}
150 fn padded_name(&self, column_count: usize, align: NamePadding) -> String {
151 let mut name = String::from(self.name.as_slice());
152 let fill = column_count.saturating_sub(name.len());
153 let pad = " ".repeat(fill);
164 /// Represents a benchmark function.
165 pub trait TDynBenchFn: Send {
166 fn run(&self, harness: &mut Bencher);
169 // A function that runs a test. If the function returns successfully,
170 // the test succeeds; if the function panics then the test fails. We
171 // may need to come up with a more clever definition of test in order
172 // to support isolation of tests into threads.
175 StaticBenchFn(fn(&mut Bencher)),
176 DynTestFn(Box<dyn FnBox() + Send>),
177 DynBenchFn(Box<dyn TDynBenchFn + 'static>),
181 fn padding(&self) -> NamePadding {
183 StaticTestFn(..) => PadNone,
184 StaticBenchFn(..) => PadOnRight,
185 DynTestFn(..) => PadNone,
186 DynBenchFn(..) => PadOnRight,
191 impl fmt::Debug for TestFn {
192 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
193 f.write_str(match *self {
194 StaticTestFn(..) => "StaticTestFn(..)",
195 StaticBenchFn(..) => "StaticBenchFn(..)",
196 DynTestFn(..) => "DynTestFn(..)",
197 DynBenchFn(..) => "DynBenchFn(..)",
202 /// Manager of the benchmarking runs.
204 /// This is fed into functions marked with `#[bench]` to allow for
205 /// set-up & tear-down before running a piece of code repeatedly via a
210 summary: Option<stats::Summary>,
214 #[derive(Clone, PartialEq, Eq)]
/// Whether a test is expected to panic, and if so whether the panic
/// message must contain a given substring.
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum ShouldPanic {
    No,
    Yes,
    YesWithMessage(&'static str),
}
227 // The definition of a single test. A test runner will run a list of
229 #[derive(Clone, Debug, PartialEq, Eq, Hash)]
230 pub struct TestDesc {
233 pub should_panic: ShouldPanic,
234 pub allow_fail: bool,
238 pub struct TestDescAndFn {
/// A single named measurement: a value plus its noise/uncertainty band.
#[derive(Clone, PartialEq, Debug, Copy)]
pub struct Metric {
    value: f64,
    noise: f64,
}

impl Metric {
    pub fn new(value: f64, noise: f64) -> Metric {
        Metric { value, noise }
    }
}
/// In case we want to add other options as well, just add them in this struct.
#[derive(Copy, Clone, Debug)]
pub struct Options {
    display_output: bool,
}

impl Options {
    /// Default options: captured output of passing tests is not displayed.
    pub fn new() -> Options {
        Options {
            display_output: false,
        }
    }

    /// Builder-style setter for `display_output`.
    pub fn display_output(mut self, display_output: bool) -> Options {
        self.display_output = display_output;
        self
    }
}
274 // The default console test runner. It accepts the command line
275 // arguments and a vector of test_descs.
276 pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>, options: Options) {
277 let mut opts = match parse_opts(args) {
280 eprintln!("error: {}", msg);
286 opts.options = options;
288 if let Err(e) = list_tests_console(&opts, tests) {
289 eprintln!("error: io error when listing tests: {:?}", e);
293 match run_tests_console(&opts, tests) {
295 Ok(false) => process::exit(101),
297 eprintln!("error: io error when listing tests: {:?}", e);
304 // A variant optimized for invocation with a static test vector.
305 // This will panic (intentionally) when fed any dynamic tests, because
306 // it is copying the static values out into a dynamic vector and cannot
307 // copy dynamic values. It is doing this because from this point on
308 // a Vec<TestDescAndFn> is used in order to effect ownership-transfer
309 // semantics into parallel test runners, which in turn requires a Vec<>
310 // rather than a &[].
311 pub fn test_main_static(tests: &[&TestDescAndFn]) {
312 let args = env::args().collect::<Vec<_>>();
313 let owned_tests = tests
315 .map(|t| match t.testfn {
316 StaticTestFn(f) => TestDescAndFn {
317 testfn: StaticTestFn(f),
318 desc: t.desc.clone(),
320 StaticBenchFn(f) => TestDescAndFn {
321 testfn: StaticBenchFn(f),
322 desc: t.desc.clone(),
324 _ => panic!("non-static tests passed to test::test_main_static"),
327 test_main(&args, owned_tests, Options::new())
330 /// Invoked when unit tests terminate. Should panic if the unit
331 /// Tests is considered a failure. By default, invokes `report()`
332 /// and checks for a `0` result.
333 pub fn assert_test_result<T: Termination>(result: T) {
334 let code = result.report();
337 "the test returned a termination value with a non-zero status code ({}) \
338 which indicates a failure",
/// When to colorize console output (`--color`).
#[derive(Copy, Clone, Debug)]
pub enum ColorConfig {
    AutoColor,
    AlwaysColor,
    NeverColor,
}
/// Output format for test results (`--format`).
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum OutputFormat {
    /// One line per test (default).
    Pretty,
    /// One character per test.
    Terse,
    /// JSON events (nightly only).
    Json,
}
/// How `#[ignore]`d tests should be treated
/// (`--ignored` / `--include-ignored`).
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum RunIgnored {
    Yes,
    No,
    /// Run *only* the ignored tests.
    Only,
}
365 pub struct TestOpts {
367 pub filter: Option<String>,
368 pub filter_exact: bool,
369 pub run_ignored: RunIgnored,
371 pub bench_benchmarks: bool,
372 pub logfile: Option<PathBuf>,
374 pub color: ColorConfig,
375 pub format: OutputFormat,
376 pub test_threads: Option<usize>,
377 pub skip: Vec<String>,
378 pub options: Options,
383 fn new() -> TestOpts {
388 run_ignored: RunIgnored::No,
390 bench_benchmarks: false,
394 format: OutputFormat::Pretty,
397 options: Options::new(),
402 /// Result of parsing the options.
403 pub type OptRes = Result<TestOpts, String>;
405 fn optgroups() -> getopts::Options {
406 let mut opts = getopts::Options::new();
407 opts.optflag("", "include-ignored", "Run ignored and not ignored tests")
408 .optflag("", "ignored", "Run only ignored tests")
409 .optflag("", "test", "Run tests and not benchmarks")
410 .optflag("", "bench", "Run benchmarks instead of tests")
411 .optflag("", "list", "List all tests and benchmarks")
412 .optflag("h", "help", "Display this message (longer with --help)")
416 "Write logs to the specified file instead \
423 "don't capture stdout/stderr of each \
424 task, allow printing directly",
429 "Number of threads used for running tests \
436 "Skip tests whose names contain FILTER (this flag can \
437 be used multiple times)",
443 "Display one character per test instead of one line. \
444 Alias to --format=terse",
449 "Exactly match filters rather than by substring",
454 "Configure coloring of output:
455 auto = colorize if stdout is a tty and tests are run on serially (default);
456 always = always colorize output;
457 never = never colorize output;",
463 "Configure formatting of output:
464 pretty = Print verbose output;
465 terse = Display one character per test;
466 json = Output a json document",
472 "Enable nightly-only flags:
473 unstable-options = Allow use of experimental features",
479 fn usage(binary: &str, options: &getopts::Options) {
480 let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
484 The FILTER string is tested against the name of all tests, and only those
485 tests whose names contain the filter are run.
487 By default, all tests are run in parallel. This can be altered with the
488 --test-threads flag or the RUST_TEST_THREADS environment variable when running
491 All tests have their standard output and standard error captured by default.
492 This can be overridden with the --nocapture flag or setting RUST_TEST_NOCAPTURE
493 environment variable to a value other than "0". Logging is not captured by default.
497 #[test] - Indicates a function is a test to be run. This function
499 #[bench] - Indicates a function is a benchmark to be run. This
500 function takes one argument (test::Bencher).
501 #[should_panic] - This function (also labeled with #[test]) will only pass if
502 the code causes a panic (an assertion failure or panic!)
503 A message may be provided, which the failure string must
504 contain: #[should_panic(expected = "foo")].
505 #[ignore] - When applied to a function which is already attributed as a
506 test, then the test runner will ignore these tests during
507 normal test runs. Running with --ignored or --include-ignored will run
509 usage = options.usage(&message)
513 // FIXME: Copied from libsyntax until linkage errors are resolved. Issue #47566
514 fn is_nightly() -> bool {
515 // Whether this is a feature-staged build, i.e., on the beta or stable channel
516 let disable_unstable_features = option_env!("CFG_DISABLE_UNSTABLE_FEATURES").is_some();
517 // Whether we should enable unstable features for bootstrapping
518 let bootstrap = env::var("RUSTC_BOOTSTRAP").is_ok();
520 bootstrap || !disable_unstable_features
523 // Parses command line arguments into test options
524 pub fn parse_opts(args: &[String]) -> Option<OptRes> {
525 let mut allow_unstable = false;
526 let opts = optgroups();
527 let args = args.get(1..).unwrap_or(args);
528 let matches = match opts.parse(args) {
530 Err(f) => return Some(Err(f.to_string())),
533 if let Some(opt) = matches.opt_str("Z") {
536 "the option `Z` is only accepted on the nightly compiler".into(),
541 "unstable-options" => {
542 allow_unstable = true;
545 return Some(Err("Unrecognized option to `Z`".into()));
550 if matches.opt_present("h") {
551 usage(&args[0], &opts);
555 let filter = if !matches.free.is_empty() {
556 Some(matches.free[0].clone())
561 let include_ignored = matches.opt_present("include-ignored");
562 if !allow_unstable && include_ignored {
564 "The \"include-ignored\" flag is only accepted on the nightly compiler".into(),
568 let run_ignored = match (include_ignored, matches.opt_present("ignored")) {
571 "the options --include-ignored and --ignored are mutually exclusive".into(),
574 (true, false) => RunIgnored::Yes,
575 (false, true) => RunIgnored::Only,
576 (false, false) => RunIgnored::No,
578 let quiet = matches.opt_present("quiet");
579 let exact = matches.opt_present("exact");
580 let list = matches.opt_present("list");
582 let logfile = matches.opt_str("logfile");
583 let logfile = logfile.map(|s| PathBuf::from(&s));
585 let bench_benchmarks = matches.opt_present("bench");
586 let run_tests = !bench_benchmarks || matches.opt_present("test");
588 let mut nocapture = matches.opt_present("nocapture");
590 nocapture = match env::var("RUST_TEST_NOCAPTURE") {
591 Ok(val) => &val != "0",
596 let test_threads = match matches.opt_str("test-threads") {
597 Some(n_str) => match n_str.parse::<usize>() {
598 Ok(0) => return Some(Err("argument for --test-threads must not be 0".to_string())),
601 return Some(Err(format!(
602 "argument for --test-threads must be a number > 0 \
611 let color = match matches.opt_str("color").as_ref().map(|s| &**s) {
612 Some("auto") | None => AutoColor,
613 Some("always") => AlwaysColor,
614 Some("never") => NeverColor,
617 return Some(Err(format!(
618 "argument for --color must be auto, always, or never (was \
625 let format = match matches.opt_str("format").as_ref().map(|s| &**s) {
626 None if quiet => OutputFormat::Terse,
627 Some("pretty") | None => OutputFormat::Pretty,
628 Some("terse") => OutputFormat::Terse,
632 "The \"json\" format is only accepted on the nightly compiler".into(),
639 return Some(Err(format!(
640 "argument for --format must be pretty, terse, or json (was \
647 let test_opts = TestOpts {
659 skip: matches.opt_strs("skip"),
660 options: Options::new(),
666 #[derive(Clone, PartialEq)]
667 pub struct BenchSamples {
668 ns_iter_summ: stats::Summary,
672 #[derive(Clone, PartialEq)]
673 pub enum TestResult {
679 TrBench(BenchSamples),
682 unsafe impl Send for TestResult {}
684 enum OutputLocation<T> {
685 Pretty(Box<term::StdoutTerminal>),
689 impl<T: Write> Write for OutputLocation<T> {
690 fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
692 Pretty(ref mut term) => term.write(buf),
693 Raw(ref mut stdout) => stdout.write(buf),
697 fn flush(&mut self) -> io::Result<()> {
699 Pretty(ref mut term) => term.flush(),
700 Raw(ref mut stdout) => stdout.flush(),
705 struct ConsoleTestState {
706 log_out: Option<File>,
715 failures: Vec<(TestDesc, Vec<u8>)>,
716 not_failures: Vec<(TestDesc, Vec<u8>)>,
720 impl ConsoleTestState {
721 pub fn new(opts: &TestOpts) -> io::Result<ConsoleTestState> {
722 let log_out = match opts.logfile {
723 Some(ref path) => Some(File::create(path)?),
727 Ok(ConsoleTestState {
736 metrics: MetricMap::new(),
737 failures: Vec::new(),
738 not_failures: Vec::new(),
739 options: opts.options,
743 pub fn write_log<S: AsRef<str>>(&mut self, msg: S) -> io::Result<()> {
744 let msg = msg.as_ref();
747 Some(ref mut o) => o.write_all(msg.as_bytes()),
751 pub fn write_log_result(&mut self, test: &TestDesc, result: &TestResult) -> io::Result<()> {
752 self.write_log(format!(
755 TrOk => "ok".to_owned(),
756 TrFailed => "failed".to_owned(),
757 TrFailedMsg(ref msg) => format!("failed: {}", msg),
758 TrIgnored => "ignored".to_owned(),
759 TrAllowedFail => "failed (allowed)".to_owned(),
760 TrBench(ref bs) => fmt_bench_samples(bs),
766 fn current_test_count(&self) -> usize {
767 self.passed + self.failed + self.ignored + self.measured + self.allowed_fail
771 // Format a number with thousands separators
772 fn fmt_thousands_sep(mut n: usize, sep: char) -> String {
774 let mut output = String::new();
775 let mut trailing = false;
776 for &pow in &[9, 6, 3, 0] {
777 let base = 10_usize.pow(pow);
778 if pow == 0 || trailing || n / base != 0 {
780 output.write_fmt(format_args!("{}", n / base)).unwrap();
782 output.write_fmt(format_args!("{:03}", n / base)).unwrap();
795 pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
797 let mut output = String::new();
799 let median = bs.ns_iter_summ.median as usize;
800 let deviation = (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize;
803 .write_fmt(format_args!(
804 "{:>11} ns/iter (+/- {})",
805 fmt_thousands_sep(median, ','),
806 fmt_thousands_sep(deviation, ',')
811 .write_fmt(format_args!(" = {} MB/s", bs.mb_s))
817 // List the tests to console, and optionally to logfile. Filters are honored.
818 pub fn list_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<()> {
819 let mut output = match term::stdout() {
820 None => Raw(io::stdout()),
821 Some(t) => Pretty(t),
824 let quiet = opts.format == OutputFormat::Terse;
825 let mut st = ConsoleTestState::new(opts)?;
830 for test in filter_tests(&opts, tests) {
831 use crate::TestFn::*;
834 desc: TestDesc { name, .. },
838 let fntype = match testfn {
839 StaticTestFn(..) | DynTestFn(..) => {
843 StaticBenchFn(..) | DynBenchFn(..) => {
849 writeln!(output, "{}: {}", name, fntype)?;
850 st.write_log(format!("{} {}\n", fntype, name))?;
// Pluralize a unit name: "1 test" but "0 tests" / "2 tests".
fn plural(count: u32, s: &str) -> String {
    match count {
        1 => format!("{} {}", 1, s),
        n => format!("{} {}s", n, s),
    }
}
861 if ntest != 0 || nbench != 0 {
862 writeln!(output, "")?;
868 plural(ntest, "test"),
869 plural(nbench, "benchmark")
876 // A simple console test runner
877 pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<bool> {
880 st: &mut ConsoleTestState,
881 out: &mut dyn OutputFormatter,
882 ) -> io::Result<()> {
883 match (*event).clone() {
884 TeFiltered(ref filtered_tests) => {
885 st.total = filtered_tests.len();
886 out.write_run_start(filtered_tests.len())
888 TeFilteredOut(filtered_out) => Ok(st.filtered_out = filtered_out),
889 TeWait(ref test) => out.write_test_start(test),
890 TeTimeout(ref test) => out.write_timeout(test),
891 TeResult(test, result, stdout) => {
892 st.write_log_result(&test, &result)?;
893 out.write_result(&test, &result, &*stdout)?;
897 st.not_failures.push((test, stdout));
899 TrIgnored => st.ignored += 1,
900 TrAllowedFail => st.allowed_fail += 1,
902 st.metrics.insert_metric(
903 test.name.as_slice(),
904 bs.ns_iter_summ.median,
905 bs.ns_iter_summ.max - bs.ns_iter_summ.min,
911 st.failures.push((test, stdout));
913 TrFailedMsg(msg) => {
915 let mut stdout = stdout;
916 stdout.extend_from_slice(format!("note: {}", msg).as_bytes());
917 st.failures.push((test, stdout));
925 let output = match term::stdout() {
926 None => Raw(io::stdout()),
927 Some(t) => Pretty(t),
930 let max_name_len = tests
932 .max_by_key(|t| len_if_padded(*t))
933 .map(|t| t.desc.name.as_slice().len())
936 let is_multithreaded = opts.test_threads.unwrap_or_else(get_concurrency) > 1;
938 let mut out: Box<dyn OutputFormatter> = match opts.format {
939 OutputFormat::Pretty => Box::new(PrettyFormatter::new(
945 OutputFormat::Terse => Box::new(TerseFormatter::new(
951 OutputFormat::Json => Box::new(JsonFormatter::new(output)),
953 let mut st = ConsoleTestState::new(opts)?;
954 fn len_if_padded(t: &TestDescAndFn) -> usize {
955 match t.testfn.padding() {
957 PadOnRight => t.desc.name.as_slice().len(),
961 run_tests(opts, tests, |x| callback(&x, &mut st, &mut *out))?;
963 assert!(st.current_test_count() == st.total);
965 return out.write_run_finish(&st);
969 fn should_sort_failures_before_printing_them() {
970 let test_a = TestDesc {
971 name: StaticTestName("a"),
973 should_panic: ShouldPanic::No,
977 let test_b = TestDesc {
978 name: StaticTestName("b"),
980 should_panic: ShouldPanic::No,
984 let mut out = PrettyFormatter::new(Raw(Vec::new()), false, 10, false);
986 let st = ConsoleTestState {
995 metrics: MetricMap::new(),
996 failures: vec![(test_b, Vec::new()), (test_a, Vec::new())],
997 options: Options::new(),
998 not_failures: Vec::new(),
1001 out.write_failures(&st).unwrap();
1002 let s = match out.output_location() {
1003 &Raw(ref m) => String::from_utf8_lossy(&m[..]),
1004 &Pretty(_) => unreachable!(),
1007 let apos = s.find("a").unwrap();
1008 let bpos = s.find("b").unwrap();
1009 assert!(apos < bpos);
1012 fn use_color(opts: &TestOpts) -> bool {
1014 AutoColor => !opts.nocapture && stdout_isatty(),
1015 AlwaysColor => true,
1016 NeverColor => false,
1021 target_os = "cloudabi",
1022 target_os = "redox",
1023 all(target_arch = "wasm32", not(target_os = "emscripten")),
1024 all(target_vendor = "fortanix", target_env = "sgx")
1026 fn stdout_isatty() -> bool {
1027 // FIXME: Implement isatty on Redox and SGX
1031 fn stdout_isatty() -> bool {
1032 unsafe { libc::isatty(libc::STDOUT_FILENO) != 0 }
1035 fn stdout_isatty() -> bool {
1038 type HANDLE = *mut u8;
1039 type LPDWORD = *mut u32;
1040 const STD_OUTPUT_HANDLE: DWORD = -11i32 as DWORD;
1042 fn GetStdHandle(which: DWORD) -> HANDLE;
1043 fn GetConsoleMode(hConsoleHandle: HANDLE, lpMode: LPDWORD) -> BOOL;
1046 let handle = GetStdHandle(STD_OUTPUT_HANDLE);
1048 GetConsoleMode(handle, &mut out) != 0
1053 pub enum TestEvent {
1054 TeFiltered(Vec<TestDesc>),
1056 TeResult(TestDesc, TestResult, Vec<u8>),
1057 TeTimeout(TestDesc),
1058 TeFilteredOut(usize),
1061 pub type MonitorMsg = (TestDesc, TestResult, Vec<u8>);
/// `Write` adapter that appends everything to a shared in-memory buffer;
/// used to capture a test's stdout/stderr.
struct Sink(Arc<Mutex<Vec<u8>>>);
impl Write for Sink {
    fn write(&mut self, data: &[u8]) -> io::Result<usize> {
        Write::write(&mut *self.0.lock().unwrap(), data)
    }
    fn flush(&mut self) -> io::Result<()> {
        // Nothing is buffered locally, so flushing is a no-op.
        Ok(())
    }
}
1073 pub fn run_tests<F>(opts: &TestOpts, tests: Vec<TestDescAndFn>, mut callback: F) -> io::Result<()>
1075 F: FnMut(TestEvent) -> io::Result<()>,
1077 use std::collections::{self, HashMap};
1078 use std::hash::BuildHasherDefault;
1079 use std::sync::mpsc::RecvTimeoutError;
1080 // Use a deterministic hasher
1082 HashMap<TestDesc, Instant, BuildHasherDefault<collections::hash_map::DefaultHasher>>;
1084 let tests_len = tests.len();
1086 let mut filtered_tests = filter_tests(opts, tests);
1087 if !opts.bench_benchmarks {
1088 filtered_tests = convert_benchmarks_to_tests(filtered_tests);
1091 let filtered_tests = {
1092 let mut filtered_tests = filtered_tests;
1093 for test in filtered_tests.iter_mut() {
1094 test.desc.name = test.desc.name.with_padding(test.testfn.padding());
1100 let filtered_out = tests_len - filtered_tests.len();
1101 callback(TeFilteredOut(filtered_out))?;
1103 let filtered_descs = filtered_tests.iter().map(|t| t.desc.clone()).collect();
1105 callback(TeFiltered(filtered_descs))?;
1107 let (filtered_tests, filtered_benchs): (Vec<_>, _) =
1108 filtered_tests.into_iter().partition(|e| match e.testfn {
1109 StaticTestFn(_) | DynTestFn(_) => true,
1113 let concurrency = opts.test_threads.unwrap_or_else(get_concurrency);
1115 let mut remaining = filtered_tests;
1116 remaining.reverse();
1117 let mut pending = 0;
1119 let (tx, rx) = channel::<MonitorMsg>();
1121 let mut running_tests: TestMap = HashMap::default();
1123 fn get_timed_out_tests(running_tests: &mut TestMap) -> Vec<TestDesc> {
1124 let now = Instant::now();
1125 let timed_out = running_tests
1127 .filter_map(|(desc, timeout)| {
1128 if &now >= timeout {
1135 for test in &timed_out {
1136 running_tests.remove(test);
1141 fn calc_timeout(running_tests: &TestMap) -> Option<Duration> {
1142 running_tests.values().min().map(|next_timeout| {
1143 let now = Instant::now();
1144 if *next_timeout >= now {
1152 if concurrency == 1 {
1153 while !remaining.is_empty() {
1154 let test = remaining.pop().unwrap();
1155 callback(TeWait(test.desc.clone()))?;
1156 run_test(opts, !opts.run_tests, test, tx.clone(), Concurrent::No);
1157 let (test, result, stdout) = rx.recv().unwrap();
1158 callback(TeResult(test, result, stdout))?;
1161 while pending > 0 || !remaining.is_empty() {
1162 while pending < concurrency && !remaining.is_empty() {
1163 let test = remaining.pop().unwrap();
1164 let timeout = Instant::now() + Duration::from_secs(TEST_WARN_TIMEOUT_S);
1165 running_tests.insert(test.desc.clone(), timeout);
1166 callback(TeWait(test.desc.clone()))?; //here no pad
1167 run_test(opts, !opts.run_tests, test, tx.clone(), Concurrent::Yes);
1173 if let Some(timeout) = calc_timeout(&running_tests) {
1174 res = rx.recv_timeout(timeout);
1175 for test in get_timed_out_tests(&mut running_tests) {
1176 callback(TeTimeout(test))?;
1178 if res != Err(RecvTimeoutError::Timeout) {
1182 res = rx.recv().map_err(|_| RecvTimeoutError::Disconnected);
1187 let (desc, result, stdout) = res.unwrap();
1188 running_tests.remove(&desc);
1190 callback(TeResult(desc, result, stdout))?;
1195 if opts.bench_benchmarks {
1196 // All benchmarks run at the end, in serial.
1197 for b in filtered_benchs {
1198 callback(TeWait(b.desc.clone()))?;
1199 run_test(opts, false, b, tx.clone(), Concurrent::No);
1200 let (test, result, stdout) = rx.recv().unwrap();
1201 callback(TeResult(test, result, stdout))?;
1207 #[allow(deprecated)]
1208 fn get_concurrency() -> usize {
1209 return match env::var("RUST_TEST_THREADS") {
1211 let opt_n: Option<usize> = s.parse().ok();
1213 Some(n) if n > 0 => n,
1215 "RUST_TEST_THREADS is `{}`, should be a positive integer.",
1220 Err(..) => num_cpus(),
1224 #[allow(nonstandard_style)]
1225 fn num_cpus() -> usize {
1227 struct SYSTEM_INFO {
1228 wProcessorArchitecture: u16,
1231 lpMinimumApplicationAddress: *mut u8,
1232 lpMaximumApplicationAddress: *mut u8,
1233 dwActiveProcessorMask: *mut u8,
1234 dwNumberOfProcessors: u32,
1235 dwProcessorType: u32,
1236 dwAllocationGranularity: u32,
1237 wProcessorLevel: u16,
1238 wProcessorRevision: u16,
1241 fn GetSystemInfo(info: *mut SYSTEM_INFO) -> i32;
1244 let mut sysinfo = std::mem::zeroed();
1245 GetSystemInfo(&mut sysinfo);
1246 sysinfo.dwNumberOfProcessors as usize
1250 #[cfg(target_os = "redox")]
1251 fn num_cpus() -> usize {
1252 // FIXME: Implement num_cpus on Redox
1257 all(target_arch = "wasm32", not(target_os = "emscripten")),
1258 all(target_vendor = "fortanix", target_env = "sgx")
1260 fn num_cpus() -> usize {
1265 target_os = "android",
1266 target_os = "cloudabi",
1267 target_os = "emscripten",
1268 target_os = "fuchsia",
1270 target_os = "linux",
1271 target_os = "macos",
1272 target_os = "solaris"
1274 fn num_cpus() -> usize {
1275 unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as usize }
1279 target_os = "freebsd",
1280 target_os = "dragonfly",
1281 target_os = "bitrig",
1282 target_os = "netbsd"
1284 fn num_cpus() -> usize {
1287 let mut cpus: libc::c_uint = 0;
1288 let mut cpus_size = std::mem::size_of_val(&cpus);
1291 cpus = libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as libc::c_uint;
1294 let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
1299 &mut cpus as *mut _ as *mut _,
1300 &mut cpus_size as *mut _ as *mut _,
1312 #[cfg(target_os = "openbsd")]
1313 fn num_cpus() -> usize {
1316 let mut cpus: libc::c_uint = 0;
1317 let mut cpus_size = std::mem::size_of_val(&cpus);
1318 let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
1324 &mut cpus as *mut _ as *mut _,
1325 &mut cpus_size as *mut _ as *mut _,
1336 #[cfg(target_os = "haiku")]
1337 fn num_cpus() -> usize {
1342 #[cfg(target_os = "l4re")]
1343 fn num_cpus() -> usize {
1349 pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
1350 let mut filtered = tests;
1351 let matches_filter = |test: &TestDescAndFn, filter: &str| {
1352 let test_name = test.desc.name.as_slice();
1354 match opts.filter_exact {
1355 true => test_name == filter,
1356 false => test_name.contains(filter),
1360 // Remove tests that don't match the test filter
1361 if let Some(ref filter) = opts.filter {
1362 filtered.retain(|test| matches_filter(test, filter));
1365 // Skip tests that match any of the skip filters
1366 filtered.retain(|test| !opts.skip.iter().any(|sf| matches_filter(test, sf)));
1368 // maybe unignore tests
1369 match opts.run_ignored {
1370 RunIgnored::Yes => {
1373 .for_each(|test| test.desc.ignore = false);
1375 RunIgnored::Only => {
1376 filtered.retain(|test| test.desc.ignore);
1379 .for_each(|test| test.desc.ignore = false);
1381 RunIgnored::No => {}
1384 // Sort the tests alphabetically
1385 filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));
// NOTE(review): interior lines of this listing are elided (the map/collect
// plumbing around `x` and the closure tails are missing). Each bench
// variant is rewrapped as a DynTestFn that executes the bench body once
// via `bench::run_once`.
1390 pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
1391     // convert benchmarks to tests, if we're not benchmarking them
1395         let testfn = match x.testfn {
1396             DynBenchFn(bench) => DynTestFn(Box::new(move || {
1397                 bench::run_once(|b| __rust_begin_short_backtrace(|| bench.run(b)))
1399             StaticBenchFn(benchfn) => DynTestFn(Box::new(move || {
1400                 bench::run_once(|b| __rust_begin_short_backtrace(|| benchfn(b)))
// NOTE(review): fragment of `pub fn run_test` — the `fn` opener and several
// interior lines are elided from this listing.
1415     test: TestDescAndFn,
1416     monitor_ch: Sender<MonitorMsg>,
1417     concurrency: Concurrent,
1419     let TestDescAndFn { desc, testfn } = test;
// Tests that expect a panic cannot work on wasm32 without unwinding
// support (except under emscripten), so they are force-ignored.
1421     let ignore_because_panic_abort = cfg!(target_arch = "wasm32")
1422         && !cfg!(target_os = "emscripten")
1423         && desc.should_panic != ShouldPanic::No;
// Ignored tests still report a TrIgnored result over the monitor channel.
1425     if force_ignore || desc.ignore || ignore_because_panic_abort {
1426         monitor_ch.send((desc, TrIgnored, Vec::new())).unwrap();
1432 monitor_ch: Sender<MonitorMsg>,
1434 testfn: Box<dyn FnBox() + Send>,
1435 concurrency: Concurrent,
1437 // Buffer for capturing standard I/O
1438 let data = Arc::new(Mutex::new(Vec::new()));
1439 let data2 = data.clone();
1441 let name = desc.name.clone();
1442 let runtest = move || {
1443 let oldio = if !nocapture {
1445 io::set_print(Some(Box::new(Sink(data2.clone())))),
1446 io::set_panic(Some(Box::new(Sink(data2)))),
1452 let result = catch_unwind(AssertUnwindSafe(testfn));
1454 if let Some((printio, panicio)) = oldio {
1455 io::set_print(printio);
1456 io::set_panic(panicio);
1459 let test_result = calc_result(&desc, result);
1460 let stdout = data.lock().unwrap().to_vec();
1462 .send((desc.clone(), test_result, stdout))
1466 // If the platform is single-threaded we're just going to run
1467 // the test synchronously, regardless of the concurrency
1469 let supports_threads = !cfg!(target_os = "emscripten") && !cfg!(target_arch = "wasm32");
1470 if concurrency == Concurrent::Yes && supports_threads {
1471 let cfg = thread::Builder::new().name(name.as_slice().to_owned());
1472 cfg.spawn(runtest).unwrap();
1479 DynBenchFn(bencher) => {
1480 crate::bench::benchmark(desc, monitor_ch, opts.nocapture, |harness| {
1481 bencher.run(harness)
1484 StaticBenchFn(benchfn) => {
1485 crate::bench::benchmark(desc, monitor_ch, opts.nocapture, |harness| {
1486 (benchfn.clone())(harness)
1490 let cb = move || __rust_begin_short_backtrace(f);
1491 run_test_inner(desc, monitor_ch, opts.nocapture, Box::new(cb), concurrency)
1493 StaticTestFn(f) => run_test_inner(
1497 Box::new(move || __rust_begin_short_backtrace(f)),
/// Fixed frame used to clean the backtrace with `RUST_BACKTRACE=1`.
#[inline(never)]
fn __rust_begin_short_backtrace<F: FnOnce()>(f: F) {
    f()
}
// Maps a finished test's panic outcome onto a `TestResult`, honoring the
// `should_panic` and `allow_fail` settings in the test descriptor.
// NOTE(review): several interior lines are missing from this excerpt.
1509 fn calc_result(desc: &TestDesc, task_result: Result<(), Box<dyn Any + Send>>) -> TestResult {
1510 match (&desc.should_panic, task_result) {
// Expected outcomes: a non-panicking test that returned Ok, or a
// should-panic test that did panic.
1511 (&ShouldPanic::No, Ok(())) | (&ShouldPanic::Yes, Err(_)) => TrOk,
// Panic payloads are either `String` (formatted panics) or `&'static str`
// literals; try both downcasts and check the message contains `msg`.
1512 (&ShouldPanic::YesWithMessage(msg), Err(ref err)) => {
1514 .downcast_ref::<String>()
1516 .or_else(|| err.downcast_ref::<&'static str>().map(|e| *e))
1517 .map(|e| e.contains(msg))
// Message mismatch: downgraded to "allowed fail" when permitted,
// otherwise reported with an explanatory failure message.
1522 if desc.allow_fail {
1525 TrFailedMsg(format!("Panic did not include expected string '{}'", msg))
// Any other combination is a plain failure unless failures are allowed.
1529 _ if desc.allow_fail => TrAllowedFail,
// Named benchmark metrics; a BTreeMap keeps entries sorted by name so
// formatted output is stable.
1534 #[derive(Clone, PartialEq)]
1535 pub struct MetricMap(BTreeMap<String, Metric>);
// Creates an empty metric map.
1538 pub fn new() -> MetricMap {
1539 MetricMap(BTreeMap::new())
1542 /// Insert a named `value` (+/- `noise`) metric into the map. The value
1543 /// must be non-negative. The `noise` indicates the uncertainty of the
1544 /// metric, which doubles as the "noise range" of acceptable
1545 /// pairwise-regressions on this named value, when comparing from one
1546 /// metric to the next using `compare_to_old`.
1548 /// If `noise` is positive, then it means this metric is of a value
1549 /// you want to see grow smaller, so a change larger than `noise` in the
1550 /// positive direction represents a regression.
1552 /// If `noise` is negative, then it means this metric is of a value
1553 /// you want to see grow larger, so a change larger than `noise` in the
1554 /// negative direction represents a regression.
1555 pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
1556 let m = Metric { value, noise };
// Silently overwrites any existing metric of the same name.
1557 self.0.insert(name.to_owned(), m);
// Renders each metric as "name: value (+/- noise)" for human-readable
// display; iteration order is the map's sorted key order.
1560 pub fn fmt_metrics(&self) -> String {
1564 .map(|(k, v)| format!("{}: {} (+/- {})", *k, v.value, v.noise))
1565 .collect::<Vec<_>>();
1572 /// A function that is opaque to the optimizer, to allow benchmarks to
1573 /// pretend to use outputs to assist in avoiding dead-code
1576 /// This function is a no-op, and does not even read from `dummy`.
1577 #[cfg(not(any(target_arch = "asmjs", target_arch = "wasm32")))]
1578 pub fn black_box<T>(dummy: T) -> T {
1579 // we need to "use" the argument in some way LLVM can't
// SAFETY: the empty asm statement only takes the address of `dummy` as an
// input operand and performs no memory access or side effect.
1581 unsafe { asm!("" : : "r"(&dummy)) }
// Fallback for targets without inline-asm support (asmjs/wasm32): a plain
// identity function, so it provides no optimizer barrier on these targets.
1584 #[cfg(any(target_arch = "asmjs", target_arch = "wasm32"))]
1586 pub fn black_box<T>(dummy: T) -> T {
1591 /// Callback for benchmark functions to run in their body.
1592 pub fn iter<T, F>(&mut self, mut inner: F)
// Single mode (used by `run_once`): execute the closure exactly once,
// discarding the timing.
1596 if self.mode == BenchMode::Single {
1597 ns_iter_inner(&mut inner, 1);
// Auto mode: run the adaptive sampling loop and record its summary.
1601 self.summary = Some(iter(&mut inner));
// Runs benchmark closure `f` against this Bencher and returns whatever
// summary `iter` recorded (`None` if the body never called `iter`).
1604 pub fn bench<F>(&mut self, mut f: F) -> Option<stats::Summary>
1606 F: FnMut(&mut Bencher),
1609 return self.summary;
// Converts a Duration to whole nanoseconds. The multiply can overflow for
// extremely long durations, but benchmark runs are capped far below that.
1613 fn ns_from_dur(dur: Duration) -> u64 {
1614 dur.as_secs() * 1_000_000_000 + (dur.subsec_nanos() as u64)
// Times `k` consecutive invocations of `inner`, returning the total
// elapsed time in nanoseconds.
1617 fn ns_iter_inner<T, F>(inner: &mut F, k: u64) -> u64
1621 let start = Instant::now();
1625 return ns_from_dur(start.elapsed());
// Adaptive benchmark loop: takes a ballpark single-run measurement, then
// repeatedly samples with a growing iteration count until the statistics
// converge or a 3s time budget is exhausted.
// NOTE(review): the loop's enclosing braces and exit statements are
// missing from this excerpt.
1628 pub fn iter<T, F>(inner: &mut F) -> stats::Summary
1632 // Initial bench run to get ballpark figure.
1633 let ns_single = ns_iter_inner(inner, 1);
1635 // Try to estimate iter count for 1ms falling back to 1m
1636 // iterations if first run took < 1ns.
1637 let ns_target_total = 1_000_000; // 1ms
1638 let mut n = ns_target_total / cmp::max(1, ns_single);
1640 // if the first run took more than 1ms we don't want to just
1641 // be left doing 0 iterations on every loop. The unfortunate
1642 // side effect of not being able to do as many runs is
1643 // automatically handled by the statistical analysis below
1644 // (i.e., larger error bars).
1647 let mut total_run = Duration::new(0, 0);
1648 let samples: &mut [f64] = &mut [0.0_f64; 50];
1650 let loop_start = Instant::now();
// First sample set: 50 measurements of `n` iterations each, normalized
// to ns per single iteration.
1652 for p in &mut *samples {
1653 *p = ns_iter_inner(inner, n) as f64 / n as f64;
// Winsorize to clamp the most extreme 5% of outliers before summarizing.
1656 stats::winsorize(samples, 5.0);
1657 let summ = stats::Summary::new(samples);
// Second sample set at 5x the iteration count, used to check the two
// estimates agree (i.e. the measurement has converged).
1659 for p in &mut *samples {
1660 let ns = ns_iter_inner(inner, 5 * n);
1661 *p = ns as f64 / (5 * n) as f64;
1664 stats::winsorize(samples, 5.0);
1665 let summ5 = stats::Summary::new(samples);
1667 let loop_run = loop_start.elapsed();
1669 // If we've run for 100ms and seem to have converged to a
1671 if loop_run > Duration::from_millis(100)
1672 && summ.median_abs_dev_pct < 1.0
1673 && summ.median - summ5.median < summ5.median_abs_dev
1678 total_run = total_run + loop_run;
1679 // Longest we ever run for is 3s.
1680 if total_run > Duration::from_secs(3) {
1684 // If we overflow here just return the results so far. We check a
1685 // multiplier of 10 because we're about to multiply by 2 and the
1686 // next iteration of the loop will also multiply by 5 (to calculate
1687 // the summ5 result)
1688 n = match n.checked_mul(10) {
1698 use super::{BenchMode, BenchSamples, Bencher, MonitorMsg, Sender, Sink, TestDesc, TestResult};
1702 use std::panic::{catch_unwind, AssertUnwindSafe};
1703 use std::sync::{Arc, Mutex};
// Runs a single benchmark function to completion, capturing its standard
// output unless `nocapture` is set, and reports the outcome on
// `monitor_ch`. NOTE(review): several interior lines (struct fields,
// braces) are missing from this excerpt.
1705 pub fn benchmark<F>(desc: TestDesc, monitor_ch: Sender<MonitorMsg>, nocapture: bool, f: F)
1707 F: FnMut(&mut Bencher),
1709 let mut bs = Bencher {
1710 mode: BenchMode::Auto,
// Shared buffer that the Sink writers append captured output into.
1715 let data = Arc::new(Mutex::new(Vec::new()));
1716 let data2 = data.clone();
// Redirect print/panic output into the capture buffer, remembering the
// previous handlers so they can be restored afterwards.
1718 let oldio = if !nocapture {
1720 io::set_print(Some(Box::new(Sink(data2.clone())))),
1721 io::set_panic(Some(Box::new(Sink(data2)))),
// Run the benchmark, trapping any panic so it can be reported as a
// failure instead of tearing down the harness.
1727 let result = catch_unwind(AssertUnwindSafe(|| bs.bench(f)));
1729 if let Some((printio, panicio)) = oldio {
1730 io::set_print(printio);
1731 io::set_panic(panicio);
1734 let test_result = match result {
1736 Ok(Some(ns_iter_summ)) => {
// Median ns/iter, clamped to at least 1 so the throughput division
// below can't divide by zero.
1737 let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
1738 let mb_s = bs.bytes * 1000 / ns_iter;
1740 let bs = BenchSamples {
1742 mb_s: mb_s as usize,
1744 TestResult::TrBench(bs)
1747 // iter not called, so no data.
1748 // FIXME: error in this case?
// Report a placeholder single-sample summary with zero throughput.
1749 let samples: &mut [f64] = &mut [0.0_f64; 1];
1750 let bs = BenchSamples {
1751 ns_iter_summ: stats::Summary::new(samples),
1754 TestResult::TrBench(bs)
1756 Err(_) => TestResult::TrFailed,
// Forward the captured output and the result to the test monitor.
1759 let stdout = data.lock().unwrap().to_vec();
1760 monitor_ch.send((desc, test_result, stdout)).unwrap();
// Runs a benchmark body exactly once via BenchMode::Single, without the
// adaptive timing loop.
1763 pub fn run_once<F>(f: F)
1765 F: FnMut(&mut Bencher),
1767 let mut bs = Bencher {
1768 mode: BenchMode::Single,
1780 filter_tests, parse_opts, run_test, DynTestFn, DynTestName, MetricMap, RunIgnored,
1781 ShouldPanic, StaticTestName, TestDesc, TestDescAndFn, TestOpts, TrFailed, TrFailedMsg,
1785 use crate::Concurrent;
1786 use std::sync::mpsc::channel;
// Fixture: builds one ignored and one unignored test ("1" and "2" —
// which is which is not visible in this excerpt), used by the
// run-ignored filtering tests below.
1788 fn one_ignored_one_unignored_test() -> Vec<TestDescAndFn> {
1792 name: StaticTestName("1"),
1794 should_panic: ShouldPanic::No,
1797 testfn: DynTestFn(Box::new(move || {})),
1801 name: StaticTestName("2"),
1803 should_panic: ShouldPanic::No,
1806 testfn: DynTestFn(Box::new(move || {})),
// An ignored test run with default options must not report TrOk.
1812 pub fn do_not_run_ignored_tests() {
1816 let desc = TestDescAndFn {
1818 name: StaticTestName("whatever"),
1820 should_panic: ShouldPanic::No,
1823 testfn: DynTestFn(Box::new(f)),
1825 let (tx, rx) = channel();
1826 run_test(&TestOpts::new(), false, desc, tx, Concurrent::No);
1827 let (_, res, _) = rx.recv().unwrap();
1828 assert!(res != TrOk);
// An ignored test should yield the TrIgnored result.
1832 pub fn ignored_tests_result_in_ignored() {
1834 let desc = TestDescAndFn {
1836 name: StaticTestName("whatever"),
1838 should_panic: ShouldPanic::No,
1841 testfn: DynTestFn(Box::new(f)),
1843 let (tx, rx) = channel();
1844 run_test(&TestOpts::new(), false, desc, tx, Concurrent::No);
1845 let (_, res, _) = rx.recv().unwrap();
1846 assert!(res == TrIgnored);
// A should_panic test whose body panics must pass (TrOk).
1850 fn test_should_panic() {
1854 let desc = TestDescAndFn {
1856 name: StaticTestName("whatever"),
1858 should_panic: ShouldPanic::Yes,
1861 testfn: DynTestFn(Box::new(f)),
1863 let (tx, rx) = channel();
1864 run_test(&TestOpts::new(), false, desc, tx, Concurrent::No);
1865 let (_, res, _) = rx.recv().unwrap();
1866 assert!(res == TrOk);
// should_panic with an expected message passes when the actual panic
// message contains that substring.
1870 fn test_should_panic_good_message() {
1872 panic!("an error message");
1874 let desc = TestDescAndFn {
1876 name: StaticTestName("whatever"),
1878 should_panic: ShouldPanic::YesWithMessage("error message"),
1881 testfn: DynTestFn(Box::new(f)),
1883 let (tx, rx) = channel();
1884 run_test(&TestOpts::new(), false, desc, tx, Concurrent::No);
1885 let (_, res, _) = rx.recv().unwrap();
1886 assert!(res == TrOk);
// should_panic with a non-matching expected message fails with a
// TrFailedMsg spelling out the mismatch (must match the exact message
// format produced by calc_result).
1890 fn test_should_panic_bad_message() {
1892 panic!("an error message");
1894 let expected = "foobar";
1895 let failed_msg = "Panic did not include expected string";
1896 let desc = TestDescAndFn {
1898 name: StaticTestName("whatever"),
1900 should_panic: ShouldPanic::YesWithMessage(expected),
1903 testfn: DynTestFn(Box::new(f)),
1905 let (tx, rx) = channel();
1906 run_test(&TestOpts::new(), false, desc, tx, Concurrent::No);
1907 let (_, res, _) = rx.recv().unwrap();
1908 assert!(res == TrFailedMsg(format!("{} '{}'", failed_msg, expected)));
// A should_panic test that returns normally must fail (TrFailed).
1912 fn test_should_panic_but_succeeds() {
1914 let desc = TestDescAndFn {
1916 name: StaticTestName("whatever"),
1918 should_panic: ShouldPanic::Yes,
1921 testfn: DynTestFn(Box::new(f)),
1923 let (tx, rx) = channel();
1924 run_test(&TestOpts::new(), false, desc, tx, Concurrent::No);
1925 let (_, res, _) = rx.recv().unwrap();
1926 assert!(res == TrFailed);
// `--ignored` on the CLI selects only ignored tests (RunIgnored::Only).
1930 fn parse_ignored_flag() {
1932 "progname".to_string(),
1933 "filter".to_string(),
1934 "--ignored".to_string(),
// Outer unwrap: no parse error; inner unwrap: options were produced
// (not a help/version early exit).
1936 let opts = parse_opts(&args).unwrap().unwrap();
1937 assert_eq!(opts.run_ignored, RunIgnored::Only);
// `--include-ignored` (gated behind -Zunstable-options) runs ignored and
// unignored tests alike (RunIgnored::Yes).
1941 fn parse_include_ignored_flag() {
1943 "progname".to_string(),
1944 "filter".to_string(),
1945 "-Zunstable-options".to_string(),
1946 "--include-ignored".to_string(),
1948 let opts = parse_opts(&args).unwrap().unwrap();
1949 assert_eq!(opts.run_ignored, RunIgnored::Yes);
1953 pub fn filter_for_ignored_option() {
1954 // When we run ignored tests the test filter should filter out all the
1955 // unignored tests and flip the ignore flag on the rest to false
1957 let mut opts = TestOpts::new();
1958 opts.run_tests = true;
1959 opts.run_ignored = RunIgnored::Only;
1961 let tests = one_ignored_one_unignored_test();
1962 let filtered = filter_tests(&opts, tests);
// Only the ignored fixture test ("1") survives, with its ignore flag
// cleared so it will actually be run.
1964 assert_eq!(filtered.len(), 1);
1965 assert_eq!(filtered[0].desc.name.to_string(), "1");
1966 assert!(!filtered[0].desc.ignore);
1970 pub fn run_include_ignored_option() {
1971 // When we "--include-ignored" tests, the ignore flag should be set to false on
1972 // all tests and no test filtered out
1974 let mut opts = TestOpts::new();
1975 opts.run_tests = true;
1976 opts.run_ignored = RunIgnored::Yes;
1978 let tests = one_ignored_one_unignored_test();
1979 let filtered = filter_tests(&opts, tests);
// Both fixture tests survive, each with its ignore flag cleared.
1981 assert_eq!(filtered.len(), 2);
1982 assert!(!filtered[0].desc.ignore);
1983 assert!(!filtered[1].desc.ignore);
// Substring filtering matches any test whose name contains the filter
// string; exact mode (presumably `filter_exact` in the elided TestOpts
// fields — confirm against the full file) requires a full-name match.
1987 pub fn exact_filter_match() {
1988 fn tests() -> Vec<TestDescAndFn> {
1989 vec!["base", "base::test", "base::test1", "base::test2"]
1991 .map(|name| TestDescAndFn {
1993 name: StaticTestName(name),
1995 should_panic: ShouldPanic::No,
1998 testfn: DynTestFn(Box::new(move || {})),
// Substring mode: "base" and "bas" are prefixes of all four names.
2003 let substr = filter_tests(
2005 filter: Some("base".into()),
2010 assert_eq!(substr.len(), 4);
2012 let substr = filter_tests(
2014 filter: Some("bas".into()),
2019 assert_eq!(substr.len(), 4);
// "::test" and "base::test" occur in the three namespaced names only.
2021 let substr = filter_tests(
2023 filter: Some("::test".into()),
2028 assert_eq!(substr.len(), 3);
2030 let substr = filter_tests(
2032 filter: Some("base::test".into()),
2037 assert_eq!(substr.len(), 3);
// Exact mode: only a complete name match counts.
2039 let exact = filter_tests(
2041 filter: Some("base".into()),
2047 assert_eq!(exact.len(), 1);
2049 let exact = filter_tests(
2051 filter: Some("bas".into()),
2057 assert_eq!(exact.len(), 0);
2059 let exact = filter_tests(
2061 filter: Some("::test".into()),
2067 assert_eq!(exact.len(), 0);
2069 let exact = filter_tests(
2071 filter: Some("base::test".into()),
2077 assert_eq!(exact.len(), 1);
// filter_tests should return the surviving tests sorted by name.
2081 pub fn sort_tests() {
2082 let mut opts = TestOpts::new();
2083 opts.run_tests = true;
// Deliberately unsorted input names.
2086 "sha1::test".to_string(),
2087 "isize::test_to_str".to_string(),
2088 "isize::test_pow".to_string(),
2089 "test::do_not_run_ignored_tests".to_string(),
2090 "test::ignored_tests_result_in_ignored".to_string(),
2091 "test::first_free_arg_should_be_a_filter".to_string(),
2092 "test::parse_ignored_flag".to_string(),
2093 "test::parse_include_ignored_flag".to_string(),
2094 "test::filter_for_ignored_option".to_string(),
2095 "test::run_include_ignored_option".to_string(),
2096 "test::sort_tests".to_string(),
2100 let mut tests = Vec::new();
2101 for name in &names {
2102 let test = TestDescAndFn {
2104 name: DynTestName((*name).clone()),
2106 should_panic: ShouldPanic::No,
2109 testfn: DynTestFn(Box::new(testfn)),
2115 let filtered = filter_tests(&opts, tests);
// The same names, now in lexicographic order.
2117 let expected = vec![
2118 "isize::test_pow".to_string(),
2119 "isize::test_to_str".to_string(),
2120 "sha1::test".to_string(),
2121 "test::do_not_run_ignored_tests".to_string(),
2122 "test::filter_for_ignored_option".to_string(),
2123 "test::first_free_arg_should_be_a_filter".to_string(),
2124 "test::ignored_tests_result_in_ignored".to_string(),
2125 "test::parse_ignored_flag".to_string(),
2126 "test::parse_include_ignored_flag".to_string(),
2127 "test::run_include_ignored_option".to_string(),
2128 "test::sort_tests".to_string(),
2131 for (a, b) in expected.iter().zip(filtered) {
2132 assert!(*a == b.desc.name.to_string());
// Exercises MetricMap::insert_metric with pairs that improved/regressed
// in both the "want smaller" (positive noise) and "want larger"
// (negative noise) directions. No assertions are visible in this
// excerpt — TODO confirm what the full test checks.
2137 pub fn test_metricmap_compare() {
2138 let mut m1 = MetricMap::new();
2139 let mut m2 = MetricMap::new();
2140 m1.insert_metric("in-both-noise", 1000.0, 200.0);
2141 m2.insert_metric("in-both-noise", 1100.0, 200.0);
2143 m1.insert_metric("in-first-noise", 1000.0, 2.0);
2144 m2.insert_metric("in-second-noise", 1000.0, 2.0);
2146 m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
2147 m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);
2149 m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
2150 m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);
2152 m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
2153 m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);
2155 m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
2156 m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);
// run_once with a body that never calls `iter`.
2160 pub fn test_bench_once_no_iter() {
2161 fn f(_: &mut Bencher) {}
// run_once with a body that does call `iter`.
2166 pub fn test_bench_once_iter() {
2167 fn f(b: &mut Bencher) {
// Full benchmark-harness run where the body never calls `iter`; per the
// Ok(None) branch in bench::benchmark this still reports a placeholder
// TrBench result rather than failing.
2174 pub fn test_bench_no_iter() {
2175 fn f(_: &mut Bencher) {}
2177 let (tx, rx) = channel();
2179 let desc = TestDesc {
2180 name: StaticTestName("f"),
2182 should_panic: ShouldPanic::No,
// `nocapture = true`: skip the stdio-redirect machinery in this test.
2186 crate::bench::benchmark(desc, tx, true, f);
2191 pub fn test_bench_iter() {
2192 fn f(b: &mut Bencher) {
2196 let (tx, rx) = channel();
2198 let desc = TestDesc {
2199 name: StaticTestName("f"),
2201 should_panic: ShouldPanic::No,
2205 crate::bench::benchmark(desc, tx, true, f);