1 //! Support code for rustc's built in unit-test and micro-benchmarking
4 //! Almost all user code will only be interested in `Bencher` and
5 //! `black_box`. All other interactions (such as writing tests and
6 //! benchmarks themselves) should be done via the `#[test]` and
7 //! `#[bench]` attributes.
9 //! See the [Testing Chapter](../book/first-edition/testing.html) of the book for more details.
11 // Currently, not much of this is meant for users. It is intended to
12 // support the simplest interface possible for representing and
13 // running tests while providing a base that other test frameworks may
16 // N.B., this is also specified in this crate's Cargo.toml, but libsyntax contains logic specific to
17 // this crate, which relies on this attribute (rather than the value of `--crate-name` passed by
18 // cargo) to detect this crate.
20 #![crate_name = "test"]
21 #![unstable(feature = "test", issue = "27812")]
22 #![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
23 html_favicon_url = "https://doc.rust-lang.org/favicon.ico",
24 html_root_url = "https://doc.rust-lang.org/nightly/", test(attr(deny(warnings))))]
27 #![cfg_attr(any(unix, target_os = "cloudabi"), feature(libc, rustc_private))]
29 #![feature(set_stdio)]
30 #![feature(panic_unwind)]
31 #![feature(staged_api)]
32 #![feature(termination_trait_lib)]
36 #[cfg(any(unix, target_os = "cloudabi"))]
40 // FIXME(#54291): rustc and/or LLVM don't yet support building with panic-unwind
41 // on aarch64-pc-windows-msvc, so we don't link libtest against
42 // libunwind (for the time being), even though it means that
43 // libtest won't be fully functional on this platform.
45 // See also: https://github.com/rust-lang/rust/issues/54190#issuecomment-422904437
46 #[cfg(not(all(windows, target_arch = "aarch64")))]
47 extern crate panic_unwind;
49 pub use self::TestFn::*;
50 pub use self::ColorConfig::*;
51 pub use self::TestResult::*;
52 pub use self::TestName::*;
53 use self::TestEvent::*;
54 use self::NamePadding::*;
55 use self::OutputLocation::*;
57 use std::panic::{catch_unwind, AssertUnwindSafe};
59 use std::boxed::FnBox;
61 use std::collections::BTreeMap;
65 use std::io::prelude::*;
67 use std::path::PathBuf;
68 use std::process::Termination;
69 use std::sync::mpsc::{channel, Sender};
70 use std::sync::{Arc, Mutex};
72 use std::time::{Duration, Instant};
76 const TEST_WARN_TIMEOUT_S: u64 = 60;
77 const QUIET_MODE_MAX_COLUMN: usize = 100; // insert a '\n' after 100 tests in quiet mode
79 // to be used by rustc to compile tests in libtest
81 pub use {assert_test_result, filter_tests, parse_opts, run_test, test_main, test_main_static,
82 Bencher, DynTestFn, DynTestName, Metric, MetricMap, Options, RunIgnored, ShouldPanic,
83 StaticBenchFn, StaticTestFn, StaticTestName, TestDesc, TestDescAndFn, TestName,
84 TestOpts, TestResult, TrFailed, TrFailedMsg, TrIgnored, TrOk};
90 use formatters::{JsonFormatter, OutputFormatter, PrettyFormatter, TerseFormatter};
92 /// Whether to execute tests concurrently or not
// Two-variant flag (rather than a bare bool) so call sites of `run_test`
// read as `Concurrent::Yes` / `Concurrent::No`.
93 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
94 pub enum Concurrent { Yes, No }
96 // The name of a test. By convention this follows the rules for rust
97 // paths; i.e., it should be a series of identifiers separated by double
98 // colons. This way if some test runner wants to arrange the tests
99 // hierarchically it may.
101 #[derive(Clone, PartialEq, Eq, Hash, Debug)]
103 StaticTestName(&'static str),
105 AlignedTestName(Cow<'static, str>, NamePadding),
// Returns the textual name regardless of which variant holds it.
// NOTE(review): excerpt is elided here — the `match` opener and closing
// braces for these methods are not visible in this view.
108 fn as_slice(&self) -> &str {
110 StaticTestName(s) => s,
111 DynTestName(ref s) => s,
112 AlignedTestName(ref s, _) => &*s,
// Padding associated with the name; only `AlignedTestName` carries one.
116 fn padding(&self) -> NamePadding {
118 &AlignedTestName(_, p) => p,
// Rebuilds the name as an `AlignedTestName` with the given padding,
// borrowing or cloning the underlying string as needed.
123 fn with_padding(&self, padding: NamePadding) -> TestName {
124 let name = match self {
125 &TestName::StaticTestName(name) => Cow::Borrowed(name),
126 &TestName::DynTestName(ref name) => Cow::Owned(name.clone()),
127 &TestName::AlignedTestName(ref name, _) => name.clone(),
130 TestName::AlignedTestName(name, padding)
// Display delegates to the plain string form of the name.
133 impl fmt::Display for TestName {
134 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
135 fmt::Display::fmt(self.as_slice(), f)
// How a test name should be padded when printed in aligned console output.
139 #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
140 pub enum NamePadding {
// Pads the name with spaces up to `column_count` columns (saturating, so
// over-long names are left untouched).
146 fn padded_name(&self, column_count: usize, align: NamePadding) -> String {
147 let mut name = String::from(self.name.as_slice());
148 let fill = column_count.saturating_sub(name.len());
149 let pad = " ".repeat(fill);
160 /// Represents a benchmark function.
161 pub trait TDynBenchFn: Send {
162 fn run(&self, harness: &mut Bencher);
165 // A function that runs a test. If the function returns successfully,
166 // the test succeeds; if the function panics then the test fails. We
167 // may need to come up with a more clever definition of test in order
168 // to support isolation of tests into threads.
171 StaticBenchFn(fn(&mut Bencher)),
172 DynTestFn(Box<dyn FnBox() + Send>),
173 DynBenchFn(Box<dyn TDynBenchFn + 'static>),
// Bench variants are padded on the right so their timing columns line up;
// plain tests get no padding.
177 fn padding(&self) -> NamePadding {
179 StaticTestFn(..) => PadNone,
180 StaticBenchFn(..) => PadOnRight,
181 DynTestFn(..) => PadNone,
182 DynBenchFn(..) => PadOnRight,
// Manual Debug impl: the contained closures/fn-pointers are not Debug,
// so only the variant name is printed.
187 impl fmt::Debug for TestFn {
188 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
189 f.write_str(match *self {
190 StaticTestFn(..) => "StaticTestFn(..)",
191 StaticBenchFn(..) => "StaticBenchFn(..)",
192 DynTestFn(..) => "DynTestFn(..)",
193 DynBenchFn(..) => "DynBenchFn(..)",
198 /// Manager of the benchmarking runs.
200 /// This is fed into functions marked with `#[bench]` to allow for
201 /// set-up & tear-down before running a piece of code repeatedly via a
206 summary: Option<stats::Summary>,
// NOTE(review): several struct/enum headers in this region are elided
// from this view; comments below annotate only what is visible.
210 #[derive(Clone, PartialEq, Eq)]
216 #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
217 pub enum ShouldPanic {
220 YesWithMessage(&'static str),
223 // The definition of a single test. A test runner will run a list of
225 #[derive(Clone, Debug, PartialEq, Eq, Hash)]
226 pub struct TestDesc {
229 pub should_panic: ShouldPanic,
230 pub allow_fail: bool,
234 pub struct TestDescAndFn {
// A measured value with its uncertainty (see `MetricMap::insert_metric`).
239 #[derive(Clone, PartialEq, Debug, Copy)]
246 pub fn new(value: f64, noise: f64) -> Metric {
247 Metric { value, noise }
// Run-time options beyond what the CLI flags express.
251 /// In case we want to add other options as well, just add them in this struct.
252 #[derive(Copy, Clone, Debug)]
254 display_output: bool,
258 pub fn new() -> Options {
260 display_output: false,
// Builder-style setter: consumes and returns `self`.
264 pub fn display_output(mut self, display_output: bool) -> Options {
265 self.display_output = display_output;
270 // The default console test runner. It accepts the command line
271 // arguments and a vector of test_descs.
// Exits the process with code 101 when the run reports failures.
272 pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>, options: Options) {
273 let mut opts = match parse_opts(args) {
276 eprintln!("error: {}", msg);
282 opts.options = options;
284 if let Err(e) = list_tests_console(&opts, tests) {
285 eprintln!("error: io error when listing tests: {:?}", e);
289 match run_tests_console(&opts, tests) {
291 Ok(false) => process::exit(101),
293 eprintln!("error: io error when listing tests: {:?}", e);
300 // A variant optimized for invocation with a static test vector.
301 // This will panic (intentionally) when fed any dynamic tests, because
302 // it is copying the static values out into a dynamic vector and cannot
303 // copy dynamic values. It is doing this because from this point on
304 // a Vec<TestDescAndFn> is used in order to effect ownership-transfer
305 // semantics into parallel test runners, which in turn requires a Vec<>
306 // rather than a &[].
307 pub fn test_main_static(tests: &[&TestDescAndFn]) {
308 let args = env::args().collect::<Vec<_>>();
// Copy out the fn-pointer variants; Dyn* variants cannot be duplicated,
// hence the deliberate panic below.
309 let owned_tests = tests
311 .map(|t| match t.testfn {
312 StaticTestFn(f) => TestDescAndFn {
313 testfn: StaticTestFn(f),
314 desc: t.desc.clone(),
316 StaticBenchFn(f) => TestDescAndFn {
317 testfn: StaticBenchFn(f),
318 desc: t.desc.clone(),
320 _ => panic!("non-static tests passed to test::test_main_static"),
323 test_main(&args, owned_tests, Options::new())
326 /// Invoked when unit tests terminate. Should panic if the unit
327 /// test is considered a failure. By default, invokes `report()`
328 /// and checks for a `0` result.
// The status code comes from `Termination::report()`; a non-zero code
// triggers the panic whose message text follows (message spans elided lines).
329 pub fn assert_test_result<T: Termination>(result: T) {
330 let code = result.report();
334 "the test returned a termination value with a non-zero status code ({}) \
335 which indicates a failure",
340 #[derive(Copy, Clone, Debug)]
341 pub enum ColorConfig {
347 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
348 pub enum OutputFormat {
354 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
354 pub enum RunIgnored {
// Parsed command-line configuration consumed by the runners below.
// NOTE(review): line above duplicated by excerpt elision artifact? — the
// visible text is preserved as-is; see original lines 354-362.
362 pub struct TestOpts {
364 pub filter: Option<String>,
365 pub filter_exact: bool,
366 pub run_ignored: RunIgnored,
368 pub bench_benchmarks: bool,
369 pub logfile: Option<PathBuf>,
371 pub color: ColorConfig,
372 pub format: OutputFormat,
373 pub test_threads: Option<usize>,
374 pub skip: Vec<String>,
375 pub options: Options,
// Defaults used by tests of this module (several field initializers elided).
380 fn new() -> TestOpts {
385 run_ignored: RunIgnored::No,
387 bench_benchmarks: false,
391 format: OutputFormat::Pretty,
394 options: Options::new(),
399 /// Result of parsing the options.
400 pub type OptRes = Result<TestOpts, String>;
// Builds the getopts option table shared by `usage` and `parse_opts`.
// NOTE(review): several option strings below span elided lines; no comments
// are inserted inside them.
402 fn optgroups() -> getopts::Options {
403 let mut opts = getopts::Options::new();
404 opts.optflag("", "include-ignored", "Run ignored and not ignored tests")
405 .optflag("", "ignored", "Run only ignored tests")
406 .optflag("", "test", "Run tests and not benchmarks")
407 .optflag("", "bench", "Run benchmarks instead of tests")
408 .optflag("", "list", "List all tests and benchmarks")
409 .optflag("h", "help", "Display this message (longer with --help)")
413 "Write logs to the specified file instead \
420 "don't capture stdout/stderr of each \
421 task, allow printing directly",
426 "Number of threads used for running tests \
433 "Skip tests whose names contain FILTER (this flag can \
434 be used multiple times)",
440 "Display one character per test instead of one line. \
441 Alias to --format=terse",
446 "Exactly match filters rather than by substring",
451 "Configure coloring of output:
452 auto = colorize if stdout is a tty and tests are run on serially (default);
453 always = always colorize output;
454 never = never colorize output;",
460 "Configure formatting of output:
461 pretty = Print verbose output;
462 terse = Display one character per test;
463 json = Output a json document",
469 "Enable nightly-only flags:
470 unstable-options = Allow use of experimental features",
// Prints the usage banner plus the long help text (a single literal
// spanning the lines below) via getopts.
476 fn usage(binary: &str, options: &getopts::Options) {
477 let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
481 The FILTER string is tested against the name of all tests, and only those
482 tests whose names contain the filter are run.
484 By default, all tests are run in parallel. This can be altered with the
485 --test-threads flag or the RUST_TEST_THREADS environment variable when running
488 All tests have their standard output and standard error captured by default.
489 This can be overridden with the --nocapture flag or setting RUST_TEST_NOCAPTURE
490 environment variable to a value other than "0". Logging is not captured by default.
494 #[test] - Indicates a function is a test to be run. This function
496 #[bench] - Indicates a function is a benchmark to be run. This
497 function takes one argument (test::Bencher).
498 #[should_panic] - This function (also labeled with #[test]) will only pass if
499 the code causes a panic (an assertion failure or panic!)
500 A message may be provided, which the failure string must
501 contain: #[should_panic(expected = "foo")].
502 #[ignore] - When applied to a function which is already attributed as a
503 test, then the test runner will ignore these tests during
504 normal test runs. Running with --ignored or --include-ignored will run
506 usage = options.usage(&message)
510 // FIXME: Copied from libsyntax until linkage errors are resolved. Issue #47566
// Nightly (or bootstrap) builds may use unstable flags such as `-Z` and
// `--include-ignored`; stable/beta builds set CFG_DISABLE_UNSTABLE_FEATURES.
511 fn is_nightly() -> bool {
512 // Whether this is a feature-staged build, i.e., on the beta or stable channel
513 let disable_unstable_features = option_env!("CFG_DISABLE_UNSTABLE_FEATURES").is_some();
514 // Whether we should enable unstable features for bootstrapping
515 let bootstrap = env::var("RUSTC_BOOTSTRAP").is_ok();
517 bootstrap || !disable_unstable_features
520 // Parses command line arguments into test options
// Returns None when no run is wanted (e.g. `-h`), Some(Err(msg)) on bad
// input, Some(Ok(opts)) otherwise. Several error-format strings below span
// elided lines; no comments are inserted inside them.
521 pub fn parse_opts(args: &[String]) -> Option<OptRes> {
522 let mut allow_unstable = false;
523 let opts = optgroups();
524 let args = args.get(1..).unwrap_or(args);
525 let matches = match opts.parse(args) {
527 Err(f) => return Some(Err(f.to_string())),
530 if let Some(opt) = matches.opt_str("Z") {
533 "the option `Z` is only accepted on the nightly compiler".into(),
538 "unstable-options" => {
539 allow_unstable = true;
542 return Some(Err("Unrecognized option to `Z`".into()));
547 if matches.opt_present("h") {
548 usage(&args[0], &opts);
// First free argument (if any) is the name filter.
552 let filter = if !matches.free.is_empty() {
553 Some(matches.free[0].clone())
558 let include_ignored = matches.opt_present("include-ignored");
559 if !allow_unstable && include_ignored {
561 "The \"include-ignored\" flag is only accepted on the nightly compiler".into()
565 let run_ignored = match (include_ignored, matches.opt_present("ignored")) {
566 (true, true) => return Some(Err(
567 "the options --include-ignored and --ignored are mutually exclusive".into()
569 (true, false) => RunIgnored::Yes,
570 (false, true) => RunIgnored::Only,
571 (false, false) => RunIgnored::No,
573 let quiet = matches.opt_present("quiet");
574 let exact = matches.opt_present("exact");
575 let list = matches.opt_present("list");
577 let logfile = matches.opt_str("logfile");
578 let logfile = logfile.map(|s| PathBuf::from(&s));
580 let bench_benchmarks = matches.opt_present("bench");
581 let run_tests = !bench_benchmarks || matches.opt_present("test");
// The RUST_TEST_NOCAPTURE env var can force capture off even without the
// --nocapture flag (any value other than "0").
583 let mut nocapture = matches.opt_present("nocapture");
585 nocapture = match env::var("RUST_TEST_NOCAPTURE") {
586 Ok(val) => &val != "0",
// --test-threads must parse as a positive integer.
591 let test_threads = match matches.opt_str("test-threads") {
592 Some(n_str) => match n_str.parse::<usize>() {
593 Ok(0) => return Some(Err("argument for --test-threads must not be 0".to_string())),
596 return Some(Err(format!(
597 "argument for --test-threads must be a number > 0 \
606 let color = match matches.opt_str("color").as_ref().map(|s| &**s) {
607 Some("auto") | None => AutoColor,
608 Some("always") => AlwaysColor,
609 Some("never") => NeverColor,
612 return Some(Err(format!(
613 "argument for --color must be auto, always, or never (was \
620 let format = match matches.opt_str("format").as_ref().map(|s| &**s) {
621 None if quiet => OutputFormat::Terse,
622 Some("pretty") | None => OutputFormat::Pretty,
623 Some("terse") => OutputFormat::Terse,
627 "The \"json\" format is only accepted on the nightly compiler".into(),
634 return Some(Err(format!(
635 "argument for --format must be pretty, terse, or json (was \
642 let test_opts = TestOpts {
654 skip: matches.opt_strs("skip"),
655 options: Options::new(),
661 #[derive(Clone, PartialEq)]
662 pub struct BenchSamples {
663 ns_iter_summ: stats::Summary,
667 #[derive(Clone, PartialEq)]
668 pub enum TestResult {
674 TrBench(BenchSamples),
// NOTE(review): this asserts Send for TestResult manually; verify every
// variant's payload (incl. BenchSamples / elided variants) is actually
// Send before relying on it — there is no SAFETY justification here.
677 unsafe impl Send for TestResult {}
// Output goes either to a terminal (with color support) or a raw writer.
679 enum OutputLocation<T> {
680 Pretty(Box<term::StdoutTerminal>),
// Write is forwarded verbatim to whichever sink is active.
684 impl<T: Write> Write for OutputLocation<T> {
685 fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
687 Pretty(ref mut term) => term.write(buf),
688 Raw(ref mut stdout) => stdout.write(buf),
692 fn flush(&mut self) -> io::Result<()> {
694 Pretty(ref mut term) => term.flush(),
695 Raw(ref mut stdout) => stdout.flush(),
// Mutable tallies accumulated while a console run is in progress
// (several counter fields are elided from this view).
700 struct ConsoleTestState {
701 log_out: Option<File>,
710 failures: Vec<(TestDesc, Vec<u8>)>,
711 not_failures: Vec<(TestDesc, Vec<u8>)>,
715 impl ConsoleTestState {
// Opens the logfile (if configured) up front so I/O errors surface early.
716 pub fn new(opts: &TestOpts) -> io::Result<ConsoleTestState> {
717 let log_out = match opts.logfile {
718 Some(ref path) => Some(File::create(path)?),
722 Ok(ConsoleTestState {
731 metrics: MetricMap::new(),
732 failures: Vec::new(),
733 not_failures: Vec::new(),
734 options: opts.options,
// Appends `msg` to the logfile; a no-op when no logfile was configured
// (the None arm is elided from this view).
738 pub fn write_log<S: AsRef<str>>(&mut self, msg: S) -> io::Result<()> {
739 let msg = msg.as_ref();
742 Some(ref mut o) => o.write_all(msg.as_bytes()),
// Logs one line per finished test: a result word followed by the name.
746 pub fn write_log_result(&mut self, test: &TestDesc, result: &TestResult) -> io::Result<()> {
747 self.write_log(format!(
750 TrOk => "ok".to_owned(),
751 TrFailed => "failed".to_owned(),
752 TrFailedMsg(ref msg) => format!("failed: {}", msg),
753 TrIgnored => "ignored".to_owned(),
754 TrAllowedFail => "failed (allowed)".to_owned(),
755 TrBench(ref bs) => fmt_bench_samples(bs),
// Total of all completed tests, used as a sanity check after a run.
761 fn current_test_count(&self) -> usize {
762 self.passed + self.failed + self.ignored + self.measured + self.allowed_fail
766 // Format a number with thousands separators
// Walks the 10^9 / 10^6 / 10^3 / 10^0 groups; once a non-zero group has
// been emitted, later groups are zero-padded to 3 digits ("{:03}").
767 fn fmt_thousands_sep(mut n: usize, sep: char) -> String {
769 let mut output = String::new();
770 let mut trailing = false;
771 for &pow in &[9, 6, 3, 0] {
772 let base = 10_usize.pow(pow);
773 if pow == 0 || trailing || n / base != 0 {
775 output.write_fmt(format_args!("{}", n / base)).unwrap();
777 output.write_fmt(format_args!("{:03}", n / base)).unwrap();
// Renders "median ns/iter (+/- spread)" for a benchmark, plus throughput
// in MB/s when applicable (condition elided from this view).
790 pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
792 let mut output = String::new();
794 let median = bs.ns_iter_summ.median as usize;
795 let deviation = (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize;
798 .write_fmt(format_args!(
799 "{:>11} ns/iter (+/- {})",
800 fmt_thousands_sep(median, ','),
801 fmt_thousands_sep(deviation, ',')
806 .write_fmt(format_args!(" = {} MB/s", bs.mb_s))
812 // List the tests to console, and optionally to logfile. Filters are honored.
// Implements the `--list` flag: prints "name: test|benchmark" lines and a
// summary count, honoring the same filters as a real run.
813 pub fn list_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<()> {
814 let mut output = match term::stdout() {
815 None => Raw(io::stdout()),
816 Some(t) => Pretty(t),
819 let quiet = opts.format == OutputFormat::Terse;
820 let mut st = ConsoleTestState::new(opts)?;
825 for test in filter_tests(&opts, tests) {
829 desc: TestDesc { name, .. },
833 let fntype = match testfn {
834 StaticTestFn(..) | DynTestFn(..) => {
838 StaticBenchFn(..) | DynBenchFn(..) => {
844 writeln!(output, "{}: {}", name, fntype)?;
845 st.write_log(format!("{} {}\n", fntype, name))?;
// Local helper: naive English pluralization for the summary line.
848 fn plural(count: u32, s: &str) -> String {
850 1 => format!("{} {}", 1, s),
851 n => format!("{} {}s", n, s),
856 if ntest != 0 || nbench != 0 {
857 writeln!(output, "")?;
863 plural(ntest, "test"),
864 plural(nbench, "benchmark")
871 // A simple console test runner
// Drives `run_tests`, translating each TestEvent into formatter output and
// tallies in ConsoleTestState. Returns Ok(true) iff the whole run passed
// (the inner `callback` fn's signature starts on an elided line).
872 pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<bool> {
875 st: &mut ConsoleTestState,
876 out: &mut dyn OutputFormatter,
877 ) -> io::Result<()> {
878 match (*event).clone() {
879 TeFiltered(ref filtered_tests) => {
880 st.total = filtered_tests.len();
881 out.write_run_start(filtered_tests.len())
883 TeFilteredOut(filtered_out) => Ok(st.filtered_out = filtered_out),
884 TeWait(ref test) => out.write_test_start(test),
885 TeTimeout(ref test) => out.write_timeout(test),
886 TeResult(test, result, stdout) => {
887 st.write_log_result(&test, &result)?;
888 out.write_result(&test, &result, &*stdout)?;
892 st.not_failures.push((test, stdout));
894 TrIgnored => st.ignored += 1,
895 TrAllowedFail => st.allowed_fail += 1,
897 st.metrics.insert_metric(
898 test.name.as_slice(),
899 bs.ns_iter_summ.median,
900 bs.ns_iter_summ.max - bs.ns_iter_summ.min,
906 st.failures.push((test, stdout));
907 // Failure note is appended to the captured output so it prints with it.
908 TrFailedMsg(msg) => {
910 let mut stdout = stdout;
911 stdout.extend_from_slice(format!("note: {}", msg).as_bytes());
912 st.failures.push((test, stdout));
920 let output = match term::stdout() {
921 None => Raw(io::stdout()),
922 Some(t) => Pretty(t),
925 let max_name_len = tests
927 .max_by_key(|t| len_if_padded(*t))
928 .map(|t| t.desc.name.as_slice().len())
931 let is_multithreaded = opts.test_threads.unwrap_or_else(get_concurrency) > 1;
933 let mut out: Box<dyn OutputFormatter> = match opts.format {
934 OutputFormat::Pretty => Box::new(PrettyFormatter::new(
940 OutputFormat::Terse => Box::new(TerseFormatter::new(
946 OutputFormat::Json => Box::new(JsonFormatter::new(output)),
948 let mut st = ConsoleTestState::new(opts)?;
// Only names that will be padded count toward column-width computation.
949 fn len_if_padded(t: &TestDescAndFn) -> usize {
950 match t.testfn.padding() {
952 PadOnRight => t.desc.name.as_slice().len(),
956 run_tests(opts, tests, |x| callback(&x, &mut st, &mut *out))?;
// Every filtered-in test must have produced exactly one result event.
958 assert!(st.current_test_count() == st.total);
960 return out.write_run_finish(&st);
// Self-test: failures inserted out of order ("b" before "a") must be
// printed alphabetically. NOTE(review): the #[test] attribute presumably
// sits on an elided line above — confirm against the full source.
964 fn should_sort_failures_before_printing_them() {
965 let test_a = TestDesc {
966 name: StaticTestName("a"),
968 should_panic: ShouldPanic::No,
972 let test_b = TestDesc {
973 name: StaticTestName("b"),
975 should_panic: ShouldPanic::No,
979 let mut out = PrettyFormatter::new(Raw(Vec::new()), false, 10, false);
981 let st = ConsoleTestState {
990 metrics: MetricMap::new(),
991 failures: vec![(test_b, Vec::new()), (test_a, Vec::new())],
992 options: Options::new(),
993 not_failures: Vec::new(),
996 out.write_failures(&st).unwrap();
997 let s = match out.output_location() {
998 &Raw(ref m) => String::from_utf8_lossy(&m[..]),
999 &Pretty(_) => unreachable!(),
1002 let apos = s.find("a").unwrap();
1003 let bpos = s.find("b").unwrap();
1004 assert!(apos < bpos);
// Auto mode colorizes only when output is uncaptured and stdout is a tty.
1007 fn use_color(opts: &TestOpts) -> bool {
1009 AutoColor => !opts.nocapture && stdout_isatty(),
1010 AlwaysColor => true,
1011 NeverColor => false,
// Platform-specific stdout_isatty: stub for targets without a tty concept.
1015 #[cfg(any(target_os = "cloudabi",
1016 target_os = "redox",
1017 all(target_arch = "wasm32", not(target_os = "emscripten")),
1018 all(target_vendor = "fortanix", target_env = "sgx")))]
1019 fn stdout_isatty() -> bool {
1020 // FIXME: Implement isatty on Redox and SGX
// Unix-family variant via libc (its #[cfg] gate is on an elided line).
1024 fn stdout_isatty() -> bool {
1025 unsafe { libc::isatty(libc::STDOUT_FILENO) != 0 }
// Windows variant: GetConsoleMode succeeds only for console handles, so
// its return value doubles as the isatty answer.
1028 fn stdout_isatty() -> bool {
1031 type HANDLE = *mut u8;
1032 type LPDWORD = *mut u32;
1033 const STD_OUTPUT_HANDLE: DWORD = -11i32 as DWORD;
1035 fn GetStdHandle(which: DWORD) -> HANDLE;
1036 fn GetConsoleMode(hConsoleHandle: HANDLE, lpMode: LPDWORD) -> BOOL;
1039 let handle = GetStdHandle(STD_OUTPUT_HANDLE);
1041 GetConsoleMode(handle, &mut out) != 0
// Events emitted by `run_tests` to its callback, in roughly this order:
// filtered list, per-test wait/timeout/result, filtered-out count.
1046 pub enum TestEvent {
1047 TeFiltered(Vec<TestDesc>),
1049 TeResult(TestDesc, TestResult, Vec<u8>),
1050 TeTimeout(TestDesc),
1051 TeFilteredOut(usize),
1054 pub type MonitorMsg = (TestDesc, TestResult, Vec<u8>);
// Shared capture buffer installed as the thread's print/panic sink while a
// test runs (see run_test_inner's use of set_print/set_panic).
1056 struct Sink(Arc<Mutex<Vec<u8>>>);
1057 impl Write for Sink {
1058 fn write(&mut self, data: &[u8]) -> io::Result<usize> {
1059 Write::write(&mut *self.0.lock().unwrap(), data)
1061 fn flush(&mut self) -> io::Result<()> {
// Core scheduler: filters tests, then runs them either serially
// (concurrency == 1) or up to `concurrency` at a time, reporting progress
// through `callback` as TestEvents. Benchmarks always run serially at the end.
1066 pub fn run_tests<F>(opts: &TestOpts, tests: Vec<TestDescAndFn>, mut callback: F) -> io::Result<()>
1068 F: FnMut(TestEvent) -> io::Result<()>,
1070 use std::collections::{self, HashMap};
1071 use std::hash::BuildHasherDefault;
1072 use std::sync::mpsc::RecvTimeoutError;
1073 // Use a deterministic hasher
1075 HashMap<TestDesc, Instant, BuildHasherDefault<collections::hash_map::DefaultHasher>>;
1077 let tests_len = tests.len();
1079 let mut filtered_tests = filter_tests(opts, tests);
1080 if !opts.bench_benchmarks {
1081 filtered_tests = convert_benchmarks_to_tests(filtered_tests);
// Bake each test's padding into its name before any events are emitted.
1084 let filtered_tests = {
1085 let mut filtered_tests = filtered_tests;
1086 for test in filtered_tests.iter_mut() {
1087 test.desc.name = test.desc.name.with_padding(test.testfn.padding());
1093 let filtered_out = tests_len - filtered_tests.len();
1094 callback(TeFilteredOut(filtered_out))?;
1096 let filtered_descs = filtered_tests.iter().map(|t| t.desc.clone()).collect();
1098 callback(TeFiltered(filtered_descs))?;
// Split plain tests (run now) from benchmarks (run serially afterwards).
1100 let (filtered_tests, filtered_benchs): (Vec<_>, _) =
1101 filtered_tests.into_iter().partition(|e| match e.testfn {
1102 StaticTestFn(_) | DynTestFn(_) => true,
1106 let concurrency = opts.test_threads.unwrap_or_else(get_concurrency);
1108 let mut remaining = filtered_tests;
1109 remaining.reverse();
1110 let mut pending = 0;
1112 let (tx, rx) = channel::<MonitorMsg>();
// Maps each in-flight test to its warn deadline (now + TEST_WARN_TIMEOUT_S).
1114 let mut running_tests: TestMap = HashMap::default();
// Drains and returns every running test whose deadline has passed.
1116 fn get_timed_out_tests(running_tests: &mut TestMap) -> Vec<TestDesc> {
1117 let now = Instant::now();
1118 let timed_out = running_tests
1120 .filter_map(|(desc, timeout)| {
1121 if &now >= timeout {
1128 for test in &timed_out {
1129 running_tests.remove(test);
// Time until the earliest deadline, for use with recv_timeout.
1134 fn calc_timeout(running_tests: &TestMap) -> Option<Duration> {
1135 running_tests.values().min().map(|next_timeout| {
1136 let now = Instant::now();
1137 if *next_timeout >= now {
// Serial path: run one test, then block for its single result.
1145 if concurrency == 1 {
1146 while !remaining.is_empty() {
1147 let test = remaining.pop().unwrap();
1148 callback(TeWait(test.desc.clone()))?;
1149 run_test(opts, !opts.run_tests, test, tx.clone(), Concurrent::No);
1150 let (test, result, stdout) = rx.recv().unwrap();
1151 callback(TeResult(test, result, stdout))?;
// Concurrent path: keep up to `concurrency` tests in flight, emitting
// TeTimeout for any that outlive their warn deadline.
1154 while pending > 0 || !remaining.is_empty() {
1155 while pending < concurrency && !remaining.is_empty() {
1156 let test = remaining.pop().unwrap();
1157 let timeout = Instant::now() + Duration::from_secs(TEST_WARN_TIMEOUT_S);
1158 running_tests.insert(test.desc.clone(), timeout);
1159 callback(TeWait(test.desc.clone()))?; //here no pad
1160 run_test(opts, !opts.run_tests, test, tx.clone(), Concurrent::Yes);
1166 if let Some(timeout) = calc_timeout(&running_tests) {
1167 res = rx.recv_timeout(timeout);
1168 for test in get_timed_out_tests(&mut running_tests) {
1169 callback(TeTimeout(test))?;
1171 if res != Err(RecvTimeoutError::Timeout) {
1175 res = rx.recv().map_err(|_| RecvTimeoutError::Disconnected);
1180 let (desc, result, stdout) = res.unwrap();
1181 running_tests.remove(&desc);
1183 callback(TeResult(desc, result, stdout))?;
1188 if opts.bench_benchmarks {
1189 // All benchmarks run at the end, in serial.
1190 for b in filtered_benchs {
1191 callback(TeWait(b.desc.clone()))?;
1192 run_test(opts, false, b, tx.clone(), Concurrent::No);
1193 let (test, result, stdout) = rx.recv().unwrap();
1194 callback(TeResult(test, result, stdout))?;
1200 #[allow(deprecated)]
// Test-thread count: RUST_TEST_THREADS (must be a positive integer) wins,
// otherwise one of the platform-specific num_cpus() implementations below.
1201 fn get_concurrency() -> usize {
1202 return match env::var("RUST_TEST_THREADS") {
1204 let opt_n: Option<usize> = s.parse().ok();
1206 Some(n) if n > 0 => n,
1208 "RUST_TEST_THREADS is `{}`, should be a positive integer.",
1213 Err(..) => num_cpus(),
// Windows variant: hand-declared SYSTEM_INFO + GetSystemInfo FFI
// (its #[cfg(windows)] gate presumably sits on an elided line).
1217 #[allow(nonstandard_style)]
1218 fn num_cpus() -> usize {
1220 struct SYSTEM_INFO {
1221 wProcessorArchitecture: u16,
1224 lpMinimumApplicationAddress: *mut u8,
1225 lpMaximumApplicationAddress: *mut u8,
1226 dwActiveProcessorMask: *mut u8,
1227 dwNumberOfProcessors: u32,
1228 dwProcessorType: u32,
1229 dwAllocationGranularity: u32,
1230 wProcessorLevel: u16,
1231 wProcessorRevision: u16,
1234 fn GetSystemInfo(info: *mut SYSTEM_INFO) -> i32;
1237 let mut sysinfo = std::mem::zeroed();
1238 GetSystemInfo(&mut sysinfo);
1239 sysinfo.dwNumberOfProcessors as usize
1243 #[cfg(target_os = "redox")]
1244 fn num_cpus() -> usize {
1245 // FIXME: Implement num_cpus on Redox
1249 #[cfg(any(all(target_arch = "wasm32", not(target_os = "emscripten")),
1250 all(target_vendor = "fortanix", target_env = "sgx")))]
1251 fn num_cpus() -> usize {
// Common Unix variant: sysconf online-processor count.
1255 #[cfg(any(target_os = "android", target_os = "cloudabi", target_os = "emscripten",
1256 target_os = "fuchsia", target_os = "ios", target_os = "linux",
1257 target_os = "macos", target_os = "solaris"))]
1258 fn num_cpus() -> usize {
1259 unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as usize }
// BSD variant: tries sysconf, falls back to the HW_NCPU sysctl.
1262 #[cfg(any(target_os = "freebsd", target_os = "dragonfly", target_os = "bitrig",
1263 target_os = "netbsd"))]
1264 fn num_cpus() -> usize {
1267 let mut cpus: libc::c_uint = 0;
1268 let mut cpus_size = std::mem::size_of_val(&cpus);
1271 cpus = libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as libc::c_uint;
1274 let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
1279 &mut cpus as *mut _ as *mut _,
1280 &mut cpus_size as *mut _ as *mut _,
1292 #[cfg(target_os = "openbsd")]
1293 fn num_cpus() -> usize {
1296 let mut cpus: libc::c_uint = 0;
1297 let mut cpus_size = std::mem::size_of_val(&cpus);
1298 let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
1304 &mut cpus as *mut _ as *mut _,
1305 &mut cpus_size as *mut _ as *mut _,
1316 #[cfg(target_os = "haiku")]
1317 fn num_cpus() -> usize {
1322 #[cfg(target_os = "l4re")]
1323 fn num_cpus() -> usize {
// Applies --exact/name filter, --skip filters, and the run-ignored policy,
// then sorts the surviving tests alphabetically by name.
1329 pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
1330 let mut filtered = tests;
1331 let matches_filter = |test: &TestDescAndFn, filter: &str| {
1332 let test_name = test.desc.name.as_slice();
1334 match opts.filter_exact {
1335 true => test_name == filter,
1336 false => test_name.contains(filter),
1340 // Remove tests that don't match the test filter
1341 if let Some(ref filter) = opts.filter {
1342 filtered.retain(|test| matches_filter(test, filter));
1345 // Skip tests that match any of the skip filters
1346 filtered.retain(|test| {
1347 !opts.skip.iter().any(|sf| matches_filter(test, sf))
1350 // maybe unignore tests
1351 match opts.run_ignored {
1352 RunIgnored::Yes => {
1353 filtered.iter_mut().for_each(|test| test.desc.ignore = false);
1355 RunIgnored::Only => {
1356 filtered.retain(|test| test.desc.ignore);
1357 filtered.iter_mut().for_each(|test| test.desc.ignore = false);
1359 RunIgnored::No => {}
1362 // Sort the tests alphabetically
1363 filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));
// Rewraps each bench variant as a DynTestFn that executes the bench body
// exactly once (used when benchmarks are run in test mode).
1368 pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
1369 // convert benchmarks to tests, if we're not benchmarking them
1373 let testfn = match x.testfn {
1374 DynBenchFn(bench) => DynTestFn(Box::new(move || {
1375 bench::run_once(|b| __rust_begin_short_backtrace(|| bench.run(b)))
1377 StaticBenchFn(benchfn) => DynTestFn(Box::new(move || {
1378 bench::run_once(|b| __rust_begin_short_backtrace(|| benchfn(b)))
// NOTE(review): these are the parameter lines of `run_test` — its `pub fn`
// signature line is elided from this view. It sends the result (or
// TrIgnored) back over `monitor_ch`.
1393 test: TestDescAndFn,
1394 monitor_ch: Sender<MonitorMsg>,
1395 concurrency: Concurrent,
1397 let TestDescAndFn { desc, testfn } = test;
// should_panic tests can't work where panics abort (wasm32, non-emscripten).
1399 let ignore_because_panic_abort = cfg!(target_arch = "wasm32") && !cfg!(target_os = "emscripten")
1400 && desc.should_panic != ShouldPanic::No;
1402 if force_ignore || desc.ignore || ignore_because_panic_abort {
1403 monitor_ch.send((desc, TrIgnored, Vec::new())).unwrap();
// Parameter lines of the inner `run_test_inner` helper (its fn line is
// elided): runs the boxed test body, capturing output unless `nocapture`.
1409 monitor_ch: Sender<MonitorMsg>,
1411 testfn: Box<dyn FnBox() + Send>,
1412 concurrency: Concurrent,
1414 // Buffer for capturing standard I/O
1415 let data = Arc::new(Mutex::new(Vec::new()));
1416 let data2 = data.clone();
1418 let name = desc.name.clone();
// The closure run on the test thread: swap in Sink as the thread's
// print/panic sink, run under catch_unwind, restore the old sinks, then
// report the computed result plus captured bytes.
1419 let runtest = move || {
1420 let oldio = if !nocapture {
1422 io::set_print(Some(Box::new(Sink(data2.clone())))),
1423 io::set_panic(Some(Box::new(Sink(data2)))),
1429 let result = catch_unwind(AssertUnwindSafe(testfn));
1431 if let Some((printio, panicio)) = oldio {
1432 io::set_print(printio);
1433 io::set_panic(panicio);
1436 let test_result = calc_result(&desc, result);
1437 let stdout = data.lock().unwrap().to_vec();
1439 .send((desc.clone(), test_result, stdout))
1443 // If the platform is single-threaded we're just going to run
1444 // the test synchronously, regardless of the concurrency
// Spawn a named thread only when concurrent AND the target has threads.
1446 let supports_threads = !cfg!(target_os = "emscripten") && !cfg!(target_arch = "wasm32");
1447 if concurrency == Concurrent::Yes && supports_threads {
1448 let cfg = thread::Builder::new().name(name.as_slice().to_owned());
1449 cfg.spawn(runtest).unwrap();
// Dispatch on the test kind: benches go through bench::benchmark, plain
// tests through run_test_inner with a short-backtrace wrapper.
1456 DynBenchFn(bencher) => {
1457 ::bench::benchmark(desc, monitor_ch, opts.nocapture, |harness| {
1458 bencher.run(harness)
1461 StaticBenchFn(benchfn) => {
1462 ::bench::benchmark(desc, monitor_ch, opts.nocapture, |harness| {
1463 (benchfn.clone())(harness)
1467 let cb = move || __rust_begin_short_backtrace(f);
1468 run_test_inner(desc, monitor_ch, opts.nocapture, Box::new(cb), concurrency)
1470 StaticTestFn(f) => run_test_inner(
1474 Box::new(move || __rust_begin_short_backtrace(f)),
1480 /// Fixed frame used to clean the backtrace with `RUST_BACKTRACE=1`.
1482 fn __rust_begin_short_backtrace<F: FnOnce()>(f: F) {
// Maps (should_panic expectation, catch_unwind outcome) to a TestResult.
// A YesWithMessage expectation additionally requires the panic payload
// (String or &'static str) to contain the expected substring;
// allow_fail downgrades failures to TrAllowedFail.
1486 fn calc_result(desc: &TestDesc, task_result: Result<(), Box<dyn Any + Send>>) -> TestResult {
1487 match (&desc.should_panic, task_result) {
1488 (&ShouldPanic::No, Ok(())) | (&ShouldPanic::Yes, Err(_)) => TrOk,
1489 (&ShouldPanic::YesWithMessage(msg), Err(ref err)) => {
1490 if err.downcast_ref::<String>()
1492 .or_else(|| err.downcast_ref::<&'static str>().map(|e| *e))
1493 .map(|e| e.contains(msg))
1498 if desc.allow_fail {
1501 TrFailedMsg(format!("Panic did not include expected string '{}'", msg))
1505 _ if desc.allow_fail => TrAllowedFail,
// Named benchmark metrics, kept sorted by name via BTreeMap for stable output.
1510 #[derive(Clone, PartialEq)]
1511 pub struct MetricMap(BTreeMap<String, Metric>);
// Creates an empty metric map.
1514 pub fn new() -> MetricMap {
1515 MetricMap(BTreeMap::new())
1518 /// Insert a named `value` (+/- `noise`) metric into the map. The value
1519 /// must be non-negative. The `noise` indicates the uncertainty of the
1520 /// metric, which doubles as the "noise range" of acceptable
1521 /// pairwise-regressions on this named value, when comparing from one
1522 /// metric to the next using `compare_to_old`.
1524 /// If `noise` is positive, then it means this metric is of a value
1525 /// you want to see grow smaller, so a change larger than `noise` in the
1526 /// positive direction represents a regression.
1528 /// If `noise` is negative, then it means this metric is of a value
1529 /// you want to see grow larger, so a change larger than `noise` in the
1530 /// negative direction represents a regression.
1531 pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
1532 let m = Metric { value, noise };
// Overwrites any existing metric with the same name.
1533 self.0.insert(name.to_owned(), m);
// Renders all metrics as "name: value (+/- noise)" lines for display.
1536 pub fn fmt_metrics(&self) -> String {
1539 .map(|(k, v)| format!("{}: {} (+/- {})", *k, v.value, v.noise))
1540 .collect::<Vec<_>>();
1547 /// A function that is opaque to the optimizer, to allow benchmarks to
1548 /// pretend to use outputs to assist in avoiding dead-code
1551 /// This function is a no-op, and does not even read from `dummy`.
1552 #[cfg(not(any(target_arch = "asmjs", target_arch = "wasm32")))]
1553 pub fn black_box<T>(dummy: T) -> T {
1554 // we need to "use" the argument in some way LLVM can't
// An empty inline-asm block that claims to read &dummy; LLVM must assume
// the value escapes, so it cannot const-fold the benchmarked work away.
1556 unsafe { asm!("" : : "r"(&dummy)) }
// wasm/asmjs have no inline asm; fallback body is missing from this excerpt
// (presumably it just returns `dummy`, providing no optimizer barrier).
1559 #[cfg(any(target_arch = "asmjs", target_arch = "wasm32"))]
1561 pub fn black_box<T>(dummy: T) -> T {
// Bencher entry points (fragment). `iter` is what `#[bench]` bodies call;
// `bench` drives a whole benchmark closure and yields the timing summary.
1566 /// Callback for benchmark functions to run in their body.
1567 pub fn iter<T, F>(&mut self, mut inner: F)
// Single mode (used by run_once / --test) executes the closure exactly once
// with no statistics; Auto mode runs the adaptive sampling loop below.
1571 if self.mode == BenchMode::Single {
1572 ns_iter_inner(&mut inner, 1);
1576 self.summary = Some(iter(&mut inner));
1579 pub fn bench<F>(&mut self, mut f: F) -> Option<stats::Summary>
1581 F: FnMut(&mut Bencher),
// None here means the benchmark body never called `iter` — see the
// corresponding FIXME in bench::benchmark.
1584 return self.summary;
// Converts a Duration to whole nanoseconds (u64; overflows after ~584 years).
1588 fn ns_from_dur(dur: Duration) -> u64 {
1589 dur.as_secs() * 1_000_000_000 + (dur.subsec_nanos() as u64)
// Times `k` consecutive invocations of the benchmark closure and returns the
// total elapsed wall-clock nanoseconds.
1592 fn ns_iter_inner<T, F>(inner: &mut F, k: u64) -> u64
1596 let start = Instant::now();
1600 return ns_from_dur(start.elapsed());
// Adaptive benchmark sampling: picks an iteration count `n`, collects 50
// per-iteration timing samples (plus a 5x-n control set), and loops with a
// growing `n` until the statistics converge or a time budget is exhausted.
1603 pub fn iter<T, F>(inner: &mut F) -> stats::Summary
1607 // Initial bench run to get ballpark figure.
1608 let ns_single = ns_iter_inner(inner, 1);
1610 // Try to estimate iter count for 1ms falling back to 1m
1611 // iterations if first run took < 1ns.
1612 let ns_target_total = 1_000_000; // 1ms
// max(1, ..) guards the division when the single run measured 0 ns.
1613 let mut n = ns_target_total / cmp::max(1, ns_single);
1615 // if the first run took more than 1ms we don't want to just
1616 // be left doing 0 iterations on every loop. The unfortunate
1617 // side effect of not being able to do as many runs is
1618 // automatically handled by the statistical analysis below
1619 // (i.e., larger error bars).
1622 let mut total_run = Duration::new(0, 0);
1623 let samples: &mut [f64] = &mut [0.0_f64; 50];
1625 let loop_start = Instant::now();
// 50 samples of mean ns-per-iteration at batch size n.
1627 for p in &mut *samples {
1628 *p = ns_iter_inner(inner, n) as f64 / n as f64;
// Clip the extreme 5% of samples before summarizing, to tame outliers.
1631 stats::winsorize(samples, 5.0);
1632 let summ = stats::Summary::new(samples);
// Control set at 5x the batch size; agreement between summ and summ5
// indicates the per-iteration estimate has stabilized.
1634 for p in &mut *samples {
1635 let ns = ns_iter_inner(inner, 5 * n);
1636 *p = ns as f64 / (5 * n) as f64;
1639 stats::winsorize(samples, 5.0);
1640 let summ5 = stats::Summary::new(samples);
1642 let loop_run = loop_start.elapsed();
1644 // If we've run for 100ms and seem to have converged to a
// Convergence: low relative deviation and the two medians agree to
// within the control set's median absolute deviation.
1646 if loop_run > Duration::from_millis(100) && summ.median_abs_dev_pct < 1.0
1647 && summ.median - summ5.median < summ5.median_abs_dev
1652 total_run = total_run + loop_run;
1653 // Longest we ever run for is 3s.
1654 if total_run > Duration::from_secs(3) {
1658 // If we overflow here just return the results so far. We check a
1659 // multiplier of 10 because we're about to multiply by 2 and the
1660 // next iteration of the loop will also multiply by 5 (to calculate
1661 // the summ5 result)
1662 n = match n.checked_mul(10) {
1672 use std::panic::{catch_unwind, AssertUnwindSafe};
1675 use std::sync::{Arc, Mutex};
1677 use super::{BenchMode, BenchSamples, Bencher, MonitorMsg, Sender, Sink, TestDesc, TestResult};
// Runs one benchmark to completion: captures its I/O, executes the sampling
// harness under catch_unwind, converts the summary into a TrBench result
// (or TrFailed on panic), and reports it over `monitor_ch`.
1679 pub fn benchmark<F>(desc: TestDesc, monitor_ch: Sender<MonitorMsg>, nocapture: bool, f: F)
1681 F: FnMut(&mut Bencher),
1683 let mut bs = Bencher {
// Auto mode engages the adaptive convergence loop in `iter` above.
1684 mode: BenchMode::Auto,
1689 let data = Arc::new(Mutex::new(Vec::new()));
1690 let data2 = data.clone();
// Same capture-and-restore dance as run_test_inner.
1692 let oldio = if !nocapture {
1694 io::set_print(Some(Box::new(Sink(data2.clone())))),
1695 io::set_panic(Some(Box::new(Sink(data2)))),
1701 let result = catch_unwind(AssertUnwindSafe(|| bs.bench(f)));
1703 if let Some((printio, panicio)) = oldio {
1704 io::set_print(printio);
1705 io::set_panic(panicio);
1708 let test_result = match result {
1710 Ok(Some(ns_iter_summ)) => {
// Clamp the median to >= 1 ns to avoid dividing by zero in the
// throughput (MB/s) computation below.
1711 let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
1712 let mb_s = bs.bytes * 1000 / ns_iter;
1714 let bs = BenchSamples {
1716 mb_s: mb_s as usize,
1718 TestResult::TrBench(bs)
1721 // iter not called, so no data.
1722 // FIXME: error in this case?
// Synthesize a single zero sample so a summary can still be built.
1723 let samples: &mut [f64] = &mut [0.0_f64; 1];
1724 let bs = BenchSamples {
1725 ns_iter_summ: stats::Summary::new(samples),
1728 TestResult::TrBench(bs)
// A panic inside the benchmark body is an ordinary test failure.
1730 Err(_) => TestResult::TrFailed,
1733 let stdout = data.lock().unwrap().to_vec();
1734 monitor_ch.send((desc, test_result, stdout)).unwrap();
// Runs a benchmark body exactly once with no timing statistics — used when
// benchmarks are executed as plain tests (e.g. `--test` mode).
1737 pub fn run_once<F>(f: F)
1739 F: FnMut(&mut Bencher),
1741 let mut bs = Bencher {
1742 mode: BenchMode::Single,
1752 use test::{filter_tests, parse_opts, run_test, DynTestFn, DynTestName, MetricMap, RunIgnored,
1753 ShouldPanic, StaticTestName, TestDesc, TestDescAndFn, TestOpts, TrFailed,
1754 TrFailedMsg, TrIgnored, TrOk};
1755 use std::sync::mpsc::channel;
// Test fixture: two dynamic tests, "1" ignored and "2" not, for exercising
// the RunIgnored filtering paths.
1761 fn one_ignored_one_unignored_test() -> Vec<TestDescAndFn> {
1765 name: StaticTestName("1"),
1767 should_panic: ShouldPanic::No,
1770 testfn: DynTestFn(Box::new(move || {})),
1774 name: StaticTestName("2"),
1776 should_panic: ShouldPanic::No,
1779 testfn: DynTestFn(Box::new(move || {})),
// With default opts, an ignored test must not come back as TrOk.
1785 pub fn do_not_run_ignored_tests() {
1789 let desc = TestDescAndFn {
1791 name: StaticTestName("whatever"),
1793 should_panic: ShouldPanic::No,
1796 testfn: DynTestFn(Box::new(f)),
1798 let (tx, rx) = channel();
1799 run_test(&TestOpts::new(), false, desc, tx, Concurrent::No);
1800 let (_, res, _) = rx.recv().unwrap();
1801 assert!(res != TrOk);
// And specifically it must be reported as TrIgnored.
1805 pub fn ignored_tests_result_in_ignored() {
1807 let desc = TestDescAndFn {
1809 name: StaticTestName("whatever"),
1811 should_panic: ShouldPanic::No,
1814 testfn: DynTestFn(Box::new(f)),
1816 let (tx, rx) = channel();
1817 run_test(&TestOpts::new(), false, desc, tx, Concurrent::No);
1818 let (_, res, _) = rx.recv().unwrap();
1819 assert!(res == TrIgnored);
// should_panic semantics: a panicking body under ShouldPanic::Yes is TrOk.
1823 fn test_should_panic() {
1827 let desc = TestDescAndFn {
1829 name: StaticTestName("whatever"),
1831 should_panic: ShouldPanic::Yes,
1834 testfn: DynTestFn(Box::new(f)),
1836 let (tx, rx) = channel();
1837 run_test(&TestOpts::new(), false, desc, tx, Concurrent::No);
1838 let (_, res, _) = rx.recv().unwrap();
1839 assert!(res == TrOk);
// YesWithMessage passes when the expected string is a substring of the
// actual panic message.
1843 fn test_should_panic_good_message() {
1845 panic!("an error message");
1847 let desc = TestDescAndFn {
1849 name: StaticTestName("whatever"),
1851 should_panic: ShouldPanic::YesWithMessage("error message"),
1854 testfn: DynTestFn(Box::new(f)),
1856 let (tx, rx) = channel();
1857 run_test(&TestOpts::new(), false, desc, tx, Concurrent::No);
1858 let (_, res, _) = rx.recv().unwrap();
1859 assert!(res == TrOk);
// ...and fails with the specific TrFailedMsg text when it is not.
1863 fn test_should_panic_bad_message() {
1865 panic!("an error message");
1867 let expected = "foobar";
1868 let failed_msg = "Panic did not include expected string";
1869 let desc = TestDescAndFn {
1871 name: StaticTestName("whatever"),
1873 should_panic: ShouldPanic::YesWithMessage(expected),
1876 testfn: DynTestFn(Box::new(f)),
1878 let (tx, rx) = channel();
1879 run_test(&TestOpts::new(), false, desc, tx, Concurrent::No);
1880 let (_, res, _) = rx.recv().unwrap();
1881 assert!(res == TrFailedMsg(format!("{} '{}'", failed_msg, expected)));
// A should_panic test that returns normally is a failure.
1885 fn test_should_panic_but_succeeds() {
1887 let desc = TestDescAndFn {
1889 name: StaticTestName("whatever"),
1891 should_panic: ShouldPanic::Yes,
1894 testfn: DynTestFn(Box::new(f)),
1896 let (tx, rx) = channel();
1897 run_test(&TestOpts::new(), false, desc, tx, Concurrent::No);
1898 let (_, res, _) = rx.recv().unwrap();
1899 assert!(res == TrFailed);
// --ignored maps to RunIgnored::Only.
1903 fn parse_ignored_flag() {
1905 "progname".to_string(),
1906 "filter".to_string(),
1907 "--ignored".to_string(),
1909 let opts = parse_opts(&args).unwrap().unwrap();
1910 assert_eq!(opts.run_ignored, RunIgnored::Only);
// --include-ignored (behind -Zunstable-options) maps to RunIgnored::Yes.
1914 fn parse_include_ignored_flag() {
1916 "progname".to_string(),
1917 "filter".to_string(),
1918 "-Zunstable-options".to_string(),
1919 "--include-ignored".to_string(),
1921 let opts = parse_opts(&args).unwrap().unwrap(),
1922 assert_eq!(opts.run_ignored, RunIgnored::Yes);
1926 pub fn filter_for_ignored_option() {
1927 // When we run ignored tests the test filter should filter out all the
1928 // unignored tests and flip the ignore flag on the rest to false
1930 let mut opts = TestOpts::new();
1931 opts.run_tests = true;
1932 opts.run_ignored = RunIgnored::Only;
1934 let tests = one_ignored_one_unignored_test();
1935 let filtered = filter_tests(&opts, tests);
// Only the originally-ignored test "1" survives, with ignore flipped off.
1937 assert_eq!(filtered.len(), 1);
1938 assert_eq!(filtered[0].desc.name.to_string(), "1");
1939 assert!(!filtered[0].desc.ignore);
1943 pub fn run_include_ignored_option() {
1944 // When we "--include-ignored" tests, the ignore flag should be set to false on
1945 // all tests and no test filtered out
1947 let mut opts = TestOpts::new();
1948 opts.run_tests = true;
1949 opts.run_ignored = RunIgnored::Yes;
1951 let tests = one_ignored_one_unignored_test();
1952 let filtered = filter_tests(&opts, tests);
1954 assert_eq!(filtered.len(), 2);
1955 assert!(!filtered[0].desc.ignore);
1956 assert!(!filtered[1].desc.ignore);
// Contrasts substring filtering (default) against --exact matching over a
// fixture of four hierarchically named tests.
1960 pub fn exact_filter_match() {
1961 fn tests() -> Vec<TestDescAndFn> {
1962 vec!["base", "base::test", "base::test1", "base::test2"]
1964 .map(|name| TestDescAndFn {
1966 name: StaticTestName(name),
1968 should_panic: ShouldPanic::No,
1971 testfn: DynTestFn(Box::new(move || {})),
// Substring mode: "base" and "bas" match all 4; "::test" and "base::test"
// match the 3 names containing that substring.
1976 let substr = filter_tests(
1978 filter: Some("base".into()),
1983 assert_eq!(substr.len(), 4);
1985 let substr = filter_tests(
1987 filter: Some("bas".into()),
1992 assert_eq!(substr.len(), 4);
1994 let substr = filter_tests(
1996 filter: Some("::test".into()),
2001 assert_eq!(substr.len(), 3);
2003 let substr = filter_tests(
2005 filter: Some("base::test".into()),
2010 assert_eq!(substr.len(), 3);
// Exact mode: only a whole-name match counts.
2012 let exact = filter_tests(
2014 filter: Some("base".into()),
2020 assert_eq!(exact.len(), 1);
2022 let exact = filter_tests(
2024 filter: Some("bas".into()),
2030 assert_eq!(exact.len(), 0);
2032 let exact = filter_tests(
2034 filter: Some("::test".into()),
2040 assert_eq!(exact.len(), 0);
2042 let exact = filter_tests(
2044 filter: Some("base::test".into()),
2050 assert_eq!(exact.len(), 1);
// filter_tests must also sort: feed names in scrambled order and expect
// lexicographic output order.
2054 pub fn sort_tests() {
2055 let mut opts = TestOpts::new();
2056 opts.run_tests = true;
2059 "sha1::test".to_string(),
2060 "isize::test_to_str".to_string(),
2061 "isize::test_pow".to_string(),
2062 "test::do_not_run_ignored_tests".to_string(),
2063 "test::ignored_tests_result_in_ignored".to_string(),
2064 "test::first_free_arg_should_be_a_filter".to_string(),
2065 "test::parse_ignored_flag".to_string(),
2066 "test::parse_include_ignored_flag".to_string(),
2067 "test::filter_for_ignored_option".to_string(),
2068 "test::run_include_ignored_option".to_string(),
2069 "test::sort_tests".to_string(),
2073 let mut tests = Vec::new();
2074 for name in &names {
2075 let test = TestDescAndFn {
2077 name: DynTestName((*name).clone()),
2079 should_panic: ShouldPanic::No,
2082 testfn: DynTestFn(Box::new(testfn)),
2088 let filtered = filter_tests(&opts, tests);
// The same names, in sorted order.
2090 let expected = vec![
2091 "isize::test_pow".to_string(),
2092 "isize::test_to_str".to_string(),
2093 "sha1::test".to_string(),
2094 "test::do_not_run_ignored_tests".to_string(),
2095 "test::filter_for_ignored_option".to_string(),
2096 "test::first_free_arg_should_be_a_filter".to_string(),
2097 "test::ignored_tests_result_in_ignored".to_string(),
2098 "test::parse_ignored_flag".to_string(),
2099 "test::parse_include_ignored_flag".to_string(),
2100 "test::run_include_ignored_option".to_string(),
2101 "test::sort_tests".to_string(),
2104 for (a, b) in expected.iter().zip(filtered) {
2105 assert!(*a == b.desc.name.to_string());
// Builds two MetricMaps covering the noise/regression cases documented on
// insert_metric. NOTE(review): the assertions comparing m1 and m2 are
// missing from this excerpt.
2110 pub fn test_metricmap_compare() {
2111 let mut m1 = MetricMap::new();
2112 let mut m2 = MetricMap::new();
2113 m1.insert_metric("in-both-noise", 1000.0, 200.0);
2114 m2.insert_metric("in-both-noise", 1100.0, 200.0);
2116 m1.insert_metric("in-first-noise", 1000.0, 2.0);
2117 m2.insert_metric("in-second-noise", 1000.0, 2.0);
2119 m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
2120 m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);
2122 m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
2123 m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);
2125 m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
2126 m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);
2128 m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
2129 m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);
// run_once smoke tests: with and without a call to b.iter.
2133 pub fn test_bench_once_no_iter() {
2134 fn f(_: &mut Bencher) {}
2139 pub fn test_bench_once_iter() {
2140 fn f(b: &mut Bencher) {
// Full benchmark pipeline smoke tests driving bench::benchmark over a channel.
2147 pub fn test_bench_no_iter() {
2148 fn f(_: &mut Bencher) {}
2150 let (tx, rx) = channel();
2152 let desc = TestDesc {
2153 name: StaticTestName("f"),
2155 should_panic: ShouldPanic::No,
2159 ::bench::benchmark(desc, tx, true, f);
2164 pub fn test_bench_iter() {
2165 fn f(b: &mut Bencher) {
2169 let (tx, rx) = channel();
2171 let desc = TestDesc {
2172 name: StaticTestName("f"),
2174 should_panic: ShouldPanic::No,
2178 ::bench::benchmark(desc, tx, true, f);