1 // Copyright 2012-2017 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 //! Support code for rustc's built in unit-test and micro-benchmarking
14 //! Almost all user code will only be interested in `Bencher` and
15 //! `black_box`. All other interactions (such as writing tests and
16 //! benchmarks themselves) should be done via the `#[test]` and
17 //! `#[bench]` attributes.
19 //! See the [Testing Chapter](../book/first-edition/testing.html) of the book for more details.
21 // Currently, not much of this is meant for users. It is intended to
22 // support the simplest interface possible for representing and
23 // running tests while providing a base that other test frameworks may
26 // NB: this is also specified in this crate's Cargo.toml, but libsyntax contains logic specific to
27 // this crate, which relies on this attribute (rather than the value of `--crate-name` passed by
28 // cargo) to detect this crate.
30 #![crate_name = "test"]
31 #![unstable(feature = "test", issue = "27812")]
32 #![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
33 html_favicon_url = "https://doc.rust-lang.org/favicon.ico",
34 html_root_url = "https://doc.rust-lang.org/nightly/", test(attr(deny(warnings))))]
37 #![cfg_attr(any(unix, target_os = "cloudabi"), feature(libc))]
39 #![feature(set_stdio)]
40 #![feature(panic_unwind)]
41 #![feature(staged_api)]
42 #![feature(termination_trait_lib)]
46 #[cfg(any(unix, target_os = "cloudabi"))]
50 // FIXME(#54291): rustc and/or LLVM don't yet support building with panic-unwind
51 // on aarch64-pc-windows-msvc, so we don't link libtest against
52 // libunwind (for the time being), even though it means that
53 // libtest won't be fully functional on this platform.
55 // See also: https://github.com/rust-lang/rust/issues/54190#issuecomment-422904437
56 #[cfg(not(all(windows, target_arch = "aarch64")))]
57 extern crate panic_unwind;
59 pub use self::TestFn::*;
60 pub use self::ColorConfig::*;
61 pub use self::TestResult::*;
62 pub use self::TestName::*;
63 use self::TestEvent::*;
64 use self::NamePadding::*;
65 use self::OutputLocation::*;
67 use std::panic::{catch_unwind, AssertUnwindSafe};
69 use std::boxed::FnBox;
71 use std::collections::BTreeMap;
75 use std::io::prelude::*;
77 use std::path::PathBuf;
78 use std::process::Termination;
79 use std::sync::mpsc::{channel, Sender};
80 use std::sync::{Arc, Mutex};
82 use std::time::{Duration, Instant};
86 const TEST_WARN_TIMEOUT_S: u64 = 60;
87 const QUIET_MODE_MAX_COLUMN: usize = 100; // insert a '\n' after 100 tests in quiet mode
89 // to be used by rustc to compile tests in libtest
91 pub use {assert_test_result, filter_tests, parse_opts, run_test, test_main, test_main_static,
92 Bencher, DynTestFn, DynTestName, Metric, MetricMap, Options, RunIgnored, ShouldPanic,
93 StaticBenchFn, StaticTestFn, StaticTestName, TestDesc, TestDescAndFn, TestName,
94 TestOpts, TestResult, TrFailed, TrFailedMsg, TrIgnored, TrOk};
100 use formatters::{JsonFormatter, OutputFormatter, PrettyFormatter, TerseFormatter};
102 // The name of a test. By convention this follows the rules for rust
103 // paths; i.e. it should be a series of identifiers separated by double
104 // colons. This way if some test runner wants to arrange the tests
105 // hierarchically it may.
// `TestName` identifies a test. Variants cover `&'static str` names,
// owned `String` names (`DynTestName`, declaration elided here), and
// names pre-wrapped with a `NamePadding` for column alignment.
// NOTE(review): this extraction elides interior lines (embedded line
// numbers jump), so bodies below are fragments — code left untouched.
107 #[derive(Clone, PartialEq, Eq, Hash, Debug)]
109 StaticTestName(&'static str),
111 AlignedTestName(Cow<'static, str>, NamePadding),
// Borrow the underlying name as `&str` regardless of variant.
114 fn as_slice(&self) -> &str {
116 StaticTestName(s) => s,
117 DynTestName(ref s) => s,
118 AlignedTestName(ref s, _) => &*s,
// Only `AlignedTestName` carries padding; the other match arms are elided.
122 fn padding(&self) -> NamePadding {
124 &AlignedTestName(_, p) => p,
// Re-wrap any variant as an `AlignedTestName` carrying `padding`.
129 fn with_padding(&self, padding: NamePadding) -> TestName {
130 let name = match self {
131 &TestName::StaticTestName(name) => Cow::Borrowed(name),
132 &TestName::DynTestName(ref name) => Cow::Owned(name.clone()),
133 &TestName::AlignedTestName(ref name, _) => name.clone(),
136 TestName::AlignedTestName(name, padding)
// `Display` delegates straight to the string slice.
139 impl fmt::Display for TestName {
140 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
141 fmt::Display::fmt(self.as_slice(), f)
// `NamePadding` controls how a test name is aligned in console output;
// its variants (presumably `PadNone`/`PadOnRight`, used below) are elided.
145 #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
146 pub enum NamePadding {
// Pad `self.name` out to `column_count` characters; `saturating_sub`
// avoids underflow when the name is already wider than the column.
// NOTE(review): the `align` handling and return are elided in this chunk.
152 fn padded_name(&self, column_count: usize, align: NamePadding) -> String {
153 let mut name = String::from(self.name.as_slice());
154 let fill = column_count.saturating_sub(name.len());
155 let pad = " ".repeat(fill);
166 /// Represents a benchmark function.
167 pub trait TDynBenchFn: Send {
168 fn run(&self, harness: &mut Bencher);
171 // A function that runs a test. If the function returns successfully,
172 // the test succeeds; if the function panics then the test fails. We
173 // may need to come up with a more clever definition of test in order
174 // to support isolation of tests into threads.
// `TestFn` variants: static fn pointers vs boxed dynamic closures, for
// both tests and benches. (`StaticTestFn` declaration elided here.)
177 StaticBenchFn(fn(&mut Bencher)),
178 DynTestFn(Box<dyn FnBox() + Send>),
179 DynBenchFn(Box<dyn TDynBenchFn + 'static>),
// Benches get right-padded names so timing columns line up; tests don't.
183 fn padding(&self) -> NamePadding {
185 StaticTestFn(..) => PadNone,
186 StaticBenchFn(..) => PadOnRight,
187 DynTestFn(..) => PadNone,
188 DynBenchFn(..) => PadOnRight,
// Manual `Debug`: closures aren't `Debug`, so print the variant name only.
193 impl fmt::Debug for TestFn {
194 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
195 f.write_str(match *self {
196 StaticTestFn(..) => "StaticTestFn(..)",
197 StaticBenchFn(..) => "StaticBenchFn(..)",
198 DynTestFn(..) => "DynTestFn(..)",
199 DynBenchFn(..) => "DynBenchFn(..)",
204 /// Manager of the benchmarking runs.
206 /// This is fed into functions marked with `#[bench]` to allow for
207 /// set-up & tear-down before running a piece of code repeatedly via a
// `summary` caches the stats of the most recent benchmark run, if any.
212 summary: Option<stats::Summary>,
216 #[derive(Clone, PartialEq, Eq)]
// `ShouldPanic`: whether a test is expected to panic, optionally with a
// required message substring (`YesWithMessage`); other variants elided.
222 #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
223 pub enum ShouldPanic {
226 YesWithMessage(&'static str),
229 // The definition of a single test. A test runner will run a list of
// `TestDesc` is the metadata half of a test; several fields are elided
// from this extraction (e.g. `name`, `ignore`).
231 #[derive(Clone, Debug, PartialEq, Eq, Hash)]
232 pub struct TestDesc {
235 pub should_panic: ShouldPanic,
236 pub allow_fail: bool,
// Pairs the metadata with the runnable `TestFn` (fields elided here).
240 pub struct TestDescAndFn {
// `Metric`: a measured value plus its noise/uncertainty band.
245 #[derive(Clone, PartialEq, Debug, Copy)]
252 pub fn new(value: f64, noise: f64) -> Metric {
253 Metric { value, noise }
257 /// In case we want to add other options as well, just add them in this struct.
258 #[derive(Copy, Clone, Debug)]
260 display_output: bool,
// Builder-style construction: `new()` then chain `display_output(..)`.
264 pub fn new() -> Options {
266 display_output: false,
270 pub fn display_output(mut self, display_output: bool) -> Options {
271 self.display_output = display_output;
276 // The default console test runner. It accepts the command line
277 // arguments and a vector of test_descs.
// Exit codes visible here: 101 when tests fail; parse/list/io errors are
// reported via `eprintln!` (surrounding exit calls elided in this chunk).
278 pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>, options: Options) {
279 let mut opts = match parse_opts(args) {
282 eprintln!("error: {}", msg);
288 opts.options = options;
// `--list` path: print test names instead of running them.
290 if let Err(e) = list_tests_console(&opts, tests) {
291 eprintln!("error: io error when listing tests: {:?}", e);
295 match run_tests_console(&opts, tests) {
297 Ok(false) => process::exit(101),
299 eprintln!("error: io error when listing tests: {:?}", e);
306 // A variant optimized for invocation with a static test vector.
307 // This will panic (intentionally) when fed any dynamic tests, because
308 // it is copying the static values out into a dynamic vector and cannot
309 // copy dynamic values. It is doing this because from this point on
310 // a Vec<TestDescAndFn> is used in order to effect ownership-transfer
311 // semantics into parallel test runners, which in turn requires a Vec<>
312 // rather than a &[].
313 pub fn test_main_static(tests: &[&TestDescAndFn]) {
314 let args = env::args().collect::<Vec<_>>();
315 let owned_tests = tests
// Only `StaticTestFn`/`StaticBenchFn` are `Copy`-able out of the slice;
// dynamic variants cannot be cloned, hence the panic below.
317 .map(|t| match t.testfn {
318 StaticTestFn(f) => TestDescAndFn {
319 testfn: StaticTestFn(f),
320 desc: t.desc.clone(),
322 StaticBenchFn(f) => TestDescAndFn {
323 testfn: StaticBenchFn(f),
324 desc: t.desc.clone(),
326 _ => panic!("non-static tests passed to test::test_main_static"),
329 test_main(&args, owned_tests, Options::new())
332 /// Invoked when unit tests terminate. Should panic if the unit
333 /// test is considered a failure. By default, invokes `report()`
334 /// and checks for a `0` result.
335 pub fn assert_test_result<T: Termination>(result: T) {
336 let code = result.report();
// The assertion wrapping this message (and the `code` interpolation) is
// elided in this extraction; the message text itself is runtime output.
340 "the test returned a termination value with a non-zero status code ({}) \
341 which indicates a failure",
// Console color policy; variants (Auto/Always/Never, used in
// `parse_opts` below) are elided from this extraction.
346 #[derive(Copy, Clone, Debug)]
347 pub enum ColorConfig {
// Output format selection: Pretty / Terse / Json (variants elided).
353 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
354 pub enum OutputFormat {
// How `--ignored` / `--include-ignored` interact; see `filter_tests`.
360 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
361 pub enum RunIgnored {
// Fully parsed command-line options for a test run (some fields elided).
368 pub struct TestOpts {
370 pub filter: Option<String>,
371 pub filter_exact: bool,
372 pub run_ignored: RunIgnored,
374 pub bench_benchmarks: bool,
375 pub logfile: Option<PathBuf>,
377 pub color: ColorConfig,
378 pub format: OutputFormat,
379 pub test_threads: Option<usize>,
380 pub skip: Vec<String>,
381 pub options: Options,
// Default construction used by unit tests of libtest itself.
386 fn new() -> TestOpts {
391 run_ignored: RunIgnored::No,
393 bench_benchmarks: false,
397 format: OutputFormat::Pretty,
400 options: Options::new(),
408 fn optgroups() -> getopts::Options {
409 let mut opts = getopts::Options::new();
410 opts.optflag("", "include-ignored", "Run ignored and not ignored tests")
411 .optflag("", "ignored", "Run only ignored tests")
412 .optflag("", "test", "Run tests and not benchmarks")
413 .optflag("", "bench", "Run benchmarks instead of tests")
414 .optflag("", "list", "List all tests and benchmarks")
415 .optflag("h", "help", "Display this message (longer with --help)")
419 "Write logs to the specified file instead \
426 "don't capture stdout/stderr of each \
427 task, allow printing directly",
432 "Number of threads used for running tests \
439 "Skip tests whose names contain FILTER (this flag can \
440 be used multiple times)",
446 "Display one character per test instead of one line. \
447 Alias to --format=terse",
452 "Exactly match filters rather than by substring",
457 "Configure coloring of output:
458 auto = colorize if stdout is a tty and tests are run on serially (default);
459 always = always colorize output;
460 never = never colorize output;",
466 "Configure formatting of output:
467 pretty = Print verbose output;
468 terse = Display one character per test;
469 json = Output a json document",
475 "Enable nightly-only flags:
476 unstable-options = Allow use of experimental features",
482 fn usage(binary: &str, options: &getopts::Options) {
483 let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
487 The FILTER string is tested against the name of all tests, and only those
488 tests whose names contain the filter are run.
490 By default, all tests are run in parallel. This can be altered with the
491 --test-threads flag or the RUST_TEST_THREADS environment variable when running
494 All tests have their standard output and standard error captured by default.
495 This can be overridden with the --nocapture flag or setting RUST_TEST_NOCAPTURE
496 environment variable to a value other than "0". Logging is not captured by default.
500 #[test] - Indicates a function is a test to be run. This function
502 #[bench] - Indicates a function is a benchmark to be run. This
503 function takes one argument (test::Bencher).
504 #[should_panic] - This function (also labeled with #[test]) will only pass if
505 the code causes a panic (an assertion failure or panic!)
506 A message may be provided, which the failure string must
507 contain: #[should_panic(expected = "foo")].
508 #[ignore] - When applied to a function which is already attributed as a
509 test, then the test runner will ignore these tests during
510 normal test runs. Running with --ignored or --include-ignored will run
512 usage = options.usage(&message)
516 // FIXME: Copied from libsyntax until linkage errors are resolved. Issue #47566
517 fn is_nightly() -> bool {
518 // Whether this is a feature-staged build, i.e. on the beta or stable channel
519 let disable_unstable_features = option_env!("CFG_DISABLE_UNSTABLE_FEATURES").is_some();
520 // Whether we should enable unstable features for bootstrapping
521 let bootstrap = env::var("RUSTC_BOOTSTRAP").is_ok();
523 bootstrap || !disable_unstable_features
526 // Parses command line arguments into test options
// Returns `None` when `--help` was requested (usage already printed),
// `Some(Err(..))` on invalid input, `Some(Ok(opts))` otherwise.
527 pub fn parse_opts(args: &[String]) -> Option<OptRes> {
528 let mut allow_unstable = false;
529 let opts = optgroups();
// Skip argv[0] (the binary name) if present.
530 let args = args.get(1..).unwrap_or(args);
531 let matches = match opts.parse(args) {
533 Err(f) => return Some(Err(f.to_string())),
// `-Z` is gated to nightly via `is_nightly()` (the gate check itself is
// elided in this extraction).
536 if let Some(opt) = matches.opt_str("Z") {
539 "the option `Z` is only accepted on the nightly compiler".into(),
544 "unstable-options" => {
545 allow_unstable = true;
548 return Some(Err("Unrecognized option to `Z`".into()));
553 if matches.opt_present("h") {
554 usage(&args[0], &opts);
// The first free argument, if any, is the name filter.
558 let filter = if !matches.free.is_empty() {
559 Some(matches.free[0].clone())
// `--include-ignored` is itself unstable.
564 let include_ignored = matches.opt_present("include-ignored");
565 if !allow_unstable && include_ignored {
567 "The \"include-ignored\" flag is only accepted on the nightly compiler".into()
571 let run_ignored = match (include_ignored, matches.opt_present("ignored")) {
572 (true, true) => return Some(Err(
573 "the options --include-ignored and --ignored are mutually exclusive".into()
575 (true, false) => RunIgnored::Yes,
576 (false, true) => RunIgnored::Only,
577 (false, false) => RunIgnored::No,
579 let quiet = matches.opt_present("quiet");
580 let exact = matches.opt_present("exact");
581 let list = matches.opt_present("list");
583 let logfile = matches.opt_str("logfile");
584 let logfile = logfile.map(|s| PathBuf::from(&s));
586 let bench_benchmarks = matches.opt_present("bench");
587 let run_tests = !bench_benchmarks || matches.opt_present("test");
// RUST_TEST_NOCAPTURE overrides the flag; any value other than "0" enables it.
589 let mut nocapture = matches.opt_present("nocapture");
591 nocapture = match env::var("RUST_TEST_NOCAPTURE") {
592 Ok(val) => &val != "0",
// `--test-threads` must parse as a positive integer.
597 let test_threads = match matches.opt_str("test-threads") {
598 Some(n_str) => match n_str.parse::<usize>() {
599 Ok(0) => return Some(Err("argument for --test-threads must not be 0".to_string())),
602 return Some(Err(format!(
603 "argument for --test-threads must be a number > 0 \
612 let color = match matches.opt_str("color").as_ref().map(|s| &**s) {
613 Some("auto") | None => AutoColor,
614 Some("always") => AlwaysColor,
615 Some("never") => NeverColor,
618 return Some(Err(format!(
619 "argument for --color must be auto, always, or never (was \
// `--quiet` with no explicit format implies terse; json is nightly-gated.
626 let format = match matches.opt_str("format").as_ref().map(|s| &**s) {
627 None if quiet => OutputFormat::Terse,
628 Some("pretty") | None => OutputFormat::Pretty,
629 Some("terse") => OutputFormat::Terse,
633 "The \"json\" format is only accepted on the nightly compiler".into(),
640 return Some(Err(format!(
641 "argument for --format must be pretty, terse, or json (was \
// Assemble the final `TestOpts` (most field initializers elided here).
648 let test_opts = TestOpts {
660 skip: matches.opt_strs("skip"),
661 options: Options::new(),
// Raw stats collected from one benchmark (`mb_s` etc. elided here).
667 #[derive(Clone, PartialEq)]
668 pub struct BenchSamples {
669 ns_iter_summ: stats::Summary,
// Outcome of a single test; most variants (TrOk, TrFailed, ...) elided.
673 #[derive(Clone, PartialEq)]
674 pub enum TestResult {
680 TrBench(BenchSamples),
// NOTE(review): unsafe impl with no SAFETY comment in the original; the
// justification is not visible in this extraction — do not rely on it.
683 unsafe impl Send for TestResult {}
// Either a terminal handle (colored) or plain stdout/buffer output.
685 enum OutputLocation<T> {
686 Pretty(Box<term::StdoutTerminal>),
// `Write` simply forwards to whichever sink is active.
690 impl<T: Write> Write for OutputLocation<T> {
691 fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
693 Pretty(ref mut term) => term.write(buf),
694 Raw(ref mut stdout) => stdout.write(buf),
698 fn flush(&mut self) -> io::Result<()> {
700 Pretty(ref mut term) => term.flush(),
701 Raw(ref mut stdout) => stdout.flush(),
// Mutable bookkeeping for a console run: counters, captured output of
// failures, optional logfile (several counter fields elided here).
706 struct ConsoleTestState {
707 log_out: Option<File>,
716 failures: Vec<(TestDesc, Vec<u8>)>,
717 not_failures: Vec<(TestDesc, Vec<u8>)>,
721 impl ConsoleTestState {
// Opens the logfile eagerly so IO errors surface before any test runs.
722 pub fn new(opts: &TestOpts) -> io::Result<ConsoleTestState> {
723 let log_out = match opts.logfile {
724 Some(ref path) => Some(File::create(path)?),
728 Ok(ConsoleTestState {
737 metrics: MetricMap::new(),
738 failures: Vec::new(),
739 not_failures: Vec::new(),
740 options: opts.options,
// Appends `msg` to the logfile; a no-op when no logfile was requested.
744 pub fn write_log<S: AsRef<str>>(&mut self, msg: S) -> io::Result<()> {
745 let msg = msg.as_ref();
748 Some(ref mut o) => o.write_all(msg.as_bytes()),
// One log line per finished test: "<status> <name>".
752 pub fn write_log_result(&mut self, test: &TestDesc, result: &TestResult) -> io::Result<()> {
753 self.write_log(format!(
756 TrOk => "ok".to_owned(),
757 TrFailed => "failed".to_owned(),
758 TrFailedMsg(ref msg) => format!("failed: {}", msg),
759 TrIgnored => "ignored".to_owned(),
760 TrAllowedFail => "failed (allowed)".to_owned(),
761 TrBench(ref bs) => fmt_bench_samples(bs),
// Total of all outcome counters; used to assert every test was reported.
767 fn current_test_count(&self) -> usize {
768 self.passed + self.failed + self.ignored + self.measured + self.allowed_fail
772 // Format a number with thousands separators
// Walks the decimal groups from most-significant (10^9) down; `trailing`
// switches formatting from "{}" to zero-padded "{:03}" once the first
// non-zero group has been emitted. (Separator emission is elided here.)
773 fn fmt_thousands_sep(mut n: usize, sep: char) -> String {
775 let mut output = String::new();
776 let mut trailing = false;
777 for &pow in &[9, 6, 3, 0] {
778 let base = 10_usize.pow(pow);
779 if pow == 0 || trailing || n / base != 0 {
781 output.write_fmt(format_args!("{}", n / base)).unwrap();
783 output.write_fmt(format_args!("{:03}", n / base)).unwrap();
// Renders "<median> ns/iter (+/- <spread>)" plus throughput when known.
796 pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
798 let mut output = String::new();
800 let median = bs.ns_iter_summ.median as usize;
// Spread is max - min of the per-iteration timings.
801 let deviation = (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize;
804 .write_fmt(format_args!(
805 "{:>11} ns/iter (+/- {})",
806 fmt_thousands_sep(median, ','),
807 fmt_thousands_sep(deviation, ',')
812 .write_fmt(format_args!(" = {} MB/s", bs.mb_s))
818 // List the tests to console, and optionally to logfile. Filters are honored.
819 pub fn list_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<()> {
// Prefer a color-capable terminal handle; fall back to raw stdout.
820 let mut output = match term::stdout() {
821 None => Raw(io::stdout()),
822 Some(t) => Pretty(t),
825 let quiet = opts.format == OutputFormat::Terse;
826 let mut st = ConsoleTestState::new(opts)?;
// Only tests surviving `filter_tests` are listed.
831 for test in filter_tests(&opts, tests) {
835 desc: TestDesc { name, .. },
// Classify as "test" vs "benchmark" (count bookkeeping elided here).
839 let fntype = match testfn {
840 StaticTestFn(..) | DynTestFn(..) => {
844 StaticBenchFn(..) | DynBenchFn(..) => {
850 writeln!(output, "{}: {}", name, fntype)?;
851 st.write_log(format!("{} {}\n", fntype, name))?;
// Naive English pluralization helper for the summary line.
854 fn plural(count: u32, s: &str) -> String {
856 1 => format!("{} {}", 1, s),
857 n => format!("{} {}s", n, s),
// Summary footer, only in non-quiet mode and when anything was listed.
862 if ntest != 0 || nbench != 0 {
863 writeln!(output, "")?;
869 plural(ntest, "test"),
870 plural(nbench, "benchmark")
877 // A simple console test runner
// Returns Ok(true) when the whole run succeeded, Ok(false) otherwise.
878 pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<bool> {
// Inner callback: translates each `TestEvent` into formatter output and
// state-counter updates. (Its signature's first parameter is elided.)
881 st: &mut ConsoleTestState,
882 out: &mut dyn OutputFormatter,
883 ) -> io::Result<()> {
884 match (*event).clone() {
885 TeFiltered(ref filtered_tests) => {
886 st.total = filtered_tests.len();
887 out.write_run_start(filtered_tests.len())
889 TeFilteredOut(filtered_out) => Ok(st.filtered_out = filtered_out),
890 TeWait(ref test) => out.write_test_start(test),
891 TeTimeout(ref test) => out.write_timeout(test),
892 TeResult(test, result, stdout) => {
// Log first, then pretty-print, then bump the matching counter.
893 st.write_log_result(&test, &result)?;
894 out.write_result(&test, &result, &*stdout)?;
898 st.not_failures.push((test, stdout));
900 TrIgnored => st.ignored += 1,
901 TrAllowedFail => st.allowed_fail += 1,
// Bench results also feed the metric map (median +/- spread).
903 st.metrics.insert_metric(
904 test.name.as_slice(),
905 bs.ns_iter_summ.median,
906 bs.ns_iter_summ.max - bs.ns_iter_summ.min,
912 st.failures.push((test, stdout));
914 TrFailedMsg(msg) => {
// Append the failure note to the captured stdout before recording.
916 let mut stdout = stdout;
917 stdout.extend_from_slice(format!("note: {}", msg).as_bytes());
918 st.failures.push((test, stdout));
926 let output = match term::stdout() {
927 None => Raw(io::stdout()),
928 Some(t) => Pretty(t),
// Column width = longest name among tests that request padding.
931 let max_name_len = tests
933 .max_by_key(|t| len_if_padded(*t))
934 .map(|t| t.desc.name.as_slice().len())
937 let is_multithreaded = opts.test_threads.unwrap_or_else(get_concurrency) > 1;
939 let mut out: Box<dyn OutputFormatter> = match opts.format {
940 OutputFormat::Pretty => Box::new(PrettyFormatter::new(
946 OutputFormat::Terse => Box::new(TerseFormatter::new(
952 OutputFormat::Json => Box::new(JsonFormatter::new(output)),
954 let mut st = ConsoleTestState::new(opts)?;
// Padding only applies to benchmarks (`PadOnRight`); others count as 0.
955 fn len_if_padded(t: &TestDescAndFn) -> usize {
956 match t.testfn.padding() {
958 PadOnRight => t.desc.name.as_slice().len(),
962 run_tests(opts, tests, |x| callback(&x, &mut st, &mut *out))?;
// Sanity check: every filtered-in test must have produced a result.
964 assert!(st.current_test_count() == st.total);
966 return out.write_run_finish(&st);
// Unit test: failures recorded out of order ("b" before "a") must be
// printed sorted by name by `write_failures`.
970 fn should_sort_failures_before_printing_them() {
971 let test_a = TestDesc {
972 name: StaticTestName("a"),
974 should_panic: ShouldPanic::No,
978 let test_b = TestDesc {
979 name: StaticTestName("b"),
981 should_panic: ShouldPanic::No,
985 let mut out = PrettyFormatter::new(Raw(Vec::new()), false, 10, false);
987 let st = ConsoleTestState {
996 metrics: MetricMap::new(),
// Deliberately inserted in reverse order to exercise the sort.
997 failures: vec![(test_b, Vec::new()), (test_a, Vec::new())],
998 options: Options::new(),
999 not_failures: Vec::new(),
1002 out.write_failures(&st).unwrap();
// Recover the formatter's in-memory buffer and check name ordering.
1003 let s = match out.output_location() {
1004 &Raw(ref m) => String::from_utf8_lossy(&m[..]),
1005 &Pretty(_) => unreachable!(),
1008 let apos = s.find("a").unwrap();
1009 let bpos = s.find("b").unwrap();
1010 assert!(apos < bpos);
// Color is auto-enabled only for uncaptured output on a tty.
1013 fn use_color(opts: &TestOpts) -> bool {
1015 AutoColor => !opts.nocapture && stdout_isatty(),
1016 AlwaysColor => true,
1017 NeverColor => false,
// Per-platform `stdout_isatty`: a stub for targets without isatty, libc
// on unix-likes, and a raw GetConsoleMode probe on Windows.
1021 #[cfg(any(target_os = "cloudabi",
1022 target_os = "redox",
1023 all(target_arch = "wasm32", not(target_os = "emscripten")),
1024 target_env = "sgx"))]
1025 fn stdout_isatty() -> bool {
1026 // FIXME: Implement isatty on Redox and SGX
1030 fn stdout_isatty() -> bool {
1031 unsafe { libc::isatty(libc::STDOUT_FILENO) != 0 }
1034 fn stdout_isatty() -> bool {
// Minimal hand-declared Win32 FFI to avoid a winapi dependency.
1037 type HANDLE = *mut u8;
1038 type LPDWORD = *mut u32;
1039 const STD_OUTPUT_HANDLE: DWORD = -11i32 as DWORD;
1041 fn GetStdHandle(which: DWORD) -> HANDLE;
1042 fn GetConsoleMode(hConsoleHandle: HANDLE, lpMode: LPDWORD) -> BOOL;
// GetConsoleMode succeeds only when the handle is a real console.
1045 let handle = GetStdHandle(STD_OUTPUT_HANDLE);
1047 GetConsoleMode(handle, &mut out) != 0
// Events emitted by `run_tests` and consumed by the console callback.
1052 pub enum TestEvent {
1053 TeFiltered(Vec<TestDesc>),
1055 TeResult(TestDesc, TestResult, Vec<u8>),
1056 TeTimeout(TestDesc),
1057 TeFilteredOut(usize),
1062 struct Sink(Arc<Mutex<Vec<u8>>>);
1063 impl Write for Sink {
1064 fn write(&mut self, data: &[u8]) -> io::Result<usize> {
1065 Write::write(&mut *self.0.lock().unwrap(), data)
1067 fn flush(&mut self) -> io::Result<()> {
// Core scheduler: filters tests, runs them (serially or up to
// `concurrency` at a time), reports events through `callback`, and runs
// benchmarks serially at the end.
1072 pub fn run_tests<F>(opts: &TestOpts, tests: Vec<TestDescAndFn>, mut callback: F) -> io::Result<()>
1074 F: FnMut(TestEvent) -> io::Result<()>,
1076 use std::collections::HashMap;
1077 use std::sync::mpsc::RecvTimeoutError;
1079 let tests_len = tests.len();
1081 let mut filtered_tests = filter_tests(opts, tests);
// Without --bench, benchmarks are demoted to single-iteration tests.
1082 if !opts.bench_benchmarks {
1083 filtered_tests = convert_benchmarks_to_tests(filtered_tests);
// Bake each test's padding into its name up front.
1086 let filtered_tests = {
1087 let mut filtered_tests = filtered_tests;
1088 for test in filtered_tests.iter_mut() {
1089 test.desc.name = test.desc.name.with_padding(test.testfn.padding());
1095 let filtered_out = tests_len - filtered_tests.len();
1096 callback(TeFilteredOut(filtered_out))?;
1098 let filtered_descs = filtered_tests.iter().map(|t| t.desc.clone()).collect();
1100 callback(TeFiltered(filtered_descs))?;
// Split tests (run now, possibly parallel) from benches (run last, serial).
1102 let (filtered_tests, filtered_benchs): (Vec<_>, _) =
1103 filtered_tests.into_iter().partition(|e| match e.testfn {
1104 StaticTestFn(_) | DynTestFn(_) => true,
1108 let concurrency = opts.test_threads.unwrap_or_else(get_concurrency);
// Reversed so `pop()` yields tests in original order.
1110 let mut remaining = filtered_tests;
1111 remaining.reverse();
1112 let mut pending = 0;
1114 let (tx, rx) = channel::<MonitorMsg>();
// Deadlines for in-flight tests, used to emit TeTimeout warnings.
1116 let mut running_tests: HashMap<TestDesc, Instant> = HashMap::new();
// Collects (and removes from the map) every test past its deadline.
1118 fn get_timed_out_tests(running_tests: &mut HashMap<TestDesc, Instant>) -> Vec<TestDesc> {
1119 let now = Instant::now();
1120 let timed_out = running_tests
1122 .filter_map(|(desc, timeout)| {
1123 if &now >= timeout {
1130 for test in &timed_out {
1131 running_tests.remove(test);
// Duration until the next deadline, or None when nothing is running.
1136 fn calc_timeout(running_tests: &HashMap<TestDesc, Instant>) -> Option<Duration> {
1137 running_tests.values().min().map(|next_timeout| {
1138 let now = Instant::now();
1139 if *next_timeout >= now {
// Serial path: run and immediately collect each result in order.
1147 if concurrency == 1 {
1148 while !remaining.is_empty() {
1149 let test = remaining.pop().unwrap();
1150 callback(TeWait(test.desc.clone()))?;
1151 run_test(opts, !opts.run_tests, test, tx.clone());
1152 let (test, result, stdout) = rx.recv().unwrap();
1153 callback(TeResult(test, result, stdout))?;
// Parallel path: keep up to `concurrency` tests in flight.
1156 while pending > 0 || !remaining.is_empty() {
1157 while pending < concurrency && !remaining.is_empty() {
1158 let test = remaining.pop().unwrap();
1159 let timeout = Instant::now() + Duration::from_secs(TEST_WARN_TIMEOUT_S);
1160 running_tests.insert(test.desc.clone(), timeout);
1161 callback(TeWait(test.desc.clone()))?; //here no pad
1162 run_test(opts, !opts.run_tests, test, tx.clone());
// Wait with a timeout so slow tests can be flagged; loop re-waits after
// emitting TeTimeout until a real result (or disconnect) arrives.
1168 if let Some(timeout) = calc_timeout(&running_tests) {
1169 res = rx.recv_timeout(timeout);
1170 for test in get_timed_out_tests(&mut running_tests) {
1171 callback(TeTimeout(test))?;
1173 if res != Err(RecvTimeoutError::Timeout) {
1177 res = rx.recv().map_err(|_| RecvTimeoutError::Disconnected);
1182 let (desc, result, stdout) = res.unwrap();
1183 running_tests.remove(&desc);
1185 callback(TeResult(desc, result, stdout))?;
1190 if opts.bench_benchmarks {
1191 // All benchmarks run at the end, in serial.
1192 for b in filtered_benchs {
1193 callback(TeWait(b.desc.clone()))?;
1194 run_test(opts, false, b, tx.clone());
1195 let (test, result, stdout) = rx.recv().unwrap();
1196 callback(TeResult(test, result, stdout))?;
// Default test parallelism: RUST_TEST_THREADS if set and positive,
// otherwise the platform CPU count from `num_cpus()` below.
1202 #[allow(deprecated)]
1203 fn get_concurrency() -> usize {
1204 return match env::var("RUST_TEST_THREADS") {
1206 let opt_n: Option<usize> = s.parse().ok();
1208 Some(n) if n > 0 => n,
1210 "RUST_TEST_THREADS is `{}`, should be a positive integer.",
1215 Err(..) => num_cpus(),
// Windows: hand-declared SYSTEM_INFO + GetSystemInfo to read the
// processor count without external crates.
1219 #[allow(nonstandard_style)]
1220 fn num_cpus() -> usize {
1222 struct SYSTEM_INFO {
1223 wProcessorArchitecture: u16,
1226 lpMinimumApplicationAddress: *mut u8,
1227 lpMaximumApplicationAddress: *mut u8,
1228 dwActiveProcessorMask: *mut u8,
1229 dwNumberOfProcessors: u32,
1230 dwProcessorType: u32,
1231 dwAllocationGranularity: u32,
1232 wProcessorLevel: u16,
1233 wProcessorRevision: u16,
1236 fn GetSystemInfo(info: *mut SYSTEM_INFO) -> i32;
1239 let mut sysinfo = std::mem::zeroed();
1240 GetSystemInfo(&mut sysinfo);
1241 sysinfo.dwNumberOfProcessors as usize
// Stubs for targets where the CPU count cannot be queried yet.
1245 #[cfg(target_os = "redox")]
1246 fn num_cpus() -> usize {
1247 // FIXME: Implement num_cpus on Redox
1251 #[cfg(any(all(target_arch = "wasm32", not(target_os = "emscripten")), target_env = "sgx"))]
1252 fn num_cpus() -> usize {
// Most unix-likes: sysconf(_SC_NPROCESSORS_ONLN).
1256 #[cfg(any(target_os = "android", target_os = "cloudabi", target_os = "emscripten",
1257 target_os = "fuchsia", target_os = "ios", target_os = "linux",
1258 target_os = "macos", target_os = "solaris"))]
1259 fn num_cpus() -> usize {
1260 unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as usize }
// BSDs: sysconf first, falling back to the HW_NCPU sysctl.
1263 #[cfg(any(target_os = "freebsd", target_os = "dragonfly", target_os = "bitrig",
1264 target_os = "netbsd"))]
1265 fn num_cpus() -> usize {
1268 let mut cpus: libc::c_uint = 0;
1269 let mut cpus_size = std::mem::size_of_val(&cpus);
1272 cpus = libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as libc::c_uint;
1275 let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
1280 &mut cpus as *mut _ as *mut _,
1281 &mut cpus_size as *mut _ as *mut _,
// OpenBSD: sysctl(HW_NCPU) only (no _SC_NPROCESSORS_ONLN path here).
1293 #[cfg(target_os = "openbsd")]
1294 fn num_cpus() -> usize {
1297 let mut cpus: libc::c_uint = 0;
1298 let mut cpus_size = std::mem::size_of_val(&cpus);
1299 let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
1305 &mut cpus as *mut _ as *mut _,
1306 &mut cpus_size as *mut _ as *mut _,
// Haiku / L4Re: bodies elided in this extraction.
1317 #[cfg(target_os = "haiku")]
1318 fn num_cpus() -> usize {
1323 #[cfg(target_os = "l4re")]
1324 fn num_cpus() -> usize {
// Applies name filter, --skip filters, and the ignore policy, then
// sorts the surviving tests alphabetically by name.
1330 pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
1331 let mut filtered = tests;
// Exact match vs substring match, per --exact.
1332 let matches_filter = |test: &TestDescAndFn, filter: &str| {
1333 let test_name = test.desc.name.as_slice();
1335 match opts.filter_exact {
1336 true => test_name == filter,
1337 false => test_name.contains(filter),
1341 // Remove tests that don't match the test filter
1342 if let Some(ref filter) = opts.filter {
1343 filtered.retain(|test| matches_filter(test, filter));
1346 // Skip tests that match any of the skip filters
1347 filtered.retain(|test| {
1348 !opts.skip.iter().any(|sf| matches_filter(test, sf))
1351 // maybe unignore tests
1352 match opts.run_ignored {
// Yes: run everything, clearing ignore flags.
1353 RunIgnored::Yes => {
1354 filtered.iter_mut().for_each(|test| test.desc.ignore = false);
// Only: keep just the ignored tests, then clear their flags so they run.
1356 RunIgnored::Only => {
1357 filtered.retain(|test| test.desc.ignore);
1358 filtered.iter_mut().for_each(|test| test.desc.ignore = false);
1360 RunIgnored::No => {}
1363 // Sort the tests alphabetically
1364 filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));
// Rewraps each bench as a test that executes it once (via
// `bench::run_once`), so plain `cargo test` still exercises benches.
1369 pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
1370 // convert benchmarks to tests, if we're not benchmarking them
1374 let testfn = match x.testfn {
1375 DynBenchFn(bench) => DynTestFn(Box::new(move || {
1376 bench::run_once(|b| __rust_begin_short_backtrace(|| bench.run(b)))
1378 StaticBenchFn(benchfn) => DynTestFn(Box::new(move || {
1379 bench::run_once(|b| __rust_begin_short_backtrace(|| benchfn(b)))
// `run_test` (signature partially elided): dispatches one test/bench,
// sending its result over `monitor_ch`.
1394 test: TestDescAndFn,
1395 monitor_ch: Sender<MonitorMsg>,
1397 let TestDescAndFn { desc, testfn } = test;
// wasm (non-emscripten) aborts on panic, so should_panic tests can't work.
1399 let ignore_because_panic_abort = cfg!(target_arch = "wasm32") && !cfg!(target_os = "emscripten")
1400 && desc.should_panic != ShouldPanic::No;
1402 if force_ignore || desc.ignore || ignore_because_panic_abort {
1403 monitor_ch.send((desc, TrIgnored, Vec::new())).unwrap();
// Inner runner (signature partially elided): captures stdio, catches
// the panic, computes the result, and reports it back.
1409 monitor_ch: Sender<MonitorMsg>,
1411 testfn: Box<dyn FnBox() + Send>,
1413 // Buffer for capturing standard I/O
1414 let data = Arc::new(Mutex::new(Vec::new()));
1415 let data2 = data.clone();
1417 let name = desc.name.clone();
1418 let runtest = move || {
// Unless --nocapture, redirect print/panic output into the shared buffer,
// remembering the previous sinks for restoration afterwards.
1419 let oldio = if !nocapture {
1421 io::set_print(Some(Box::new(Sink(data2.clone())))),
1422 io::set_panic(Some(Box::new(Sink(data2)))),
// A panic inside the test is converted into a Result by catch_unwind.
1428 let result = catch_unwind(AssertUnwindSafe(testfn));
1430 if let Some((printio, panicio)) = oldio {
1431 io::set_print(printio);
1432 io::set_panic(panicio);
1435 let test_result = calc_result(&desc, result);
1436 let stdout = data.lock().unwrap().to_vec();
1438 .send((desc.clone(), test_result, stdout))
1442 // If the platform is single-threaded we're just going to run
1443 // the test synchronously, regardless of the concurrency
1445 let supports_threads = !cfg!(target_os = "emscripten") && !cfg!(target_arch = "wasm32");
1446 if supports_threads {
// Thread is named after the test for nicer panic messages.
1447 let cfg = thread::Builder::new().name(name.as_slice().to_owned());
1448 cfg.spawn(runtest).unwrap();
// Dispatch by variant; benches go through ::bench::benchmark instead.
1455 DynBenchFn(bencher) => {
1456 ::bench::benchmark(desc, monitor_ch, opts.nocapture, |harness| {
1457 bencher.run(harness)
1460 StaticBenchFn(benchfn) => {
1461 ::bench::benchmark(desc, monitor_ch, opts.nocapture, |harness| {
1462 (benchfn.clone())(harness)
1466 let cb = move || __rust_begin_short_backtrace(f);
1467 run_test_inner(desc, monitor_ch, opts.nocapture, Box::new(cb))
1469 StaticTestFn(f) => run_test_inner(
1473 Box::new(move || __rust_begin_short_backtrace(f)),
1478 /// Fixed frame used to clean the backtrace with `RUST_BACKTRACE=1`.
1480 fn __rust_begin_short_backtrace<F: FnOnce()>(f: F) {
// Maps (expected-panic policy, actual panic outcome) to a TestResult.
1484 fn calc_result(desc: &TestDesc, task_result: Result<(), Box<dyn Any + Send>>) -> TestResult {
1485 match (&desc.should_panic, task_result) {
1486 (&ShouldPanic::No, Ok(())) | (&ShouldPanic::Yes, Err(_)) => TrOk,
// Expected-message case: the panic payload may be a String or &'static
// str; the expected substring must appear in it.
1487 (&ShouldPanic::YesWithMessage(msg), Err(ref err)) => {
1488 if err.downcast_ref::<String>()
1490 .or_else(|| err.downcast_ref::<&'static str>().map(|e| *e))
1491 .map(|e| e.contains(msg))
// Wrong message: allowed-fail tests degrade gracefully, otherwise the
// mismatch is reported explicitly.
1496 if desc.allow_fail {
1499 TrFailedMsg(format!("Panic did not include expected string '{}'", msg))
1503 _ if desc.allow_fail => TrAllowedFail,
// Named-metric map keyed by metric name; BTreeMap keeps iteration (and thus
// `fmt_metrics` output) in deterministic name order.
1508 #[derive(Clone, PartialEq)]
1509 pub struct MetricMap(BTreeMap<String, Metric>);
// Creates an empty metric map. (NOTE(review): the enclosing `impl MetricMap`
// header is missing from this extraction.)
1512 pub fn new() -> MetricMap {
1513 MetricMap(BTreeMap::new())
1516 /// Insert a named `value` (+/- `noise`) metric into the map. The value
1517 /// must be non-negative. The `noise` indicates the uncertainty of the
1518 /// metric, which doubles as the "noise range" of acceptable
1519 /// pairwise-regressions on this named value, when comparing from one
1520 /// metric to the next using `compare_to_old`.
1522 /// If `noise` is positive, then it means this metric is of a value
1523 /// you want to see grow smaller, so a change larger than `noise` in the
1524 /// positive direction represents a regression.
1526 /// If `noise` is negative, then it means this metric is of a value
1527 /// you want to see grow larger, so a change larger than `noise` in the
1528 /// negative direction represents a regression.
1529 pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
1530 let m = Metric { value, noise };
// Overwrites any existing metric with the same name.
1531 self.0.insert(name.to_owned(), m);
// Renders all metrics as "name: value (+/- noise)" lines for display.
1534 pub fn fmt_metrics(&self) -> String {
1537 .map(|(k, v)| format!("{}: {} (+/- {})", *k, v.value, v.noise))
1538 .collect::<Vec<_>>();
1545 /// A function that is opaque to the optimizer, to allow benchmarks to
1546 /// pretend to use outputs to assist in avoiding dead-code
1549 /// This function is a no-op, and does not even read from `dummy`.
1550 #[cfg(not(any(target_arch = "asmjs", target_arch = "wasm32")))]
1551 pub fn black_box<T>(dummy: T) -> T {
1552 // we need to "use" the argument in some way LLVM can't
// An empty asm block with `dummy`'s address as an "r" input forces LLVM to
// assume the value escapes, so it cannot const-fold the benchmark away.
1554 unsafe { asm!("" : : "r"(&dummy)) }
// wasm/asmjs have no inline-asm support; this variant presumably just returns
// `dummy` unchanged (body missing from this extraction — confirm).
1557 #[cfg(any(target_arch = "asmjs", target_arch = "wasm32"))]
1559 pub fn black_box<T>(dummy: T) -> T {
1564 /// Callback for benchmark functions to run in their body.
// NOTE(review): fragment of `impl Bencher` — the impl header and parts of both
// method bodies are missing from this extraction.
1565 pub fn iter<T, F>(&mut self, mut inner: F)
// Single mode runs the closure exactly once (used by `run_once`); otherwise the
// full statistical sampling in the free function `iter` produces a Summary.
1569 if self.mode == BenchMode::Single {
1570 ns_iter_inner(&mut inner, 1);
1574 self.summary = Some(iter(&mut inner));
// Runs the user's bench fn against this Bencher and returns the summary that
// `iter` recorded (None if the bench fn never called `iter`).
1577 pub fn bench<F>(&mut self, mut f: F) -> Option<stats::Summary>
1579 F: FnMut(&mut Bencher),
1582 return self.summary;
// Converts a Duration to whole nanoseconds as u64 (saturation/overflow is not
// handled; fine for the sub-3s runs this module measures).
1586 fn ns_from_dur(dur: Duration) -> u64 {
1587 dur.as_secs() * 1_000_000_000 + (dur.subsec_nanos() as u64)
// Times `k` invocations of `inner` and returns the total elapsed nanoseconds.
// NOTE(review): the loop over `k` is missing from this extraction.
1590 fn ns_iter_inner<T, F>(inner: &mut F, k: u64) -> u64
1594 let start = Instant::now();
1598 return ns_from_dur(start.elapsed());
// Adaptive benchmark driver: calibrates an iteration count `n` aiming at ~1ms
// per sample, then repeatedly takes 50 samples at `n` and 50 at `5*n`
// iterations, winsorizes both sets, and stops once the measurements converge
// (or after a 3s budget). Returns the `n`-sample Summary.
// NOTE(review): fragment — the outer loop structure and return points are
// partially missing from this extraction.
1601 pub fn iter<T, F>(inner: &mut F) -> stats::Summary
1605 // Initial bench run to get ballpark figure.
1606 let ns_single = ns_iter_inner(inner, 1);
1608 // Try to estimate iter count for 1ms falling back to 1m
1609 // iterations if first run took < 1ns.
1610 let ns_target_total = 1_000_000; // 1ms
1611 let mut n = ns_target_total / cmp::max(1, ns_single);
1613 // if the first run took more than 1ms we don't want to just
1614 // be left doing 0 iterations on every loop. The unfortunate
1615 // side effect of not being able to do as many runs is
1616 // automatically handled by the statistical analysis below
1617 // (i.e. larger error bars).
1620 let mut total_run = Duration::new(0, 0);
1621 let samples: &mut [f64] = &mut [0.0_f64; 50];
1623 let loop_start = Instant::now();
// First sample set: per-iteration cost at n iterations per sample.
1625 for p in &mut *samples {
1626 *p = ns_iter_inner(inner, n) as f64 / n as f64;
// Winsorize to clamp outliers before summarizing.
1629 stats::winsorize(samples, 5.0);
1630 let summ = stats::Summary::new(samples);
// Second sample set at 5*n iterations, used as a convergence cross-check.
1632 for p in &mut *samples {
1633 let ns = ns_iter_inner(inner, 5 * n);
1634 *p = ns as f64 / (5 * n) as f64;
1637 stats::winsorize(samples, 5.0);
1638 let summ5 = stats::Summary::new(samples);
1640 let loop_run = loop_start.elapsed();
1642 // If we've run for 100ms and seem to have converged to a
// Converged: low relative deviation and the two medians agree within noise.
1644 if loop_run > Duration::from_millis(100) && summ.median_abs_dev_pct < 1.0
1645 && summ.median - summ5.median < summ5.median_abs_dev
1650 total_run = total_run + loop_run;
1651 // Longest we ever run for is 3s.
1652 if total_run > Duration::from_secs(3) {
1656 // If we overflow here just return the results so far. We check a
1657 // multiplier of 10 because we're about to multiply by 2 and the
1658 // next iteration of the loop will also multiply by 5 (to calculate
1659 // the summ5 result)
1660 n = match n.checked_mul(10) {
1670 use std::panic::{catch_unwind, AssertUnwindSafe};
1673 use std::sync::{Arc, Mutex};
1675 use super::{BenchMode, BenchSamples, Bencher, MonitorMsg, Sender, Sink, TestDesc, TestResult};
// Runs one benchmark to completion and reports (desc, result, captured output)
// on the monitor channel. Mirrors `run_test_inner`'s stdio-capture dance, runs
// `bs.bench(f)` under `catch_unwind`, then classifies the outcome:
//   Ok(Some(summary)) -> TrBench with MB/s derived from `bs.bytes`;
//   Ok(None)          -> `iter` was never called, report a zeroed TrBench;
//   Err(_)            -> the bench fn panicked, TrFailed.
// NOTE(review): fragment — several struct-literal fields and closing braces are
// missing from this extraction.
1677 pub fn benchmark<F>(desc: TestDesc, monitor_ch: Sender<MonitorMsg>, nocapture: bool, f: F)
1679 F: FnMut(&mut Bencher),
1681 let mut bs = Bencher {
1682 mode: BenchMode::Auto,
// Shared buffer that captures print/panic output while the bench runs.
1687 let data = Arc::new(Mutex::new(Vec::new()));
1688 let data2 = data.clone();
1690 let oldio = if !nocapture {
1692 io::set_print(Some(Box::new(Sink(data2.clone())))),
1693 io::set_panic(Some(Box::new(Sink(data2)))),
1699 let result = catch_unwind(AssertUnwindSafe(|| bs.bench(f)));
// Restore the previous stdio hooks regardless of the outcome.
1701 if let Some((printio, panicio)) = oldio {
1702 io::set_print(printio);
1703 io::set_panic(panicio);
1706 let test_result = match result {
1708 Ok(Some(ns_iter_summ)) => {
// Clamp to >=1 ns so the MB/s division below can't divide by zero.
1709 let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
1710 let mb_s = bs.bytes * 1000 / ns_iter;
1712 let bs = BenchSamples {
1714 mb_s: mb_s as usize,
1716 TestResult::TrBench(bs)
1719 // iter not called, so no data.
1720 // FIXME: error in this case?
1721 let samples: &mut [f64] = &mut [0.0_f64; 1];
1722 let bs = BenchSamples {
1723 ns_iter_summ: stats::Summary::new(samples),
1726 TestResult::TrBench(bs)
1728 Err(_) => TestResult::TrFailed,
1731 let stdout = data.lock().unwrap().to_vec();
1732 monitor_ch.send((desc, test_result, stdout)).unwrap();
// Runs a bench fn exactly once (BenchMode::Single) with no timing/statistics —
// used so `--test` mode can exercise `#[bench]` fns as plain tests.
// NOTE(review): the rest of the body is missing from this extraction.
1735 pub fn run_once<F>(f: F)
1737 F: FnMut(&mut Bencher),
1739 let mut bs = Bencher {
1740 mode: BenchMode::Single,
1750 use test::{filter_tests, parse_opts, run_test, DynTestFn, DynTestName, MetricMap, RunIgnored,
1751 ShouldPanic, StaticTestName, TestDesc, TestDescAndFn, TestOpts, TrFailed,
1752 TrFailedMsg, TrIgnored, TrOk};
1753 use std::sync::mpsc::channel;
// Test fixture: two no-op DynTestFn descriptors named "1" and "2". Presumably
// one has `ignore: true` and the other `ignore: false` (those fields are
// missing from this extraction) — used by the run_ignored filter tests below.
1758 fn one_ignored_one_unignored_test() -> Vec<TestDescAndFn> {
1762 name: StaticTestName("1"),
1764 should_panic: ShouldPanic::No,
1767 testfn: DynTestFn(Box::new(move || {})),
1771 name: StaticTestName("2"),
1773 should_panic: ShouldPanic::No,
1776 testfn: DynTestFn(Box::new(move || {})),
// With default TestOpts, a test marked ignored (the `ignore` field is missing
// from this extraction — presumably true) must not report TrOk.
1782 pub fn do_not_run_ignored_tests() {
1786 let desc = TestDescAndFn {
1788 name: StaticTestName("whatever"),
1790 should_panic: ShouldPanic::No,
1793 testfn: DynTestFn(Box::new(f)),
1795 let (tx, rx) = channel();
1796 run_test(&TestOpts::new(), false, desc, tx);
1797 let (_, res, _) = rx.recv().unwrap();
1798 assert!(res != TrOk);
// An ignored test run through `run_test` must report exactly TrIgnored.
1802 pub fn ignored_tests_result_in_ignored() {
1804 let desc = TestDescAndFn {
1806 name: StaticTestName("whatever"),
1808 should_panic: ShouldPanic::No,
1811 testfn: DynTestFn(Box::new(f)),
1813 let (tx, rx) = channel();
1814 run_test(&TestOpts::new(), false, desc, tx);
1815 let (_, res, _) = rx.recv().unwrap();
1816 assert!(res == TrIgnored);
// A ShouldPanic::Yes test whose fn panics (the panicking body is missing from
// this extraction) must report TrOk.
1820 fn test_should_panic() {
1824 let desc = TestDescAndFn {
1826 name: StaticTestName("whatever"),
1828 should_panic: ShouldPanic::Yes,
1831 testfn: DynTestFn(Box::new(f)),
1833 let (tx, rx) = channel();
1834 run_test(&TestOpts::new(), false, desc, tx);
1835 let (_, res, _) = rx.recv().unwrap();
1836 assert!(res == TrOk);
// YesWithMessage matches by substring: panicking with "an error message" must
// satisfy an expectation of "error message" and report TrOk.
1840 fn test_should_panic_good_message() {
1842 panic!("an error message");
1844 let desc = TestDescAndFn {
1846 name: StaticTestName("whatever"),
1848 should_panic: ShouldPanic::YesWithMessage("error message"),
1851 testfn: DynTestFn(Box::new(f)),
1853 let (tx, rx) = channel();
1854 run_test(&TestOpts::new(), false, desc, tx);
1855 let (_, res, _) = rx.recv().unwrap();
1856 assert!(res == TrOk);
1860 fn test_should_panic_bad_message() {
1862 panic!("an error message");
1864 let expected = "foobar";
1865 let failed_msg = "Panic did not include expected string";
1866 let desc = TestDescAndFn {
1868 name: StaticTestName("whatever"),
1870 should_panic: ShouldPanic::YesWithMessage(expected),
1873 testfn: DynTestFn(Box::new(f)),
1875 let (tx, rx) = channel();
1876 run_test(&TestOpts::new(), false, desc, tx);
1877 let (_, res, _) = rx.recv().unwrap();
1878 assert!(res == TrFailedMsg(format!("{} '{}'", failed_msg, expected)));
// A ShouldPanic::Yes test whose fn completes without panicking (the no-op body
// is missing from this extraction) must report TrFailed.
1882 fn test_should_panic_but_succeeds() {
1884 let desc = TestDescAndFn {
1886 name: StaticTestName("whatever"),
1888 should_panic: ShouldPanic::Yes,
1891 testfn: DynTestFn(Box::new(f)),
1893 let (tx, rx) = channel();
1894 run_test(&TestOpts::new(), false, desc, tx);
1895 let (_, res, _) = rx.recv().unwrap();
1896 assert!(res == TrFailed);
// `--ignored` on the command line must parse to RunIgnored::Only.
1900 fn parse_ignored_flag() {
1902 "progname".to_string(),
1903 "filter".to_string(),
1904 "--ignored".to_string(),
1906 let opts = parse_opts(&args).unwrap().unwrap();
1907 assert_eq!(opts.run_ignored, RunIgnored::Only);
// `--include-ignored` (gated behind -Zunstable-options) must parse to
// RunIgnored::Yes.
1911 fn parse_include_ignored_flag() {
1913 "progname".to_string(),
1914 "filter".to_string(),
1915 "-Zunstable-options".to_string(),
1916 "--include-ignored".to_string(),
1918 let opts = parse_opts(&args).unwrap().unwrap();
1919 assert_eq!(opts.run_ignored, RunIgnored::Yes);
// RunIgnored::Only must keep exactly the ignored test (named "1" in the
// fixture) and clear its ignore flag so it actually runs.
1923 pub fn filter_for_ignored_option() {
1924 // When we run ignored tests the test filter should filter out all the
1925 // unignored tests and flip the ignore flag on the rest to false
1927 let mut opts = TestOpts::new();
1928 opts.run_tests = true;
1929 opts.run_ignored = RunIgnored::Only;
1931 let tests = one_ignored_one_unignored_test();
1932 let filtered = filter_tests(&opts, tests);
1934 assert_eq!(filtered.len(), 1);
1935 assert_eq!(filtered[0].desc.name.to_string(), "1");
1936 assert!(!filtered[0].desc.ignore);
// RunIgnored::Yes must keep both tests and clear the ignore flag on each.
1940 pub fn run_include_ignored_option() {
1941 // When we "--include-ignored" tests, the ignore flag should be set to false on
1942 // all tests and no test filtered out
1944 let mut opts = TestOpts::new();
1945 opts.run_tests = true;
1946 opts.run_ignored = RunIgnored::Yes;
1948 let tests = one_ignored_one_unignored_test();
1949 let filtered = filter_tests(&opts, tests);
1951 assert_eq!(filtered.len(), 2);
1952 assert!(!filtered[0].desc.ignore);
1953 assert!(!filtered[1].desc.ignore);
// Contrasts substring filtering (default) with exact matching (presumably the
// missing struct fields set `filter_exact: true` in the second half — confirm
// against the full source). Fixture names: "base", "base::test", "base::test1",
// "base::test2".
1957 pub fn exact_filter_match() {
1958 fn tests() -> Vec<TestDescAndFn> {
1959 vec!["base", "base::test", "base::test1", "base::test2"]
1961 .map(|name| TestDescAndFn {
1963 name: StaticTestName(name),
1965 should_panic: ShouldPanic::No,
1968 testfn: DynTestFn(Box::new(move || {})),
// Substring mode: "base" and "bas" match all four names.
1973 let substr = filter_tests(
1975 filter: Some("base".into()),
1980 assert_eq!(substr.len(), 4);
1982 let substr = filter_tests(
1984 filter: Some("bas".into()),
1989 assert_eq!(substr.len(), 4);
// Substring mode: "::test" and "base::test" match the three namespaced names.
1991 let substr = filter_tests(
1993 filter: Some("::test".into()),
1998 assert_eq!(substr.len(), 3);
2000 let substr = filter_tests(
2002 filter: Some("base::test".into()),
2007 assert_eq!(substr.len(), 3);
// Exact mode: only a whole-name match survives ("base" -> 1, "bas" -> 0, ...).
2009 let exact = filter_tests(
2011 filter: Some("base".into()),
2017 assert_eq!(exact.len(), 1);
2019 let exact = filter_tests(
2021 filter: Some("bas".into()),
2027 assert_eq!(exact.len(), 0);
2029 let exact = filter_tests(
2031 filter: Some("::test".into()),
2037 assert_eq!(exact.len(), 0);
2039 let exact = filter_tests(
2041 filter: Some("base::test".into()),
2047 assert_eq!(exact.len(), 1);
// `filter_tests` must return tests sorted by name: the shuffled `names` input
// must come back in the lexicographic order spelled out in `expected`.
2051 pub fn sort_tests() {
2052 let mut opts = TestOpts::new();
2053 opts.run_tests = true;
2056 "sha1::test".to_string(),
2057 "isize::test_to_str".to_string(),
2058 "isize::test_pow".to_string(),
2059 "test::do_not_run_ignored_tests".to_string(),
2060 "test::ignored_tests_result_in_ignored".to_string(),
2061 "test::first_free_arg_should_be_a_filter".to_string(),
2062 "test::parse_ignored_flag".to_string(),
2063 "test::parse_include_ignored_flag".to_string(),
2064 "test::filter_for_ignored_option".to_string(),
2065 "test::run_include_ignored_option".to_string(),
2066 "test::sort_tests".to_string(),
// Build a DynTestName descriptor per name (testfn body missing from this
// extraction).
2070 let mut tests = Vec::new();
2071 for name in &names {
2072 let test = TestDescAndFn {
2074 name: DynTestName((*name).clone()),
2076 should_panic: ShouldPanic::No,
2079 testfn: DynTestFn(Box::new(testfn)),
2085 let filtered = filter_tests(&opts, tests);
2087 let expected = vec![
2088 "isize::test_pow".to_string(),
2089 "isize::test_to_str".to_string(),
2090 "sha1::test".to_string(),
2091 "test::do_not_run_ignored_tests".to_string(),
2092 "test::filter_for_ignored_option".to_string(),
2093 "test::first_free_arg_should_be_a_filter".to_string(),
2094 "test::ignored_tests_result_in_ignored".to_string(),
2095 "test::parse_ignored_flag".to_string(),
2096 "test::parse_include_ignored_flag".to_string(),
2097 "test::run_include_ignored_option".to_string(),
2098 "test::sort_tests".to_string(),
// Pairwise comparison: filtered order must equal the expected sorted order.
2101 for (a, b) in expected.iter().zip(filtered) {
2102 assert!(*a == b.desc.name.to_string());
// Builds two MetricMaps covering every comparison case (within noise, present
// in only one map, regressed/improved for both noise signs). NOTE(review): the
// assertions that actually compare m1 against m2 are missing from this
// extraction.
2107 pub fn test_metricmap_compare() {
2108 let mut m1 = MetricMap::new();
2109 let mut m2 = MetricMap::new();
// Change (1000 -> 1100) is within the +/-200 noise band.
2110 m1.insert_metric("in-both-noise", 1000.0, 200.0);
2111 m2.insert_metric("in-both-noise", 1100.0, 200.0);
2113 m1.insert_metric("in-first-noise", 1000.0, 2.0);
2114 m2.insert_metric("in-second-noise", 1000.0, 2.0);
// Positive noise: smaller is better, so 1000 -> 2000 is a regression.
2116 m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
2117 m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);
2119 m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
2120 m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);
// Negative noise: larger is better, so 2000 -> 1000 is a regression.
2122 m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
2123 m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);
2125 m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
2126 m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);
// Smoke test: `bench::run_once` with a bench fn that never calls `iter`
// (the `run_once(f)` call itself is missing from this extraction).
2130 pub fn test_bench_once_no_iter() {
2131 fn f(_: &mut Bencher) {}
// Smoke test: `bench::run_once` with a bench fn that does call `iter`
// (the closure body and `run_once` call are missing from this extraction).
2136 pub fn test_bench_once_iter() {
2137 fn f(b: &mut Bencher) {
// Full benchmark path with a bench fn that never calls `iter`: `benchmark`
// must still send a result on the channel (the receive/assert lines are
// missing from this extraction).
2144 pub fn test_bench_no_iter() {
2145 fn f(_: &mut Bencher) {}
2147 let (tx, rx) = channel();
2149 let desc = TestDesc {
2150 name: StaticTestName("f"),
2152 should_panic: ShouldPanic::No,
2156 ::bench::benchmark(desc, tx, true, f);
// Full benchmark path with a bench fn that calls `iter` (closure body and the
// receive/assert lines are missing from this extraction).
2161 pub fn test_bench_iter() {
2162 fn f(b: &mut Bencher) {
2166 let (tx, rx) = channel();
2168 let desc = TestDesc {
2169 name: StaticTestName("f"),
2171 should_panic: ShouldPanic::No,
2175 ::bench::benchmark(desc, tx, true, f);