1 // Copyright 2012-2017 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 //! Support code for rustc's built in unit-test and micro-benchmarking
14 //! Almost all user code will only be interested in `Bencher` and
15 //! `black_box`. All other interactions (such as writing tests and
16 //! benchmarks themselves) should be done via the `#[test]` and
17 //! `#[bench]` attributes.
19 //! See the [Testing Chapter](../book/first-edition/testing.html) of the book for more details.
21 // Currently, not much of this is meant for users. It is intended to
22 // support the simplest interface possible for representing and
23 // running tests while providing a base that other test frameworks may
26 // NB: this is also specified in this crate's Cargo.toml, but libsyntax contains logic specific to
27 // this crate, which relies on this attribute (rather than the value of `--crate-name` passed by
28 // cargo) to detect this crate.
30 #![crate_name = "test"]
31 #![unstable(feature = "test", issue = "27812")]
32 #![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
33 html_favicon_url = "https://doc.rust-lang.org/favicon.ico",
34 html_root_url = "https://doc.rust-lang.org/nightly/", test(attr(deny(warnings))))]
37 #![cfg_attr(any(unix, target_os = "cloudabi"), feature(libc))]
39 #![feature(set_stdio)]
40 #![feature(panic_unwind)]
41 #![feature(staged_api)]
42 #![feature(termination_trait_lib)]
46 #[cfg(any(unix, target_os = "cloudabi"))]
50 // FIXME(#54291): rustc and/or LLVM don't yet support building with panic-unwind
51 // on aarch64-pc-windows-msvc, so we don't link libtest against
52 // libunwind (for the time being), even though it means that
53 // libtest won't be fully functional on this platform.
55 // See also: https://github.com/rust-lang/rust/issues/54190#issuecomment-422904437
56 #[cfg(not(all(windows, target_arch = "aarch64")))]
57 extern crate panic_unwind;
59 pub use self::TestFn::*;
60 pub use self::ColorConfig::*;
61 pub use self::TestResult::*;
62 pub use self::TestName::*;
63 use self::TestEvent::*;
64 use self::NamePadding::*;
65 use self::OutputLocation::*;
67 use std::panic::{catch_unwind, AssertUnwindSafe};
69 use std::boxed::FnBox;
71 use std::collections::BTreeMap;
75 use std::io::prelude::*;
77 use std::path::PathBuf;
78 use std::process::Termination;
79 use std::sync::mpsc::{channel, Sender};
80 use std::sync::{Arc, Mutex};
82 use std::time::{Duration, Instant};
86 const TEST_WARN_TIMEOUT_S: u64 = 60;
87 const QUIET_MODE_MAX_COLUMN: usize = 100; // insert a '\n' after 100 tests in quiet mode
89 // to be used by rustc to compile tests in libtest
91 pub use {assert_test_result, filter_tests, parse_opts, run_test, test_main, test_main_static,
92 Bencher, DynTestFn, DynTestName, Metric, MetricMap, Options, RunIgnored, ShouldPanic,
93 StaticBenchFn, StaticTestFn, StaticTestName, TestDesc, TestDescAndFn, TestName,
94 TestOpts, TestResult, TrFailed, TrFailedMsg, TrIgnored, TrOk};
100 use formatters::{JsonFormatter, OutputFormatter, PrettyFormatter, TerseFormatter};
// The name of a test. By convention this follows the rules for rust
// paths; i.e. it should be a series of identifiers separated by double
// colons. This way if some test runner wants to arrange the tests
// hierarchically it may.
// NOTE(review): extraction gaps — enum header, match scaffolding and
// closing braces are missing from this fragment; tokens kept as-is.
#[derive(Clone, PartialEq, Eq, Hash, Debug)]
StaticTestName(&'static str),
AlignedTestName(Cow<'static, str>, NamePadding),
// Borrow the name regardless of which variant holds it.
fn as_slice(&self) -> &str {
StaticTestName(s) => s,
DynTestName(ref s) => s,
AlignedTestName(ref s, _) => &*s,
// Padding is only carried by the Aligned variant; presumably the other
// arms (not visible here) return a default — TODO confirm.
fn padding(&self) -> NamePadding {
&AlignedTestName(_, p) => p,
// Rewrap any variant as AlignedTestName with the requested padding.
fn with_padding(&self, padding: NamePadding) -> TestName {
let name = match self {
&TestName::StaticTestName(name) => Cow::Borrowed(name),
&TestName::DynTestName(ref name) => Cow::Owned(name.clone()),
&TestName::AlignedTestName(ref name, _) => name.clone(),
TestName::AlignedTestName(name, padding)
// Display delegates to the underlying &str.
impl fmt::Display for TestName {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(self.as_slice(), f)
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub enum NamePadding {
// Pads the name with spaces up to `column_count` (never truncates:
// saturating_sub yields 0 fill when the name is already longer).
fn padded_name(&self, column_count: usize, align: NamePadding) -> String {
let mut name = String::from(self.name.as_slice());
let fill = column_count.saturating_sub(name.len());
let pad = " ".repeat(fill);
/// Represents a benchmark function.
// NOTE(review): extraction gaps — several enum/struct headers and
// closing braces are missing from this fragment; tokens kept as-is.
pub trait TDynBenchFn: Send {
fn run(&self, harness: &mut Bencher);
// A function that runs a test. If the function returns successfully,
// the test succeeds; if the function panics then the test fails. We
// may need to come up with a more clever definition of test in order
// to support isolation of tests into threads.
StaticBenchFn(fn(&mut Bencher)),
DynTestFn(Box<dyn FnBox() + Send>),
DynBenchFn(Box<dyn TDynBenchFn + 'static>),
// Benchmarks get right-padding so their timing columns line up.
fn padding(&self) -> NamePadding {
StaticTestFn(..) => PadNone,
StaticBenchFn(..) => PadOnRight,
DynTestFn(..) => PadNone,
DynBenchFn(..) => PadOnRight,
// Manual Debug: the boxed closures are not Debug, so only print variants.
impl fmt::Debug for TestFn {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str(match *self {
StaticTestFn(..) => "StaticTestFn(..)",
StaticBenchFn(..) => "StaticBenchFn(..)",
DynTestFn(..) => "DynTestFn(..)",
DynBenchFn(..) => "DynBenchFn(..)",
/// Manager of the benchmarking runs.
/// This is fed into functions marked with `#[bench]` to allow for
/// set-up & tear-down before running a piece of code repeatedly via a
summary: Option<stats::Summary>,
#[derive(Clone, PartialEq, Eq)]
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum ShouldPanic {
YesWithMessage(&'static str),
// The definition of a single test. A test runner will run a list of
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct TestDesc {
pub should_panic: ShouldPanic,
pub allow_fail: bool,
pub struct TestDescAndFn {
#[derive(Clone, PartialEq, Debug, Copy)]
// Metric constructor: plain field init, no validation visible here.
pub fn new(value: f64, noise: f64) -> Metric {
Metric { value, noise }
/// In case we want to add other options as well, just add them in this struct.
#[derive(Copy, Clone, Debug)]
display_output: bool,
pub fn new() -> Options {
display_output: false,
// Builder-style setter: consumes and returns self.
pub fn display_output(mut self, display_output: bool) -> Options {
self.display_output = display_output;
// The default console test runner. It accepts the command line
// arguments and a vector of test_descs.
// NOTE(review): extraction gaps — match arms and closing braces are
// missing from this fragment; tokens kept as-is.
pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>, options: Options) {
let mut opts = match parse_opts(args) {
eprintln!("error: {}", msg);
opts.options = options;
if let Err(e) = list_tests_console(&opts, tests) {
eprintln!("error: io error when listing tests: {:?}", e);
match run_tests_console(&opts, tests) {
// Exit 101 (Rust's panic exit code) when any test failed.
Ok(false) => process::exit(101),
// NOTE(review): this is the *run* path but the message says "listing
// tests" — looks like a copy-paste; probably should say "running tests".
eprintln!("error: io error when listing tests: {:?}", e);
// A variant optimized for invocation with a static test vector.
// This will panic (intentionally) when fed any dynamic tests, because
// it is copying the static values out into a dynamic vector and cannot
// copy dynamic values. It is doing this because from this point on
// a Vec<TestDescAndFn> is used in order to effect ownership-transfer
// semantics into parallel test runners, which in turn requires a Vec<>
// rather than a &[].
pub fn test_main_static(tests: &[&TestDescAndFn]) {
let args = env::args().collect::<Vec<_>>();
let owned_tests = tests
.map(|t| match t.testfn {
StaticTestFn(f) => TestDescAndFn {
testfn: StaticTestFn(f),
desc: t.desc.clone(),
StaticBenchFn(f) => TestDescAndFn {
testfn: StaticBenchFn(f),
desc: t.desc.clone(),
_ => panic!("non-static tests passed to test::test_main_static"),
test_main(&args, owned_tests, Options::new())
/// Invoked when unit tests terminate. Should panic if the unit
/// test is considered a failure. By default, invokes `report()`
/// and checks for a `0` result.
pub fn assert_test_result<T: Termination>(result: T) {
let code = result.report();
"the test returned a termination value with a non-zero status code ({}) \
 which indicates a failure",
// NOTE(review): extraction gaps — enum variants and some fields/braces
// are missing from this fragment; tokens kept as-is.
#[derive(Copy, Clone, Debug)]
pub enum ColorConfig {
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum OutputFormat {
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum RunIgnored {
// All options controlling a test run, produced by parse_opts.
pub struct TestOpts {
pub filter: Option<String>,
pub filter_exact: bool,
pub run_ignored: RunIgnored,
pub bench_benchmarks: bool,
pub logfile: Option<PathBuf>,
pub color: ColorConfig,
pub format: OutputFormat,
// None means "choose automatically" (RUST_TEST_THREADS / num_cpus).
pub test_threads: Option<usize>,
pub skip: Vec<String>,
pub options: Options,
// Defaults used by the in-crate unit tests (non-pub constructor).
fn new() -> TestOpts {
run_ignored: RunIgnored::No,
bench_benchmarks: false,
format: OutputFormat::Pretty,
options: Options::new(),
/// Result of parsing the options.
pub type OptRes = Result<TestOpts, String>;
// Builds the getopts option table for the test harness CLI.
// NOTE(review): extraction gaps — several .optopt/.optflagopt calls are
// missing their leading call syntax; only the help strings remain.
// String-literal content is preserved byte-for-byte.
fn optgroups() -> getopts::Options {
let mut opts = getopts::Options::new();
opts.optflag("", "include-ignored", "Run ignored and not ignored tests")
.optflag("", "ignored", "Run only ignored tests")
.optflag("", "test", "Run tests and not benchmarks")
.optflag("", "bench", "Run benchmarks instead of tests")
.optflag("", "list", "List all tests and benchmarks")
.optflag("h", "help", "Display this message (longer with --help)")
"Write logs to the specified file instead \
"don't capture stdout/stderr of each \
 task, allow printing directly",
"Number of threads used for running tests \
"Skip tests whose names contain FILTER (this flag can \
 be used multiple times)",
"Display one character per test instead of one line. \
 Alias to --format=terse",
"Exactly match filters rather than by substring",
"Configure coloring of output:
auto = colorize if stdout is a tty and tests are run on serially (default);
always = always colorize output;
never = never colorize output;",
"Configure formatting of output:
pretty = Print verbose output;
terse = Display one character per test;
json = Output a json document",
"Enable nightly-only flags:
unstable-options = Allow use of experimental features",
// Prints the usage banner plus the long help text (a large literal).
fn usage(binary: &str, options: &getopts::Options) {
let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
The FILTER string is tested against the name of all tests, and only those
tests whose names contain the filter are run.
By default, all tests are run in parallel. This can be altered with the
--test-threads flag or the RUST_TEST_THREADS environment variable when running
All tests have their standard output and standard error captured by default.
This can be overridden with the --nocapture flag or setting RUST_TEST_NOCAPTURE
environment variable to a value other than "0". Logging is not captured by default.
#[test] - Indicates a function is a test to be run. This function
#[bench] - Indicates a function is a benchmark to be run. This
function takes one argument (test::Bencher).
#[should_panic] - This function (also labeled with #[test]) will only pass if
the code causes a panic (an assertion failure or panic!)
A message may be provided, which the failure string must
contain: #[should_panic(expected = "foo")].
#[ignore] - When applied to a function which is already attributed as a
test, then the test runner will ignore these tests during
normal test runs. Running with --ignored or --include-ignored will run
usage = options.usage(&message)
// FIXME: Copied from libsyntax until linkage errors are resolved. Issue #47566
/// Returns true when nightly-only (unstable) flags may be accepted:
/// either this is not a feature-staged (beta/stable) build, or the
/// `RUSTC_BOOTSTRAP` escape hatch is set in the environment.
fn is_nightly() -> bool {
    // Whether this is a feature-staged build, i.e. on the beta or stable channel
    let disable_unstable_features = option_env!("CFG_DISABLE_UNSTABLE_FEATURES").is_some();
    // Whether we should enable unstable features for bootstrapping
    let bootstrap = env::var("RUSTC_BOOTSTRAP").is_ok();

    bootstrap || !disable_unstable_features
}
// Parses command line arguments into test options
// NOTE(review): extraction gaps — many match arms, Ok(..) bindings and
// closing braces are missing from this fragment; tokens kept as-is.
pub fn parse_opts(args: &[String]) -> Option<OptRes> {
let mut allow_unstable = false;
let opts = optgroups();
// Skip argv[0] (the binary name) when present.
let args = args.get(1..).unwrap_or(args);
let matches = match opts.parse(args) {
Err(f) => return Some(Err(f.to_string())),
// -Z flags are gated on a nightly toolchain.
if let Some(opt) = matches.opt_str("Z") {
"the option `Z` is only accepted on the nightly compiler".into(),
"unstable-options" => {
allow_unstable = true;
return Some(Err("Unrecognized option to `Z`".into()));
if matches.opt_present("h") {
usage(&args[0], &opts);
// The first free argument, if any, is the name filter.
let filter = if !matches.free.is_empty() {
Some(matches.free[0].clone())
let include_ignored = matches.opt_present("include-ignored");
if !allow_unstable && include_ignored {
"The \"include-ignored\" flag is only accepted on the nightly compiler".into()
// --include-ignored and --ignored are mutually exclusive.
let run_ignored = match (include_ignored, matches.opt_present("ignored")) {
(true, true) => return Some(Err(
"the options --include-ignored and --ignored are mutually exclusive".into()
(true, false) => RunIgnored::Yes,
(false, true) => RunIgnored::Only,
(false, false) => RunIgnored::No,
let quiet = matches.opt_present("quiet");
let exact = matches.opt_present("exact");
let list = matches.opt_present("list");
let logfile = matches.opt_str("logfile");
let logfile = logfile.map(|s| PathBuf::from(&s));
let bench_benchmarks = matches.opt_present("bench");
let run_tests = !bench_benchmarks || matches.opt_present("test");
// RUST_TEST_NOCAPTURE in the environment can also enable nocapture
// (any value other than "0").
let mut nocapture = matches.opt_present("nocapture");
nocapture = match env::var("RUST_TEST_NOCAPTURE") {
Ok(val) => &val != "0",
// --test-threads must parse as a positive integer.
let test_threads = match matches.opt_str("test-threads") {
Some(n_str) => match n_str.parse::<usize>() {
Ok(0) => return Some(Err("argument for --test-threads must not be 0".to_string())),
return Some(Err(format!(
"argument for --test-threads must be a number > 0 \
let color = match matches.opt_str("color").as_ref().map(|s| &**s) {
Some("auto") | None => AutoColor,
Some("always") => AlwaysColor,
Some("never") => NeverColor,
return Some(Err(format!(
"argument for --color must be auto, always, or never (was \
// --quiet is an alias for --format=terse when --format is absent.
let format = match matches.opt_str("format").as_ref().map(|s| &**s) {
None if quiet => OutputFormat::Terse,
Some("pretty") | None => OutputFormat::Pretty,
Some("terse") => OutputFormat::Terse,
"The \"json\" format is only accepted on the nightly compiler".into(),
return Some(Err(format!(
"argument for --format must be pretty, terse, or json (was \
let test_opts = TestOpts {
skip: matches.opt_strs("skip"),
options: Options::new(),
// NOTE(review): extraction gaps — struct fields, match headers and
// closing braces are missing from this fragment; tokens kept as-is.
#[derive(Clone, PartialEq)]
pub struct BenchSamples {
ns_iter_summ: stats::Summary,
#[derive(Clone, PartialEq)]
pub enum TestResult {
TrBench(BenchSamples),
// NOTE(review): manual `unsafe impl Send` — the invariant making this
// sound (whatever non-Send field TestResult holds) is not visible here.
unsafe impl Send for TestResult {}
// Either a terminal handle (with color support) or a raw writer.
enum OutputLocation<T> {
Pretty(Box<term::StdoutTerminal>),
// Write delegates to whichever sink is active.
impl<T: Write> Write for OutputLocation<T> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
Pretty(ref mut term) => term.write(buf),
Raw(ref mut stdout) => stdout.write(buf),
fn flush(&mut self) -> io::Result<()> {
Pretty(ref mut term) => term.flush(),
Raw(ref mut stdout) => stdout.flush(),
// Mutable counters and collected output for a console test run.
struct ConsoleTestState {
log_out: Option<File>,
failures: Vec<(TestDesc, Vec<u8>)>,
not_failures: Vec<(TestDesc, Vec<u8>)>,
impl ConsoleTestState {
pub fn new(opts: &TestOpts) -> io::Result<ConsoleTestState> {
// Open (create/truncate) the logfile up front if one was requested.
let log_out = match opts.logfile {
Some(ref path) => Some(File::create(path)?),
Ok(ConsoleTestState {
metrics: MetricMap::new(),
failures: Vec::new(),
not_failures: Vec::new(),
options: opts.options,
// Append to the logfile; a no-op when no logfile is configured.
pub fn write_log<S: AsRef<str>>(&mut self, msg: S) -> io::Result<()> {
let msg = msg.as_ref();
Some(ref mut o) => o.write_all(msg.as_bytes()),
pub fn write_log_result(&mut self, test: &TestDesc, result: &TestResult) -> io::Result<()> {
self.write_log(format!(
TrOk => "ok".to_owned(),
TrFailed => "failed".to_owned(),
TrFailedMsg(ref msg) => format!("failed: {}", msg),
TrIgnored => "ignored".to_owned(),
TrAllowedFail => "failed (allowed)".to_owned(),
TrBench(ref bs) => fmt_bench_samples(bs),
// Total of all completed tests so far, across every outcome bucket.
fn current_test_count(&self) -> usize {
self.passed + self.failed + self.ignored + self.measured + self.allowed_fail
// Format a number with thousands separators
//
// Emits groups of three digits separated by `sep`, e.g.
// fmt_thousands_sep(1234567, ',') == "1,234,567". The leading group is
// printed without zero padding; interior groups are zero-padded to
// three digits so interior zeros are preserved ("1,000", "1,002,003").
fn fmt_thousands_sep(mut n: usize, sep: char) -> String {
    use std::fmt::Write; // bring String::write_fmt into scope locally
    let mut output = String::new();
    let mut trailing = false;
    for &pow in &[9, 6, 3, 0] {
        let base = 10_usize.pow(pow);
        // Emit this group if it is non-zero, if a higher group has
        // already been emitted, or if it is the final group (so that
        // 0 still formats as "0").
        if pow == 0 || trailing || n / base != 0 {
            if !trailing {
                output.write_fmt(format_args!("{}", n / base)).unwrap();
            } else {
                output.write_fmt(format_args!("{:03}", n / base)).unwrap();
            }
            if pow != 0 {
                output.push(sep);
            }
            trailing = true;
        }
        // Drop the digits just emitted before the next, smaller group.
        n %= base;
    }

    output
}
// Formats a benchmark's summary: median ns/iter, +/- spread, and MB/s
// throughput when available.
// NOTE(review): extraction gaps — receiver expressions before the
// .write_fmt calls and closing braces are missing; tokens kept as-is.
pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
let mut output = String::new();
let median = bs.ns_iter_summ.median as usize;
// Spread shown as max - min of the per-iteration timings.
let deviation = (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize;
.write_fmt(format_args!(
"{:>11} ns/iter (+/- {})",
fmt_thousands_sep(median, ','),
fmt_thousands_sep(deviation, ',')
.write_fmt(format_args!(" = {} MB/s", bs.mb_s))
// List the tests to console, and optionally to logfile. Filters are honored.
pub fn list_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<()> {
// Use a color-capable terminal when stdout is one, raw stdout otherwise.
let mut output = match term::stdout() {
None => Raw(io::stdout()),
Some(t) => Pretty(t),
let quiet = opts.format == OutputFormat::Terse;
let mut st = ConsoleTestState::new(opts)?;
// Same filtering as a real run, so the listing matches what would run.
for test in filter_tests(&opts, tests) {
desc: TestDesc { name, .. },
let fntype = match testfn {
StaticTestFn(..) | DynTestFn(..) => {
StaticBenchFn(..) | DynBenchFn(..) => {
writeln!(output, "{}: {}", name, fntype)?;
st.write_log(format!("{} {}\n", fntype, name))?;
/// Naive pluralization for the summary line: `(1, "test")` -> `"1 test"`,
/// any other count appends an `s` (`"0 tests"`, `"5 benchmarks"`).
fn plural(count: u32, s: &str) -> String {
    match count {
        1 => format!("{} {}", 1, s),
        n => format!("{} {}s", n, s),
    }
}
// NOTE(review): extraction gaps — this fragment starts mid-way through
// list_tests_console's summary and is missing match headers, result
// arms and closing braces throughout; tokens kept as-is.
if ntest != 0 || nbench != 0 {
writeln!(output, "")?;
plural(ntest, "test"),
plural(nbench, "benchmark")
// A simple console test runner
pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<bool> {
// Inner callback: folds each TestEvent into the console state and
// forwards it to the active output formatter.
st: &mut ConsoleTestState,
out: &mut dyn OutputFormatter,
) -> io::Result<()> {
match (*event).clone() {
TeFiltered(ref filtered_tests) => {
st.total = filtered_tests.len();
out.write_run_start(filtered_tests.len())
TeFilteredOut(filtered_out) => Ok(st.filtered_out = filtered_out),
TeWait(ref test) => out.write_test_start(test),
TeTimeout(ref test) => out.write_timeout(test),
TeResult(test, result, stdout) => {
// Record in the logfile first, then render to the console.
st.write_log_result(&test, &result)?;
out.write_result(&test, &result, &*stdout)?;
st.not_failures.push((test, stdout));
TrIgnored => st.ignored += 1,
TrAllowedFail => st.allowed_fail += 1,
// Benchmarks feed the metric map with median and spread.
st.metrics.insert_metric(
test.name.as_slice(),
bs.ns_iter_summ.median,
bs.ns_iter_summ.max - bs.ns_iter_summ.min,
st.failures.push((test, stdout));
TrFailedMsg(msg) => {
// Append the failure note to the captured output.
let mut stdout = stdout;
stdout.extend_from_slice(format!("note: {}", msg).as_bytes());
st.failures.push((test, stdout));
let output = match term::stdout() {
None => Raw(io::stdout()),
Some(t) => Pretty(t),
// Column width: longest name among tests that want padding.
let max_name_len = tests
.max_by_key(|t| len_if_padded(*t))
.map(|t| t.desc.name.as_slice().len())
let is_multithreaded = opts.test_threads.unwrap_or_else(get_concurrency) > 1;
let mut out: Box<dyn OutputFormatter> = match opts.format {
OutputFormat::Pretty => Box::new(PrettyFormatter::new(
OutputFormat::Terse => Box::new(TerseFormatter::new(
OutputFormat::Json => Box::new(JsonFormatter::new(output)),
let mut st = ConsoleTestState::new(opts)?;
// Only right-padded entries (benchmarks) count toward the column width.
fn len_if_padded(t: &TestDescAndFn) -> usize {
match t.testfn.padding() {
PadOnRight => t.desc.name.as_slice().len(),
run_tests(opts, tests, |x| callback(&x, &mut st, &mut *out))?;
// Sanity check: every filtered-in test produced exactly one result.
assert!(st.current_test_count() == st.total);
return out.write_run_finish(&st);
// Unit test: the pretty formatter must print failures in sorted name
// order regardless of completion order (test_b finished first here).
// NOTE(review): extraction gaps — the #[test] attribute, several struct
// fields and closing braces are missing; tokens kept as-is.
fn should_sort_failures_before_printing_them() {
let test_a = TestDesc {
name: StaticTestName("a"),
should_panic: ShouldPanic::No,
let test_b = TestDesc {
name: StaticTestName("b"),
should_panic: ShouldPanic::No,
let mut out = PrettyFormatter::new(Raw(Vec::new()), false, 10, false);
let st = ConsoleTestState {
metrics: MetricMap::new(),
// Deliberately inserted out of order: b before a.
failures: vec![(test_b, Vec::new()), (test_a, Vec::new())],
options: Options::new(),
not_failures: Vec::new(),
out.write_failures(&st).unwrap();
// Recover the formatter's buffered output for inspection.
let s = match out.output_location() {
&Raw(ref m) => String::from_utf8_lossy(&m[..]),
&Pretty(_) => unreachable!(),
let apos = s.find("a").unwrap();
let bpos = s.find("b").unwrap();
assert!(apos < bpos);
// Decide whether colored output should be used for this run.
// NOTE(review): extraction gaps — the `match opts.color {` header,
// several cfg attributes, extern blocks and closing braces are missing
// from this fragment; tokens kept as-is.
fn use_color(opts: &TestOpts) -> bool {
// Auto: color only when not capturing output and stdout is a tty.
AutoColor => !opts.nocapture && stdout_isatty(),
AlwaysColor => true,
NeverColor => false,
// Platforms with no isatty support: conservatively report "not a tty".
#[cfg(any(target_os = "cloudabi", target_os = "redox",
all(target_arch = "wasm32", not(target_os = "emscripten"))))]
fn stdout_isatty() -> bool {
// FIXME: Implement isatty on Redox
// Unix-family variant: defer to libc's isatty(3).
fn stdout_isatty() -> bool {
unsafe { libc::isatty(libc::STDOUT_FILENO) != 0 }
// Windows variant: GetConsoleMode succeeds only on a real console.
fn stdout_isatty() -> bool {
type HANDLE = *mut u8;
type LPDWORD = *mut u32;
const STD_OUTPUT_HANDLE: DWORD = -11i32 as DWORD;
fn GetStdHandle(which: DWORD) -> HANDLE;
fn GetConsoleMode(hConsoleHandle: HANDLE, lpMode: LPDWORD) -> BOOL;
let handle = GetStdHandle(STD_OUTPUT_HANDLE);
GetConsoleMode(handle, &mut out) != 0
// Events emitted by run_tests and consumed by console/formatter callbacks.
pub enum TestEvent {
TeFiltered(Vec<TestDesc>),
TeResult(TestDesc, TestResult, Vec<u8>),
TeTimeout(TestDesc),
TeFilteredOut(usize),
pub type MonitorMsg = (TestDesc, TestResult, Vec<u8>);
// Shared capture buffer used to redirect a test's stdout/panic output.
struct Sink(Arc<Mutex<Vec<u8>>>);
impl Write for Sink {
fn write(&mut self, data: &[u8]) -> io::Result<usize> {
Write::write(&mut *self.0.lock().unwrap(), data)
fn flush(&mut self) -> io::Result<()> {
// Core runner: filters tests, then executes them either serially
// (concurrency == 1) or on worker threads with a warn-timeout watchdog,
// reporting progress through `callback`. Benchmarks always run last, serially.
// NOTE(review): extraction gaps — the `where` keyword, several closure
// bodies and closing braces are missing from this fragment; tokens kept as-is.
pub fn run_tests<F>(opts: &TestOpts, tests: Vec<TestDescAndFn>, mut callback: F) -> io::Result<()>
F: FnMut(TestEvent) -> io::Result<()>,
use std::collections::HashMap;
use std::sync::mpsc::RecvTimeoutError;
let tests_len = tests.len();
let mut filtered_tests = filter_tests(opts, tests);
// When not benchmarking, benches are demoted to single-iteration tests.
if !opts.bench_benchmarks {
filtered_tests = convert_benchmarks_to_tests(filtered_tests);
let filtered_tests = {
let mut filtered_tests = filtered_tests;
for test in filtered_tests.iter_mut() {
test.desc.name = test.desc.name.with_padding(test.testfn.padding());
let filtered_out = tests_len - filtered_tests.len();
callback(TeFilteredOut(filtered_out))?;
let filtered_descs = filtered_tests.iter().map(|t| t.desc.clone()).collect();
callback(TeFiltered(filtered_descs))?;
// Split tests (run concurrently) from benchmarks (run serially later).
let (filtered_tests, filtered_benchs): (Vec<_>, _) =
filtered_tests.into_iter().partition(|e| match e.testfn {
StaticTestFn(_) | DynTestFn(_) => true,
let concurrency = opts.test_threads.unwrap_or_else(get_concurrency);
// Reversed so pop() dequeues tests in their original order.
let mut remaining = filtered_tests;
remaining.reverse();
let mut pending = 0;
let (tx, rx) = channel::<MonitorMsg>();
let mut running_tests: HashMap<TestDesc, Instant> = HashMap::new();
// Drain and return every test whose warn deadline has passed.
fn get_timed_out_tests(running_tests: &mut HashMap<TestDesc, Instant>) -> Vec<TestDesc> {
let now = Instant::now();
let timed_out = running_tests
.filter_map(|(desc, timeout)| {
if &now >= timeout {
for test in &timed_out {
running_tests.remove(test);
// Duration until the earliest pending deadline, if any test is running.
fn calc_timeout(running_tests: &HashMap<TestDesc, Instant>) -> Option<Duration> {
running_tests.values().min().map(|next_timeout| {
let now = Instant::now();
if *next_timeout >= now {
if concurrency == 1 {
// Serial path: run-then-wait for each test in turn.
while !remaining.is_empty() {
let test = remaining.pop().unwrap();
callback(TeWait(test.desc.clone()))?;
run_test(opts, !opts.run_tests, test, tx.clone());
let (test, result, stdout) = rx.recv().unwrap();
callback(TeResult(test, result, stdout))?;
// Concurrent path: keep up to `concurrency` tests in flight.
while pending > 0 || !remaining.is_empty() {
while pending < concurrency && !remaining.is_empty() {
let test = remaining.pop().unwrap();
let timeout = Instant::now() + Duration::from_secs(TEST_WARN_TIMEOUT_S);
running_tests.insert(test.desc.clone(), timeout);
callback(TeWait(test.desc.clone()))?; //here no pad
run_test(opts, !opts.run_tests, test, tx.clone());
// Wait for a result, emitting TeTimeout warnings for stragglers.
if let Some(timeout) = calc_timeout(&running_tests) {
res = rx.recv_timeout(timeout);
for test in get_timed_out_tests(&mut running_tests) {
callback(TeTimeout(test))?;
if res != Err(RecvTimeoutError::Timeout) {
res = rx.recv().map_err(|_| RecvTimeoutError::Disconnected);
let (desc, result, stdout) = res.unwrap();
running_tests.remove(&desc);
callback(TeResult(desc, result, stdout))?;
if opts.bench_benchmarks {
// All benchmarks run at the end, in serial.
for b in filtered_benchs {
callback(TeWait(b.desc.clone()))?;
run_test(opts, false, b, tx.clone());
let (test, result, stdout) = rx.recv().unwrap();
callback(TeResult(test, result, stdout))?;
// Default worker-thread count: RUST_TEST_THREADS when set to a positive
// integer, otherwise the per-platform num_cpus() below.
// NOTE(review): extraction gaps — match arms, unsafe blocks, sysctl
// calls and closing braces are missing from this fragment; tokens kept as-is.
#[allow(deprecated)]
fn get_concurrency() -> usize {
return match env::var("RUST_TEST_THREADS") {
let opt_n: Option<usize> = s.parse().ok();
Some(n) if n > 0 => n,
"RUST_TEST_THREADS is `{}`, should be a positive integer.",
Err(..) => num_cpus(),
// Windows variant: read dwNumberOfProcessors from GetSystemInfo.
#[allow(nonstandard_style)]
fn num_cpus() -> usize {
struct SYSTEM_INFO {
wProcessorArchitecture: u16,
lpMinimumApplicationAddress: *mut u8,
lpMaximumApplicationAddress: *mut u8,
dwActiveProcessorMask: *mut u8,
dwNumberOfProcessors: u32,
dwProcessorType: u32,
dwAllocationGranularity: u32,
wProcessorLevel: u16,
wProcessorRevision: u16,
fn GetSystemInfo(info: *mut SYSTEM_INFO) -> i32;
let mut sysinfo = std::mem::zeroed();
GetSystemInfo(&mut sysinfo);
sysinfo.dwNumberOfProcessors as usize
#[cfg(target_os = "redox")]
fn num_cpus() -> usize {
// FIXME: Implement num_cpus on Redox
#[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))]
fn num_cpus() -> usize {
// POSIX variant: sysconf(_SC_NPROCESSORS_ONLN).
#[cfg(any(target_os = "android", target_os = "cloudabi", target_os = "emscripten",
target_os = "fuchsia", target_os = "ios", target_os = "linux",
target_os = "macos", target_os = "solaris"))]
fn num_cpus() -> usize {
unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as usize }
// BSD variant: sysconf with a sysctl(HW_NCPU) fallback.
#[cfg(any(target_os = "freebsd", target_os = "dragonfly", target_os = "bitrig",
target_os = "netbsd"))]
fn num_cpus() -> usize {
let mut cpus: libc::c_uint = 0;
let mut cpus_size = std::mem::size_of_val(&cpus);
cpus = libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as libc::c_uint;
let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
&mut cpus as *mut _ as *mut _,
&mut cpus_size as *mut _ as *mut _,
// OpenBSD variant: sysctl(HW_NCPU) only.
#[cfg(target_os = "openbsd")]
fn num_cpus() -> usize {
let mut cpus: libc::c_uint = 0;
let mut cpus_size = std::mem::size_of_val(&cpus);
let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
&mut cpus as *mut _ as *mut _,
&mut cpus_size as *mut _ as *mut _,
#[cfg(target_os = "haiku")]
fn num_cpus() -> usize {
#[cfg(target_os = "l4re")]
fn num_cpus() -> usize {
// Applies name filter, skip filters and the run_ignored policy, then
// sorts the surviving tests by name.
// NOTE(review): extraction gaps — closure/match closers and the final
// `filtered` return are missing from this fragment; tokens kept as-is.
pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
let mut filtered = tests;
// Exact match vs. substring match, per --exact.
let matches_filter = |test: &TestDescAndFn, filter: &str| {
let test_name = test.desc.name.as_slice();
match opts.filter_exact {
true => test_name == filter,
false => test_name.contains(filter),
// Remove tests that don't match the test filter
if let Some(ref filter) = opts.filter {
filtered.retain(|test| matches_filter(test, filter));
// Skip tests that match any of the skip filters
filtered.retain(|test| {
!opts.skip.iter().any(|sf| matches_filter(test, sf))
// maybe unignore tests
match opts.run_ignored {
RunIgnored::Yes => {
filtered.iter_mut().for_each(|test| test.desc.ignore = false);
RunIgnored::Only => {
// Keep only ignored tests, then clear their ignore flag so they run.
filtered.retain(|test| test.desc.ignore);
filtered.iter_mut().for_each(|test| test.desc.ignore = false);
RunIgnored::No => {}
// Sort the tests alphabetically
filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));
pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
// convert benchmarks to tests, if we're not benchmarking them
let testfn = match x.testfn {
// Each bench becomes a test that runs the bench body exactly once.
DynBenchFn(bench) => DynTestFn(Box::new(move || {
bench::run_once(|b| __rust_begin_short_backtrace(|| bench.run(b)))
StaticBenchFn(benchfn) => DynTestFn(Box::new(move || {
bench::run_once(|b| __rust_begin_short_backtrace(|| benchfn(b)))
// NOTE(review): extraction gaps — this fragment starts mid-signature of
// run_test and is missing fn headers, match scaffolding and closing
// braces throughout; tokens kept as-is.
test: TestDescAndFn,
monitor_ch: Sender<MonitorMsg>,
let TestDescAndFn { desc, testfn } = test;
// wasm (non-emscripten) compiles with panic=abort, so should_panic
// tests cannot be observed — report them as ignored instead.
let ignore_because_panic_abort = cfg!(target_arch = "wasm32") && !cfg!(target_os = "emscripten")
&& desc.should_panic != ShouldPanic::No;
if force_ignore || desc.ignore || ignore_because_panic_abort {
monitor_ch.send((desc, TrIgnored, Vec::new())).unwrap();
monitor_ch: Sender<MonitorMsg>,
testfn: Box<dyn FnBox() + Send>,
// Buffer for capturing standard I/O
let data = Arc::new(Mutex::new(Vec::new()));
let data2 = data.clone();
let name = desc.name.clone();
let runtest = move || {
// Redirect print/panic output into the capture buffer unless --nocapture.
let oldio = if !nocapture {
io::set_print(Some(Box::new(Sink(data2.clone())))),
io::set_panic(Some(Box::new(Sink(data2)))),
// A panicking test is caught here rather than tearing down the runner.
let result = catch_unwind(AssertUnwindSafe(testfn));
if let Some((printio, panicio)) = oldio {
// Restore the previous stdio hooks.
io::set_print(printio);
io::set_panic(panicio);
let test_result = calc_result(&desc, result);
let stdout = data.lock().unwrap().to_vec();
.send((desc.clone(), test_result, stdout))
// If the platform is single-threaded we're just going to run
// the test synchronously, regardless of the concurrency
let supports_threads = !cfg!(target_os = "emscripten") && !cfg!(target_arch = "wasm32");
if supports_threads {
// Name the worker thread after the test for better panic messages.
let cfg = thread::Builder::new().name(name.as_slice().to_owned());
cfg.spawn(runtest).unwrap();
DynBenchFn(bencher) => {
::bench::benchmark(desc, monitor_ch, opts.nocapture, |harness| {
bencher.run(harness)
StaticBenchFn(benchfn) => {
::bench::benchmark(desc, monitor_ch, opts.nocapture, |harness| {
(benchfn.clone())(harness)
let cb = move || __rust_begin_short_backtrace(f);
run_test_inner(desc, monitor_ch, opts.nocapture, Box::new(cb))
StaticTestFn(f) => run_test_inner(
Box::new(move || __rust_begin_short_backtrace(f)),
/// Fixed frame used to clean the backtrace with `RUST_BACKTRACE=1`.
fn __rust_begin_short_backtrace<F: FnOnce()>(f: F) {
// Maps a caught unwind result plus the test's expectations to a TestResult.
fn calc_result(desc: &TestDesc, task_result: Result<(), Box<dyn Any + Send>>) -> TestResult {
match (&desc.should_panic, task_result) {
(&ShouldPanic::No, Ok(())) | (&ShouldPanic::Yes, Err(_)) => TrOk,
(&ShouldPanic::YesWithMessage(msg), Err(ref err)) => {
// Panic payloads are String (from format!) or &'static str literals.
if err.downcast_ref::<String>()
.or_else(|| err.downcast_ref::<&'static str>().map(|e| *e))
.map(|e| e.contains(msg))
if desc.allow_fail {
TrFailedMsg(format!("Panic did not include expected string '{}'", msg))
_ if desc.allow_fail => TrAllowedFail,
// Named benchmark metrics keyed by name; BTreeMap keeps output ordering stable.
1506 #[derive(Clone, PartialEq)]
1507 pub struct MetricMap(BTreeMap<String, Metric>);
// Creates an empty metric map.
1510 pub fn new() -> MetricMap {
1511 MetricMap(BTreeMap::new())
1514 /// Insert a named `value` (+/- `noise`) metric into the map. The value
1515 /// must be non-negative. The `noise` indicates the uncertainty of the
1516 /// metric, which doubles as the "noise range" of acceptable
1517 /// pairwise-regressions on this named value, when comparing from one
1518 /// metric to the next using `compare_to_old`.
1520 /// If `noise` is positive, then it means this metric is of a value
1521 /// you want to see grow smaller, so a change larger than `noise` in the
1522 /// positive direction represents a regression.
1524 /// If `noise` is negative, then it means this metric is of a value
1525 /// you want to see grow larger, so a change larger than `noise` in the
1526 /// negative direction represents a regression.
1527 pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
1528 let m = Metric { value, noise };
// Overwrites any existing metric with the same name.
1529 self.0.insert(name.to_owned(), m);
// Renders all metrics as "name: value (+/- noise)" entries for display.
1532 pub fn fmt_metrics(&self) -> String {
1535 .map(|(k, v)| format!("{}: {} (+/- {})", *k, v.value, v.noise))
1536 .collect::<Vec<_>>();
1543 /// A function that is opaque to the optimizer, to allow benchmarks to
1544 /// pretend to use outputs to assist in avoiding dead-code
1547 /// This function is a no-op, and does not even read from `dummy`.
1548 #[cfg(not(any(target_arch = "asmjs", target_arch = "wasm32")))]
1549 pub fn black_box<T>(dummy: T) -> T {
1550 // we need to "use" the argument in some way LLVM can't
// Empty inline asm with `dummy`'s address as an input defeats dead-code
// elimination without executing any instructions.
1552 unsafe { asm!("" : : "r"(&dummy)) }
// Fallback for targets without inline-asm support; body not visible in
// this extract (presumably just returns `dummy`).
1555 #[cfg(any(target_arch = "asmjs", target_arch = "wasm32"))]
1557 pub fn black_box<T>(dummy: T) -> T {
1562 /// Callback for benchmark functions to run in their body.
1563 pub fn iter<T, F>(&mut self, mut inner: F)
// Single mode: run the body exactly once, without collecting statistics.
1567 if self.mode == BenchMode::Single {
1568 ns_iter_inner(&mut inner, 1);
// Auto mode: run the adaptive sampling loop and record its summary.
1572 self.summary = Some(iter(&mut inner));
// Runs the benchmark function and returns the summary recorded by iter(),
// or None if the benchmark never called iter().
1575 pub fn bench<F>(&mut self, mut f: F) -> Option<stats::Summary>
1577 F: FnMut(&mut Bencher),
1580 return self.summary;
// Converts a Duration to whole nanoseconds (may overflow u64 for very
// long durations; benchmark runs are bounded well below that).
1584 fn ns_from_dur(dur: Duration) -> u64 {
1585 dur.as_secs() * 1_000_000_000 + (dur.subsec_nanos() as u64)
// Times `k` invocations of the benchmark body and returns the total
// elapsed nanoseconds. Loop body lines are missing from this extract.
1588 fn ns_iter_inner<T, F>(inner: &mut F, k: u64) -> u64
1592 let start = Instant::now();
1596 return ns_from_dur(start.elapsed());
// Fragment of the adaptive benchmark driver: calibrates an iteration count,
// then repeatedly takes 50 winsorized samples at n and 5*n iterations until
// the medians converge or a 3s budget is exhausted.
1599 pub fn iter<T, F>(inner: &mut F) -> stats::Summary
1603 // Initial bench run to get ballpark figure.
1604 let ns_single = ns_iter_inner(inner, 1);
1606 // Try to estimate iter count for 1ms falling back to 1m
1607 // iterations if first run took < 1ns.
1608 let ns_target_total = 1_000_000; // 1ms
// max(1, ..) guards against division by zero when the single run took 0ns.
1609 let mut n = ns_target_total / cmp::max(1, ns_single);
1611 // if the first run took more than 1ms we don't want to just
1612 // be left doing 0 iterations on every loop. The unfortunate
1613 // side effect of not being able to do as many runs is
1614 // automatically handled by the statistical analysis below
1615 // (i.e. larger error bars).
1618 let mut total_run = Duration::new(0, 0);
1619 let samples: &mut [f64] = &mut [0.0_f64; 50];
1621 let loop_start = Instant::now();
// First sample pass: per-iteration time at n iterations per sample.
1623 for p in &mut *samples {
1624 *p = ns_iter_inner(inner, n) as f64 / n as f64;
// Clamp outliers beyond the 5th/95th percentiles before summarizing.
1627 stats::winsorize(samples, 5.0);
1628 let summ = stats::Summary::new(samples);
// Second pass at 5*n iterations, used to test for convergence below.
1630 for p in &mut *samples {
1631 let ns = ns_iter_inner(inner, 5 * n);
1632 *p = ns as f64 / (5 * n) as f64;
1635 stats::winsorize(samples, 5.0);
1636 let summ5 = stats::Summary::new(samples);
1638 let loop_run = loop_start.elapsed();
1640 // If we've run for 100ms and seem to have converged to a
// Converged: low relative deviation and the two medians agree to within
// the larger run's median absolute deviation.
1642 if loop_run > Duration::from_millis(100) && summ.median_abs_dev_pct < 1.0
1643 && summ.median - summ5.median < summ5.median_abs_dev
1648 total_run = total_run + loop_run;
1649 // Longest we ever run for is 3s.
1650 if total_run > Duration::from_secs(3) {
1654 // If we overflow here just return the results so far. We check a
1655 // multiplier of 10 because we're about to multiply by 2 and the
1656 // next iteration of the loop will also multiply by 5 (to calculate
1657 // the summ5 result)
1658 n = match n.checked_mul(10) {
1668 use std::panic::{catch_unwind, AssertUnwindSafe};
1671 use std::sync::{Arc, Mutex};
1673 use super::{BenchMode, BenchSamples, Bencher, MonitorMsg, Sender, Sink, TestDesc, TestResult};
// Fragment of `bench::benchmark`: runs one benchmark with output capture and
// panic isolation, then reports a TestResult over the monitor channel.
1675 pub fn benchmark<F>(desc: TestDesc, monitor_ch: Sender<MonitorMsg>, nocapture: bool, f: F)
1677 F: FnMut(&mut Bencher),
1679 let mut bs = Bencher {
1680 mode: BenchMode::Auto,
// Shared buffer for captured stdout/panic output (same scheme as
// run_test_inner above).
1685 let data = Arc::new(Mutex::new(Vec::new()));
1686 let data2 = data.clone();
1688 let oldio = if !nocapture {
1690 io::set_print(Some(Box::new(Sink(data2.clone())))),
1691 io::set_panic(Some(Box::new(Sink(data2)))),
// Run the benchmark, converting any panic into an Err.
1697 let result = catch_unwind(AssertUnwindSafe(|| bs.bench(f)));
1699 if let Some((printio, panicio)) = oldio {
1700 io::set_print(printio);
1701 io::set_panic(panicio);
1704 let test_result = match result {
// Bencher::iter was called: build samples from the recorded summary.
1706 Ok(Some(ns_iter_summ)) => {
// Clamp to >= 1ns so the MB/s division below cannot divide by zero.
1707 let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
1708 let mb_s = bs.bytes * 1000 / ns_iter;
1710 let bs = BenchSamples {
1712 mb_s: mb_s as usize,
1714 TestResult::TrBench(bs)
1717 // iter not called, so no data.
1718 // FIXME: error in this case?
// Synthesize a single zero sample so downstream formatting still works.
1719 let samples: &mut [f64] = &mut [0.0_f64; 1];
1720 let bs = BenchSamples {
1721 ns_iter_summ: stats::Summary::new(samples),
1724 TestResult::TrBench(bs)
// A panicking benchmark is a failure.
1726 Err(_) => TestResult::TrFailed,
1729 let stdout = data.lock().unwrap().to_vec();
1730 monitor_ch.send((desc, test_result, stdout)).unwrap();
// Fragment of `run_once`: runs a benchmark body exactly one time
// (BenchMode::Single), used by `--bench` test runs that only need to
// check the benchmark executes. Remainder of the body not visible here.
1733 pub fn run_once<F>(f: F)
1735 F: FnMut(&mut Bencher),
1737 let mut bs = Bencher {
1738 mode: BenchMode::Single,
1748 use test::{filter_tests, parse_opts, run_test, DynTestFn, DynTestName, MetricMap, RunIgnored,
1749 ShouldPanic, StaticTestName, TestDesc, TestDescAndFn, TestOpts, TrFailed,
1750 TrFailedMsg, TrIgnored, TrOk};
1751 use std::sync::mpsc::channel;
// Test fixture fragment: builds two trivial tests, one marked ignored
// (field lines missing in this extract) and one not, named "1" and "2".
1756 fn one_ignored_one_unignored_test() -> Vec<TestDescAndFn> {
1760 name: StaticTestName("1"),
1762 should_panic: ShouldPanic::No,
1765 testfn: DynTestFn(Box::new(move || {})),
1769 name: StaticTestName("2"),
1771 should_panic: ShouldPanic::No,
1774 testfn: DynTestFn(Box::new(move || {})),
// Fragments of three run_test unit tests; interior lines (test bodies,
// ignore flags, closing braces) are missing from this extract.
// An ignored test must not report TrOk when run normally.
1780 pub fn do_not_run_ignored_tests() {
1784 let desc = TestDescAndFn {
1786 name: StaticTestName("whatever"),
1788 should_panic: ShouldPanic::No,
1791 testfn: DynTestFn(Box::new(f)),
1793 let (tx, rx) = channel();
1794 run_test(&TestOpts::new(), false, desc, tx);
1795 let (_, res, _) = rx.recv().unwrap();
1796 assert!(res != TrOk);
// An ignored test must be reported as TrIgnored.
1800 pub fn ignored_tests_result_in_ignored() {
1802 let desc = TestDescAndFn {
1804 name: StaticTestName("whatever"),
1806 should_panic: ShouldPanic::No,
1809 testfn: DynTestFn(Box::new(f)),
1811 let (tx, rx) = channel();
1812 run_test(&TestOpts::new(), false, desc, tx);
1813 let (_, res, _) = rx.recv().unwrap();
1814 assert!(res == TrIgnored);
// A should_panic test whose body panics must be reported as TrOk.
1818 fn test_should_panic() {
1822 let desc = TestDescAndFn {
1824 name: StaticTestName("whatever"),
1826 should_panic: ShouldPanic::Yes,
1829 testfn: DynTestFn(Box::new(f)),
1831 let (tx, rx) = channel();
1832 run_test(&TestOpts::new(), false, desc, tx);
1833 let (_, res, _) = rx.recv().unwrap();
1834 assert!(res == TrOk);
// Fragments of three should_panic unit tests (interior lines missing).
// Expected-message substring matches the panic text -> TrOk.
1838 fn test_should_panic_good_message() {
1840 panic!("an error message");
1842 let desc = TestDescAndFn {
1844 name: StaticTestName("whatever"),
1846 should_panic: ShouldPanic::YesWithMessage("error message"),
1849 testfn: DynTestFn(Box::new(f)),
1851 let (tx, rx) = channel();
1852 run_test(&TestOpts::new(), false, desc, tx);
1853 let (_, res, _) = rx.recv().unwrap();
1854 assert!(res == TrOk);
// Expected-message substring does NOT match -> TrFailedMsg with the
// diagnostic produced by calc_result.
1858 fn test_should_panic_bad_message() {
1860 panic!("an error message");
1862 let expected = "foobar";
1863 let failed_msg = "Panic did not include expected string";
1864 let desc = TestDescAndFn {
1866 name: StaticTestName("whatever"),
1868 should_panic: ShouldPanic::YesWithMessage(expected),
1871 testfn: DynTestFn(Box::new(f)),
1873 let (tx, rx) = channel();
1874 run_test(&TestOpts::new(), false, desc, tx);
1875 let (_, res, _) = rx.recv().unwrap();
1876 assert!(res == TrFailedMsg(format!("{} '{}'", failed_msg, expected)));
// A should_panic test that returns normally must be reported as TrFailed.
1880 fn test_should_panic_but_succeeds() {
1882 let desc = TestDescAndFn {
1884 name: StaticTestName("whatever"),
1886 should_panic: ShouldPanic::Yes,
1889 testfn: DynTestFn(Box::new(f)),
1891 let (tx, rx) = channel();
1892 run_test(&TestOpts::new(), false, desc, tx);
1893 let (_, res, _) = rx.recv().unwrap();
1894 assert!(res == TrFailed);
// Fragments of CLI-flag parsing tests (arg-vec construction lines missing).
// --ignored maps to RunIgnored::Only.
1898 fn parse_ignored_flag() {
1900 "progname".to_string(),
1901 "filter".to_string(),
1902 "--ignored".to_string(),
1904 let opts = parse_opts(&args).unwrap().unwrap();
1905 assert_eq!(opts.run_ignored, RunIgnored::Only);
// --include-ignored (behind -Zunstable-options) maps to RunIgnored::Yes.
1909 fn parse_include_ignored_flag() {
1911 "progname".to_string(),
1912 "filter".to_string(),
1913 "-Zunstable-options".to_string(),
1914 "--include-ignored".to_string(),
1916 let opts = parse_opts(&args).unwrap().unwrap();
1917 assert_eq!(opts.run_ignored, RunIgnored::Yes);
// filter_tests behavior for the two run_ignored modes, using the
// one-ignored/one-unignored fixture above.
1921 pub fn filter_for_ignored_option() {
1922 // When we run ignored tests the test filter should filter out all the
1923 // unignored tests and flip the ignore flag on the rest to false
1925 let mut opts = TestOpts::new();
1926 opts.run_tests = true;
1927 opts.run_ignored = RunIgnored::Only;
1929 let tests = one_ignored_one_unignored_test();
1930 let filtered = filter_tests(&opts, tests);
// Only the ignored test ("1") survives, and its ignore flag is cleared.
1932 assert_eq!(filtered.len(), 1);
1933 assert_eq!(filtered[0].desc.name.to_string(), "1");
1934 assert!(!filtered[0].desc.ignore);
1938 pub fn run_include_ignored_option() {
1939 // When we "--include-ignored" tests, the ignore flag should be set to false on
1940 // all tests and no test filtered out
1942 let mut opts = TestOpts::new();
1943 opts.run_tests = true;
1944 opts.run_ignored = RunIgnored::Yes;
1946 let tests = one_ignored_one_unignored_test();
1947 let filtered = filter_tests(&opts, tests);
// Both tests remain and neither is marked ignored any more.
1949 assert_eq!(filtered.len(), 2);
1950 assert!(!filtered[0].desc.ignore);
1951 assert!(!filtered[1].desc.ignore);
// Fragment: substring filtering vs exact-match filtering over four test
// names. The TestOpts construction lines (including the flag that switches
// to exact matching) are missing from this extract.
1955 pub fn exact_filter_match() {
1956 fn tests() -> Vec<TestDescAndFn> {
1957 vec!["base", "base::test", "base::test1", "base::test2"]
1959 .map(|name| TestDescAndFn {
1961 name: StaticTestName(name),
1963 should_panic: ShouldPanic::No,
1966 testfn: DynTestFn(Box::new(move || {})),
// Substring mode: "base" and "bas" match all four names.
1971 let substr = filter_tests(
1973 filter: Some("base".into()),
1978 assert_eq!(substr.len(), 4);
1980 let substr = filter_tests(
1982 filter: Some("bas".into()),
1987 assert_eq!(substr.len(), 4);
// "::test" and "base::test" match the three names containing that infix.
1989 let substr = filter_tests(
1991 filter: Some("::test".into()),
1996 assert_eq!(substr.len(), 3);
1998 let substr = filter_tests(
2000 filter: Some("base::test".into()),
2005 assert_eq!(substr.len(), 3);
// Exact mode: only a whole-name match counts.
2007 let exact = filter_tests(
2009 filter: Some("base".into()),
2015 assert_eq!(exact.len(), 1);
2017 let exact = filter_tests(
2019 filter: Some("bas".into()),
2025 assert_eq!(exact.len(), 0);
2027 let exact = filter_tests(
2029 filter: Some("::test".into()),
2035 assert_eq!(exact.len(), 0);
2037 let exact = filter_tests(
2039 filter: Some("base::test".into()),
2045 assert_eq!(exact.len(), 1);
// Fragment: filter_tests must return tests sorted by name. The shuffled
// input list is compared against the expected lexicographic order.
2049 pub fn sort_tests() {
2050 let mut opts = TestOpts::new();
2051 opts.run_tests = true;
// Input names in deliberately unsorted order (vec! opening line missing).
2054 "sha1::test".to_string(),
2055 "isize::test_to_str".to_string(),
2056 "isize::test_pow".to_string(),
2057 "test::do_not_run_ignored_tests".to_string(),
2058 "test::ignored_tests_result_in_ignored".to_string(),
2059 "test::first_free_arg_should_be_a_filter".to_string(),
2060 "test::parse_ignored_flag".to_string(),
2061 "test::parse_include_ignored_flag".to_string(),
2062 "test::filter_for_ignored_option".to_string(),
2063 "test::run_include_ignored_option".to_string(),
2064 "test::sort_tests".to_string(),
// Wrap each name in a trivial TestDescAndFn.
2068 let mut tests = Vec::new();
2069 for name in &names {
2070 let test = TestDescAndFn {
2072 name: DynTestName((*name).clone()),
2074 should_panic: ShouldPanic::No,
2077 testfn: DynTestFn(Box::new(testfn)),
2083 let filtered = filter_tests(&opts, tests);
// Same names, lexicographically sorted.
2085 let expected = vec![
2086 "isize::test_pow".to_string(),
2087 "isize::test_to_str".to_string(),
2088 "sha1::test".to_string(),
2089 "test::do_not_run_ignored_tests".to_string(),
2090 "test::filter_for_ignored_option".to_string(),
2091 "test::first_free_arg_should_be_a_filter".to_string(),
2092 "test::ignored_tests_result_in_ignored".to_string(),
2093 "test::parse_ignored_flag".to_string(),
2094 "test::parse_include_ignored_flag".to_string(),
2095 "test::run_include_ignored_option".to_string(),
2096 "test::sort_tests".to_string(),
2099 for (a, b) in expected.iter().zip(filtered) {
2100 assert!(*a == b.desc.name.to_string());
// Fragment: builds two MetricMaps covering the noise/regression cases named
// by the metric keys. The assertions that compare m1 and m2 are missing
// from this extract.
2105 pub fn test_metricmap_compare() {
2106 let mut m1 = MetricMap::new();
2107 let mut m2 = MetricMap::new();
// Change (1000 -> 1100) within the +/-200 noise band.
2108 m1.insert_metric("in-both-noise", 1000.0, 200.0);
2109 m2.insert_metric("in-both-noise", 1100.0, 200.0);
// Metrics present in only one of the two maps.
2111 m1.insert_metric("in-first-noise", 1000.0, 2.0);
2112 m2.insert_metric("in-second-noise", 1000.0, 2.0);
// Positive noise = smaller-is-better (see insert_metric docs).
2114 m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
2115 m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);
2117 m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
2118 m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);
// Negative noise = larger-is-better.
2120 m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
2121 m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);
2123 m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
2124 m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);
// Fragments of four bench-harness tests (bodies and assertions largely
// missing): run_once and ::bench::benchmark with and without the benchmark
// calling Bencher::iter.
2128 pub fn test_bench_once_no_iter() {
2129 fn f(_: &mut Bencher) {}
2134 pub fn test_bench_once_iter() {
2135 fn f(b: &mut Bencher) {
// Full benchmark pipeline with a body that never calls iter(); result is
// reported over the channel (nocapture = true).
2142 pub fn test_bench_no_iter() {
2143 fn f(_: &mut Bencher) {}
2145 let (tx, rx) = channel();
2147 let desc = TestDesc {
2148 name: StaticTestName("f"),
2150 should_panic: ShouldPanic::No,
2154 ::bench::benchmark(desc, tx, true, f);
// Same pipeline with a body that does call iter().
2159 pub fn test_bench_iter() {
2160 fn f(b: &mut Bencher) {
2164 let (tx, rx) = channel();
2166 let desc = TestDesc {
2167 name: StaticTestName("f"),
2169 should_panic: ShouldPanic::No,
2173 ::bench::benchmark(desc, tx, true, f);