1 // Copyright 2012-2017 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
//! Support code for rustc's built-in unit-test and micro-benchmarking framework.
14 //! Almost all user code will only be interested in `Bencher` and
15 //! `black_box`. All other interactions (such as writing tests and
16 //! benchmarks themselves) should be done via the `#[test]` and
17 //! `#[bench]` attributes.
19 //! See the [Testing Chapter](../book/first-edition/testing.html) of the book for more details.
21 // Currently, not much of this is meant for users. It is intended to
22 // support the simplest interface possible for representing and
23 // running tests while providing a base that other test frameworks may
26 // NB: this is also specified in this crate's Cargo.toml, but libsyntax contains logic specific to
27 // this crate, which relies on this attribute (rather than the value of `--crate-name` passed by
28 // cargo) to detect this crate.
30 #![crate_name = "test"]
31 #![unstable(feature = "test", issue = "27812")]
32 #![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
33 html_favicon_url = "https://doc.rust-lang.org/favicon.ico",
34 html_root_url = "https://doc.rust-lang.org/nightly/", test(attr(deny(warnings))))]
37 #![cfg_attr(any(unix, target_os = "cloudabi"), feature(libc))]
39 #![feature(set_stdio)]
40 #![feature(panic_unwind)]
41 #![feature(staged_api)]
42 #![feature(termination_trait_lib)]
46 #[cfg(any(unix, target_os = "cloudabi"))]
50 // FIXME(#54291): rustc and/or LLVM don't yet support building with panic-unwind
51 // on aarch64-pc-windows-msvc, so we don't link libtest against
52 // libunwind (for the time being), even though it means that
53 // libtest won't be fully functional on this platform.
55 // See also: https://github.com/rust-lang/rust/issues/54190#issuecomment-422904437
56 #[cfg(not(all(windows, target_arch = "aarch64")))]
57 extern crate panic_unwind;
59 pub use self::TestFn::*;
60 pub use self::ColorConfig::*;
61 pub use self::TestResult::*;
62 pub use self::TestName::*;
63 use self::TestEvent::*;
64 use self::NamePadding::*;
65 use self::OutputLocation::*;
67 use std::panic::{catch_unwind, AssertUnwindSafe};
69 use std::boxed::FnBox;
71 use std::collections::BTreeMap;
75 use std::io::prelude::*;
77 use std::path::PathBuf;
78 use std::process::Termination;
79 use std::sync::mpsc::{channel, Sender};
80 use std::sync::{Arc, Mutex};
82 use std::time::{Duration, Instant};
86 const TEST_WARN_TIMEOUT_S: u64 = 60;
87 const QUIET_MODE_MAX_COLUMN: usize = 100; // insert a '\n' after 100 tests in quiet mode
89 // to be used by rustc to compile tests in libtest
91 pub use {assert_test_result, filter_tests, parse_opts, run_test, test_main, test_main_static,
92 Bencher, DynTestFn, DynTestName, Metric, MetricMap, Options, ShouldPanic,
93 StaticBenchFn, StaticTestFn, StaticTestName, TestDesc, TestDescAndFn, TestName,
94 TestOpts, TestResult, TrFailed, TrFailedMsg, TrIgnored, TrOk};
100 use formatters::{JsonFormatter, OutputFormatter, PrettyFormatter, TerseFormatter};
102 // The name of a test. By convention this follows the rules for rust
103 // paths; i.e. it should be a series of identifiers separated by double
104 // colons. This way if some test runner wants to arrange the tests
105 // hierarchically it may.
107 #[derive(Clone, PartialEq, Eq, Hash, Debug)]
109 StaticTestName(&'static str),
111 AlignedTestName(Cow<'static, str>, NamePadding),
114 fn as_slice(&self) -> &str {
116 StaticTestName(s) => s,
117 DynTestName(ref s) => s,
118 AlignedTestName(ref s, _) => &*s,
122 fn padding(&self) -> NamePadding {
124 &AlignedTestName(_, p) => p,
129 fn with_padding(&self, padding: NamePadding) -> TestName {
130 let name = match self {
131 &TestName::StaticTestName(name) => Cow::Borrowed(name),
132 &TestName::DynTestName(ref name) => Cow::Owned(name.clone()),
133 &TestName::AlignedTestName(ref name, _) => name.clone(),
136 TestName::AlignedTestName(name, padding)
139 impl fmt::Display for TestName {
140 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
141 fmt::Display::fmt(self.as_slice(), f)
145 #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
146 pub enum NamePadding {
152 fn padded_name(&self, column_count: usize, align: NamePadding) -> String {
153 let mut name = String::from(self.name.as_slice());
154 let fill = column_count.saturating_sub(name.len());
155 let pad = " ".repeat(fill);
166 /// Represents a benchmark function.
167 pub trait TDynBenchFn: Send {
168 fn run(&self, harness: &mut Bencher);
171 // A function that runs a test. If the function returns successfully,
172 // the test succeeds; if the function panics then the test fails. We
173 // may need to come up with a more clever definition of test in order
174 // to support isolation of tests into threads.
177 StaticBenchFn(fn(&mut Bencher)),
178 DynTestFn(Box<dyn FnBox() + Send>),
179 DynBenchFn(Box<dyn TDynBenchFn + 'static>),
183 fn padding(&self) -> NamePadding {
185 StaticTestFn(..) => PadNone,
186 StaticBenchFn(..) => PadOnRight,
187 DynTestFn(..) => PadNone,
188 DynBenchFn(..) => PadOnRight,
193 impl fmt::Debug for TestFn {
194 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
195 f.write_str(match *self {
196 StaticTestFn(..) => "StaticTestFn(..)",
197 StaticBenchFn(..) => "StaticBenchFn(..)",
198 DynTestFn(..) => "DynTestFn(..)",
199 DynBenchFn(..) => "DynBenchFn(..)",
204 /// Manager of the benchmarking runs.
206 /// This is fed into functions marked with `#[bench]` to allow for
207 /// set-up & tear-down before running a piece of code repeatedly via a
212 summary: Option<stats::Summary>,
216 #[derive(Clone, PartialEq, Eq)]
222 #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
223 pub enum ShouldPanic {
226 YesWithMessage(&'static str),
229 // The definition of a single test. A test runner will run a list of
231 #[derive(Clone, Debug, PartialEq, Eq, Hash)]
232 pub struct TestDesc {
235 pub should_panic: ShouldPanic,
236 pub allow_fail: bool,
240 pub struct TestDescAndFn {
245 #[derive(Clone, PartialEq, Debug, Copy)]
252 pub fn new(value: f64, noise: f64) -> Metric {
253 Metric { value, noise }
257 /// In case we want to add other options as well, just add them in this struct.
258 #[derive(Copy, Clone, Debug)]
260 display_output: bool,
264 pub fn new() -> Options {
266 display_output: false,
270 pub fn display_output(mut self, display_output: bool) -> Options {
271 self.display_output = display_output;
276 // The default console test runner. It accepts the command line
277 // arguments and a vector of test_descs.
278 pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>, options: Options) {
279 let mut opts = match parse_opts(args) {
282 eprintln!("error: {}", msg);
288 opts.options = options;
290 if let Err(e) = list_tests_console(&opts, tests) {
291 eprintln!("error: io error when listing tests: {:?}", e);
295 match run_tests_console(&opts, tests) {
297 Ok(false) => process::exit(101),
299 eprintln!("error: io error when listing tests: {:?}", e);
306 // A variant optimized for invocation with a static test vector.
307 // This will panic (intentionally) when fed any dynamic tests, because
308 // it is copying the static values out into a dynamic vector and cannot
309 // copy dynamic values. It is doing this because from this point on
310 // a Vec<TestDescAndFn> is used in order to effect ownership-transfer
311 // semantics into parallel test runners, which in turn requires a Vec<>
312 // rather than a &[].
313 pub fn test_main_static(tests: &[&TestDescAndFn]) {
314 let args = env::args().collect::<Vec<_>>();
315 let owned_tests = tests
317 .map(|t| match t.testfn {
318 StaticTestFn(f) => TestDescAndFn {
319 testfn: StaticTestFn(f),
320 desc: t.desc.clone(),
322 StaticBenchFn(f) => TestDescAndFn {
323 testfn: StaticBenchFn(f),
324 desc: t.desc.clone(),
326 _ => panic!("non-static tests passed to test::test_main_static"),
329 test_main(&args, owned_tests, Options::new())
332 /// Invoked when unit tests terminate. Should panic if the unit
333 /// test is considered a failure. By default, invokes `report()`
334 /// and checks for a `0` result.
335 pub fn assert_test_result<T: Termination>(result: T) {
336 let code = result.report();
340 "the test returned a termination value with a non-zero status code ({}) \
341 which indicates a failure",
346 #[derive(Copy, Clone, Debug)]
347 pub enum ColorConfig {
353 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
354 pub enum OutputFormat {
361 pub struct TestOpts {
363 pub filter: Option<String>,
364 pub filter_exact: bool,
365 pub run_ignored: bool,
367 pub bench_benchmarks: bool,
368 pub logfile: Option<PathBuf>,
370 pub color: ColorConfig,
371 pub format: OutputFormat,
372 pub test_threads: Option<usize>,
373 pub skip: Vec<String>,
374 pub options: Options,
379 fn new() -> TestOpts {
386 bench_benchmarks: false,
390 format: OutputFormat::Pretty,
393 options: Options::new(),
398 /// Result of parsing the options.
399 pub type OptRes = Result<TestOpts, String>;
401 fn optgroups() -> getopts::Options {
402 let mut opts = getopts::Options::new();
403 opts.optflag("", "ignored", "Run ignored tests")
404 .optflag("", "test", "Run tests and not benchmarks")
405 .optflag("", "bench", "Run benchmarks instead of tests")
406 .optflag("", "list", "List all tests and benchmarks")
407 .optflag("h", "help", "Display this message (longer with --help)")
411 "Write logs to the specified file instead \
418 "don't capture stdout/stderr of each \
419 task, allow printing directly",
424 "Number of threads used for running tests \
431 "Skip tests whose names contain FILTER (this flag can \
432 be used multiple times)",
438 "Display one character per test instead of one line. \
439 Alias to --format=terse",
444 "Exactly match filters rather than by substring",
449 "Configure coloring of output:
450 auto = colorize if stdout is a tty and tests are run on serially (default);
451 always = always colorize output;
452 never = never colorize output;",
458 "Configure formatting of output:
459 pretty = Print verbose output;
460 terse = Display one character per test;
461 json = Output a json document",
467 "Enable nightly-only flags:
468 unstable-options = Allow use of experimental features",
474 fn usage(binary: &str, options: &getopts::Options) {
475 let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
479 The FILTER string is tested against the name of all tests, and only those
480 tests whose names contain the filter are run.
482 By default, all tests are run in parallel. This can be altered with the
483 --test-threads flag or the RUST_TEST_THREADS environment variable when running
486 All tests have their standard output and standard error captured by default.
487 This can be overridden with the --nocapture flag or setting RUST_TEST_NOCAPTURE
488 environment variable to a value other than "0". Logging is not captured by default.
492 #[test] - Indicates a function is a test to be run. This function
494 #[bench] - Indicates a function is a benchmark to be run. This
495 function takes one argument (test::Bencher).
496 #[should_panic] - This function (also labeled with #[test]) will only pass if
497 the code causes a panic (an assertion failure or panic!)
498 A message may be provided, which the failure string must
499 contain: #[should_panic(expected = "foo")].
500 #[ignore] - When applied to a function which is already attributed as a
501 test, then the test runner will ignore these tests during
502 normal test runs. Running with --ignored will run these
504 usage = options.usage(&message)
508 // FIXME: Copied from libsyntax until linkage errors are resolved. Issue #47566
// FIXME: Copied from libsyntax until linkage errors are resolved. Issue #47566
/// Whether unstable (nightly-only) features may be enabled: either this
/// is not a feature-staged (beta/stable) build, or bootstrapping
/// explicitly opted in via `RUSTC_BOOTSTRAP`.
fn is_nightly() -> bool {
    // Set at build time for beta/stable channels to stage features.
    let staged = option_env!("CFG_DISABLE_UNSTABLE_FEATURES").is_some();
    // Runtime opt-in used while bootstrapping the compiler itself.
    let bootstrap = env::var("RUSTC_BOOTSTRAP").is_ok();
    !staged || bootstrap
}
518 // Parses command line arguments into test options
519 pub fn parse_opts(args: &[String]) -> Option<OptRes> {
520 let mut allow_unstable = false;
521 let opts = optgroups();
522 let args = args.get(1..).unwrap_or(args);
523 let matches = match opts.parse(args) {
525 Err(f) => return Some(Err(f.to_string())),
528 if let Some(opt) = matches.opt_str("Z") {
531 "the option `Z` is only accepted on the nightly compiler".into(),
536 "unstable-options" => {
537 allow_unstable = true;
540 return Some(Err("Unrecognized option to `Z`".into()));
545 if matches.opt_present("h") {
546 usage(&args[0], &opts);
550 let filter = if !matches.free.is_empty() {
551 Some(matches.free[0].clone())
556 let run_ignored = matches.opt_present("ignored");
557 let quiet = matches.opt_present("quiet");
558 let exact = matches.opt_present("exact");
559 let list = matches.opt_present("list");
561 let logfile = matches.opt_str("logfile");
562 let logfile = logfile.map(|s| PathBuf::from(&s));
564 let bench_benchmarks = matches.opt_present("bench");
565 let run_tests = !bench_benchmarks || matches.opt_present("test");
567 let mut nocapture = matches.opt_present("nocapture");
569 nocapture = match env::var("RUST_TEST_NOCAPTURE") {
570 Ok(val) => &val != "0",
575 let test_threads = match matches.opt_str("test-threads") {
576 Some(n_str) => match n_str.parse::<usize>() {
577 Ok(0) => return Some(Err("argument for --test-threads must not be 0".to_string())),
580 return Some(Err(format!(
581 "argument for --test-threads must be a number > 0 \
590 let color = match matches.opt_str("color").as_ref().map(|s| &**s) {
591 Some("auto") | None => AutoColor,
592 Some("always") => AlwaysColor,
593 Some("never") => NeverColor,
596 return Some(Err(format!(
597 "argument for --color must be auto, always, or never (was \
604 let format = match matches.opt_str("format").as_ref().map(|s| &**s) {
605 None if quiet => OutputFormat::Terse,
606 Some("pretty") | None => OutputFormat::Pretty,
607 Some("terse") => OutputFormat::Terse,
611 "The \"json\" format is only accepted on the nightly compiler".into(),
618 return Some(Err(format!(
619 "argument for --format must be pretty, terse, or json (was \
626 let test_opts = TestOpts {
638 skip: matches.opt_strs("skip"),
639 options: Options::new(),
645 #[derive(Clone, PartialEq)]
646 pub struct BenchSamples {
647 ns_iter_summ: stats::Summary,
651 #[derive(Clone, PartialEq)]
652 pub enum TestResult {
658 TrBench(BenchSamples),
661 unsafe impl Send for TestResult {}
663 enum OutputLocation<T> {
664 Pretty(Box<term::StdoutTerminal>),
668 impl<T: Write> Write for OutputLocation<T> {
669 fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
671 Pretty(ref mut term) => term.write(buf),
672 Raw(ref mut stdout) => stdout.write(buf),
676 fn flush(&mut self) -> io::Result<()> {
678 Pretty(ref mut term) => term.flush(),
679 Raw(ref mut stdout) => stdout.flush(),
684 struct ConsoleTestState {
685 log_out: Option<File>,
694 failures: Vec<(TestDesc, Vec<u8>)>,
695 not_failures: Vec<(TestDesc, Vec<u8>)>,
699 impl ConsoleTestState {
700 pub fn new(opts: &TestOpts) -> io::Result<ConsoleTestState> {
701 let log_out = match opts.logfile {
702 Some(ref path) => Some(File::create(path)?),
706 Ok(ConsoleTestState {
715 metrics: MetricMap::new(),
716 failures: Vec::new(),
717 not_failures: Vec::new(),
718 options: opts.options,
722 pub fn write_log<S: AsRef<str>>(&mut self, msg: S) -> io::Result<()> {
723 let msg = msg.as_ref();
726 Some(ref mut o) => o.write_all(msg.as_bytes()),
730 pub fn write_log_result(&mut self, test: &TestDesc, result: &TestResult) -> io::Result<()> {
731 self.write_log(format!(
734 TrOk => "ok".to_owned(),
735 TrFailed => "failed".to_owned(),
736 TrFailedMsg(ref msg) => format!("failed: {}", msg),
737 TrIgnored => "ignored".to_owned(),
738 TrAllowedFail => "failed (allowed)".to_owned(),
739 TrBench(ref bs) => fmt_bench_samples(bs),
745 fn current_test_count(&self) -> usize {
746 self.passed + self.failed + self.ignored + self.measured + self.allowed_fail
750 // Format a number with thousands separators
// Format a number with thousands separators
/// Renders `n` in decimal with `sep` between each 3-digit group,
/// e.g. `1234567` with `','` becomes `"1,234,567"`.
/// Only handles values below 10^12 (the groups of [9, 6, 3, 0] below).
fn fmt_thousands_sep(mut n: usize, sep: char) -> String {
    use std::fmt::Write;
    let mut out = String::new();
    // Becomes true once the leading (most significant) group is emitted;
    // later groups must then be zero-padded to 3 digits.
    let mut emitted = false;
    for &pow in &[9, 6, 3, 0] {
        let base = 10_usize.pow(pow);
        let digits = n / base;
        if pow == 0 || emitted || digits != 0 {
            if emitted {
                write!(out, "{:03}", digits).unwrap();
            } else {
                write!(out, "{}", digits).unwrap();
            }
            if pow != 0 {
                out.push(sep);
            }
            emitted = true;
        }
        n %= base;
    }
    out
}
774 pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
776 let mut output = String::new();
778 let median = bs.ns_iter_summ.median as usize;
779 let deviation = (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize;
782 .write_fmt(format_args!(
783 "{:>11} ns/iter (+/- {})",
784 fmt_thousands_sep(median, ','),
785 fmt_thousands_sep(deviation, ',')
790 .write_fmt(format_args!(" = {} MB/s", bs.mb_s))
796 // List the tests to console, and optionally to logfile. Filters are honored.
797 pub fn list_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<()> {
798 let mut output = match term::stdout() {
799 None => Raw(io::stdout()),
800 Some(t) => Pretty(t),
803 let quiet = opts.format == OutputFormat::Terse;
804 let mut st = ConsoleTestState::new(opts)?;
809 for test in filter_tests(&opts, tests) {
813 desc: TestDesc { name, .. },
817 let fntype = match testfn {
818 StaticTestFn(..) | DynTestFn(..) => {
822 StaticBenchFn(..) | DynBenchFn(..) => {
828 writeln!(output, "{}: {}", name, fntype)?;
829 st.write_log(format!("{} {}\n", fntype, name))?;
/// Naive English pluralization: "1 test" vs "2 tests".
fn plural(count: u32, s: &str) -> String {
    if count == 1 {
        format!("1 {}", s)
    } else {
        format!("{} {}s", count, s)
    }
}
840 if ntest != 0 || nbench != 0 {
841 writeln!(output, "")?;
847 plural(ntest, "test"),
848 plural(nbench, "benchmark")
855 // A simple console test runner
856 pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<bool> {
859 st: &mut ConsoleTestState,
860 out: &mut dyn OutputFormatter,
861 ) -> io::Result<()> {
862 match (*event).clone() {
863 TeFiltered(ref filtered_tests) => {
864 st.total = filtered_tests.len();
865 out.write_run_start(filtered_tests.len())
867 TeFilteredOut(filtered_out) => Ok(st.filtered_out = filtered_out),
868 TeWait(ref test) => out.write_test_start(test),
869 TeTimeout(ref test) => out.write_timeout(test),
870 TeResult(test, result, stdout) => {
871 st.write_log_result(&test, &result)?;
872 out.write_result(&test, &result, &*stdout)?;
876 st.not_failures.push((test, stdout));
878 TrIgnored => st.ignored += 1,
879 TrAllowedFail => st.allowed_fail += 1,
881 st.metrics.insert_metric(
882 test.name.as_slice(),
883 bs.ns_iter_summ.median,
884 bs.ns_iter_summ.max - bs.ns_iter_summ.min,
890 st.failures.push((test, stdout));
892 TrFailedMsg(msg) => {
894 let mut stdout = stdout;
895 stdout.extend_from_slice(format!("note: {}", msg).as_bytes());
896 st.failures.push((test, stdout));
904 let output = match term::stdout() {
905 None => Raw(io::stdout()),
906 Some(t) => Pretty(t),
909 let max_name_len = tests
911 .max_by_key(|t| len_if_padded(*t))
912 .map(|t| t.desc.name.as_slice().len())
915 let is_multithreaded = opts.test_threads.unwrap_or_else(get_concurrency) > 1;
917 let mut out: Box<dyn OutputFormatter> = match opts.format {
918 OutputFormat::Pretty => Box::new(PrettyFormatter::new(
924 OutputFormat::Terse => Box::new(TerseFormatter::new(
930 OutputFormat::Json => Box::new(JsonFormatter::new(output)),
932 let mut st = ConsoleTestState::new(opts)?;
933 fn len_if_padded(t: &TestDescAndFn) -> usize {
934 match t.testfn.padding() {
936 PadOnRight => t.desc.name.as_slice().len(),
940 run_tests(opts, tests, |x| callback(&x, &mut st, &mut *out))?;
942 assert!(st.current_test_count() == st.total);
944 return out.write_run_finish(&st);
948 fn should_sort_failures_before_printing_them() {
949 let test_a = TestDesc {
950 name: StaticTestName("a"),
952 should_panic: ShouldPanic::No,
956 let test_b = TestDesc {
957 name: StaticTestName("b"),
959 should_panic: ShouldPanic::No,
963 let mut out = PrettyFormatter::new(Raw(Vec::new()), false, 10, false);
965 let st = ConsoleTestState {
974 metrics: MetricMap::new(),
975 failures: vec![(test_b, Vec::new()), (test_a, Vec::new())],
976 options: Options::new(),
977 not_failures: Vec::new(),
980 out.write_failures(&st).unwrap();
981 let s = match out.output_location() {
982 &Raw(ref m) => String::from_utf8_lossy(&m[..]),
983 &Pretty(_) => unreachable!(),
986 let apos = s.find("a").unwrap();
987 let bpos = s.find("b").unwrap();
988 assert!(apos < bpos);
991 fn use_color(opts: &TestOpts) -> bool {
993 AutoColor => !opts.nocapture && stdout_isatty(),
999 #[cfg(any(target_os = "cloudabi", target_os = "redox",
1000 all(target_arch = "wasm32", not(target_os = "emscripten"))))]
1001 fn stdout_isatty() -> bool {
1002 // FIXME: Implement isatty on Redox
1006 fn stdout_isatty() -> bool {
1007 unsafe { libc::isatty(libc::STDOUT_FILENO) != 0 }
1010 fn stdout_isatty() -> bool {
1013 type HANDLE = *mut u8;
1014 type LPDWORD = *mut u32;
1015 const STD_OUTPUT_HANDLE: DWORD = -11i32 as DWORD;
1017 fn GetStdHandle(which: DWORD) -> HANDLE;
1018 fn GetConsoleMode(hConsoleHandle: HANDLE, lpMode: LPDWORD) -> BOOL;
1021 let handle = GetStdHandle(STD_OUTPUT_HANDLE);
1023 GetConsoleMode(handle, &mut out) != 0
1028 pub enum TestEvent {
1029 TeFiltered(Vec<TestDesc>),
1031 TeResult(TestDesc, TestResult, Vec<u8>),
1032 TeTimeout(TestDesc),
1033 TeFilteredOut(usize),
1036 pub type MonitorMsg = (TestDesc, TestResult, Vec<u8>);
/// A `Write` sink that appends everything into a shared, mutex-guarded
/// byte buffer; used to capture a test's stdout/stderr.
struct Sink(Arc<Mutex<Vec<u8>>>);

impl Write for Sink {
    fn write(&mut self, data: &[u8]) -> io::Result<usize> {
        // Vec<u8>'s Write impl always appends the full buffer.
        self.0.lock().unwrap().write(data)
    }

    fn flush(&mut self) -> io::Result<()> {
        // Nothing buffered beyond the Vec itself.
        Ok(())
    }
}
1048 pub fn run_tests<F>(opts: &TestOpts, tests: Vec<TestDescAndFn>, mut callback: F) -> io::Result<()>
1050 F: FnMut(TestEvent) -> io::Result<()>,
1052 use std::collections::HashMap;
1053 use std::sync::mpsc::RecvTimeoutError;
1055 let tests_len = tests.len();
1057 let mut filtered_tests = filter_tests(opts, tests);
1058 if !opts.bench_benchmarks {
1059 filtered_tests = convert_benchmarks_to_tests(filtered_tests);
1062 let filtered_tests = {
1063 let mut filtered_tests = filtered_tests;
1064 for test in filtered_tests.iter_mut() {
1065 test.desc.name = test.desc.name.with_padding(test.testfn.padding());
1071 let filtered_out = tests_len - filtered_tests.len();
1072 callback(TeFilteredOut(filtered_out))?;
1074 let filtered_descs = filtered_tests.iter().map(|t| t.desc.clone()).collect();
1076 callback(TeFiltered(filtered_descs))?;
1078 let (filtered_tests, filtered_benchs): (Vec<_>, _) =
1079 filtered_tests.into_iter().partition(|e| match e.testfn {
1080 StaticTestFn(_) | DynTestFn(_) => true,
1084 let concurrency = opts.test_threads.unwrap_or_else(get_concurrency);
1086 let mut remaining = filtered_tests;
1087 remaining.reverse();
1088 let mut pending = 0;
1090 let (tx, rx) = channel::<MonitorMsg>();
1092 let mut running_tests: HashMap<TestDesc, Instant> = HashMap::new();
1094 fn get_timed_out_tests(running_tests: &mut HashMap<TestDesc, Instant>) -> Vec<TestDesc> {
1095 let now = Instant::now();
1096 let timed_out = running_tests
1098 .filter_map(|(desc, timeout)| {
1099 if &now >= timeout {
1106 for test in &timed_out {
1107 running_tests.remove(test);
1112 fn calc_timeout(running_tests: &HashMap<TestDesc, Instant>) -> Option<Duration> {
1113 running_tests.values().min().map(|next_timeout| {
1114 let now = Instant::now();
1115 if *next_timeout >= now {
1123 if concurrency == 1 {
1124 while !remaining.is_empty() {
1125 let test = remaining.pop().unwrap();
1126 callback(TeWait(test.desc.clone()))?;
1127 run_test(opts, !opts.run_tests, test, tx.clone());
1128 let (test, result, stdout) = rx.recv().unwrap();
1129 callback(TeResult(test, result, stdout))?;
1132 while pending > 0 || !remaining.is_empty() {
1133 while pending < concurrency && !remaining.is_empty() {
1134 let test = remaining.pop().unwrap();
1135 let timeout = Instant::now() + Duration::from_secs(TEST_WARN_TIMEOUT_S);
1136 running_tests.insert(test.desc.clone(), timeout);
1137 callback(TeWait(test.desc.clone()))?; //here no pad
1138 run_test(opts, !opts.run_tests, test, tx.clone());
1144 if let Some(timeout) = calc_timeout(&running_tests) {
1145 res = rx.recv_timeout(timeout);
1146 for test in get_timed_out_tests(&mut running_tests) {
1147 callback(TeTimeout(test))?;
1149 if res != Err(RecvTimeoutError::Timeout) {
1153 res = rx.recv().map_err(|_| RecvTimeoutError::Disconnected);
1158 let (desc, result, stdout) = res.unwrap();
1159 running_tests.remove(&desc);
1161 callback(TeResult(desc, result, stdout))?;
1166 if opts.bench_benchmarks {
1167 // All benchmarks run at the end, in serial.
1168 for b in filtered_benchs {
1169 callback(TeWait(b.desc.clone()))?;
1170 run_test(opts, false, b, tx.clone());
1171 let (test, result, stdout) = rx.recv().unwrap();
1172 callback(TeResult(test, result, stdout))?;
1178 #[allow(deprecated)]
1179 fn get_concurrency() -> usize {
1180 return match env::var("RUST_TEST_THREADS") {
1182 let opt_n: Option<usize> = s.parse().ok();
1184 Some(n) if n > 0 => n,
1186 "RUST_TEST_THREADS is `{}`, should be a positive integer.",
1191 Err(..) => num_cpus(),
1195 #[allow(nonstandard_style)]
1196 fn num_cpus() -> usize {
1198 struct SYSTEM_INFO {
1199 wProcessorArchitecture: u16,
1202 lpMinimumApplicationAddress: *mut u8,
1203 lpMaximumApplicationAddress: *mut u8,
1204 dwActiveProcessorMask: *mut u8,
1205 dwNumberOfProcessors: u32,
1206 dwProcessorType: u32,
1207 dwAllocationGranularity: u32,
1208 wProcessorLevel: u16,
1209 wProcessorRevision: u16,
1212 fn GetSystemInfo(info: *mut SYSTEM_INFO) -> i32;
1215 let mut sysinfo = std::mem::zeroed();
1216 GetSystemInfo(&mut sysinfo);
1217 sysinfo.dwNumberOfProcessors as usize
1221 #[cfg(target_os = "redox")]
1222 fn num_cpus() -> usize {
1223 // FIXME: Implement num_cpus on Redox
1227 #[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))]
1228 fn num_cpus() -> usize {
1232 #[cfg(any(target_os = "android", target_os = "cloudabi", target_os = "emscripten",
1233 target_os = "fuchsia", target_os = "ios", target_os = "linux",
1234 target_os = "macos", target_os = "solaris"))]
1235 fn num_cpus() -> usize {
1236 unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as usize }
1239 #[cfg(any(target_os = "freebsd", target_os = "dragonfly", target_os = "bitrig",
1240 target_os = "netbsd"))]
1241 fn num_cpus() -> usize {
1244 let mut cpus: libc::c_uint = 0;
1245 let mut cpus_size = std::mem::size_of_val(&cpus);
1248 cpus = libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as libc::c_uint;
1251 let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
1256 &mut cpus as *mut _ as *mut _,
1257 &mut cpus_size as *mut _ as *mut _,
1269 #[cfg(target_os = "openbsd")]
1270 fn num_cpus() -> usize {
1273 let mut cpus: libc::c_uint = 0;
1274 let mut cpus_size = std::mem::size_of_val(&cpus);
1275 let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
1281 &mut cpus as *mut _ as *mut _,
1282 &mut cpus_size as *mut _ as *mut _,
1293 #[cfg(target_os = "haiku")]
1294 fn num_cpus() -> usize {
1299 #[cfg(target_os = "l4re")]
1300 fn num_cpus() -> usize {
1306 pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
1307 let mut filtered = tests;
1308 // Remove tests that don't match the test filter
1309 filtered = match opts.filter {
1311 Some(ref filter) => filtered
1314 if opts.filter_exact {
1315 test.desc.name.as_slice() == &filter[..]
1317 test.desc.name.as_slice().contains(&filter[..])
1323 // Skip tests that match any of the skip filters
1327 !opts.skip.iter().any(|sf| {
1328 if opts.filter_exact {
1329 t.desc.name.as_slice() == &sf[..]
1331 t.desc.name.as_slice().contains(&sf[..])
1337 // Maybe pull out the ignored test and unignore them
1338 filtered = if !opts.run_ignored {
1341 fn filter(test: TestDescAndFn) -> Option<TestDescAndFn> {
1342 if test.desc.ignore {
1343 let TestDescAndFn { desc, testfn } = test;
1344 Some(TestDescAndFn {
1355 filtered.into_iter().filter_map(filter).collect()
1358 // Sort the tests alphabetically
1359 filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));
1364 pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
1365 // convert benchmarks to tests, if we're not benchmarking them
1369 let testfn = match x.testfn {
1370 DynBenchFn(bench) => DynTestFn(Box::new(move || {
1371 bench::run_once(|b| __rust_begin_short_backtrace(|| bench.run(b)))
1373 StaticBenchFn(benchfn) => DynTestFn(Box::new(move || {
1374 bench::run_once(|b| __rust_begin_short_backtrace(|| benchfn(b)))
1389 test: TestDescAndFn,
1390 monitor_ch: Sender<MonitorMsg>,
1392 let TestDescAndFn { desc, testfn } = test;
1394 let ignore_because_panic_abort = cfg!(target_arch = "wasm32") && !cfg!(target_os = "emscripten")
1395 && desc.should_panic != ShouldPanic::No;
1397 if force_ignore || desc.ignore || ignore_because_panic_abort {
1398 monitor_ch.send((desc, TrIgnored, Vec::new())).unwrap();
1404 monitor_ch: Sender<MonitorMsg>,
1406 testfn: Box<dyn FnBox() + Send>,
1408 // Buffer for capturing standard I/O
1409 let data = Arc::new(Mutex::new(Vec::new()));
1410 let data2 = data.clone();
1412 let name = desc.name.clone();
1413 let runtest = move || {
1414 let oldio = if !nocapture {
1416 io::set_print(Some(Box::new(Sink(data2.clone())))),
1417 io::set_panic(Some(Box::new(Sink(data2)))),
1423 let result = catch_unwind(AssertUnwindSafe(testfn));
1425 if let Some((printio, panicio)) = oldio {
1426 io::set_print(printio);
1427 io::set_panic(panicio);
1430 let test_result = calc_result(&desc, result);
1431 let stdout = data.lock().unwrap().to_vec();
1433 .send((desc.clone(), test_result, stdout))
1437 // If the platform is single-threaded we're just going to run
1438 // the test synchronously, regardless of the concurrency
1440 let supports_threads = !cfg!(target_os = "emscripten") && !cfg!(target_arch = "wasm32");
1441 if supports_threads {
1442 let cfg = thread::Builder::new().name(name.as_slice().to_owned());
1443 cfg.spawn(runtest).unwrap();
1450 DynBenchFn(bencher) => {
1451 ::bench::benchmark(desc, monitor_ch, opts.nocapture, |harness| {
1452 bencher.run(harness)
1455 StaticBenchFn(benchfn) => {
1456 ::bench::benchmark(desc, monitor_ch, opts.nocapture, |harness| {
1457 (benchfn.clone())(harness)
1461 let cb = move || __rust_begin_short_backtrace(f);
1462 run_test_inner(desc, monitor_ch, opts.nocapture, Box::new(cb))
1464 StaticTestFn(f) => run_test_inner(
1468 Box::new(move || __rust_begin_short_backtrace(f)),
1473 /// Fixed frame used to clean the backtrace with `RUST_BACKTRACE=1`.
1475 fn __rust_begin_short_backtrace<F: FnOnce()>(f: F) {
1479 fn calc_result(desc: &TestDesc, task_result: Result<(), Box<dyn Any + Send>>) -> TestResult {
1480 match (&desc.should_panic, task_result) {
1481 (&ShouldPanic::No, Ok(())) | (&ShouldPanic::Yes, Err(_)) => TrOk,
1482 (&ShouldPanic::YesWithMessage(msg), Err(ref err)) => {
1483 if err.downcast_ref::<String>()
1485 .or_else(|| err.downcast_ref::<&'static str>().map(|e| *e))
1486 .map(|e| e.contains(msg))
1491 if desc.allow_fail {
1494 TrFailedMsg(format!("Panic did not include expected string '{}'", msg))
1498 _ if desc.allow_fail => TrAllowedFail,
1503 #[derive(Clone, PartialEq)]
1504 pub struct MetricMap(BTreeMap<String, Metric>);
1507 pub fn new() -> MetricMap {
1508 MetricMap(BTreeMap::new())
1511 /// Insert a named `value` (+/- `noise`) metric into the map. The value
1512 /// must be non-negative. The `noise` indicates the uncertainty of the
1513 /// metric, which doubles as the "noise range" of acceptable
1514 /// pairwise-regressions on this named value, when comparing from one
1515 /// metric to the next using `compare_to_old`.
1517 /// If `noise` is positive, then it means this metric is of a value
1518 /// you want to see grow smaller, so a change larger than `noise` in the
1519 /// positive direction represents a regression.
1521 /// If `noise` is negative, then it means this metric is of a value
1522 /// you want to see grow larger, so a change larger than `noise` in the
1523 /// negative direction represents a regression.
1524 pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
1525 let m = Metric { value, noise };
1526 self.0.insert(name.to_owned(), m);
1529 pub fn fmt_metrics(&self) -> String {
1532 .map(|(k, v)| format!("{}: {} (+/- {})", *k, v.value, v.noise))
1533 .collect::<Vec<_>>();
1540 /// A function that is opaque to the optimizer, to allow benchmarks to
1541 /// pretend to use outputs to assist in avoiding dead-code
1544 /// This function is a no-op, and does not even read from `dummy`.
1545 #[cfg(not(any(target_arch = "asmjs", target_arch = "wasm32")))]
1546 pub fn black_box<T>(dummy: T) -> T {
1547 // we need to "use" the argument in some way LLVM can't
1549 unsafe { asm!("" : : "r"(&dummy)) }
1552 #[cfg(any(target_arch = "asmjs", target_arch = "wasm32"))]
1554 pub fn black_box<T>(dummy: T) -> T {
1559 /// Callback for benchmark functions to run in their body.
1560 pub fn iter<T, F>(&mut self, mut inner: F)
1564 if self.mode == BenchMode::Single {
1565 ns_iter_inner(&mut inner, 1);
1569 self.summary = Some(iter(&mut inner));
1572 pub fn bench<F>(&mut self, mut f: F) -> Option<stats::Summary>
1574 F: FnMut(&mut Bencher),
1577 return self.summary;
1581 fn ns_from_dur(dur: Duration) -> u64 {
1582 dur.as_secs() * 1_000_000_000 + (dur.subsec_nanos() as u64)
1585 fn ns_iter_inner<T, F>(inner: &mut F, k: u64) -> u64
1589 let start = Instant::now();
1593 return ns_from_dur(start.elapsed());
1596 pub fn iter<T, F>(inner: &mut F) -> stats::Summary
1600 // Initial bench run to get ballpark figure.
1601 let ns_single = ns_iter_inner(inner, 1);
1603 // Try to estimate iter count for 1ms falling back to 1m
1604 // iterations if first run took < 1ns.
1605 let ns_target_total = 1_000_000; // 1ms
1606 let mut n = ns_target_total / cmp::max(1, ns_single);
1608 // if the first run took more than 1ms we don't want to just
1609 // be left doing 0 iterations on every loop. The unfortunate
1610 // side effect of not being able to do as many runs is
1611 // automatically handled by the statistical analysis below
1612 // (i.e. larger error bars).
1615 let mut total_run = Duration::new(0, 0);
1616 let samples: &mut [f64] = &mut [0.0_f64; 50];
1618 let loop_start = Instant::now();
1620 for p in &mut *samples {
1621 *p = ns_iter_inner(inner, n) as f64 / n as f64;
1624 stats::winsorize(samples, 5.0);
1625 let summ = stats::Summary::new(samples);
1627 for p in &mut *samples {
1628 let ns = ns_iter_inner(inner, 5 * n);
1629 *p = ns as f64 / (5 * n) as f64;
1632 stats::winsorize(samples, 5.0);
1633 let summ5 = stats::Summary::new(samples);
1635 let loop_run = loop_start.elapsed();
1637 // If we've run for 100ms and seem to have converged to a
1639 if loop_run > Duration::from_millis(100) && summ.median_abs_dev_pct < 1.0
1640 && summ.median - summ5.median < summ5.median_abs_dev
1645 total_run = total_run + loop_run;
1646 // Longest we ever run for is 3s.
1647 if total_run > Duration::from_secs(3) {
1651 // If we overflow here just return the results so far. We check a
1652 // multiplier of 10 because we're about to multiply by 2 and the
1653 // next iteration of the loop will also multiply by 5 (to calculate
1654 // the summ5 result)
1655 n = match n.checked_mul(10) {
1665 use std::panic::{catch_unwind, AssertUnwindSafe};
1668 use std::sync::{Arc, Mutex};
1670 use super::{BenchMode, BenchSamples, Bencher, MonitorMsg, Sender, Sink, TestDesc, TestResult};
1672 pub fn benchmark<F>(desc: TestDesc, monitor_ch: Sender<MonitorMsg>, nocapture: bool, f: F)
1674 F: FnMut(&mut Bencher),
1676 let mut bs = Bencher {
1677 mode: BenchMode::Auto,
1682 let data = Arc::new(Mutex::new(Vec::new()));
1683 let data2 = data.clone();
1685 let oldio = if !nocapture {
1687 io::set_print(Some(Box::new(Sink(data2.clone())))),
1688 io::set_panic(Some(Box::new(Sink(data2)))),
1694 let result = catch_unwind(AssertUnwindSafe(|| bs.bench(f)));
1696 if let Some((printio, panicio)) = oldio {
1697 io::set_print(printio);
1698 io::set_panic(panicio);
1701 let test_result = match result {
1703 Ok(Some(ns_iter_summ)) => {
1704 let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
1705 let mb_s = bs.bytes * 1000 / ns_iter;
1707 let bs = BenchSamples {
1709 mb_s: mb_s as usize,
1711 TestResult::TrBench(bs)
1714 // iter not called, so no data.
1715 // FIXME: error in this case?
1716 let samples: &mut [f64] = &mut [0.0_f64; 1];
1717 let bs = BenchSamples {
1718 ns_iter_summ: stats::Summary::new(samples),
1721 TestResult::TrBench(bs)
1723 Err(_) => TestResult::TrFailed,
1726 let stdout = data.lock().unwrap().to_vec();
1727 monitor_ch.send((desc, test_result, stdout)).unwrap();
1730 pub fn run_once<F>(f: F)
1732 F: FnMut(&mut Bencher),
1734 let mut bs = Bencher {
1735 mode: BenchMode::Single,
1745 use test::{filter_tests, parse_opts, run_test, DynTestFn, DynTestName, MetricMap, ShouldPanic,
1746 StaticTestName, TestDesc, TestDescAndFn, TestOpts, TrFailed, TrFailedMsg,
1748 use std::sync::mpsc::channel;
1753 pub fn do_not_run_ignored_tests() {
1757 let desc = TestDescAndFn {
1759 name: StaticTestName("whatever"),
1761 should_panic: ShouldPanic::No,
1764 testfn: DynTestFn(Box::new(f)),
1766 let (tx, rx) = channel();
1767 run_test(&TestOpts::new(), false, desc, tx);
1768 let (_, res, _) = rx.recv().unwrap();
1769 assert!(res != TrOk);
1773 pub fn ignored_tests_result_in_ignored() {
1775 let desc = TestDescAndFn {
1777 name: StaticTestName("whatever"),
1779 should_panic: ShouldPanic::No,
1782 testfn: DynTestFn(Box::new(f)),
1784 let (tx, rx) = channel();
1785 run_test(&TestOpts::new(), false, desc, tx);
1786 let (_, res, _) = rx.recv().unwrap();
1787 assert!(res == TrIgnored);
1791 fn test_should_panic() {
1795 let desc = TestDescAndFn {
1797 name: StaticTestName("whatever"),
1799 should_panic: ShouldPanic::Yes,
1802 testfn: DynTestFn(Box::new(f)),
1804 let (tx, rx) = channel();
1805 run_test(&TestOpts::new(), false, desc, tx);
1806 let (_, res, _) = rx.recv().unwrap();
1807 assert!(res == TrOk);
1811 fn test_should_panic_good_message() {
1813 panic!("an error message");
1815 let desc = TestDescAndFn {
1817 name: StaticTestName("whatever"),
1819 should_panic: ShouldPanic::YesWithMessage("error message"),
1822 testfn: DynTestFn(Box::new(f)),
1824 let (tx, rx) = channel();
1825 run_test(&TestOpts::new(), false, desc, tx);
1826 let (_, res, _) = rx.recv().unwrap();
1827 assert!(res == TrOk);
1831 fn test_should_panic_bad_message() {
1833 panic!("an error message");
1835 let expected = "foobar";
1836 let failed_msg = "Panic did not include expected string";
1837 let desc = TestDescAndFn {
1839 name: StaticTestName("whatever"),
1841 should_panic: ShouldPanic::YesWithMessage(expected),
1844 testfn: DynTestFn(Box::new(f)),
1846 let (tx, rx) = channel();
1847 run_test(&TestOpts::new(), false, desc, tx);
1848 let (_, res, _) = rx.recv().unwrap();
1849 assert!(res == TrFailedMsg(format!("{} '{}'", failed_msg, expected)));
1853 fn test_should_panic_but_succeeds() {
1855 let desc = TestDescAndFn {
1857 name: StaticTestName("whatever"),
1859 should_panic: ShouldPanic::Yes,
1862 testfn: DynTestFn(Box::new(f)),
1864 let (tx, rx) = channel();
1865 run_test(&TestOpts::new(), false, desc, tx);
1866 let (_, res, _) = rx.recv().unwrap();
1867 assert!(res == TrFailed);
1871 fn parse_ignored_flag() {
1873 "progname".to_string(),
1874 "filter".to_string(),
1875 "--ignored".to_string(),
1877 let opts = match parse_opts(&args) {
1879 _ => panic!("Malformed arg in parse_ignored_flag"),
1881 assert!((opts.run_ignored));
1885 pub fn filter_for_ignored_option() {
1886 // When we run ignored tests the test filter should filter out all the
1887 // unignored tests and flip the ignore flag on the rest to false
1889 let mut opts = TestOpts::new();
1890 opts.run_tests = true;
1891 opts.run_ignored = true;
1896 name: StaticTestName("1"),
1898 should_panic: ShouldPanic::No,
1901 testfn: DynTestFn(Box::new(move || {})),
1905 name: StaticTestName("2"),
1907 should_panic: ShouldPanic::No,
1910 testfn: DynTestFn(Box::new(move || {})),
1913 let filtered = filter_tests(&opts, tests);
1915 assert_eq!(filtered.len(), 1);
1916 assert_eq!(filtered[0].desc.name.to_string(), "1");
1917 assert!(!filtered[0].desc.ignore);
1921 pub fn exact_filter_match() {
1922 fn tests() -> Vec<TestDescAndFn> {
1923 vec!["base", "base::test", "base::test1", "base::test2"]
1925 .map(|name| TestDescAndFn {
1927 name: StaticTestName(name),
1929 should_panic: ShouldPanic::No,
1932 testfn: DynTestFn(Box::new(move || {})),
1937 let substr = filter_tests(
1939 filter: Some("base".into()),
1944 assert_eq!(substr.len(), 4);
1946 let substr = filter_tests(
1948 filter: Some("bas".into()),
1953 assert_eq!(substr.len(), 4);
1955 let substr = filter_tests(
1957 filter: Some("::test".into()),
1962 assert_eq!(substr.len(), 3);
1964 let substr = filter_tests(
1966 filter: Some("base::test".into()),
1971 assert_eq!(substr.len(), 3);
1973 let exact = filter_tests(
1975 filter: Some("base".into()),
1981 assert_eq!(exact.len(), 1);
1983 let exact = filter_tests(
1985 filter: Some("bas".into()),
1991 assert_eq!(exact.len(), 0);
1993 let exact = filter_tests(
1995 filter: Some("::test".into()),
2001 assert_eq!(exact.len(), 0);
2003 let exact = filter_tests(
2005 filter: Some("base::test".into()),
2011 assert_eq!(exact.len(), 1);
2015 pub fn sort_tests() {
2016 let mut opts = TestOpts::new();
2017 opts.run_tests = true;
2020 "sha1::test".to_string(),
2021 "isize::test_to_str".to_string(),
2022 "isize::test_pow".to_string(),
2023 "test::do_not_run_ignored_tests".to_string(),
2024 "test::ignored_tests_result_in_ignored".to_string(),
2025 "test::first_free_arg_should_be_a_filter".to_string(),
2026 "test::parse_ignored_flag".to_string(),
2027 "test::filter_for_ignored_option".to_string(),
2028 "test::sort_tests".to_string(),
2032 let mut tests = Vec::new();
2033 for name in &names {
2034 let test = TestDescAndFn {
2036 name: DynTestName((*name).clone()),
2038 should_panic: ShouldPanic::No,
2041 testfn: DynTestFn(Box::new(testfn)),
2047 let filtered = filter_tests(&opts, tests);
2049 let expected = vec![
2050 "isize::test_pow".to_string(),
2051 "isize::test_to_str".to_string(),
2052 "sha1::test".to_string(),
2053 "test::do_not_run_ignored_tests".to_string(),
2054 "test::filter_for_ignored_option".to_string(),
2055 "test::first_free_arg_should_be_a_filter".to_string(),
2056 "test::ignored_tests_result_in_ignored".to_string(),
2057 "test::parse_ignored_flag".to_string(),
2058 "test::sort_tests".to_string(),
2061 for (a, b) in expected.iter().zip(filtered) {
2062 assert!(*a == b.desc.name.to_string());
2067 pub fn test_metricmap_compare() {
2068 let mut m1 = MetricMap::new();
2069 let mut m2 = MetricMap::new();
2070 m1.insert_metric("in-both-noise", 1000.0, 200.0);
2071 m2.insert_metric("in-both-noise", 1100.0, 200.0);
2073 m1.insert_metric("in-first-noise", 1000.0, 2.0);
2074 m2.insert_metric("in-second-noise", 1000.0, 2.0);
2076 m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
2077 m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);
2079 m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
2080 m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);
2082 m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
2083 m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);
2085 m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
2086 m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);
2090 pub fn test_bench_once_no_iter() {
2091 fn f(_: &mut Bencher) {}
2096 pub fn test_bench_once_iter() {
2097 fn f(b: &mut Bencher) {
2104 pub fn test_bench_no_iter() {
2105 fn f(_: &mut Bencher) {}
2107 let (tx, rx) = channel();
2109 let desc = TestDesc {
2110 name: StaticTestName("f"),
2112 should_panic: ShouldPanic::No,
2116 ::bench::benchmark(desc, tx, true, f);
2121 pub fn test_bench_iter() {
2122 fn f(b: &mut Bencher) {
2126 let (tx, rx) = channel();
2128 let desc = TestDesc {
2129 name: StaticTestName("f"),
2131 should_panic: ShouldPanic::No,
2135 ::bench::benchmark(desc, tx, true, f);