1 // Copyright 2012-2017 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 //! Support code for rustc's built-in unit-test and micro-benchmarking framework.
14 //! Almost all user code will only be interested in `Bencher` and
15 //! `black_box`. All other interactions (such as writing tests and
16 //! benchmarks themselves) should be done via the `#[test]` and
17 //! `#[bench]` attributes.
19 //! See the [Testing Chapter](../book/first-edition/testing.html) of the book for more details.
21 // Currently, not much of this is meant for users. It is intended to
22 // support the simplest interface possible for representing and
23 // running tests while providing a base that other test frameworks may build on.
26 // NB: this is also specified in this crate's Cargo.toml, but libsyntax contains logic specific to
27 // this crate, which relies on this attribute (rather than the value of `--crate-name` passed by
28 // cargo) to detect this crate.
30 #![crate_name = "test"]
31 #![unstable(feature = "test", issue = "27812")]
32 #![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
33 html_favicon_url = "https://doc.rust-lang.org/favicon.ico",
34 html_root_url = "https://doc.rust-lang.org/nightly/", test(attr(deny(warnings))))]
37 #![cfg_attr(any(unix, target_os = "cloudabi"), feature(libc))]
39 #![feature(set_stdio)]
40 #![feature(panic_unwind)]
41 #![feature(staged_api)]
42 #![feature(termination_trait_lib)]
46 #[cfg(any(unix, target_os = "cloudabi"))]
50 // FIXME(#54291): rustc and/or LLVM don't yet support building with panic-unwind
51 // on aarch64-pc-windows-msvc, so we don't link libtest against
52 // libunwind (for the time being), even though it means that
53 // libtest won't be fully functional on this platform.
55 // See also: https://github.com/rust-lang/rust/issues/54190#issuecomment-422904437
56 #[cfg(not(all(windows, target_arch = "aarch64")))]
57 extern crate panic_unwind;
59 pub use self::TestFn::*;
60 pub use self::ColorConfig::*;
61 pub use self::TestResult::*;
62 pub use self::TestName::*;
63 use self::TestEvent::*;
64 use self::NamePadding::*;
65 use self::OutputLocation::*;
67 use std::panic::{catch_unwind, AssertUnwindSafe};
69 use std::boxed::FnBox;
71 use std::collections::BTreeMap;
75 use std::io::prelude::*;
77 use std::path::PathBuf;
78 use std::process::Termination;
79 use std::sync::mpsc::{channel, Sender};
80 use std::sync::{Arc, Mutex};
82 use std::time::{Duration, Instant};
// Number of seconds a test may run before the runner emits a TeTimeout
// warning event for it (used to build the per-test warn deadline in
// run_tests' bookkeeping below).
86 const TEST_WARN_TIMEOUT_S: u64 = 60;
87 const QUIET_MODE_MAX_COLUMN: usize = 100; // insert a '\n' after 100 tests in quiet mode
89 // to be used by rustc to compile tests in libtest
91 pub use {assert_test_result, filter_tests, parse_opts, run_test, test_main, test_main_static,
92 Bencher, DynTestFn, DynTestName, Metric, MetricMap, Options, ShouldPanic,
93 StaticBenchFn, StaticTestFn, StaticTestName, TestDesc, TestDescAndFn, TestName,
94 TestOpts, TestResult, TrFailed, TrFailedMsg, TrIgnored, TrOk};
100 use formatters::{JsonFormatter, OutputFormatter, PrettyFormatter, TerseFormatter};
102 // The name of a test. By convention this follows the rules for rust
103 // paths; i.e. it should be a series of identifiers separated by double
104 // colons. This way if some test runner wants to arrange the tests
105 // hierarchically it may.
107 #[derive(Clone, PartialEq, Eq, Hash, Debug)]
109 StaticTestName(&'static str),
111 AlignedTestName(Cow<'static, str>, NamePadding),
114 fn as_slice(&self) -> &str {
116 StaticTestName(s) => s,
117 DynTestName(ref s) => s,
118 AlignedTestName(ref s, _) => &*s,
122 fn padding(&self) -> NamePadding {
124 &AlignedTestName(_, p) => p,
129 fn with_padding(&self, padding: NamePadding) -> TestName {
130 let name = match self {
131 &TestName::StaticTestName(name) => Cow::Borrowed(name),
132 &TestName::DynTestName(ref name) => Cow::Owned(name.clone()),
133 &TestName::AlignedTestName(ref name, _) => name.clone(),
136 TestName::AlignedTestName(name, padding)
139 impl fmt::Display for TestName {
140 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
141 fmt::Display::fmt(self.as_slice(), f)
145 #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
146 pub enum NamePadding {
152 fn padded_name(&self, column_count: usize, align: NamePadding) -> String {
153 let mut name = String::from(self.name.as_slice());
154 let fill = column_count.saturating_sub(name.len());
155 let pad = " ".repeat(fill);
166 /// Represents a benchmark function.
167 pub trait TDynBenchFn: Send {
168 fn run(&self, harness: &mut Bencher);
171 // A function that runs a test. If the function returns successfully,
172 // the test succeeds; if the function panics then the test fails. We
173 // may need to come up with a more clever definition of test in order
174 // to support isolation of tests into threads.
177 StaticBenchFn(fn(&mut Bencher)),
178 DynTestFn(Box<dyn FnBox() + Send>),
179 DynBenchFn(Box<dyn TDynBenchFn + 'static>),
183 fn padding(&self) -> NamePadding {
185 StaticTestFn(..) => PadNone,
186 StaticBenchFn(..) => PadOnRight,
187 DynTestFn(..) => PadNone,
188 DynBenchFn(..) => PadOnRight,
193 impl fmt::Debug for TestFn {
194 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
195 f.write_str(match *self {
196 StaticTestFn(..) => "StaticTestFn(..)",
197 StaticBenchFn(..) => "StaticBenchFn(..)",
198 DynTestFn(..) => "DynTestFn(..)",
199 DynBenchFn(..) => "DynBenchFn(..)",
204 /// Manager of the benchmarking runs.
206 /// This is fed into functions marked with `#[bench]` to allow for
207 /// set-up & tear-down before running a piece of code repeatedly via a
212 summary: Option<stats::Summary>,
216 #[derive(Clone, PartialEq, Eq)]
222 #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
223 pub enum ShouldPanic {
226 YesWithMessage(&'static str),
229 // The definition of a single test. A test runner will run a list of
231 #[derive(Clone, Debug, PartialEq, Eq, Hash)]
232 pub struct TestDesc {
235 pub should_panic: ShouldPanic,
236 pub allow_fail: bool,
240 pub struct TestDescAndFn {
245 #[derive(Clone, PartialEq, Debug, Copy)]
252 pub fn new(value: f64, noise: f64) -> Metric {
253 Metric { value, noise }
257 /// In case we want to add other options as well, just add them in this struct.
258 #[derive(Copy, Clone, Debug)]
260 display_output: bool,
264 pub fn new() -> Options {
266 display_output: false,
270 pub fn display_output(mut self, display_output: bool) -> Options {
271 self.display_output = display_output;
276 // The default console test runner. It accepts the command line
277 // arguments and a vector of test_descs.
// NOTE(review): this dump is missing several lines of this function (the
// option-parse match arms and closing braces); the comments below describe
// only what the visible lines establish.
278 pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>, options: Options) {
279 let mut opts = match parse_opts(args) {
// Parse failures are reported on stderr (exit path not visible here).
282 eprintln!("error: {}", msg);
// Caller-supplied options override whatever parse_opts produced.
288 opts.options = options;
290 if let Err(e) = list_tests_console(&opts, tests) {
291 eprintln!("error: io error when listing tests: {:?}", e);
// Ok(false) means at least one test failed; exit code 101 signals failure.
295 match run_tests_console(&opts, tests) {
297 Ok(false) => process::exit(101),
299 eprintln!("error: io error when listing tests: {:?}", e);
306 // A variant optimized for invocation with a static test vector.
307 // This will panic (intentionally) when fed any dynamic tests, because
308 // it is copying the static values out into a dynamic vector and cannot
309 // copy dynamic values. It is doing this because from this point on
310 // a Vec<TestDescAndFn> is used in order to effect ownership-transfer
311 // semantics into parallel test runners, which in turn requires a Vec<>
312 // rather than a &[].
313 pub fn test_main_static(tests: &[&TestDescAndFn]) {
314 let args = env::args().collect::<Vec<_>>();
315 let owned_tests = tests
317 .map(|t| match t.testfn {
318 StaticTestFn(f) => TestDescAndFn {
319 testfn: StaticTestFn(f),
320 desc: t.desc.clone(),
322 StaticBenchFn(f) => TestDescAndFn {
323 testfn: StaticBenchFn(f),
324 desc: t.desc.clone(),
326 _ => panic!("non-static tests passed to test::test_main_static"),
329 test_main(&args, owned_tests, Options::new())
332 /// Invoked when unit tests terminate. Should panic if the unit
333 /// test is considered a failure. By default, invokes `report()`
334 /// and checks for a `0` result.
335 pub fn assert_test_result<T: Termination>(result: T) {
336 let code = result.report();
340 "the test returned a termination value with a non-zero status code ({}) \
341 which indicates a failure",
346 #[derive(Copy, Clone, Debug)]
347 pub enum ColorConfig {
353 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
354 pub enum OutputFormat {
361 pub struct TestOpts {
363 pub filter: Option<String>,
364 pub filter_exact: bool,
365 pub run_ignored: bool,
367 pub bench_benchmarks: bool,
368 pub logfile: Option<PathBuf>,
370 pub color: ColorConfig,
371 pub format: OutputFormat,
372 pub test_threads: Option<usize>,
373 pub skip: Vec<String>,
374 pub options: Options,
379 fn new() -> TestOpts {
386 bench_benchmarks: false,
390 format: OutputFormat::Pretty,
393 options: Options::new(),
398 /// Result of parsing the options.
399 pub type OptRes = Result<TestOpts, String>;
401 fn optgroups() -> getopts::Options {
402 let mut opts = getopts::Options::new();
403 opts.optflag("", "ignored", "Run ignored tests")
404 .optflag("", "test", "Run tests and not benchmarks")
405 .optflag("", "bench", "Run benchmarks instead of tests")
406 .optflag("", "list", "List all tests and benchmarks")
407 .optflag("h", "help", "Display this message (longer with --help)")
411 "Write logs to the specified file instead \
418 "don't capture stdout/stderr of each \
419 task, allow printing directly",
424 "Number of threads used for running tests \
431 "Skip tests whose names contain FILTER (this flag can \
432 be used multiple times)",
438 "Display one character per test instead of one line. \
439 Alias to --format=terse",
444 "Exactly match filters rather than by substring",
449 "Configure coloring of output:
450 auto = colorize if stdout is a tty and tests are run on serially (default);
451 always = always colorize output;
452 never = never colorize output;",
458 "Configure formatting of output:
459 pretty = Print verbose output;
460 terse = Display one character per test;
461 json = Output a json document",
467 "Enable nightly-only flags:
468 unstable-options = Allow use of experimental features",
474 fn usage(binary: &str, options: &getopts::Options) {
475 let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
479 The FILTER string is tested against the name of all tests, and only those
480 tests whose names contain the filter are run.
482 By default, all tests are run in parallel. This can be altered with the
483 --test-threads flag or the RUST_TEST_THREADS environment variable when running
486 All tests have their standard output and standard error captured by default.
487 This can be overridden with the --nocapture flag or setting RUST_TEST_NOCAPTURE
488 environment variable to a value other than "0". Logging is not captured by default.
492 #[test] - Indicates a function is a test to be run. This function
494 #[bench] - Indicates a function is a benchmark to be run. This
495 function takes one argument (test::Bencher).
496 #[should_panic] - This function (also labeled with #[test]) will only pass if
497 the code causes a panic (an assertion failure or panic!)
498 A message may be provided, which the failure string must
499 contain: #[should_panic(expected = "foo")].
500 #[ignore] - When applied to a function which is already attributed as a
501 test, then the test runner will ignore these tests during
502 normal test runs. Running with --ignored will run these
504 usage = options.usage(&message)
// FIXME: Copied from libsyntax until linkage errors are resolved. Issue #47566
/// Returns true when nightly-only flags (such as `-Z`) may be accepted.
///
/// True unless this is a feature-staged (beta/stable) build, with an escape
/// hatch for bootstrap builds via the `RUSTC_BOOTSTRAP` environment variable.
fn is_nightly() -> bool {
    // Whether this is a feature-staged build, i.e. on the beta or stable channel
    let disable_unstable_features = option_env!("CFG_DISABLE_UNSTABLE_FEATURES").is_some();
    // Whether we should enable unstable features for bootstrapping
    let bootstrap = env::var("RUSTC_BOOTSTRAP").is_ok();

    bootstrap || !disable_unstable_features
}
518 // Parses command line arguments into test options
519 pub fn parse_opts(args: &[String]) -> Option<OptRes> {
520 let mut allow_unstable = false;
521 let opts = optgroups();
522 let args = args.get(1..).unwrap_or(args);
523 let matches = match opts.parse(args) {
525 Err(f) => return Some(Err(f.to_string())),
528 if let Some(opt) = matches.opt_str("Z") {
531 "the option `Z` is only accepted on the nightly compiler".into(),
536 "unstable-options" => {
537 allow_unstable = true;
540 return Some(Err("Unrecognized option to `Z`".into()));
545 if matches.opt_present("h") {
546 usage(&args[0], &opts);
550 let filter = if !matches.free.is_empty() {
551 Some(matches.free[0].clone())
556 let run_ignored = matches.opt_present("ignored");
557 let quiet = matches.opt_present("quiet");
558 let exact = matches.opt_present("exact");
559 let list = matches.opt_present("list");
561 let logfile = matches.opt_str("logfile");
562 let logfile = logfile.map(|s| PathBuf::from(&s));
564 let bench_benchmarks = matches.opt_present("bench");
565 let run_tests = !bench_benchmarks || matches.opt_present("test");
567 let mut nocapture = matches.opt_present("nocapture");
569 nocapture = match env::var("RUST_TEST_NOCAPTURE") {
570 Ok(val) => &val != "0",
575 let test_threads = match matches.opt_str("test-threads") {
576 Some(n_str) => match n_str.parse::<usize>() {
577 Ok(0) => return Some(Err("argument for --test-threads must not be 0".to_string())),
580 return Some(Err(format!(
581 "argument for --test-threads must be a number > 0 \
590 let color = match matches.opt_str("color").as_ref().map(|s| &**s) {
591 Some("auto") | None => AutoColor,
592 Some("always") => AlwaysColor,
593 Some("never") => NeverColor,
596 return Some(Err(format!(
597 "argument for --color must be auto, always, or never (was \
604 let format = match matches.opt_str("format").as_ref().map(|s| &**s) {
605 None if quiet => OutputFormat::Terse,
606 Some("pretty") | None => OutputFormat::Pretty,
607 Some("terse") => OutputFormat::Terse,
611 "The \"json\" format is only accepted on the nightly compiler".into(),
618 return Some(Err(format!(
619 "argument for --format must be pretty, terse, or json (was \
626 let test_opts = TestOpts {
638 skip: matches.opt_strs("skip"),
639 options: Options::new(),
645 #[derive(Clone, PartialEq)]
646 pub struct BenchSamples {
647 ns_iter_summ: stats::Summary,
651 #[derive(Clone, PartialEq)]
652 pub enum TestResult {
658 TrBench(BenchSamples),
661 unsafe impl Send for TestResult {}
663 enum OutputLocation<T> {
664 Pretty(Box<term::StdoutTerminal>),
668 impl<T: Write> Write for OutputLocation<T> {
669 fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
671 Pretty(ref mut term) => term.write(buf),
672 Raw(ref mut stdout) => stdout.write(buf),
676 fn flush(&mut self) -> io::Result<()> {
678 Pretty(ref mut term) => term.flush(),
679 Raw(ref mut stdout) => stdout.flush(),
684 struct ConsoleTestState {
685 log_out: Option<File>,
694 failures: Vec<(TestDesc, Vec<u8>)>,
695 not_failures: Vec<(TestDesc, Vec<u8>)>,
699 impl ConsoleTestState {
700 pub fn new(opts: &TestOpts) -> io::Result<ConsoleTestState> {
701 let log_out = match opts.logfile {
702 Some(ref path) => Some(File::create(path)?),
706 Ok(ConsoleTestState {
715 metrics: MetricMap::new(),
716 failures: Vec::new(),
717 not_failures: Vec::new(),
718 options: opts.options,
722 pub fn write_log<S: AsRef<str>>(&mut self, msg: S) -> io::Result<()> {
723 let msg = msg.as_ref();
726 Some(ref mut o) => o.write_all(msg.as_bytes()),
730 pub fn write_log_result(&mut self, test: &TestDesc, result: &TestResult) -> io::Result<()> {
731 self.write_log(format!(
734 TrOk => "ok".to_owned(),
735 TrFailed => "failed".to_owned(),
736 TrFailedMsg(ref msg) => format!("failed: {}", msg),
737 TrIgnored => "ignored".to_owned(),
738 TrAllowedFail => "failed (allowed)".to_owned(),
739 TrBench(ref bs) => fmt_bench_samples(bs),
745 fn current_test_count(&self) -> usize {
746 self.passed + self.failed + self.ignored + self.measured + self.allowed_fail
/// Formats `n` with `sep` inserted as a thousands separator, e.g.
/// `fmt_thousands_sep(1234567, ',')` yields `"1,234,567"`.
///
/// Walks the groups from most to least significant; the leading group is
/// printed unpadded, subsequent groups are zero-padded to three digits.
fn fmt_thousands_sep(mut n: usize, sep: char) -> String {
    use std::fmt::Write;

    let mut output = String::new();
    let mut trailing = false;
    for &pow in &[9, 6, 3, 0] {
        let base = 10_usize.pow(pow);
        // Emit a group once we hit the first non-zero one (the final group
        // is always emitted so `0` formats as "0").
        if pow == 0 || trailing || n / base != 0 {
            if !trailing {
                output.write_fmt(format_args!("{}", n / base)).unwrap();
            } else {
                output.write_fmt(format_args!("{:03}", n / base)).unwrap();
            }
            if pow != 0 {
                output.push(sep);
            }
            trailing = true;
        }
        n %= base;
    }

    output
}
774 pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
776 let mut output = String::new();
778 let median = bs.ns_iter_summ.median as usize;
779 let deviation = (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize;
782 .write_fmt(format_args!(
783 "{:>11} ns/iter (+/- {})",
784 fmt_thousands_sep(median, ','),
785 fmt_thousands_sep(deviation, ',')
790 .write_fmt(format_args!(" = {} MB/s", bs.mb_s))
796 // List the tests to console, and optionally to logfile. Filters are honored.
797 pub fn list_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<()> {
798 let mut output = match term::stdout() {
799 None => Raw(io::stdout()),
800 Some(t) => Pretty(t),
803 let quiet = opts.format == OutputFormat::Terse;
804 let mut st = ConsoleTestState::new(opts)?;
809 for test in filter_tests(&opts, tests) {
813 desc: TestDesc { name, .. },
817 let fntype = match testfn {
818 StaticTestFn(..) | DynTestFn(..) => {
822 StaticBenchFn(..) | DynBenchFn(..) => {
828 writeln!(output, "{}: {}", name, fntype)?;
829 st.write_log(format!("{} {}\n", fntype, name))?;
/// Pluralizes a count/noun pair for display: `1 test`, `2 tests`.
fn plural(count: u32, s: &str) -> String {
    match count {
        1 => format!("{} {}", 1, s),
        n => format!("{} {}s", n, s),
    }
}
840 if ntest != 0 || nbench != 0 {
841 writeln!(output, "")?;
847 plural(ntest, "test"),
848 plural(nbench, "benchmark")
855 // A simple console test runner
856 pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<bool> {
859 st: &mut ConsoleTestState,
860 out: &mut dyn OutputFormatter,
861 ) -> io::Result<()> {
862 match (*event).clone() {
863 TeFiltered(ref filtered_tests) => {
864 st.total = filtered_tests.len();
865 out.write_run_start(filtered_tests.len())
867 TeFilteredOut(filtered_out) => Ok(st.filtered_out = filtered_out),
868 TeWait(ref test) => out.write_test_start(test),
869 TeTimeout(ref test) => out.write_timeout(test),
870 TeResult(test, result, stdout) => {
871 st.write_log_result(&test, &result)?;
872 out.write_result(&test, &result, &*stdout)?;
876 st.not_failures.push((test, stdout));
878 TrIgnored => st.ignored += 1,
879 TrAllowedFail => st.allowed_fail += 1,
881 st.metrics.insert_metric(
882 test.name.as_slice(),
883 bs.ns_iter_summ.median,
884 bs.ns_iter_summ.max - bs.ns_iter_summ.min,
890 st.failures.push((test, stdout));
892 TrFailedMsg(msg) => {
894 let mut stdout = stdout;
895 stdout.extend_from_slice(format!("note: {}", msg).as_bytes());
896 st.failures.push((test, stdout));
904 let output = match term::stdout() {
905 None => Raw(io::stdout()),
906 Some(t) => Pretty(t),
909 let max_name_len = tests
911 .max_by_key(|t| len_if_padded(*t))
912 .map(|t| t.desc.name.as_slice().len())
915 let is_multithreaded = opts.test_threads.unwrap_or_else(get_concurrency) > 1;
917 let mut out: Box<dyn OutputFormatter> = match opts.format {
918 OutputFormat::Pretty => Box::new(PrettyFormatter::new(
924 OutputFormat::Terse => Box::new(TerseFormatter::new(
930 OutputFormat::Json => Box::new(JsonFormatter::new(output)),
932 let mut st = ConsoleTestState::new(opts)?;
933 fn len_if_padded(t: &TestDescAndFn) -> usize {
934 match t.testfn.padding() {
936 PadOnRight => t.desc.name.as_slice().len(),
940 run_tests(opts, tests, |x| callback(&x, &mut st, &mut *out))?;
942 assert!(st.current_test_count() == st.total);
944 return out.write_run_finish(&st);
948 fn should_sort_failures_before_printing_them() {
949 let test_a = TestDesc {
950 name: StaticTestName("a"),
952 should_panic: ShouldPanic::No,
956 let test_b = TestDesc {
957 name: StaticTestName("b"),
959 should_panic: ShouldPanic::No,
963 let mut out = PrettyFormatter::new(Raw(Vec::new()), false, 10, false);
965 let st = ConsoleTestState {
974 metrics: MetricMap::new(),
975 failures: vec![(test_b, Vec::new()), (test_a, Vec::new())],
976 options: Options::new(),
977 not_failures: Vec::new(),
980 out.write_failures(&st).unwrap();
981 let s = match out.output_location() {
982 &Raw(ref m) => String::from_utf8_lossy(&m[..]),
983 &Pretty(_) => unreachable!(),
986 let apos = s.find("a").unwrap();
987 let bpos = s.find("b").unwrap();
988 assert!(apos < bpos);
991 fn use_color(opts: &TestOpts) -> bool {
993 AutoColor => !opts.nocapture && stdout_isatty(),
999 #[cfg(any(target_os = "cloudabi", target_os = "redox",
1000 all(target_arch = "wasm32", not(target_os = "emscripten"))))]
1001 fn stdout_isatty() -> bool {
1002 // FIXME: Implement isatty on Redox
1006 fn stdout_isatty() -> bool {
1007 unsafe { libc::isatty(libc::STDOUT_FILENO) != 0 }
1010 fn stdout_isatty() -> bool {
1013 type HANDLE = *mut u8;
1014 type LPDWORD = *mut u32;
1015 const STD_OUTPUT_HANDLE: DWORD = -11i32 as DWORD;
1017 fn GetStdHandle(which: DWORD) -> HANDLE;
1018 fn GetConsoleMode(hConsoleHandle: HANDLE, lpMode: LPDWORD) -> BOOL;
1021 let handle = GetStdHandle(STD_OUTPUT_HANDLE);
1023 GetConsoleMode(handle, &mut out) != 0
1028 pub enum TestEvent {
1029 TeFiltered(Vec<TestDesc>),
1031 TeResult(TestDesc, TestResult, Vec<u8>),
1032 TeTimeout(TestDesc),
1033 TeFilteredOut(usize),
1036 pub type MonitorMsg = (TestDesc, TestResult, Vec<u8>);
1038 struct Sink(Arc<Mutex<Vec<u8>>>);
1039 impl Write for Sink {
1040 fn write(&mut self, data: &[u8]) -> io::Result<usize> {
1041 Write::write(&mut *self.0.lock().unwrap(), data)
1043 fn flush(&mut self) -> io::Result<()> {
1048 pub fn run_tests<F>(opts: &TestOpts, tests: Vec<TestDescAndFn>, mut callback: F) -> io::Result<()>
1050 F: FnMut(TestEvent) -> io::Result<()>,
1052 use std::collections::HashMap;
1053 use std::sync::mpsc::RecvTimeoutError;
1055 let tests_len = tests.len();
1057 let mut filtered_tests = filter_tests(opts, tests);
1058 if !opts.bench_benchmarks {
1059 filtered_tests = convert_benchmarks_to_tests(filtered_tests);
1062 let filtered_tests = {
1063 let mut filtered_tests = filtered_tests;
1064 for test in filtered_tests.iter_mut() {
1065 test.desc.name = test.desc.name.with_padding(test.testfn.padding());
1071 let filtered_out = tests_len - filtered_tests.len();
1072 callback(TeFilteredOut(filtered_out))?;
1074 let filtered_descs = filtered_tests.iter().map(|t| t.desc.clone()).collect();
1076 callback(TeFiltered(filtered_descs))?;
1078 let (filtered_tests, filtered_benchs): (Vec<_>, _) =
1079 filtered_tests.into_iter().partition(|e| match e.testfn {
1080 StaticTestFn(_) | DynTestFn(_) => true,
1084 let concurrency = opts.test_threads.unwrap_or_else(get_concurrency);
1086 let mut remaining = filtered_tests;
1087 remaining.reverse();
1088 let mut pending = 0;
1090 let (tx, rx) = channel::<MonitorMsg>();
1092 let mut running_tests: HashMap<TestDesc, Instant> = HashMap::new();
1094 fn get_timed_out_tests(running_tests: &mut HashMap<TestDesc, Instant>) -> Vec<TestDesc> {
1095 let now = Instant::now();
1096 let timed_out = running_tests
1098 .filter_map(|(desc, timeout)| {
1099 if &now >= timeout {
1106 for test in &timed_out {
1107 running_tests.remove(test);
1112 fn calc_timeout(running_tests: &HashMap<TestDesc, Instant>) -> Option<Duration> {
1113 running_tests.values().min().map(|next_timeout| {
1114 let now = Instant::now();
1115 if *next_timeout >= now {
1123 if concurrency == 1 {
1124 while !remaining.is_empty() {
1125 let test = remaining.pop().unwrap();
1126 callback(TeWait(test.desc.clone()))?;
1127 run_test(opts, !opts.run_tests, test, tx.clone());
1128 let (test, result, stdout) = rx.recv().unwrap();
1129 callback(TeResult(test, result, stdout))?;
1132 while pending > 0 || !remaining.is_empty() {
1133 while pending < concurrency && !remaining.is_empty() {
1134 let test = remaining.pop().unwrap();
1135 let timeout = Instant::now() + Duration::from_secs(TEST_WARN_TIMEOUT_S);
1136 running_tests.insert(test.desc.clone(), timeout);
1137 callback(TeWait(test.desc.clone()))?; //here no pad
1138 run_test(opts, !opts.run_tests, test, tx.clone());
1144 if let Some(timeout) = calc_timeout(&running_tests) {
1145 res = rx.recv_timeout(timeout);
1146 for test in get_timed_out_tests(&mut running_tests) {
1147 callback(TeTimeout(test))?;
1149 if res != Err(RecvTimeoutError::Timeout) {
1153 res = rx.recv().map_err(|_| RecvTimeoutError::Disconnected);
1158 let (desc, result, stdout) = res.unwrap();
1159 running_tests.remove(&desc);
1161 callback(TeResult(desc, result, stdout))?;
1166 if opts.bench_benchmarks {
1167 // All benchmarks run at the end, in serial.
1168 for b in filtered_benchs {
1169 callback(TeWait(b.desc.clone()))?;
1170 run_test(opts, false, b, tx.clone());
1171 let (test, result, stdout) = rx.recv().unwrap();
1172 callback(TeResult(test, result, stdout))?;
1178 #[allow(deprecated)]
1179 fn get_concurrency() -> usize {
1180 return match env::var("RUST_TEST_THREADS") {
1182 let opt_n: Option<usize> = s.parse().ok();
1184 Some(n) if n > 0 => n,
1186 "RUST_TEST_THREADS is `{}`, should be a positive integer.",
1191 Err(..) => num_cpus(),
1195 #[allow(nonstandard_style)]
1196 fn num_cpus() -> usize {
1198 struct SYSTEM_INFO {
1199 wProcessorArchitecture: u16,
1202 lpMinimumApplicationAddress: *mut u8,
1203 lpMaximumApplicationAddress: *mut u8,
1204 dwActiveProcessorMask: *mut u8,
1205 dwNumberOfProcessors: u32,
1206 dwProcessorType: u32,
1207 dwAllocationGranularity: u32,
1208 wProcessorLevel: u16,
1209 wProcessorRevision: u16,
1212 fn GetSystemInfo(info: *mut SYSTEM_INFO) -> i32;
1215 let mut sysinfo = std::mem::zeroed();
1216 GetSystemInfo(&mut sysinfo);
1217 sysinfo.dwNumberOfProcessors as usize
1221 #[cfg(target_os = "redox")]
1222 fn num_cpus() -> usize {
1223 // FIXME: Implement num_cpus on Redox
1227 #[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))]
1228 fn num_cpus() -> usize {
1232 #[cfg(any(target_os = "android", target_os = "cloudabi", target_os = "emscripten",
1233 target_os = "fuchsia", target_os = "ios", target_os = "linux",
1234 target_os = "macos", target_os = "solaris"))]
1235 fn num_cpus() -> usize {
1236 unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as usize }
1239 #[cfg(any(target_os = "freebsd", target_os = "dragonfly", target_os = "bitrig",
1240 target_os = "netbsd"))]
1241 fn num_cpus() -> usize {
1244 let mut cpus: libc::c_uint = 0;
1245 let mut cpus_size = std::mem::size_of_val(&cpus);
1248 cpus = libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as libc::c_uint;
1251 let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
1256 &mut cpus as *mut _ as *mut _,
1257 &mut cpus_size as *mut _ as *mut _,
1269 #[cfg(target_os = "openbsd")]
1270 fn num_cpus() -> usize {
1273 let mut cpus: libc::c_uint = 0;
1274 let mut cpus_size = std::mem::size_of_val(&cpus);
1275 let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
1281 &mut cpus as *mut _ as *mut _,
1282 &mut cpus_size as *mut _ as *mut _,
1293 #[cfg(target_os = "haiku")]
1294 fn num_cpus() -> usize {
1299 #[cfg(target_os = "l4re")]
1300 fn num_cpus() -> usize {
1306 pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
// NOTE(review): several structural lines of this function are missing from
// this dump (closure/brace closings and the run_ignored map/collect tail);
// comments below describe only what the visible lines establish.
1307 let mut filtered = tests;
// Shared predicate: exact name equality under --exact, substring otherwise.
1308 let matches_filter = |test: &TestDescAndFn, filter: &str| {
1309 let test_name = test.desc.name.as_slice();
1311 match opts.filter_exact {
1312 true => test_name == filter,
1313 false => test_name.contains(filter),
1317 // Remove tests that don't match the test filter
1318 if let Some(ref filter) = opts.filter {
1319 filtered.retain(|test| matches_filter(test, filter));
1322 // Skip tests that match any of the skip filters
1323 filtered.retain(|test| {
1324 !opts.skip.iter().any(|sf| matches_filter(test, sf))
// With --ignored, keep ONLY the ignored tests and clear their ignore flag
// so the runner will execute them.
1327 // Maybe pull out the ignored test and unignore them
1328 if opts.run_ignored {
1329 filtered = filtered.into_iter()
1330 .filter(|test| test.desc.ignore)
1332 test.desc.ignore = false;
1338 // Sort the tests alphabetically
1339 filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));
1344 pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
1345 // convert benchmarks to tests, if we're not benchmarking them
1349 let testfn = match x.testfn {
1350 DynBenchFn(bench) => DynTestFn(Box::new(move || {
1351 bench::run_once(|b| __rust_begin_short_backtrace(|| bench.run(b)))
1353 StaticBenchFn(benchfn) => DynTestFn(Box::new(move || {
1354 bench::run_once(|b| __rust_begin_short_backtrace(|| benchfn(b)))
1369 test: TestDescAndFn,
1370 monitor_ch: Sender<MonitorMsg>,
1372 let TestDescAndFn { desc, testfn } = test;
1374 let ignore_because_panic_abort = cfg!(target_arch = "wasm32") && !cfg!(target_os = "emscripten")
1375 && desc.should_panic != ShouldPanic::No;
1377 if force_ignore || desc.ignore || ignore_because_panic_abort {
1378 monitor_ch.send((desc, TrIgnored, Vec::new())).unwrap();
1384 monitor_ch: Sender<MonitorMsg>,
1386 testfn: Box<dyn FnBox() + Send>,
1388 // Buffer for capturing standard I/O
1389 let data = Arc::new(Mutex::new(Vec::new()));
1390 let data2 = data.clone();
1392 let name = desc.name.clone();
1393 let runtest = move || {
1394 let oldio = if !nocapture {
1396 io::set_print(Some(Box::new(Sink(data2.clone())))),
1397 io::set_panic(Some(Box::new(Sink(data2)))),
1403 let result = catch_unwind(AssertUnwindSafe(testfn));
1405 if let Some((printio, panicio)) = oldio {
1406 io::set_print(printio);
1407 io::set_panic(panicio);
1410 let test_result = calc_result(&desc, result);
1411 let stdout = data.lock().unwrap().to_vec();
1413 .send((desc.clone(), test_result, stdout))
1417 // If the platform is single-threaded we're just going to run
1418 // the test synchronously, regardless of the concurrency
1420 let supports_threads = !cfg!(target_os = "emscripten") && !cfg!(target_arch = "wasm32");
1421 if supports_threads {
1422 let cfg = thread::Builder::new().name(name.as_slice().to_owned());
1423 cfg.spawn(runtest).unwrap();
1430 DynBenchFn(bencher) => {
1431 ::bench::benchmark(desc, monitor_ch, opts.nocapture, |harness| {
1432 bencher.run(harness)
1435 StaticBenchFn(benchfn) => {
1436 ::bench::benchmark(desc, monitor_ch, opts.nocapture, |harness| {
1437 (benchfn.clone())(harness)
1441 let cb = move || __rust_begin_short_backtrace(f);
1442 run_test_inner(desc, monitor_ch, opts.nocapture, Box::new(cb))
1444 StaticTestFn(f) => run_test_inner(
1448 Box::new(move || __rust_begin_short_backtrace(f)),
1453 /// Fixed frame used to clean the backtrace with `RUST_BACKTRACE=1`.
1455 fn __rust_begin_short_backtrace<F: FnOnce()>(f: F) {
1459 fn calc_result(desc: &TestDesc, task_result: Result<(), Box<dyn Any + Send>>) -> TestResult {
1460 match (&desc.should_panic, task_result) {
1461 (&ShouldPanic::No, Ok(())) | (&ShouldPanic::Yes, Err(_)) => TrOk,
1462 (&ShouldPanic::YesWithMessage(msg), Err(ref err)) => {
1463 if err.downcast_ref::<String>()
1465 .or_else(|| err.downcast_ref::<&'static str>().map(|e| *e))
1466 .map(|e| e.contains(msg))
1471 if desc.allow_fail {
1474 TrFailedMsg(format!("Panic did not include expected string '{}'", msg))
1478 _ if desc.allow_fail => TrAllowedFail,
1483 #[derive(Clone, PartialEq)]
1484 pub struct MetricMap(BTreeMap<String, Metric>);
1487 pub fn new() -> MetricMap {
1488 MetricMap(BTreeMap::new())
1491 /// Insert a named `value` (+/- `noise`) metric into the map. The value
1492 /// must be non-negative. The `noise` indicates the uncertainty of the
1493 /// metric, which doubles as the "noise range" of acceptable
1494 /// pairwise-regressions on this named value, when comparing from one
1495 /// metric to the next using `compare_to_old`.
1497 /// If `noise` is positive, then it means this metric is of a value
1498 /// you want to see grow smaller, so a change larger than `noise` in the
1499 /// positive direction represents a regression.
1501 /// If `noise` is negative, then it means this metric is of a value
1502 /// you want to see grow larger, so a change larger than `noise` in the
1503 /// negative direction represents a regression.
1504 pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
1505 let m = Metric { value, noise };
1506 self.0.insert(name.to_owned(), m);
// Renders each metric as "name: value (+/- noise)", in key order.
// NOTE(review): the iteration source and the final join of the collected
// strings are on lines elided from this view.
1509 pub fn fmt_metrics(&self) -> String {
1512 .map(|(k, v)| format!("{}: {} (+/- {})", *k, v.value, v.noise))
1513 .collect::<Vec<_>>();
1520 /// A function that is opaque to the optimizer, to allow benchmarks to
1521 /// pretend to use outputs to assist in avoiding dead-code
1524 /// This function is a no-op, and does not even read from `dummy`.
1525 #[cfg(not(any(target_arch = "asmjs", target_arch = "wasm32")))]
1526 pub fn black_box<T>(dummy: T) -> T {
1527 // we need to "use" the argument in some way LLVM can't
// The empty inline-asm block takes `&dummy` as an "r" input, forcing LLVM
// to assume the value escapes and so preventing dead-code elimination of
// the benchmarked computation. The value is returned unchanged (the
// trailing `dummy` expression and closing brace are elided in this view).
1529 unsafe { asm!("" : : "r"(&dummy)) }
// Fallback for targets without inline-asm support (asmjs/wasm32): no
// optimization barrier is available here, so this presumably just returns
// `dummy` (body elided in this view).
1532 #[cfg(any(target_arch = "asmjs", target_arch = "wasm32"))]
1534 pub fn black_box<T>(dummy: T) -> T {
1539 /// Callback for benchmark functions to run in their body.
1540 pub fn iter<T, F>(&mut self, mut inner: F)
// In `Single` mode (set by `run_once`) the closure is executed exactly once
// and no statistics are gathered; otherwise the adaptive sampling loop in
// the free function `iter` below produces a `stats::Summary`.
1544 if self.mode == BenchMode::Single {
1545 ns_iter_inner(&mut inner, 1);
// Auto mode: record the summary so `bench` can return it.
1549 self.summary = Some(iter(&mut inner));
// Runs the benchmark closure against this `Bencher` and returns the summary
// recorded by `iter` — `None` if the closure never called `iter`.
1552 pub fn bench<F>(&mut self, mut f: F) -> Option<stats::Summary>
1554 F: FnMut(&mut Bencher),
1557 return self.summary;
/// Flattens a `Duration` into a single nanosecond count.
///
/// Overflow is not guarded: a duration near `u64::MAX` nanoseconds
/// (~584 years) would overflow the multiplication, which is irrelevant
/// for benchmark timings.
fn ns_from_dur(dur: Duration) -> u64 {
    let whole_secs_ns = dur.as_secs() * 1_000_000_000;
    whole_secs_ns + u64::from(dur.subsec_nanos())
}
// Times `k` consecutive invocations of `inner`, returning the elapsed
// wall-clock time in nanoseconds (the invocation loop itself is on lines
// elided from this view).
1565 fn ns_iter_inner<T, F>(inner: &mut F, k: u64) -> u64
1569 let start = Instant::now();
1573 return ns_from_dur(start.elapsed());
// Adaptive benchmark driver: starts from a single-iteration ballpark
// measurement, then repeatedly takes sample sets at `n` and `5*n`
// iterations until the timings converge or a time budget is exhausted.
// NOTE(review): loop structure, convergence `return`s and the final tail of
// the function are on lines elided from this view.
1576 pub fn iter<T, F>(inner: &mut F) -> stats::Summary
1580 // Initial bench run to get ballpark figure.
1581 let ns_single = ns_iter_inner(inner, 1);
1583 // Try to estimate iter count for 1ms falling back to 1m
1584 // iterations if first run took < 1ns.
1585 let ns_target_total = 1_000_000; // 1ms
1586 let mut n = ns_target_total / cmp::max(1, ns_single);
1588 // if the first run took more than 1ms we don't want to just
1589 // be left doing 0 iterations on every loop. The unfortunate
1590 // side effect of not being able to do as many runs is
1591 // automatically handled by the statistical analysis below
1592 // (i.e. larger error bars).
// (An elided line presumably clamps `n` to at least 1 here — confirm.)
1595 let mut total_run = Duration::new(0, 0);
1596 let samples: &mut [f64] = &mut [0.0_f64; 50];
1598 let loop_start = Instant::now();
// First sample set: 50 measurements of `n` iterations each, normalized to
// nanoseconds per iteration.
1600 for p in &mut *samples {
1601 *p = ns_iter_inner(inner, n) as f64 / n as f64;
// Clip the extreme 5% of outliers before summarizing.
1604 stats::winsorize(samples, 5.0);
1605 let summ = stats::Summary::new(samples);
// Second sample set at 5x the iteration count, used as a convergence check
// against the first set.
1607 for p in &mut *samples {
1608 let ns = ns_iter_inner(inner, 5 * n);
1609 *p = ns as f64 / (5 * n) as f64;
1612 stats::winsorize(samples, 5.0);
1613 let summ5 = stats::Summary::new(samples);
1615 let loop_run = loop_start.elapsed();
1617 // If we've run for 100ms and seem to have converged to a
// low-variance median that agrees between the `n` and `5n` sets, accept.
1619 if loop_run > Duration::from_millis(100) && summ.median_abs_dev_pct < 1.0
1620 && summ.median - summ5.median < summ5.median_abs_dev
1625 total_run = total_run + loop_run;
1626 // Longest we ever run for is 3s.
1627 if total_run > Duration::from_secs(3) {
1631 // If we overflow here just return the results so far. We check a
1632 // multiplier of 10 because we're about to multiply by 2 and the
1633 // next iteration of the loop will also multiply by 5 (to calculate
1634 // the summ5 result)
1635 n = match n.checked_mul(10) {
1645 use std::panic::{catch_unwind, AssertUnwindSafe};
1648 use std::sync::{Arc, Mutex};
1650 use super::{BenchMode, BenchSamples, Bencher, MonitorMsg, Sender, Sink, TestDesc, TestResult};
// Runs a single benchmark function to completion, capturing its
// stdout/panic output (unless `nocapture`) and reporting the outcome on
// `monitor_ch`. NOTE(review): several struct-literal fields and closing
// braces are on lines elided from this view.
1652 pub fn benchmark<F>(desc: TestDesc, monitor_ch: Sender<MonitorMsg>, nocapture: bool, f: F)
1654 F: FnMut(&mut Bencher),
1656 let mut bs = Bencher {
1657 mode: BenchMode::Auto,
// Shared buffer that receives everything the benchmark prints.
1662 let data = Arc::new(Mutex::new(Vec::new()));
1663 let data2 = data.clone();
// Redirect the thread's print/panic sinks for the duration of the run,
// keeping the previous handles so they can be restored afterwards.
1665 let oldio = if !nocapture {
1667 io::set_print(Some(Box::new(Sink(data2.clone())))),
1668 io::set_panic(Some(Box::new(Sink(data2)))),
// Catch panics so a failing benchmark is reported, not aborted on.
1674 let result = catch_unwind(AssertUnwindSafe(|| bs.bench(f)));
1676 if let Some((printio, panicio)) = oldio {
1677 io::set_print(printio);
1678 io::set_panic(panicio);
1681 let test_result = match result {
1683 Ok(Some(ns_iter_summ)) => {
// Clamp to 1ns to avoid dividing by zero for sub-nanosecond medians.
1684 let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
// bytes/ns * 1000 == bytes/us == MB/s throughput.
1685 let mb_s = bs.bytes * 1000 / ns_iter;
1687 let bs = BenchSamples {
1689 mb_s: mb_s as usize,
1691 TestResult::TrBench(bs)
1694 // iter not called, so no data.
1695 // FIXME: error in this case?
1696 let samples: &mut [f64] = &mut [0.0_f64; 1];
1697 let bs = BenchSamples {
1698 ns_iter_summ: stats::Summary::new(samples),
1701 TestResult::TrBench(bs)
1703 Err(_) => TestResult::TrFailed,
// Forward the captured output along with the result to the monitor.
1706 let stdout = data.lock().unwrap().to_vec();
1707 monitor_ch.send((desc, test_result, stdout)).unwrap();
// Runs `f` exactly once using `BenchMode::Single`, gathering no statistics.
// NOTE(review): presumably used when benchmarks execute under plain `--test`
// runs — confirm against callers outside this view.
1710 pub fn run_once<F>(f: F)
1712 F: FnMut(&mut Bencher),
1714 let mut bs = Bencher {
1715 mode: BenchMode::Single,
1725 use test::{filter_tests, parse_opts, run_test, DynTestFn, DynTestName, MetricMap, ShouldPanic,
1726 StaticTestName, TestDesc, TestDescAndFn, TestOpts, TrFailed, TrFailedMsg,
1728 use std::sync::mpsc::channel;
// A test marked ignored (field elided in this view) must not come back as
// TrOk when run without the ignored-tests option.
1733 pub fn do_not_run_ignored_tests() {
1737 let desc = TestDescAndFn {
1739 name: StaticTestName("whatever"),
1741 should_panic: ShouldPanic::No,
1744 testfn: DynTestFn(Box::new(f)),
1746 let (tx, rx) = channel();
1747 run_test(&TestOpts::new(), false, desc, tx);
1748 let (_, res, _) = rx.recv().unwrap();
1749 assert!(res != TrOk);
// An ignored test that is nevertheless executed must report TrIgnored.
1753 pub fn ignored_tests_result_in_ignored() {
1755 let desc = TestDescAndFn {
1757 name: StaticTestName("whatever"),
1759 should_panic: ShouldPanic::No,
1762 testfn: DynTestFn(Box::new(f)),
1764 let (tx, rx) = channel();
1765 run_test(&TestOpts::new(), false, desc, tx);
1766 let (_, res, _) = rx.recv().unwrap();
1767 assert!(res == TrIgnored);
// A test declared ShouldPanic::Yes that does panic (the panicking test fn
// is on an elided line) must be reported as TrOk.
1771 fn test_should_panic() {
1775 let desc = TestDescAndFn {
1777 name: StaticTestName("whatever"),
1779 should_panic: ShouldPanic::Yes,
1782 testfn: DynTestFn(Box::new(f)),
1784 let (tx, rx) = channel();
1785 run_test(&TestOpts::new(), false, desc, tx);
1786 let (_, res, _) = rx.recv().unwrap();
1787 assert!(res == TrOk);
// YesWithMessage matches by substring: panicking with "an error message"
// satisfies the expected fragment "error message", so the result is TrOk.
1791 fn test_should_panic_good_message() {
1793 panic!("an error message");
1795 let desc = TestDescAndFn {
1797 name: StaticTestName("whatever"),
1799 should_panic: ShouldPanic::YesWithMessage("error message"),
1802 testfn: DynTestFn(Box::new(f)),
1804 let (tx, rx) = channel();
1805 run_test(&TestOpts::new(), false, desc, tx);
1806 let (_, res, _) = rx.recv().unwrap();
1807 assert!(res == TrOk);
// A panic whose message does not contain the expected fragment must produce
// TrFailedMsg with the exact wording built in `calc_result`.
1811 fn test_should_panic_bad_message() {
1813 panic!("an error message");
1815 let expected = "foobar";
// Must stay in sync with the message format!-ed in `calc_result`.
1816 let failed_msg = "Panic did not include expected string";
1817 let desc = TestDescAndFn {
1819 name: StaticTestName("whatever"),
1821 should_panic: ShouldPanic::YesWithMessage(expected),
1824 testfn: DynTestFn(Box::new(f)),
1826 let (tx, rx) = channel();
1827 run_test(&TestOpts::new(), false, desc, tx);
1828 let (_, res, _) = rx.recv().unwrap();
1829 assert!(res == TrFailedMsg(format!("{} '{}'", failed_msg, expected)));
// A test declared ShouldPanic::Yes that completes without panicking (the
// non-panicking test fn is on an elided line) must be reported TrFailed.
1833 fn test_should_panic_but_succeeds() {
1835 let desc = TestDescAndFn {
1837 name: StaticTestName("whatever"),
1839 should_panic: ShouldPanic::Yes,
1842 testfn: DynTestFn(Box::new(f)),
1844 let (tx, rx) = channel();
1845 run_test(&TestOpts::new(), false, desc, tx);
1846 let (_, res, _) = rx.recv().unwrap();
1847 assert!(res == TrFailed);
// Passing `--ignored` on the simulated command line must set
// `opts.run_ignored`.
1851 fn parse_ignored_flag() {
1853 "progname".to_string(),
1854 "filter".to_string(),
1855 "--ignored".to_string(),
1857 let opts = match parse_opts(&args) {
1859 _ => panic!("Malformed arg in parse_ignored_flag"),
1861 assert!((opts.run_ignored));
1865 pub fn filter_for_ignored_option() {
1866 // When we run ignored tests the test filter should filter out all the
1867 // unignored tests and flip the ignore flag on the rest to false
1869 let mut opts = TestOpts::new();
1870 opts.run_tests = true;
1871 opts.run_ignored = true;
// Two tests are constructed (ignore flags on elided lines): presumably "1"
// is ignored and "2" is not, given the assertions below — confirm.
1876 name: StaticTestName("1"),
1878 should_panic: ShouldPanic::No,
1881 testfn: DynTestFn(Box::new(move || {})),
1885 name: StaticTestName("2"),
1887 should_panic: ShouldPanic::No,
1890 testfn: DynTestFn(Box::new(move || {})),
1893 let filtered = filter_tests(&opts, tests);
// Only the formerly-ignored test survives, now with ignore = false.
1895 assert_eq!(filtered.len(), 1);
1896 assert_eq!(filtered[0].desc.name.to_string(), "1");
1897 assert!(!filtered[0].desc.ignore);
// Exercises substring vs exact filter matching over a fixed test-name set.
// NOTE(review): the `filter_exact`/option fields of each TestOpts literal
// are on elided lines; the expectations below imply substring mode first,
// exact mode afterwards.
1901 pub fn exact_filter_match() {
1902 fn tests() -> Vec<TestDescAndFn> {
1903 vec!["base", "base::test", "base::test1", "base::test2"]
1905 .map(|name| TestDescAndFn {
1907 name: StaticTestName(name),
1909 should_panic: ShouldPanic::No,
1912 testfn: DynTestFn(Box::new(move || {})),
// Substring filtering: every name contains "base".
1917 let substr = filter_tests(
1919 filter: Some("base".into()),
1924 assert_eq!(substr.len(), 4);
// Substring filtering: partial word "bas" still matches all four.
1926 let substr = filter_tests(
1928 filter: Some("bas".into()),
1933 assert_eq!(substr.len(), 4);
// Substring "::test" excludes the bare "base" entry.
1935 let substr = filter_tests(
1937 filter: Some("::test".into()),
1942 assert_eq!(substr.len(), 3);
1944 let substr = filter_tests(
1946 filter: Some("base::test".into()),
1951 assert_eq!(substr.len(), 3);
// Exact filtering: only a full-name match ("base") survives.
1953 let exact = filter_tests(
1955 filter: Some("base".into()),
1961 assert_eq!(exact.len(), 1);
// Exact filtering: a prefix that is not a full name matches nothing.
1963 let exact = filter_tests(
1965 filter: Some("bas".into()),
1971 assert_eq!(exact.len(), 0);
1973 let exact = filter_tests(
1975 filter: Some("::test".into()),
1981 assert_eq!(exact.len(), 0);
1983 let exact = filter_tests(
1985 filter: Some("base::test".into()),
1991 assert_eq!(exact.len(), 1);
// Filtering must also sort the surviving tests lexicographically by name:
// the shuffled `names` list below must come back in `expected` order.
1995 pub fn sort_tests() {
1996 let mut opts = TestOpts::new();
1997 opts.run_tests = true;
// Deliberately out-of-order input names.
2000 "sha1::test".to_string(),
2001 "isize::test_to_str".to_string(),
2002 "isize::test_pow".to_string(),
2003 "test::do_not_run_ignored_tests".to_string(),
2004 "test::ignored_tests_result_in_ignored".to_string(),
2005 "test::first_free_arg_should_be_a_filter".to_string(),
2006 "test::parse_ignored_flag".to_string(),
2007 "test::filter_for_ignored_option".to_string(),
2008 "test::sort_tests".to_string(),
2012 let mut tests = Vec::new();
2013 for name in &names {
2014 let test = TestDescAndFn {
2016 name: DynTestName((*name).clone()),
2018 should_panic: ShouldPanic::No,
2021 testfn: DynTestFn(Box::new(testfn)),
2027 let filtered = filter_tests(&opts, tests);
// The same names, in ascending lexicographic order.
2029 let expected = vec![
2030 "isize::test_pow".to_string(),
2031 "isize::test_to_str".to_string(),
2032 "sha1::test".to_string(),
2033 "test::do_not_run_ignored_tests".to_string(),
2034 "test::filter_for_ignored_option".to_string(),
2035 "test::first_free_arg_should_be_a_filter".to_string(),
2036 "test::ignored_tests_result_in_ignored".to_string(),
2037 "test::parse_ignored_flag".to_string(),
2038 "test::sort_tests".to_string(),
2041 for (a, b) in expected.iter().zip(filtered) {
2042 assert!(*a == b.desc.name.to_string());
// Builds two MetricMaps covering the regression/improvement cases described
// in `insert_metric`'s docs (positive noise = want smaller, negative noise
// = want larger). NOTE(review): the assertions comparing m1 and m2 are on
// lines elided from this view.
2047 pub fn test_metricmap_compare() {
2048 let mut m1 = MetricMap::new();
2049 let mut m2 = MetricMap::new();
// Change within the noise range: not a regression.
2050 m1.insert_metric("in-both-noise", 1000.0, 200.0);
2051 m2.insert_metric("in-both-noise", 1100.0, 200.0);
// Metrics present in only one of the two maps.
2053 m1.insert_metric("in-first-noise", 1000.0, 2.0);
2054 m2.insert_metric("in-second-noise", 1000.0, 2.0);
// Positive noise (want smaller): value grew => regression.
2056 m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
2057 m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);
// Positive noise (want smaller): value shrank => improvement.
2059 m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
2060 m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);
// Negative noise (want larger): value shrank => regression.
2062 m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
2063 m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);
// Negative noise (want larger): value grew => improvement.
2065 m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
2066 m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);
// `bench::run_once` must succeed even when the bench fn never calls `iter`
// (the `run_once` invocation is on an elided line).
2070 pub fn test_bench_once_no_iter() {
2071 fn f(_: &mut Bencher) {}
// `bench::run_once` with a bench fn that does call `iter` (the body and the
// `run_once` invocation are on elided lines).
2076 pub fn test_bench_once_iter() {
2077 fn f(b: &mut Bencher) {
// Full `bench::benchmark` on a bench fn that never calls `iter` must still
// complete and send a result over the channel (see the FIXME in
// `benchmark`'s no-data arm).
2084 pub fn test_bench_no_iter() {
2085 fn f(_: &mut Bencher) {}
2087 let (tx, rx) = channel();
2089 let desc = TestDesc {
2090 name: StaticTestName("f"),
2092 should_panic: ShouldPanic::No,
// nocapture = true so the harness's own output redirection is untouched.
2096 ::bench::benchmark(desc, tx, true, f);
2101 pub fn test_bench_iter() {
2102 fn f(b: &mut Bencher) {
2106 let (tx, rx) = channel();
2108 let desc = TestDesc {
2109 name: StaticTestName("f"),
2111 should_panic: ShouldPanic::No,
2115 ::bench::benchmark(desc, tx, true, f);