1 // Copyright 2012-2017 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 //! Support code for rustc's built in unit-test and micro-benchmarking
14 //! Almost all user code will only be interested in `Bencher` and
15 //! `black_box`. All other interactions (such as writing tests and
16 //! benchmarks themselves) should be done via the `#[test]` and
17 //! `#[bench]` attributes.
19 //! See the [Testing Chapter](../book/first-edition/testing.html) of the book for more details.
21 // Currently, not much of this is meant for users. It is intended to
22 // support the simplest interface possible for representing and
23 // running tests while providing a base that other test frameworks may
26 // NB: this is also specified in this crate's Cargo.toml, but libsyntax contains logic specific to
27 // this crate, which relies on this attribute (rather than the value of `--crate-name` passed by
28 // cargo) to detect this crate.
30 #![crate_name = "test"]
31 #![unstable(feature = "test", issue = "27812")]
32 #![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
33 html_favicon_url = "https://doc.rust-lang.org/favicon.ico",
34 html_root_url = "https://doc.rust-lang.org/nightly/", test(attr(deny(warnings))))]
37 #![cfg_attr(any(unix, target_os = "cloudabi"), feature(libc))]
38 #![cfg_attr(not(stage0), feature(nll))]
39 #![feature(set_stdio)]
40 #![feature(panic_unwind)]
41 #![feature(staged_api)]
42 #![feature(termination_trait_lib)]
45 #[cfg(any(unix, target_os = "cloudabi"))]
47 extern crate panic_unwind;
50 pub use self::TestFn::*;
51 pub use self::ColorConfig::*;
52 pub use self::TestResult::*;
53 pub use self::TestName::*;
54 use self::TestEvent::*;
55 use self::NamePadding::*;
56 use self::OutputLocation::*;
58 use std::panic::{catch_unwind, AssertUnwindSafe};
60 use std::boxed::FnBox;
62 use std::collections::BTreeMap;
66 use std::io::prelude::*;
68 use std::path::PathBuf;
69 use std::process::Termination;
70 use std::sync::mpsc::{channel, Sender};
71 use std::sync::{Arc, Mutex};
73 use std::time::{Duration, Instant};
// Warn about a test that has been running for longer than this many seconds.
const TEST_WARN_TIMEOUT_S: u64 = 60;
const QUIET_MODE_MAX_COLUMN: usize = 100; // insert a '\n' after 100 tests in quiet mode
80 // to be used by rustc to compile tests in libtest
82 pub use {assert_test_result, filter_tests, parse_opts, run_test, test_main, test_main_static,
83 Bencher, DynTestFn, DynTestName, Metric, MetricMap, Options, ShouldPanic,
84 StaticBenchFn, StaticTestFn, StaticTestName, TestDesc, TestDescAndFn, TestName,
85 TestOpts, TestResult, TrFailed, TrFailedMsg, TrIgnored, TrOk};
91 use formatters::{JsonFormatter, OutputFormatter, PrettyFormatter, TerseFormatter};
// The name of a test. By convention this follows the rules for rust
// paths; i.e. it should be a series of identifiers separated by double
// colons. This way if some test runner wants to arrange the tests
// hierarchically it may.
#[derive(Clone, PartialEq, Eq, Hash, Debug)]
// NOTE(review): the `pub enum TestName {` header and the `DynTestName(String)`
// variant are not visible in this excerpt.
StaticTestName(&'static str),
AlignedTestName(Cow<'static, str>, NamePadding),
// Borrow the name as a `&str`, whichever variant carries it.
fn as_slice(&self) -> &str {
StaticTestName(s) => s,
DynTestName(ref s) => s,
AlignedTestName(ref s, _) => &*s,
// Only `AlignedTestName` carries an explicit padding; the arms for the
// other variants are elided in this excerpt.
fn padding(&self) -> NamePadding {
&AlignedTestName(_, p) => p,
// Re-wrap any variant as an `AlignedTestName` with the given padding,
// converting the inner name into a `Cow` without changing its text.
fn with_padding(&self, padding: NamePadding) -> TestName {
let name = match self {
&TestName::StaticTestName(name) => Cow::Borrowed(name),
&TestName::DynTestName(ref name) => Cow::Owned(name.clone()),
&TestName::AlignedTestName(ref name, _) => name.clone(),
TestName::AlignedTestName(name, padding)
// `Display` simply delegates to the underlying string slice.
impl fmt::Display for TestName {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(self.as_slice(), f)
#[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
pub enum NamePadding {
// Pad the test name with spaces out to `column_count` characters.
fn padded_name(&self, column_count: usize, align: NamePadding) -> String {
let mut name = String::from(self.name.as_slice());
// `saturating_sub` avoids underflow when the name is already wider than the column.
let fill = column_count.saturating_sub(name.len());
let pad = " ".repeat(fill);
/// Represents a benchmark function.
pub trait TDynBenchFn: Send {
fn run(&self, harness: &mut Bencher);
// A function that runs a test. If the function returns successfully,
// the test succeeds; if the function panics then the test fails. We
// may need to come up with a more clever definition of test in order
// to support isolation of tests into threads.
StaticBenchFn(fn(&mut Bencher)),
DynTestFn(Box<dyn FnBox() + Send>),
DynBenchFn(Box<dyn TDynBenchFn + 'static>),
// Benchmarks get right-padding so their timings line up in the output;
// plain tests get none.
fn padding(&self) -> NamePadding {
StaticTestFn(..) => PadNone,
StaticBenchFn(..) => PadOnRight,
DynTestFn(..) => PadNone,
DynBenchFn(..) => PadOnRight,
// Manual Debug impl: the boxed closures in the variants are not `Debug`,
// so only the variant name is printed.
impl fmt::Debug for TestFn {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.write_str(match *self {
StaticTestFn(..) => "StaticTestFn(..)",
StaticBenchFn(..) => "StaticBenchFn(..)",
DynTestFn(..) => "DynTestFn(..)",
DynBenchFn(..) => "DynBenchFn(..)",
/// Manager of the benchmarking runs.
/// This is fed into functions marked with `#[bench]` to allow for
/// set-up & tear-down before running a piece of code repeatedly via a
// Most recent sampling summary, if a benchmark body has run.
summary: Option<stats::Summary>,
#[derive(Clone, PartialEq, Eq)]
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum ShouldPanic {
YesWithMessage(&'static str),
// The definition of a single test. A test runner will run a list of
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct TestDesc {
pub should_panic: ShouldPanic,
pub allow_fail: bool,
pub struct TestDescAndFn {
#[derive(Clone, PartialEq, Debug, Copy)]
pub fn new(value: f64, noise: f64) -> Metric {
Metric { value, noise }
/// In case we want to add other options as well, just add them in this struct.
#[derive(Copy, Clone, Debug)]
display_output: bool,
pub fn new() -> Options {
display_output: false,
// Builder-style setter: consumes and returns `self` for chaining.
pub fn display_output(mut self, display_output: bool) -> Options {
self.display_output = display_output;
// The default console test runner. It accepts the command line
// arguments and a vector of test_descs.
pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>, options: Options) {
let mut opts = match parse_opts(args) {
eprintln!("error: {}", msg);
opts.options = options;
// Listing mode: print the tests instead of running them.
if let Err(e) = list_tests_console(&opts, tests) {
eprintln!("error: io error when listing tests: {:?}", e);
// Exit code 101 signals "tests failed" to the invoking process.
match run_tests_console(&opts, tests) {
Ok(false) => process::exit(101),
eprintln!("error: io error when listing tests: {:?}", e);
// A variant optimized for invocation with a static test vector.
// This will panic (intentionally) when fed any dynamic tests, because
// it is copying the static values out into a dynamic vector and cannot
// copy dynamic values. It is doing this because from this point on
// a Vec<TestDescAndFn> is used in order to effect ownership-transfer
// semantics into parallel test runners, which in turn requires a Vec<>
// rather than a &[].
pub fn test_main_static(tests: &[TestDescAndFn]) {
let args = env::args().collect::<Vec<_>>();
let owned_tests = tests
.map(|t| match t.testfn {
StaticTestFn(f) => TestDescAndFn {
testfn: StaticTestFn(f),
desc: t.desc.clone(),
StaticBenchFn(f) => TestDescAndFn {
testfn: StaticBenchFn(f),
desc: t.desc.clone(),
_ => panic!("non-static tests passed to test::test_main_static"),
test_main(&args, owned_tests, Options::new())
/// Invoked when unit tests terminate. Should panic if the unit
/// test is considered a failure. By default, invokes `report()`
/// and checks for a `0` result.
pub fn assert_test_result<T: Termination>(result: T) {
let code = result.report();
// Failure message emitted when `report()` returns a non-zero status code.
"the test returned a termination value with a non-zero status code ({}) \
which indicates a failure",
#[derive(Copy, Clone, Debug)]
pub enum ColorConfig {
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
pub enum OutputFormat {
// Parsed command-line options controlling a test run.
pub struct TestOpts {
pub filter: Option<String>,
pub filter_exact: bool,
pub run_ignored: bool,
pub bench_benchmarks: bool,
pub logfile: Option<PathBuf>,
pub color: ColorConfig,
pub format: OutputFormat,
pub test_threads: Option<usize>,
pub skip: Vec<String>,
pub options: Options,
// Private constructor with default settings (crate-internal use).
fn new() -> TestOpts {
bench_benchmarks: false,
format: OutputFormat::Pretty,
options: Options::new(),
/// Result of parsing the options.
pub type OptRes = Result<TestOpts, String>;
// Builds the `getopts` option table understood by the test harness.
// The string literals below are user-facing help text: do not edit casually.
fn optgroups() -> getopts::Options {
let mut opts = getopts::Options::new();
opts.optflag("", "ignored", "Run ignored tests")
.optflag("", "test", "Run tests and not benchmarks")
.optflag("", "bench", "Run benchmarks instead of tests")
.optflag("", "list", "List all tests and benchmarks")
.optflag("h", "help", "Display this message (longer with --help)")
"Write logs to the specified file instead \
"don't capture stdout/stderr of each \
task, allow printing directly",
"Number of threads used for running tests \
"Skip tests whose names contain FILTER (this flag can \
be used multiple times)",
"Display one character per test instead of one line. \
Alias to --format=terse",
"Exactly match filters rather than by substring",
"Configure coloring of output:
auto = colorize if stdout is a tty and tests are run on serially (default);
always = always colorize output;
never = never colorize output;",
"Configure formatting of output:
pretty = Print verbose output;
terse = Display one character per test;
json = Output a json document",
"Enable nightly-only flags:
unstable-options = Allow use of experimental features",
// Prints the harness `--help` text. Everything from the FILTER paragraph
// down to the `usage = ...` argument is the body of a string literal passed
// to a print macro — do not insert comments or reformat inside it.
fn usage(binary: &str, options: &getopts::Options) {
let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
The FILTER string is tested against the name of all tests, and only those
tests whose names contain the filter are run.
By default, all tests are run in parallel. This can be altered with the
--test-threads flag or the RUST_TEST_THREADS environment variable when running
All tests have their standard output and standard error captured by default.
This can be overridden with the --nocapture flag or setting RUST_TEST_NOCAPTURE
environment variable to a value other than "0". Logging is not captured by default.
#[test] - Indicates a function is a test to be run. This function
#[bench] - Indicates a function is a benchmark to be run. This
function takes one argument (test::Bencher).
#[should_panic] - This function (also labeled with #[test]) will only pass if
the code causes a panic (an assertion failure or panic!)
A message may be provided, which the failure string must
contain: #[should_panic(expected = "foo")].
#[ignore] - When applied to a function which is already attributed as a
test, then the test runner will ignore these tests during
normal test runs. Running with --ignored will run these
usage = options.usage(&message)
// FIXME: Copied from libsyntax until linkage errors are resolved. Issue #47566
/// Returns `true` when nightly-only (unstable) harness features may be used.
///
/// That is the case either when this build is not feature-staged (i.e. not a
/// beta/stable channel build) or when `RUSTC_BOOTSTRAP` is set to allow
/// bootstrapping. The excerpt's version was truncated; this is the complete
/// definition.
fn is_nightly() -> bool {
    // Whether this is a feature-staged build, i.e. on the beta or stable
    // channel. `option_env!` is resolved at compile time of this crate.
    let disable_unstable_features = option_env!("CFG_DISABLE_UNSTABLE_FEATURES").is_some();
    // Whether we should enable unstable features for bootstrapping.
    let bootstrap = env::var("RUSTC_BOOTSTRAP").is_ok();

    bootstrap || !disable_unstable_features
}
// Parses command line arguments into test options
pub fn parse_opts(args: &[String]) -> Option<OptRes> {
let mut allow_unstable = false;
let opts = optgroups();
// Skip the binary name (argv[0]) when present.
let args = args.get(1..).unwrap_or(args);
let matches = match opts.parse(args) {
Err(f) => return Some(Err(f.to_string())),
// `-Z` flags are gated on a nightly-capable build.
if let Some(opt) = matches.opt_str("Z") {
"the option `Z` is only accepted on the nightly compiler".into(),
"unstable-options" => {
allow_unstable = true;
return Some(Err("Unrecognized option to `Z`".into()));
if matches.opt_present("h") {
usage(&args[0], &opts);
// The first free argument, if any, is the name filter.
let filter = if !matches.free.is_empty() {
Some(matches.free[0].clone())
let run_ignored = matches.opt_present("ignored");
let quiet = matches.opt_present("quiet");
let exact = matches.opt_present("exact");
let list = matches.opt_present("list");
let logfile = matches.opt_str("logfile");
let logfile = logfile.map(|s| PathBuf::from(&s));
let bench_benchmarks = matches.opt_present("bench");
let run_tests = !bench_benchmarks || matches.opt_present("test");
// The RUST_TEST_NOCAPTURE environment variable can also enable nocapture.
let mut nocapture = matches.opt_present("nocapture");
nocapture = match env::var("RUST_TEST_NOCAPTURE") {
Ok(val) => &val != "0",
// --test-threads must parse as a positive integer.
let test_threads = match matches.opt_str("test-threads") {
Some(n_str) => match n_str.parse::<usize>() {
Ok(0) => return Some(Err("argument for --test-threads must not be 0".to_string())),
return Some(Err(format!(
"argument for --test-threads must be a number > 0 \
let color = match matches.opt_str("color").as_ref().map(|s| &**s) {
Some("auto") | None => AutoColor,
Some("always") => AlwaysColor,
Some("never") => NeverColor,
return Some(Err(format!(
"argument for --color must be auto, always, or never (was \
let format = match matches.opt_str("format").as_ref().map(|s| &**s) {
None if quiet => OutputFormat::Terse,
Some("pretty") | None => OutputFormat::Pretty,
Some("terse") => OutputFormat::Terse,
"The \"json\" format is only accepted on the nightly compiler".into(),
return Some(Err(format!(
"argument for --format must be pretty, terse, or json (was \
let test_opts = TestOpts {
skip: matches.opt_strs("skip"),
options: Options::new(),
#[derive(Clone, PartialEq)]
pub struct BenchSamples {
// Summary statistics of the nanoseconds-per-iteration samples.
ns_iter_summ: stats::Summary,
#[derive(Clone, PartialEq)]
pub enum TestResult {
TrBench(BenchSamples),
// SAFETY(review): asserts TestResult may cross threads; presumably every
// variant payload is Send — confirm against the full variant list, which
// is not visible in this excerpt.
unsafe impl Send for TestResult {}
// Where console output goes: a color-capable terminal, or a raw writer.
enum OutputLocation<T> {
Pretty(Box<term::StdoutTerminal>),
// Forward `Write` calls to whichever sink is active.
impl<T: Write> Write for OutputLocation<T> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
Pretty(ref mut term) => term.write(buf),
Raw(ref mut stdout) => stdout.write(buf),
fn flush(&mut self) -> io::Result<()> {
Pretty(ref mut term) => term.flush(),
Raw(ref mut stdout) => stdout.flush(),
// Mutable state accumulated while running tests on the console:
// counters, captured output of failures, and the optional logfile.
struct ConsoleTestState {
log_out: Option<File>,
failures: Vec<(TestDesc, Vec<u8>)>,
not_failures: Vec<(TestDesc, Vec<u8>)>,
impl ConsoleTestState {
pub fn new(opts: &TestOpts) -> io::Result<ConsoleTestState> {
// Open the logfile up front so I/O errors surface immediately.
let log_out = match opts.logfile {
Some(ref path) => Some(File::create(path)?),
Ok(ConsoleTestState {
metrics: MetricMap::new(),
failures: Vec::new(),
not_failures: Vec::new(),
options: opts.options,
// Append `msg` to the logfile; a no-op when no logfile was requested.
pub fn write_log<S: AsRef<str>>(&mut self, msg: S) -> io::Result<()> {
let msg = msg.as_ref();
Some(ref mut o) => o.write_all(msg.as_bytes()),
// Write one test's outcome (a status word per TestResult variant) to the log.
pub fn write_log_result(&mut self, test: &TestDesc, result: &TestResult) -> io::Result<()> {
self.write_log(format!(
TrOk => "ok".to_owned(),
TrFailed => "failed".to_owned(),
TrFailedMsg(ref msg) => format!("failed: {}", msg),
TrIgnored => "ignored".to_owned(),
TrAllowedFail => "failed (allowed)".to_owned(),
TrBench(ref bs) => fmt_bench_samples(bs),
// Total number of tests that have produced any result so far.
fn current_test_count(&self) -> usize {
self.passed + self.failed + self.ignored + self.measured + self.allowed_fail
// Format a number with thousands separators
/// Renders `n` in decimal with `sep` inserted before every later group of
/// three digits, e.g. `fmt_thousands_sep(1234567, ',') == "1,234,567"`.
///
/// Walks the rendered digits instead of a fixed table of powers of ten, so
/// any magnitude of `usize` is grouped correctly (a 10^0..10^9 table
/// mis-groups values of 10^12 and above).
fn fmt_thousands_sep(n: usize, sep: char) -> String {
    let digits = n.to_string();
    // One extra byte per group of three for the separators.
    let mut output = String::with_capacity(digits.len() + digits.len() / 3);
    for (i, ch) in digits.chars().enumerate() {
        // A separator goes before every position whose distance from the
        // end is a positive multiple of three.
        if i != 0 && (digits.len() - i) % 3 == 0 {
            output.push(sep);
        }
        output.push(ch);
    }
    output
}
// Renders one benchmark's samples, e.g. `1,234 ns/iter (+/- 56)`; a MB/s
// figure is appended by the second write below (its guard is elided here).
pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
let mut output = String::new();
// Median and spread (max - min) of the ns-per-iteration samples.
let median = bs.ns_iter_summ.median as usize;
let deviation = (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize;
.write_fmt(format_args!(
"{:>11} ns/iter (+/- {})",
fmt_thousands_sep(median, ','),
fmt_thousands_sep(deviation, ',')
.write_fmt(format_args!(" = {} MB/s", bs.mb_s))
// List the tests to console, and optionally to logfile. Filters are honored.
pub fn list_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<()> {
let mut output = match term::stdout() {
None => Raw(io::stdout()),
Some(t) => Pretty(t),
let quiet = opts.format == OutputFormat::Terse;
let mut st = ConsoleTestState::new(opts)?;
// Walk the filtered list, printing each test's name and kind.
for test in filter_tests(&opts, tests) {
desc: TestDesc { name, .. },
let fntype = match testfn {
StaticTestFn(..) | DynTestFn(..) => {
StaticBenchFn(..) | DynBenchFn(..) => {
writeln!(output, "{}: {}", name, fntype)?;
st.write_log(format!("{} {}\n", fntype, name))?;
// Local helper: naive English pluralization for the summary line.
fn plural(count: u32, s: &str) -> String {
1 => format!("{} {}", 1, s),
n => format!("{} {}s", n, s),
// Print a summary footer when anything was listed.
if ntest != 0 || nbench != 0 {
writeln!(output, "")?;
plural(ntest, "test"),
plural(nbench, "benchmark")
// A simple console test runner
pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<bool> {
// NOTE(review): the header of the nested `callback` fn (which owns the
// parameters below) is elided in this excerpt.
st: &mut ConsoleTestState,
out: &mut dyn OutputFormatter,
) -> io::Result<()> {
// Dispatch one runner event to the formatter and update counters.
match (*event).clone() {
TeFiltered(ref filtered_tests) => {
st.total = filtered_tests.len();
out.write_run_start(filtered_tests.len())
TeFilteredOut(filtered_out) => Ok(st.filtered_out = filtered_out),
TeWait(ref test) => out.write_test_start(test),
TeTimeout(ref test) => out.write_timeout(test),
TeResult(test, result, stdout) => {
st.write_log_result(&test, &result)?;
out.write_result(&test, &result, &*stdout)?;
st.not_failures.push((test, stdout));
TrIgnored => st.ignored += 1,
TrAllowedFail => st.allowed_fail += 1,
// Benchmark results also feed the metric map (median +/- spread).
st.metrics.insert_metric(
test.name.as_slice(),
bs.ns_iter_summ.median,
bs.ns_iter_summ.max - bs.ns_iter_summ.min,
st.failures.push((test, stdout));
TrFailedMsg(msg) => {
// Append the failure note to the captured output before recording.
let mut stdout = stdout;
stdout.extend_from_slice(format!("note: {}", msg).as_bytes());
st.failures.push((test, stdout));
let output = match term::stdout() {
None => Raw(io::stdout()),
Some(t) => Pretty(t),
// Widest padded test name, used to align the formatter's columns.
let max_name_len = tests
.max_by_key(|t| len_if_padded(*t))
.map(|t| t.desc.name.as_slice().len())
let is_multithreaded = opts.test_threads.unwrap_or_else(get_concurrency) > 1;
// Choose the formatter implementation from the requested output format.
let mut out: Box<dyn OutputFormatter> = match opts.format {
OutputFormat::Pretty => Box::new(PrettyFormatter::new(
OutputFormat::Terse => Box::new(TerseFormatter::new(
OutputFormat::Json => Box::new(JsonFormatter::new(output)),
let mut st = ConsoleTestState::new(opts)?;
// Only right-padded (benchmark) names count toward the column width.
fn len_if_padded(t: &TestDescAndFn) -> usize {
match t.testfn.padding() {
PadOnRight => t.desc.name.as_slice().len(),
run_tests(opts, tests, |x| callback(&x, &mut st, &mut *out))?;
assert!(st.current_test_count() == st.total);
return out.write_run_finish(&st);
// In-crate test (its `#[test]` attribute is elided in this excerpt):
// failures must be printed in sorted order regardless of insertion order.
fn should_sort_failures_before_printing_them() {
let test_a = TestDesc {
name: StaticTestName("a"),
should_panic: ShouldPanic::No,
let test_b = TestDesc {
name: StaticTestName("b"),
should_panic: ShouldPanic::No,
let mut out = PrettyFormatter::new(Raw(Vec::new()), false, 10, false);
// Insert the failures deliberately out of order: b before a.
let st = ConsoleTestState {
metrics: MetricMap::new(),
failures: vec![(test_b, Vec::new()), (test_a, Vec::new())],
options: Options::new(),
not_failures: Vec::new(),
out.write_failures(&st).unwrap();
let s = match out.output_location() {
&Raw(ref m) => String::from_utf8_lossy(&m[..]),
&Pretty(_) => unreachable!(),
// "a" must appear before "b" in the rendered output.
let apos = s.find("a").unwrap();
let bpos = s.find("b").unwrap();
assert!(apos < bpos);
// Whether colored output should be used, honoring --color and tty detection.
fn use_color(opts: &TestOpts) -> bool {
AutoColor => !opts.nocapture && stdout_isatty(),
// Platform-specific stdout tty detection follows; exactly one definition
// is compiled in via the cfg attributes.
#[cfg(any(target_os = "cloudabi", target_os = "redox",
all(target_arch = "wasm32", not(target_os = "emscripten"))))]
fn stdout_isatty() -> bool {
// FIXME: Implement isatty on Redox
fn stdout_isatty() -> bool {
unsafe { libc::isatty(libc::STDOUT_FILENO) != 0 }
fn stdout_isatty() -> bool {
type HANDLE = *mut u8;
type LPDWORD = *mut u32;
const STD_OUTPUT_HANDLE: DWORD = -11i32 as DWORD;
fn GetStdHandle(which: DWORD) -> HANDLE;
fn GetConsoleMode(hConsoleHandle: HANDLE, lpMode: LPDWORD) -> BOOL;
// GetConsoleMode succeeds only for console handles, not pipes/files.
let handle = GetStdHandle(STD_OUTPUT_HANDLE);
GetConsoleMode(handle, &mut out) != 0
// Events emitted by the test runner and consumed by the console callback.
pub enum TestEvent {
TeFiltered(Vec<TestDesc>),
TeResult(TestDesc, TestResult, Vec<u8>),
TeTimeout(TestDesc),
TeFilteredOut(usize),
// (test description, outcome, captured output bytes)
pub type MonitorMsg = (TestDesc, TestResult, Vec<u8>);
// A `Write` sink appending into a shared, mutex-guarded byte buffer; used
// below (run_test_inner) to capture a test's printed/panic output.
struct Sink(Arc<Mutex<Vec<u8>>>);
impl Write for Sink {
fn write(&mut self, data: &[u8]) -> io::Result<usize> {
Write::write(&mut *self.0.lock().unwrap(), data)
fn flush(&mut self) -> io::Result<()> {
// Core runner: filters the tests, then runs them (serially or up to
// `concurrency` at a time), reporting progress through `callback`.
pub fn run_tests<F>(opts: &TestOpts, tests: Vec<TestDescAndFn>, mut callback: F) -> io::Result<()>
F: FnMut(TestEvent) -> io::Result<()>,
use std::collections::HashMap;
use std::sync::mpsc::RecvTimeoutError;
let tests_len = tests.len();
let mut filtered_tests = filter_tests(opts, tests);
// Without --bench, benchmarks are downgraded to single-shot tests.
if !opts.bench_benchmarks {
filtered_tests = convert_benchmarks_to_tests(filtered_tests);
let filtered_tests = {
let mut filtered_tests = filtered_tests;
for test in filtered_tests.iter_mut() {
test.desc.name = test.desc.name.with_padding(test.testfn.padding());
let filtered_out = tests_len - filtered_tests.len();
callback(TeFilteredOut(filtered_out))?;
let filtered_descs = filtered_tests.iter().map(|t| t.desc.clone()).collect();
callback(TeFiltered(filtered_descs))?;
// Split tests from benches: tests may run concurrently, benches run last.
let (filtered_tests, filtered_benchs): (Vec<_>, _) =
filtered_tests.into_iter().partition(|e| match e.testfn {
StaticTestFn(_) | DynTestFn(_) => true,
let concurrency = opts.test_threads.unwrap_or_else(get_concurrency);
// Reverse so `pop()` yields tests in their original order.
let mut remaining = filtered_tests;
remaining.reverse();
let mut pending = 0;
let (tx, rx) = channel::<MonitorMsg>();
// Start times (as warn deadlines) of in-flight tests, keyed by descriptor.
let mut running_tests: HashMap<TestDesc, Instant> = HashMap::new();
// Remove and return every in-flight test whose warn deadline has passed.
fn get_timed_out_tests(running_tests: &mut HashMap<TestDesc, Instant>) -> Vec<TestDesc> {
let now = Instant::now();
let timed_out = running_tests
.filter_map(|(desc, timeout)| {
if &now >= timeout {
for test in &timed_out {
running_tests.remove(test);
// Time until the earliest in-flight deadline, if any test is running.
fn calc_timeout(running_tests: &HashMap<TestDesc, Instant>) -> Option<Duration> {
running_tests.values().min().map(|next_timeout| {
let now = Instant::now();
if *next_timeout >= now {
// Serial path: run one test and wait for its result before the next.
if concurrency == 1 {
while !remaining.is_empty() {
let test = remaining.pop().unwrap();
callback(TeWait(test.desc.clone()))?;
run_test(opts, !opts.run_tests, test, tx.clone());
let (test, result, stdout) = rx.recv().unwrap();
callback(TeResult(test, result, stdout))?;
// Concurrent path: keep up to `concurrency` tests in flight.
while pending > 0 || !remaining.is_empty() {
while pending < concurrency && !remaining.is_empty() {
let test = remaining.pop().unwrap();
let timeout = Instant::now() + Duration::from_secs(TEST_WARN_TIMEOUT_S);
running_tests.insert(test.desc.clone(), timeout);
callback(TeWait(test.desc.clone()))?; //here no pad
run_test(opts, !opts.run_tests, test, tx.clone());
// Wait for a result, emitting timeout warnings while we wait.
if let Some(timeout) = calc_timeout(&running_tests) {
res = rx.recv_timeout(timeout);
for test in get_timed_out_tests(&mut running_tests) {
callback(TeTimeout(test))?;
if res != Err(RecvTimeoutError::Timeout) {
res = rx.recv().map_err(|_| RecvTimeoutError::Disconnected);
let (desc, result, stdout) = res.unwrap();
running_tests.remove(&desc);
callback(TeResult(desc, result, stdout))?;
if opts.bench_benchmarks {
// All benchmarks run at the end, in serial.
for b in filtered_benchs {
callback(TeWait(b.desc.clone()))?;
run_test(opts, false, b, tx.clone());
let (test, result, stdout) = rx.recv().unwrap();
callback(TeResult(test, result, stdout))?;
#[allow(deprecated)]
// Number of tests to run concurrently: RUST_TEST_THREADS when it parses
// as a positive integer, otherwise the detected CPU count.
fn get_concurrency() -> usize {
return match env::var("RUST_TEST_THREADS") {
let opt_n: Option<usize> = s.parse().ok();
Some(n) if n > 0 => n,
"RUST_TEST_THREADS is `{}`, should be a positive integer.",
Err(..) => num_cpus(),
// Platform-specific CPU-count detection; one definition per cfg target.
// (This Windows variant's cfg attribute is elided in this excerpt.)
fn num_cpus() -> usize {
// Minimal local mirror of the Win32 SYSTEM_INFO struct.
struct SYSTEM_INFO {
wProcessorArchitecture: u16,
lpMinimumApplicationAddress: *mut u8,
lpMaximumApplicationAddress: *mut u8,
dwActiveProcessorMask: *mut u8,
dwNumberOfProcessors: u32,
dwProcessorType: u32,
dwAllocationGranularity: u32,
wProcessorLevel: u16,
wProcessorRevision: u16,
fn GetSystemInfo(info: *mut SYSTEM_INFO) -> i32;
let mut sysinfo = std::mem::zeroed();
GetSystemInfo(&mut sysinfo);
sysinfo.dwNumberOfProcessors as usize
#[cfg(target_os = "redox")]
fn num_cpus() -> usize {
// FIXME: Implement num_cpus on Redox
#[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))]
fn num_cpus() -> usize {
#[cfg(any(target_os = "android", target_os = "cloudabi", target_os = "emscripten",
target_os = "fuchsia", target_os = "ios", target_os = "linux",
target_os = "macos", target_os = "solaris"))]
fn num_cpus() -> usize {
unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as usize }
#[cfg(any(target_os = "freebsd", target_os = "dragonfly", target_os = "bitrig",
target_os = "netbsd"))]
fn num_cpus() -> usize {
let mut cpus: libc::c_uint = 0;
let mut cpus_size = std::mem::size_of_val(&cpus);
cpus = libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as libc::c_uint;
// Fall back to the hw.ncpu sysctl when sysconf gives nothing usable.
let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
&mut cpus as *mut _ as *mut _,
&mut cpus_size as *mut _ as *mut _,
#[cfg(target_os = "openbsd")]
fn num_cpus() -> usize {
let mut cpus: libc::c_uint = 0;
let mut cpus_size = std::mem::size_of_val(&cpus);
let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
&mut cpus as *mut _ as *mut _,
&mut cpus_size as *mut _ as *mut _,
#[cfg(target_os = "haiku")]
fn num_cpus() -> usize {
#[cfg(target_os = "l4re")]
fn num_cpus() -> usize {
// Applies the name filter, the --skip filters, and the --ignored mode to
// the test list, then sorts the survivors by name.
pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
let mut filtered = tests;
// Remove tests that don't match the test filter
filtered = match opts.filter {
Some(ref filter) => filtered
if opts.filter_exact {
test.desc.name.as_slice() == &filter[..]
test.desc.name.as_slice().contains(&filter[..])
// Skip tests that match any of the skip filters
!opts.skip.iter().any(|sf| {
if opts.filter_exact {
t.desc.name.as_slice() == &sf[..]
t.desc.name.as_slice().contains(&sf[..])
// Maybe pull out the ignored test and unignore them
filtered = if !opts.run_ignored {
// With --ignored: keep only ignored tests, clearing their ignore flag.
fn filter(test: TestDescAndFn) -> Option<TestDescAndFn> {
if test.desc.ignore {
let TestDescAndFn { desc, testfn } = test;
Some(TestDescAndFn {
filtered.into_iter().filter_map(filter).collect()
// Sort the tests alphabetically
filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));
pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
// convert benchmarks to tests, if we're not benchmarking them
let testfn = match x.testfn {
// A bench becomes a dynamic test that runs the bench body exactly once.
DynBenchFn(bench) => DynTestFn(Box::new(move || {
bench::run_once(|b| __rust_begin_short_backtrace(|| bench.run(b)))
StaticBenchFn(benchfn) => DynTestFn(Box::new(move || {
bench::run_once(|b| __rust_begin_short_backtrace(|| benchfn(b)))
// NOTE(review): the `pub fn run_test(...)` header owning these parameters
// is elided in this excerpt.
test: TestDescAndFn,
monitor_ch: Sender<MonitorMsg>,
let TestDescAndFn { desc, testfn } = test;
// wasm (non-emscripten) aborts on panic, so should_panic tests can't run there.
let ignore_because_panic_abort = cfg!(target_arch = "wasm32") && !cfg!(target_os = "emscripten")
&& desc.should_panic != ShouldPanic::No;
if force_ignore || desc.ignore || ignore_because_panic_abort {
monitor_ch.send((desc, TrIgnored, Vec::new())).unwrap();
// Parameters of the nested run_test_inner helper (its header is elided).
monitor_ch: Sender<MonitorMsg>,
testfn: Box<dyn FnBox() + Send>,
// Buffer for capturing standard I/O
let data = Arc::new(Mutex::new(Vec::new()));
let data2 = data.clone();
let name = desc.name.clone();
let runtest = move || {
// Unless --nocapture, redirect print/panic output into the Sink buffer.
let oldio = if !nocapture {
io::set_print(Some(Box::new(Sink(data2.clone())))),
io::set_panic(Some(Box::new(Sink(data2)))),
// Run the test body, trapping any panic as the failure signal.
let result = catch_unwind(AssertUnwindSafe(testfn));
// Restore the previous stdout/panic hooks before reporting.
if let Some((printio, panicio)) = oldio {
io::set_print(printio);
io::set_panic(panicio);
let test_result = calc_result(&desc, result);
let stdout = data.lock().unwrap().to_vec();
.send((desc.clone(), test_result, stdout))
// If the platform is single-threaded we're just going to run
// the test synchronously, regardless of the concurrency
let supports_threads = !cfg!(target_os = "emscripten") && !cfg!(target_arch = "wasm32");
if supports_threads {
// Name the thread after the test for clearer panic messages.
let cfg = thread::Builder::new().name(name.as_slice().to_owned());
cfg.spawn(runtest).unwrap();
// Benchmarks bypass run_test_inner and go through bench::benchmark.
DynBenchFn(bencher) => {
::bench::benchmark(desc, monitor_ch, opts.nocapture, |harness| {
bencher.run(harness)
StaticBenchFn(benchfn) => {
::bench::benchmark(desc, monitor_ch, opts.nocapture, |harness| {
(benchfn.clone())(harness)
let cb = move || __rust_begin_short_backtrace(f);
run_test_inner(desc, monitor_ch, opts.nocapture, Box::new(cb))
StaticTestFn(f) => run_test_inner(
Box::new(move || __rust_begin_short_backtrace(f)),
/// Fixed frame used to clean the backtrace with `RUST_BACKTRACE=1`.
///
/// The body was truncated in this excerpt; restored here. `#[inline(never)]`
/// keeps this frame present so backtrace trimming has a stable symbol to
/// cut at.
#[inline(never)]
fn __rust_begin_short_backtrace<F: FnOnce()>(f: F) {
    f()
}
// Translate the raw Ok/panic outcome of a test closure into a TestResult,
// honoring the test's should_panic and allow_fail settings.
fn calc_result(desc: &TestDesc, task_result: Result<(), Box<dyn Any + Send>>) -> TestResult {
match (&desc.should_panic, task_result) {
(&ShouldPanic::No, Ok(())) | (&ShouldPanic::Yes, Err(_)) => TrOk,
(&ShouldPanic::YesWithMessage(msg), Err(ref err)) => {
// Panic payloads are usually String or &'static str; check both forms.
if err.downcast_ref::<String>()
.or_else(|| err.downcast_ref::<&'static str>().map(|e| *e))
.map(|e| e.contains(msg))
if desc.allow_fail {
TrFailedMsg(format!("Panic did not include expected string '{}'", msg))
_ if desc.allow_fail => TrAllowedFail,
1494 #[derive(Clone, PartialEq)]
1495 pub struct MetricMap(BTreeMap<String, Metric>);
1498 pub fn new() -> MetricMap {
1499 MetricMap(BTreeMap::new())
/// Insert a named `value` (+/- `noise`) metric into the map. The value
/// must be non-negative. The `noise` indicates the uncertainty of the
/// metric, which doubles as the "noise range" of acceptable
/// pairwise-regressions on this named value, when comparing from one
/// metric to the next using `compare_to_old`.
///
/// If `noise` is positive, then it means this metric is of a value
/// you want to see grow smaller, so a change larger than `noise` in the
/// positive direction represents a regression.
///
/// If `noise` is negative, then it means this metric is of a value
/// you want to see grow larger, so a change larger than `noise` in the
/// negative direction represents a regression.
pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
    let m = Metric { value, noise };
    // `BTreeMap::insert` silently overwrites any existing entry that
    // has the same name.
    self.0.insert(name.to_owned(), m);
/// Renders every metric as a "name: value (+/- noise)" entry.
pub fn fmt_metrics(&self) -> String {
    // NOTE(review): the iterator source (presumably `self.0.iter()`)
    // and the final join/return are on lines not shown in this excerpt.
    .map(|(k, v)| format!("{}: {} (+/- {})", *k, v.value, v.noise))
    .collect::<Vec<_>>();
/// A function that is opaque to the optimizer, to allow benchmarks to
/// pretend to use outputs to assist in avoiding dead-code
/// elimination.
///
/// This function is a no-op, and does not even read from `dummy`.
#[cfg(not(any(target_arch = "asmjs", target_arch = "wasm32")))]
pub fn black_box<T>(dummy: T) -> T {
    // we need to "use" the argument in some way LLVM can't
    // introspect: an empty inline-asm block taking `&dummy` as an "r"
    // input makes the value observably live without emitting any code.
    unsafe { asm!("" : : "r"(&dummy)) }
// Fallback for targets without inline-asm support (asmjs/wasm32).
// NOTE(review): the body is on lines not shown; presumably it just
// returns `dummy`, providing no real optimizer barrier — confirm.
#[cfg(any(target_arch = "asmjs", target_arch = "wasm32"))]
pub fn black_box<T>(dummy: T) -> T {
/// Callback for benchmark functions to run in their body.
pub fn iter<T, F>(&mut self, mut inner: F)
    // NOTE(review): the `where` bounds (presumably `F: FnMut() -> T`)
    // are on a line not shown in this excerpt.
    if self.mode == BenchMode::Single {
        // Single mode: one timed pass only, no summary statistics.
        ns_iter_inner(&mut inner, 1);
    // Auto mode: run the full adaptive sampling loop and record its
    // summary for `bench` to return.
    self.summary = Some(iter(&mut inner));
/// Runs benchmark function `f` against this bencher and returns the
/// timing summary. The summary is populated only as a side effect of
/// `f` calling `Bencher::iter`; absent that call this returns `None`.
pub fn bench<F>(&mut self, mut f: F) -> Option<stats::Summary>
    F: FnMut(&mut Bencher),
    return self.summary;
/// Converts a `Duration` to whole nanoseconds as a `u64`.
/// (The multiply can overflow only for durations of ~584+ years,
/// far beyond any benchmark timing.)
fn ns_from_dur(dur: Duration) -> u64 {
    dur.as_secs() * 1_000_000_000 + (dur.subsec_nanos() as u64)
/// Times `k` consecutive invocations of `inner`, returning total
/// elapsed wall-clock time in nanoseconds.
fn ns_iter_inner<T, F>(inner: &mut F, k: u64) -> u64
    let start = Instant::now();
    // NOTE(review): the loop calling `inner()` `k` times (presumably
    // wrapped in `black_box`) is on lines not shown in this excerpt.
    return ns_from_dur(start.elapsed());
/// Repeatedly times `inner`, adaptively scaling the per-sample
/// iteration count, until the measurements converge or a time budget
/// is exhausted; returns winsorized summary statistics of
/// nanoseconds-per-iteration.
pub fn iter<T, F>(inner: &mut F) -> stats::Summary
    // Initial bench run to get ballpark figure.
    let ns_single = ns_iter_inner(inner, 1);

    // Try to estimate iter count for 1ms falling back to 1m
    // iterations if first run took < 1ns.
    let ns_target_total = 1_000_000; // 1ms
    let mut n = ns_target_total / cmp::max(1, ns_single);

    // if the first run took more than 1ms we don't want to just
    // be left doing 0 iterations on every loop. The unfortunate
    // side effect of not being able to do as many runs is
    // automatically handled by the statistical analysis below
    // (i.e. larger error bars).

    let mut total_run = Duration::new(0, 0);
    // Fixed pool of 50 samples, reused for both passes each round.
    let samples: &mut [f64] = &mut [0.0_f64; 50];
    let loop_start = Instant::now();

    // First pass: 50 samples of n iterations, stored as ns/iter.
    for p in &mut *samples {
        *p = ns_iter_inner(inner, n) as f64 / n as f64;

    // Clamp the extreme 5% tails as outliers before summarizing.
    stats::winsorize(samples, 5.0);
    let summ = stats::Summary::new(samples);

    // Second pass at 5x the iteration count; comparing its median to
    // the first pass detects convergence.
    for p in &mut *samples {
        let ns = ns_iter_inner(inner, 5 * n);
        *p = ns as f64 / (5 * n) as f64;

    stats::winsorize(samples, 5.0);
    let summ5 = stats::Summary::new(samples);

    let loop_run = loop_start.elapsed();

    // If we've run for 100ms and seem to have converged to a
    // stable median (low dispersion, medians of both passes agree
    // within the noise), stop early.
    if loop_run > Duration::from_millis(100) && summ.median_abs_dev_pct < 1.0
        && summ.median - summ5.median < summ5.median_abs_dev

    total_run = total_run + loop_run;
    // Longest we ever run for is 3s.
    if total_run > Duration::from_secs(3) {

    // If we overflow here just return the results so far. We check a
    // multiplier of 10 because we're about to multiply by 2 and the
    // next iteration of the loop will also multiply by 5 (to calculate
    // the summ5 result)
    n = match n.checked_mul(10) {
1656 use std::panic::{catch_unwind, AssertUnwindSafe};
1659 use std::sync::{Arc, Mutex};
1661 use super::{BenchMode, BenchSamples, Bencher, MonitorMsg, Sender, Sink, TestDesc, TestResult};
/// Runs benchmark `f` under panic capture, optionally redirecting
/// stdout/panic output into an in-memory sink, and reports the
/// resulting `TestResult` plus captured output back over `monitor_ch`.
pub fn benchmark<F>(desc: TestDesc, monitor_ch: Sender<MonitorMsg>, nocapture: bool, f: F)
    F: FnMut(&mut Bencher),
    let mut bs = Bencher {
        mode: BenchMode::Auto,

    // Shared buffer collecting everything printed while the benchmark
    // runs (only wired up when capture is enabled below).
    let data = Arc::new(Mutex::new(Vec::new()));
    let data2 = data.clone();

    // Swap print/panic output for sinks, remembering the previous
    // handlers so they can be restored after the run.
    let oldio = if !nocapture {
        io::set_print(Some(Box::new(Sink(data2.clone())))),
        io::set_panic(Some(Box::new(Sink(data2)))),

    // `AssertUnwindSafe` because the closure mutably borrows `bs`;
    // a panic inside the benchmark is turned into `Err` here rather
    // than propagated.
    let result = catch_unwind(AssertUnwindSafe(|| bs.bench(f)));

    if let Some((printio, panicio)) = oldio {
        io::set_print(printio);
        io::set_panic(panicio);

    let test_result = match result {
        Ok(Some(ns_iter_summ)) => {
            // Clamp the median to at least 1ns so the MB/s division
            // below can never divide by zero.
            let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
            let mb_s = bs.bytes * 1000 / ns_iter;

            let bs = BenchSamples {
                mb_s: mb_s as usize,
            TestResult::TrBench(bs)

        // iter not called, so no data.
        // FIXME: error in this case?
            // Synthesize a single zero sample so a summary can still
            // be reported.
            let samples: &mut [f64] = &mut [0.0_f64; 1];
            let bs = BenchSamples {
                ns_iter_summ: stats::Summary::new(samples),
            TestResult::TrBench(bs)
        Err(_) => TestResult::TrFailed,

    // Hand the result and any captured output back to the monitor.
    let stdout = data.lock().unwrap().to_vec();
    monitor_ch.send((desc, test_result, stdout)).unwrap();
/// Runs `f` exactly once against a `Single`-mode `Bencher` (one timed
/// pass, no summary statistics and no output capture).
pub fn run_once<F>(f: F)
    F: FnMut(&mut Bencher),
    let mut bs = Bencher {
        mode: BenchMode::Single,
1736 use test::{filter_tests, parse_opts, run_test, DynTestFn, DynTestName, MetricMap, ShouldPanic,
1737 StaticTestName, TestDesc, TestDescAndFn, TestOpts, TrFailed, TrFailedMsg,
1739 use std::sync::mpsc::channel;
pub fn do_not_run_ignored_tests() {
    // With default options (`run_ignored == false`) an ignored test
    // must not come back as TrOk.
    // NOTE(review): the `ignore: true` field of `desc` is on a line
    // not shown in this excerpt.
    let desc = TestDescAndFn {
        name: StaticTestName("whatever"),
        should_panic: ShouldPanic::No,
        testfn: DynTestFn(Box::new(f)),
    let (tx, rx) = channel();
    run_test(&TestOpts::new(), false, desc, tx);
    let (_, res, _) = rx.recv().unwrap();
    assert!(res != TrOk);
pub fn ignored_tests_result_in_ignored() {
    // An ignored test must be reported as `TrIgnored`, not run.
    let desc = TestDescAndFn {
        name: StaticTestName("whatever"),
        should_panic: ShouldPanic::No,
        testfn: DynTestFn(Box::new(f)),
    let (tx, rx) = channel();
    run_test(&TestOpts::new(), false, desc, tx);
    let (_, res, _) = rx.recv().unwrap();
    assert!(res == TrIgnored);
fn test_should_panic() {
    // `ShouldPanic::Yes` plus a body that panics => TrOk.
    let desc = TestDescAndFn {
        name: StaticTestName("whatever"),
        should_panic: ShouldPanic::Yes,
        testfn: DynTestFn(Box::new(f)),
    let (tx, rx) = channel();
    run_test(&TestOpts::new(), false, desc, tx);
    let (_, res, _) = rx.recv().unwrap();
    assert!(res == TrOk);
fn test_should_panic_good_message() {
    // The expected substring "error message" occurs within the panic
    // payload "an error message", so the test must pass.
        panic!("an error message");
    let desc = TestDescAndFn {
        name: StaticTestName("whatever"),
        should_panic: ShouldPanic::YesWithMessage("error message"),
        testfn: DynTestFn(Box::new(f)),
    let (tx, rx) = channel();
    run_test(&TestOpts::new(), false, desc, tx);
    let (_, res, _) = rx.recv().unwrap();
    assert!(res == TrOk);
fn test_should_panic_bad_message() {
    // The panic payload lacks the expected substring "foobar", so the
    // result must be TrFailedMsg carrying the explanatory message that
    // `calc_result` formats.
        panic!("an error message");
    let expected = "foobar";
    let failed_msg = "Panic did not include expected string";
    let desc = TestDescAndFn {
        name: StaticTestName("whatever"),
        should_panic: ShouldPanic::YesWithMessage(expected),
        testfn: DynTestFn(Box::new(f)),
    let (tx, rx) = channel();
    run_test(&TestOpts::new(), false, desc, tx);
    let (_, res, _) = rx.recv().unwrap();
    assert!(res == TrFailedMsg(format!("{} '{}'", failed_msg, expected)));
fn test_should_panic_but_succeeds() {
    // `ShouldPanic::Yes` but the test body returns normally => TrFailed.
    let desc = TestDescAndFn {
        name: StaticTestName("whatever"),
        should_panic: ShouldPanic::Yes,
        testfn: DynTestFn(Box::new(f)),
    let (tx, rx) = channel();
    run_test(&TestOpts::new(), false, desc, tx);
    let (_, res, _) = rx.recv().unwrap();
    assert!(res == TrFailed);
fn parse_ignored_flag() {
    // A `--ignored` argument on the command line must set
    // `opts.run_ignored`.
        "progname".to_string(),
        "filter".to_string(),
        "--ignored".to_string(),
    let opts = match parse_opts(&args) {
        _ => panic!("Malformed arg in parse_ignored_flag"),
    assert!((opts.run_ignored));
pub fn filter_for_ignored_option() {
    // When we run ignored tests the test filter should filter out all the
    // unignored tests and flip the ignore flag on the rest to false
    let mut opts = TestOpts::new();
    opts.run_tests = true;
    opts.run_ignored = true;

    // Two tests are built; presumably "1" is marked ignored and "2" is
    // not — the `ignore:` fields are on lines not shown here.
        name: StaticTestName("1"),
        should_panic: ShouldPanic::No,
        testfn: DynTestFn(Box::new(move || {})),
        name: StaticTestName("2"),
        should_panic: ShouldPanic::No,
        testfn: DynTestFn(Box::new(move || {})),
    let filtered = filter_tests(&opts, tests);

    // Only the previously-ignored test survives, now un-ignored.
    assert_eq!(filtered.len(), 1);
    assert_eq!(filtered[0].desc.name.to_string(), "1");
    assert!(!filtered[0].desc.ignore);
pub fn exact_filter_match() {
    // Four tests with overlapping names, used to contrast substring
    // filtering against exact-name filtering.
    fn tests() -> Vec<TestDescAndFn> {
        vec!["base", "base::test", "base::test1", "base::test2"]
            .map(|name| TestDescAndFn {
                name: StaticTestName(name),
                should_panic: ShouldPanic::No,
                testfn: DynTestFn(Box::new(move || {})),

    // Substring mode: "base" is a substring of all four names.
    let substr = filter_tests(
        filter: Some("base".into()),
    assert_eq!(substr.len(), 4);

    // "bas" is likewise a prefix of every name.
    let substr = filter_tests(
        filter: Some("bas".into()),
    assert_eq!(substr.len(), 4);

    // "::test" matches only the three namespaced names, not bare "base".
    let substr = filter_tests(
        filter: Some("::test".into()),
    assert_eq!(substr.len(), 3);

    // "base::test" matches itself plus test1/test2.
    let substr = filter_tests(
        filter: Some("base::test".into()),
    assert_eq!(substr.len(), 3);

    // Exact mode (the exactness flag is set on lines not shown):
    // only a byte-identical name may match.
    let exact = filter_tests(
        filter: Some("base".into()),
    assert_eq!(exact.len(), 1);

    let exact = filter_tests(
        filter: Some("bas".into()),
    assert_eq!(exact.len(), 0);

    let exact = filter_tests(
        filter: Some("::test".into()),
    assert_eq!(exact.len(), 0);

    let exact = filter_tests(
        filter: Some("base::test".into()),
    assert_eq!(exact.len(), 1);
pub fn sort_tests() {
    // Filtering should also sort: the shuffled `names` below must come
    // back in lexicographic order.
    let mut opts = TestOpts::new();
    opts.run_tests = true;
        "sha1::test".to_string(),
        "isize::test_to_str".to_string(),
        "isize::test_pow".to_string(),
        "test::do_not_run_ignored_tests".to_string(),
        "test::ignored_tests_result_in_ignored".to_string(),
        "test::first_free_arg_should_be_a_filter".to_string(),
        "test::parse_ignored_flag".to_string(),
        "test::filter_for_ignored_option".to_string(),
        "test::sort_tests".to_string(),
    let mut tests = Vec::new();
    for name in &names {
        let test = TestDescAndFn {
            name: DynTestName((*name).clone()),
            should_panic: ShouldPanic::No,
            testfn: DynTestFn(Box::new(testfn)),
    let filtered = filter_tests(&opts, tests);

    // `expected` is simply the sorted version of `names`.
    let expected = vec![
        "isize::test_pow".to_string(),
        "isize::test_to_str".to_string(),
        "sha1::test".to_string(),
        "test::do_not_run_ignored_tests".to_string(),
        "test::filter_for_ignored_option".to_string(),
        "test::first_free_arg_should_be_a_filter".to_string(),
        "test::ignored_tests_result_in_ignored".to_string(),
        "test::parse_ignored_flag".to_string(),
        "test::sort_tests".to_string(),
    for (a, b) in expected.iter().zip(filtered) {
        assert!(*a == b.desc.name.to_string());
pub fn test_metricmap_compare() {
    // Builds two MetricMaps covering every noise/regression/improvement
    // combination in both directions (positive noise = smaller-is-better,
    // negative noise = larger-is-better, per `insert_metric`'s docs).
    // NOTE(review): the comparison assertions themselves are on lines
    // not shown in this excerpt.
    let mut m1 = MetricMap::new();
    let mut m2 = MetricMap::new();
    m1.insert_metric("in-both-noise", 1000.0, 200.0);
    m2.insert_metric("in-both-noise", 1100.0, 200.0);

    m1.insert_metric("in-first-noise", 1000.0, 2.0);
    m2.insert_metric("in-second-noise", 1000.0, 2.0);

    m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
    m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);

    m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
    m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);

    m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
    m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);

    m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
    m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);
pub fn test_bench_once_no_iter() {
    // A bench fn that never calls `iter` must still run cleanly under
    // `run_once`.
    fn f(_: &mut Bencher) {}
pub fn test_bench_once_iter() {
    // Companion to test_bench_once_no_iter: the bench fn does call
    // `iter` here. NOTE(review): the `b.iter(..)` body and the
    // `run_once` invocation are on lines not shown.
    fn f(b: &mut Bencher) {
pub fn test_bench_no_iter() {
    // Benchmarking a fn that never calls `iter` must still complete
    // and send a result over the channel.
    fn f(_: &mut Bencher) {}

    let (tx, rx) = channel();

    let desc = TestDesc {
        name: StaticTestName("f"),
        should_panic: ShouldPanic::No,
    // nocapture = true: no stdout redirection for this run.
    ::bench::benchmark(desc, tx, true, f);
2112 pub fn test_bench_iter() {
2113 fn f(b: &mut Bencher) {
2117 let (tx, rx) = channel();
2119 let desc = TestDesc {
2120 name: StaticTestName("f"),
2122 should_panic: ShouldPanic::No,
2126 ::bench::benchmark(desc, tx, true, f);