1 // Copyright 2012-2017 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 //! Support code for rustc's built in unit-test and micro-benchmarking
14 //! Almost all user code will only be interested in `Bencher` and
15 //! `black_box`. All other interactions (such as writing tests and
16 //! benchmarks themselves) should be done via the `#[test]` and
17 //! `#[bench]` attributes.
19 //! See the [Testing Chapter](../book/first-edition/testing.html) of the book for more details.
21 // Currently, not much of this is meant for users. It is intended to
22 // support the simplest interface possible for representing and
23 // running tests while providing a base that other test frameworks may
26 // NB: this is also specified in this crate's Cargo.toml, but libsyntax contains logic specific to
27 // this crate, which relies on this attribute (rather than the value of `--crate-name` passed by
28 // cargo) to detect this crate.
30 #![deny(bare_trait_objects)]
32 #![crate_name = "test"]
33 #![unstable(feature = "test", issue = "27812")]
34 #![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
35 html_favicon_url = "https://doc.rust-lang.org/favicon.ico",
36 html_root_url = "https://doc.rust-lang.org/nightly/", test(attr(deny(warnings))))]
39 #![cfg_attr(any(unix, target_os = "cloudabi"), feature(libc))]
40 #![feature(set_stdio)]
41 #![feature(panic_unwind)]
42 #![feature(staged_api)]
43 #![feature(termination_trait_lib)]
46 #[cfg(any(unix, target_os = "cloudabi"))]
48 extern crate panic_unwind;
51 pub use self::TestFn::*;
52 pub use self::ColorConfig::*;
53 pub use self::TestResult::*;
54 pub use self::TestName::*;
55 use self::TestEvent::*;
56 use self::NamePadding::*;
57 use self::OutputLocation::*;
59 use std::panic::{catch_unwind, AssertUnwindSafe};
61 use std::boxed::FnBox;
63 use std::collections::BTreeMap;
67 use std::io::prelude::*;
69 use std::iter::repeat;
70 use std::path::PathBuf;
71 use std::process::Termination;
72 use std::sync::mpsc::{channel, Sender};
73 use std::sync::{Arc, Mutex};
75 use std::time::{Duration, Instant};
79 const TEST_WARN_TIMEOUT_S: u64 = 60;
80 const QUIET_MODE_MAX_COLUMN: usize = 100; // insert a '\n' after 100 tests in quiet mode
82 // to be used by rustc to compile tests in libtest
84 pub use {assert_test_result, filter_tests, parse_opts, run_test, test_main, test_main_static,
85 Bencher, DynTestFn, DynTestName, Metric, MetricMap, Options, ShouldPanic,
86 StaticBenchFn, StaticTestFn, StaticTestName, TestDesc, TestDescAndFn, TestName,
87 TestOpts, TestResult, TrFailed, TrFailedMsg, TrIgnored, TrOk};
93 use formatters::{JsonFormatter, OutputFormatter, PrettyFormatter, TerseFormatter};
95 // The name of a test. By convention this follows the rules for rust
96 // paths; i.e. it should be a series of identifiers separated by double
97 // colons. This way if some test runner wants to arrange the tests
98 // hierarchically it may.
100 #[derive(Clone, PartialEq, Eq, Hash, Debug)]
102 StaticTestName(&'static str),
104 AlignedTestName(Cow<'static, str>, NamePadding),
107 fn as_slice(&self) -> &str {
109 StaticTestName(s) => s,
110 DynTestName(ref s) => s,
111 AlignedTestName(ref s, _) => &*s,
115 fn padding(&self) -> NamePadding {
117 &AlignedTestName(_, p) => p,
122 fn with_padding(&self, padding: NamePadding) -> TestName {
123 let name = match self {
124 &TestName::StaticTestName(name) => Cow::Borrowed(name),
125 &TestName::DynTestName(ref name) => Cow::Owned(name.clone()),
126 &TestName::AlignedTestName(ref name, _) => name.clone(),
129 TestName::AlignedTestName(name, padding)
132 impl fmt::Display for TestName {
133 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
134 fmt::Display::fmt(self.as_slice(), f)
138 #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
139 pub enum NamePadding {
145 fn padded_name(&self, column_count: usize, align: NamePadding) -> String {
146 let mut name = String::from(self.name.as_slice());
147 let fill = column_count.saturating_sub(name.len());
148 let pad = repeat(" ").take(fill).collect::<String>();
159 /// Represents a benchmark function.
160 pub trait TDynBenchFn: Send {
161 fn run(&self, harness: &mut Bencher);
164 // A function that runs a test. If the function returns successfully,
165 // the test succeeds; if the function panics then the test fails. We
166 // may need to come up with a more clever definition of test in order
167 // to support isolation of tests into threads.
170 StaticBenchFn(fn(&mut Bencher)),
171 DynTestFn(Box<dyn FnBox() + Send>),
172 DynBenchFn(Box<dyn TDynBenchFn + 'static>),
176 fn padding(&self) -> NamePadding {
178 StaticTestFn(..) => PadNone,
179 StaticBenchFn(..) => PadOnRight,
180 DynTestFn(..) => PadNone,
181 DynBenchFn(..) => PadOnRight,
186 impl fmt::Debug for TestFn {
187 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
188 f.write_str(match *self {
189 StaticTestFn(..) => "StaticTestFn(..)",
190 StaticBenchFn(..) => "StaticBenchFn(..)",
191 DynTestFn(..) => "DynTestFn(..)",
192 DynBenchFn(..) => "DynBenchFn(..)",
197 /// Manager of the benchmarking runs.
199 /// This is fed into functions marked with `#[bench]` to allow for
200 /// set-up & tear-down before running a piece of code repeatedly via a
205 summary: Option<stats::Summary>,
209 #[derive(Clone, PartialEq, Eq)]
215 #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
216 pub enum ShouldPanic {
219 YesWithMessage(&'static str),
222 // The definition of a single test. A test runner will run a list of
224 #[derive(Clone, Debug, PartialEq, Eq, Hash)]
225 pub struct TestDesc {
228 pub should_panic: ShouldPanic,
229 pub allow_fail: bool,
233 pub struct TestDescAndFn {
238 #[derive(Clone, PartialEq, Debug, Copy)]
245 pub fn new(value: f64, noise: f64) -> Metric {
246 Metric { value, noise }
250 /// In case we want to add other options as well, just add them in this struct.
251 #[derive(Copy, Clone, Debug)]
253 display_output: bool,
257 pub fn new() -> Options {
259 display_output: false,
263 pub fn display_output(mut self, display_output: bool) -> Options {
264 self.display_output = display_output;
269 // The default console test runner. It accepts the command line
270 // arguments and a vector of test_descs.
271 pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>, options: Options) {
272 let mut opts = match parse_opts(args) {
275 eprintln!("error: {}", msg);
281 opts.options = options;
283 if let Err(e) = list_tests_console(&opts, tests) {
284 eprintln!("error: io error when listing tests: {:?}", e);
288 match run_tests_console(&opts, tests) {
290 Ok(false) => process::exit(101),
292 eprintln!("error: io error when listing tests: {:?}", e);
299 // A variant optimized for invocation with a static test vector.
300 // This will panic (intentionally) when fed any dynamic tests, because
301 // it is copying the static values out into a dynamic vector and cannot
302 // copy dynamic values. It is doing this because from this point on
303 // a Vec<TestDescAndFn> is used in order to effect ownership-transfer
304 // semantics into parallel test runners, which in turn requires a Vec<>
305 // rather than a &[].
306 pub fn test_main_static(tests: &[TestDescAndFn]) {
307 let args = env::args().collect::<Vec<_>>();
308 let owned_tests = tests
310 .map(|t| match t.testfn {
311 StaticTestFn(f) => TestDescAndFn {
312 testfn: StaticTestFn(f),
313 desc: t.desc.clone(),
315 StaticBenchFn(f) => TestDescAndFn {
316 testfn: StaticBenchFn(f),
317 desc: t.desc.clone(),
319 _ => panic!("non-static tests passed to test::test_main_static"),
322 test_main(&args, owned_tests, Options::new())
325 /// Invoked when unit tests terminate. Should panic if the unit
326 /// test is considered a failure. By default, invokes `report()`
327 /// and checks for a `0` result.
328 pub fn assert_test_result<T: Termination>(result: T) {
329 assert_eq!(result.report(), 0);
332 #[derive(Copy, Clone, Debug)]
333 pub enum ColorConfig {
339 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
340 pub enum OutputFormat {
347 pub struct TestOpts {
349 pub filter: Option<String>,
350 pub filter_exact: bool,
351 pub run_ignored: bool,
353 pub bench_benchmarks: bool,
354 pub logfile: Option<PathBuf>,
356 pub color: ColorConfig,
357 pub format: OutputFormat,
358 pub test_threads: Option<usize>,
359 pub skip: Vec<String>,
360 pub options: Options,
365 fn new() -> TestOpts {
372 bench_benchmarks: false,
376 format: OutputFormat::Pretty,
379 options: Options::new(),
384 /// Result of parsing the options.
385 pub type OptRes = Result<TestOpts, String>;
387 fn optgroups() -> getopts::Options {
388 let mut opts = getopts::Options::new();
389 opts.optflag("", "ignored", "Run ignored tests")
390 .optflag("", "test", "Run tests and not benchmarks")
391 .optflag("", "bench", "Run benchmarks instead of tests")
392 .optflag("", "list", "List all tests and benchmarks")
393 .optflag("h", "help", "Display this message (longer with --help)")
397 "Write logs to the specified file instead \
404 "don't capture stdout/stderr of each \
405 task, allow printing directly",
410 "Number of threads used for running tests \
417 "Skip tests whose names contain FILTER (this flag can \
418 be used multiple times)",
424 "Display one character per test instead of one line. \
425 Alias to --format=terse",
430 "Exactly match filters rather than by substring",
435 "Configure coloring of output:
436 auto = colorize if stdout is a tty and tests are run on serially (default);
437 always = always colorize output;
438 never = never colorize output;",
444 "Configure formatting of output:
445 pretty = Print verbose output;
446 terse = Display one character per test;
447 json = Output a json document",
453 "Enable nightly-only flags:
454 unstable-options = Allow use of experimental features",
460 fn usage(binary: &str, options: &getopts::Options) {
461 let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
465 The FILTER string is tested against the name of all tests, and only those
466 tests whose names contain the filter are run.
468 By default, all tests are run in parallel. This can be altered with the
469 --test-threads flag or the RUST_TEST_THREADS environment variable when running
472 All tests have their standard output and standard error captured by default.
473 This can be overridden with the --nocapture flag or setting RUST_TEST_NOCAPTURE
474 environment variable to a value other than "0". Logging is not captured by default.
478 #[test] - Indicates a function is a test to be run. This function
480 #[bench] - Indicates a function is a benchmark to be run. This
481 function takes one argument (test::Bencher).
482 #[should_panic] - This function (also labeled with #[test]) will only pass if
483 the code causes a panic (an assertion failure or panic!)
484 A message may be provided, which the failure string must
485 contain: #[should_panic(expected = "foo")].
486 #[ignore] - When applied to a function which is already attributed as a
487 test, then the test runner will ignore these tests during
488 normal test runs. Running with --ignored will run these
490 usage = options.usage(&message)
494 // FIXME: Copied from libsyntax until linkage errors are resolved. Issue #47566
// FIXME: Copied from libsyntax until linkage errors are resolved. Issue #47566
/// Returns true when nightly-only (unstable) features may be used:
/// either this is not a feature-staged (beta/stable) build, or the
/// RUSTC_BOOTSTRAP escape hatch is set.
fn is_nightly() -> bool {
    // Whether this is a feature-staged build, i.e. on the beta or stable channel
    let disable_unstable_features = option_env!("CFG_DISABLE_UNSTABLE_FEATURES").is_some();
    // Whether we should enable unstable features for bootstrapping
    let bootstrap = env::var("RUSTC_BOOTSTRAP").is_ok();

    bootstrap || !disable_unstable_features
}
504 // Parses command line arguments into test options
505 pub fn parse_opts(args: &[String]) -> Option<OptRes> {
506 let mut allow_unstable = false;
507 let opts = optgroups();
508 let args = args.get(1..).unwrap_or(args);
509 let matches = match opts.parse(args) {
511 Err(f) => return Some(Err(f.to_string())),
514 if let Some(opt) = matches.opt_str("Z") {
517 "the option `Z` is only accepted on the nightly compiler".into(),
522 "unstable-options" => {
523 allow_unstable = true;
526 return Some(Err("Unrecognized option to `Z`".into()));
531 if matches.opt_present("h") {
532 usage(&args[0], &opts);
536 let filter = if !matches.free.is_empty() {
537 Some(matches.free[0].clone())
542 let run_ignored = matches.opt_present("ignored");
543 let quiet = matches.opt_present("quiet");
544 let exact = matches.opt_present("exact");
545 let list = matches.opt_present("list");
547 let logfile = matches.opt_str("logfile");
548 let logfile = logfile.map(|s| PathBuf::from(&s));
550 let bench_benchmarks = matches.opt_present("bench");
551 let run_tests = !bench_benchmarks || matches.opt_present("test");
553 let mut nocapture = matches.opt_present("nocapture");
555 nocapture = match env::var("RUST_TEST_NOCAPTURE") {
556 Ok(val) => &val != "0",
561 let test_threads = match matches.opt_str("test-threads") {
562 Some(n_str) => match n_str.parse::<usize>() {
563 Ok(0) => return Some(Err(format!("argument for --test-threads must not be 0"))),
566 return Some(Err(format!(
567 "argument for --test-threads must be a number > 0 \
576 let color = match matches.opt_str("color").as_ref().map(|s| &**s) {
577 Some("auto") | None => AutoColor,
578 Some("always") => AlwaysColor,
579 Some("never") => NeverColor,
582 return Some(Err(format!(
583 "argument for --color must be auto, always, or never (was \
590 let format = match matches.opt_str("format").as_ref().map(|s| &**s) {
591 None if quiet => OutputFormat::Terse,
592 Some("pretty") | None => OutputFormat::Pretty,
593 Some("terse") => OutputFormat::Terse,
597 "The \"json\" format is only accepted on the nightly compiler".into(),
604 return Some(Err(format!(
605 "argument for --format must be pretty, terse, or json (was \
612 let test_opts = TestOpts {
624 skip: matches.opt_strs("skip"),
625 options: Options::new(),
631 #[derive(Clone, PartialEq)]
632 pub struct BenchSamples {
633 ns_iter_summ: stats::Summary,
637 #[derive(Clone, PartialEq)]
638 pub enum TestResult {
644 TrBench(BenchSamples),
647 unsafe impl Send for TestResult {}
649 enum OutputLocation<T> {
650 Pretty(Box<term::StdoutTerminal>),
654 impl<T: Write> Write for OutputLocation<T> {
655 fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
657 Pretty(ref mut term) => term.write(buf),
658 Raw(ref mut stdout) => stdout.write(buf),
662 fn flush(&mut self) -> io::Result<()> {
664 Pretty(ref mut term) => term.flush(),
665 Raw(ref mut stdout) => stdout.flush(),
670 struct ConsoleTestState {
671 log_out: Option<File>,
680 failures: Vec<(TestDesc, Vec<u8>)>,
681 not_failures: Vec<(TestDesc, Vec<u8>)>,
685 impl ConsoleTestState {
686 pub fn new(opts: &TestOpts) -> io::Result<ConsoleTestState> {
687 let log_out = match opts.logfile {
688 Some(ref path) => Some(File::create(path)?),
692 Ok(ConsoleTestState {
701 metrics: MetricMap::new(),
702 failures: Vec::new(),
703 not_failures: Vec::new(),
704 options: opts.options,
708 pub fn write_log<S: AsRef<str>>(&mut self, msg: S) -> io::Result<()> {
709 let msg = msg.as_ref();
712 Some(ref mut o) => o.write_all(msg.as_bytes()),
716 pub fn write_log_result(&mut self, test: &TestDesc, result: &TestResult) -> io::Result<()> {
717 self.write_log(format!(
720 TrOk => "ok".to_owned(),
721 TrFailed => "failed".to_owned(),
722 TrFailedMsg(ref msg) => format!("failed: {}", msg),
723 TrIgnored => "ignored".to_owned(),
724 TrAllowedFail => "failed (allowed)".to_owned(),
725 TrBench(ref bs) => fmt_bench_samples(bs),
731 fn current_test_count(&self) -> usize {
732 self.passed + self.failed + self.ignored + self.measured + self.allowed_fail
736 // Format a number with thousands separators
// Format a number with thousands separators, e.g. 1234567 -> "1,234,567".
// Handles values up to the 10^9 group; `trailing` switches to zero-padded
// 3-digit groups once the most significant group has been emitted.
fn fmt_thousands_sep(mut n: usize, sep: char) -> String {
    use std::fmt::Write;
    let mut output = String::new();
    let mut trailing = false;
    for &pow in &[9, 6, 3, 0] {
        let base = 10_usize.pow(pow);
        // Emit the group when it is non-zero, when a higher group was
        // already emitted (pad with zeros), or for the final group so
        // that 0 still prints as "0".
        if pow == 0 || trailing || n / base != 0 {
            if !trailing {
                output.write_fmt(format_args!("{}", n / base)).unwrap();
            } else {
                output.write_fmt(format_args!("{:03}", n / base)).unwrap();
            }
            if pow != 0 {
                output.push(sep);
            }
            trailing = true;
        }
        n %= base;
    }

    output
}
760 pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
762 let mut output = String::new();
764 let median = bs.ns_iter_summ.median as usize;
765 let deviation = (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize;
768 .write_fmt(format_args!(
769 "{:>11} ns/iter (+/- {})",
770 fmt_thousands_sep(median, ','),
771 fmt_thousands_sep(deviation, ',')
776 .write_fmt(format_args!(" = {} MB/s", bs.mb_s))
782 // List the tests to console, and optionally to logfile. Filters are honored.
783 pub fn list_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<()> {
784 let mut output = match term::stdout() {
785 None => Raw(io::stdout()),
786 Some(t) => Pretty(t),
789 let quiet = opts.format == OutputFormat::Terse;
790 let mut st = ConsoleTestState::new(opts)?;
795 for test in filter_tests(&opts, tests) {
799 desc: TestDesc { name, .. },
803 let fntype = match testfn {
804 StaticTestFn(..) | DynTestFn(..) => {
808 StaticBenchFn(..) | DynBenchFn(..) => {
814 writeln!(output, "{}: {}", name, fntype)?;
815 st.write_log(format!("{} {}\n", fntype, name))?;
/// Naive English pluralisation for summary lines: "1 test" / "2 tests".
fn plural(count: u32, s: &str) -> String {
    match count {
        1 => format!("{} {}", 1, s),
        n => format!("{} {}s", n, s),
    }
}
826 if ntest != 0 || nbench != 0 {
827 writeln!(output, "")?;
833 plural(ntest, "test"),
834 plural(nbench, "benchmark")
841 // A simple console test runner
842 pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<bool> {
845 st: &mut ConsoleTestState,
846 out: &mut dyn OutputFormatter,
847 ) -> io::Result<()> {
848 match (*event).clone() {
849 TeFiltered(ref filtered_tests) => {
850 st.total = filtered_tests.len();
851 out.write_run_start(filtered_tests.len())
853 TeFilteredOut(filtered_out) => Ok(st.filtered_out = filtered_out),
854 TeWait(ref test) => out.write_test_start(test),
855 TeTimeout(ref test) => out.write_timeout(test),
856 TeResult(test, result, stdout) => {
857 st.write_log_result(&test, &result)?;
858 out.write_result(&test, &result, &*stdout)?;
862 st.not_failures.push((test, stdout));
864 TrIgnored => st.ignored += 1,
865 TrAllowedFail => st.allowed_fail += 1,
867 st.metrics.insert_metric(
868 test.name.as_slice(),
869 bs.ns_iter_summ.median,
870 bs.ns_iter_summ.max - bs.ns_iter_summ.min,
876 st.failures.push((test, stdout));
878 TrFailedMsg(msg) => {
880 let mut stdout = stdout;
881 stdout.extend_from_slice(format!("note: {}", msg).as_bytes());
882 st.failures.push((test, stdout));
890 let output = match term::stdout() {
891 None => Raw(io::stdout()),
892 Some(t) => Pretty(t),
895 let max_name_len = tests
897 .max_by_key(|t| len_if_padded(*t))
898 .map(|t| t.desc.name.as_slice().len())
901 let is_multithreaded = opts.test_threads.unwrap_or_else(get_concurrency) > 1;
903 let mut out: Box<dyn OutputFormatter> = match opts.format {
904 OutputFormat::Pretty => Box::new(PrettyFormatter::new(
910 OutputFormat::Terse => Box::new(TerseFormatter::new(
916 OutputFormat::Json => Box::new(JsonFormatter::new(output)),
918 let mut st = ConsoleTestState::new(opts)?;
919 fn len_if_padded(t: &TestDescAndFn) -> usize {
920 match t.testfn.padding() {
922 PadOnRight => t.desc.name.as_slice().len(),
926 run_tests(opts, tests, |x| callback(&x, &mut st, &mut *out))?;
928 assert!(st.current_test_count() == st.total);
930 return out.write_run_finish(&st);
934 fn should_sort_failures_before_printing_them() {
935 let test_a = TestDesc {
936 name: StaticTestName("a"),
938 should_panic: ShouldPanic::No,
942 let test_b = TestDesc {
943 name: StaticTestName("b"),
945 should_panic: ShouldPanic::No,
949 let mut out = PrettyFormatter::new(Raw(Vec::new()), false, 10, false);
951 let st = ConsoleTestState {
960 metrics: MetricMap::new(),
961 failures: vec![(test_b, Vec::new()), (test_a, Vec::new())],
962 options: Options::new(),
963 not_failures: Vec::new(),
966 out.write_failures(&st).unwrap();
967 let s = match out.output_location() {
968 &Raw(ref m) => String::from_utf8_lossy(&m[..]),
969 &Pretty(_) => unreachable!(),
972 let apos = s.find("a").unwrap();
973 let bpos = s.find("b").unwrap();
974 assert!(apos < bpos);
977 fn use_color(opts: &TestOpts) -> bool {
979 AutoColor => !opts.nocapture && stdout_isatty(),
985 #[cfg(any(target_os = "cloudabi", target_os = "redox",
986 all(target_arch = "wasm32", not(target_os = "emscripten"))))]
987 fn stdout_isatty() -> bool {
988 // FIXME: Implement isatty on Redox
992 fn stdout_isatty() -> bool {
993 unsafe { libc::isatty(libc::STDOUT_FILENO) != 0 }
996 fn stdout_isatty() -> bool {
999 type HANDLE = *mut u8;
1000 type LPDWORD = *mut u32;
1001 const STD_OUTPUT_HANDLE: DWORD = -11i32 as DWORD;
1003 fn GetStdHandle(which: DWORD) -> HANDLE;
1004 fn GetConsoleMode(hConsoleHandle: HANDLE, lpMode: LPDWORD) -> BOOL;
1007 let handle = GetStdHandle(STD_OUTPUT_HANDLE);
1009 GetConsoleMode(handle, &mut out) != 0
1014 pub enum TestEvent {
1015 TeFiltered(Vec<TestDesc>),
1017 TeResult(TestDesc, TestResult, Vec<u8>),
1018 TeTimeout(TestDesc),
1019 TeFilteredOut(usize),
1022 pub type MonitorMsg = (TestDesc, TestResult, Vec<u8>);
// Shared in-memory byte sink used to capture a test's stdout/stderr;
// the Arc<Mutex<...>> lets the test thread write while the runner
// later reads the captured bytes.
struct Sink(Arc<Mutex<Vec<u8>>>);
impl Write for Sink {
    fn write(&mut self, data: &[u8]) -> io::Result<usize> {
        // Delegate to Vec<u8>'s Write impl under the lock.
        Write::write(&mut *self.0.lock().unwrap(), data)
    }
    fn flush(&mut self) -> io::Result<()> {
        // Nothing buffered beyond the Vec itself.
        Ok(())
    }
}
1034 pub fn run_tests<F>(opts: &TestOpts, tests: Vec<TestDescAndFn>, mut callback: F) -> io::Result<()>
1036 F: FnMut(TestEvent) -> io::Result<()>,
1038 use std::collections::HashMap;
1039 use std::sync::mpsc::RecvTimeoutError;
1041 let tests_len = tests.len();
1043 let mut filtered_tests = filter_tests(opts, tests);
1044 if !opts.bench_benchmarks {
1045 filtered_tests = convert_benchmarks_to_tests(filtered_tests);
1048 let filtered_tests = {
1049 let mut filtered_tests = filtered_tests;
1050 for test in filtered_tests.iter_mut() {
1051 test.desc.name = test.desc.name.with_padding(test.testfn.padding());
1057 let filtered_out = tests_len - filtered_tests.len();
1058 callback(TeFilteredOut(filtered_out))?;
1060 let filtered_descs = filtered_tests.iter().map(|t| t.desc.clone()).collect();
1062 callback(TeFiltered(filtered_descs))?;
1064 let (filtered_tests, filtered_benchs): (Vec<_>, _) =
1065 filtered_tests.into_iter().partition(|e| match e.testfn {
1066 StaticTestFn(_) | DynTestFn(_) => true,
1070 let concurrency = opts.test_threads.unwrap_or_else(get_concurrency);
1072 let mut remaining = filtered_tests;
1073 remaining.reverse();
1074 let mut pending = 0;
1076 let (tx, rx) = channel::<MonitorMsg>();
1078 let mut running_tests: HashMap<TestDesc, Instant> = HashMap::new();
1080 fn get_timed_out_tests(running_tests: &mut HashMap<TestDesc, Instant>) -> Vec<TestDesc> {
1081 let now = Instant::now();
1082 let timed_out = running_tests
1084 .filter_map(|(desc, timeout)| {
1085 if &now >= timeout {
1092 for test in &timed_out {
1093 running_tests.remove(test);
1098 fn calc_timeout(running_tests: &HashMap<TestDesc, Instant>) -> Option<Duration> {
1099 running_tests.values().min().map(|next_timeout| {
1100 let now = Instant::now();
1101 if *next_timeout >= now {
1109 if concurrency == 1 {
1110 while !remaining.is_empty() {
1111 let test = remaining.pop().unwrap();
1112 callback(TeWait(test.desc.clone()))?;
1113 run_test(opts, !opts.run_tests, test, tx.clone());
1114 let (test, result, stdout) = rx.recv().unwrap();
1115 callback(TeResult(test, result, stdout))?;
1118 while pending > 0 || !remaining.is_empty() {
1119 while pending < concurrency && !remaining.is_empty() {
1120 let test = remaining.pop().unwrap();
1121 let timeout = Instant::now() + Duration::from_secs(TEST_WARN_TIMEOUT_S);
1122 running_tests.insert(test.desc.clone(), timeout);
1123 callback(TeWait(test.desc.clone()))?; //here no pad
1124 run_test(opts, !opts.run_tests, test, tx.clone());
1130 if let Some(timeout) = calc_timeout(&running_tests) {
1131 res = rx.recv_timeout(timeout);
1132 for test in get_timed_out_tests(&mut running_tests) {
1133 callback(TeTimeout(test))?;
1135 if res != Err(RecvTimeoutError::Timeout) {
1139 res = rx.recv().map_err(|_| RecvTimeoutError::Disconnected);
1144 let (desc, result, stdout) = res.unwrap();
1145 running_tests.remove(&desc);
1147 callback(TeResult(desc, result, stdout))?;
1152 if opts.bench_benchmarks {
1153 // All benchmarks run at the end, in serial.
1154 for b in filtered_benchs {
1155 callback(TeWait(b.desc.clone()))?;
1156 run_test(opts, false, b, tx.clone());
1157 let (test, result, stdout) = rx.recv().unwrap();
1158 callback(TeResult(test, result, stdout))?;
1164 #[allow(deprecated)]
1165 fn get_concurrency() -> usize {
1166 return match env::var("RUST_TEST_THREADS") {
1168 let opt_n: Option<usize> = s.parse().ok();
1170 Some(n) if n > 0 => n,
1172 "RUST_TEST_THREADS is `{}`, should be a positive integer.",
1177 Err(..) => num_cpus(),
1182 fn num_cpus() -> usize {
1184 struct SYSTEM_INFO {
1185 wProcessorArchitecture: u16,
1188 lpMinimumApplicationAddress: *mut u8,
1189 lpMaximumApplicationAddress: *mut u8,
1190 dwActiveProcessorMask: *mut u8,
1191 dwNumberOfProcessors: u32,
1192 dwProcessorType: u32,
1193 dwAllocationGranularity: u32,
1194 wProcessorLevel: u16,
1195 wProcessorRevision: u16,
1198 fn GetSystemInfo(info: *mut SYSTEM_INFO) -> i32;
1201 let mut sysinfo = std::mem::zeroed();
1202 GetSystemInfo(&mut sysinfo);
1203 sysinfo.dwNumberOfProcessors as usize
#[cfg(target_os = "redox")]
fn num_cpus() -> usize {
    // FIXME: Implement num_cpus on Redox
    1
}
// wasm has no thread support here, so a single "CPU" is reported.
#[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))]
fn num_cpus() -> usize {
    1
}
1218 #[cfg(any(target_os = "android", target_os = "cloudabi", target_os = "emscripten",
1219 target_os = "fuchsia", target_os = "ios", target_os = "linux",
1220 target_os = "macos", target_os = "solaris"))]
1221 fn num_cpus() -> usize {
1222 unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as usize }
1225 #[cfg(any(target_os = "freebsd", target_os = "dragonfly", target_os = "bitrig",
1226 target_os = "netbsd"))]
1227 fn num_cpus() -> usize {
1230 let mut cpus: libc::c_uint = 0;
1231 let mut cpus_size = std::mem::size_of_val(&cpus);
1234 cpus = libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as libc::c_uint;
1237 let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
1242 &mut cpus as *mut _ as *mut _,
1243 &mut cpus_size as *mut _ as *mut _,
1255 #[cfg(target_os = "openbsd")]
1256 fn num_cpus() -> usize {
1259 let mut cpus: libc::c_uint = 0;
1260 let mut cpus_size = std::mem::size_of_val(&cpus);
1261 let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
1267 &mut cpus as *mut _ as *mut _,
1268 &mut cpus_size as *mut _ as *mut _,
1279 #[cfg(target_os = "haiku")]
1280 fn num_cpus() -> usize {
1285 #[cfg(target_os = "l4re")]
1286 fn num_cpus() -> usize {
1292 pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
1293 let mut filtered = tests;
1294 // Remove tests that don't match the test filter
1295 filtered = match opts.filter {
1297 Some(ref filter) => filtered
1300 if opts.filter_exact {
1301 test.desc.name.as_slice() == &filter[..]
1303 test.desc.name.as_slice().contains(&filter[..])
1309 // Skip tests that match any of the skip filters
1313 !opts.skip.iter().any(|sf| {
1314 if opts.filter_exact {
1315 t.desc.name.as_slice() == &sf[..]
1317 t.desc.name.as_slice().contains(&sf[..])
1323 // Maybe pull out the ignored test and unignore them
1324 filtered = if !opts.run_ignored {
1327 fn filter(test: TestDescAndFn) -> Option<TestDescAndFn> {
1328 if test.desc.ignore {
1329 let TestDescAndFn { desc, testfn } = test;
1330 Some(TestDescAndFn {
1341 filtered.into_iter().filter_map(filter).collect()
1344 // Sort the tests alphabetically
1345 filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));
1350 pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
1351 // convert benchmarks to tests, if we're not benchmarking them
1355 let testfn = match x.testfn {
1356 DynBenchFn(bench) => DynTestFn(Box::new(move || {
1357 bench::run_once(|b| __rust_begin_short_backtrace(|| bench.run(b)))
1359 StaticBenchFn(benchfn) => DynTestFn(Box::new(move || {
1360 bench::run_once(|b| __rust_begin_short_backtrace(|| benchfn(b)))
1375 test: TestDescAndFn,
1376 monitor_ch: Sender<MonitorMsg>,
1378 let TestDescAndFn { desc, testfn } = test;
1380 let ignore_because_panic_abort = cfg!(target_arch = "wasm32") && !cfg!(target_os = "emscripten")
1381 && desc.should_panic != ShouldPanic::No;
1383 if force_ignore || desc.ignore || ignore_because_panic_abort {
1384 monitor_ch.send((desc, TrIgnored, Vec::new())).unwrap();
1390 monitor_ch: Sender<MonitorMsg>,
1392 testfn: Box<dyn FnBox() + Send>,
1394 // Buffer for capturing standard I/O
1395 let data = Arc::new(Mutex::new(Vec::new()));
1396 let data2 = data.clone();
1398 let name = desc.name.clone();
1399 let runtest = move || {
1400 let oldio = if !nocapture {
1402 io::set_print(Some(Box::new(Sink(data2.clone())))),
1403 io::set_panic(Some(Box::new(Sink(data2)))),
1409 let result = catch_unwind(AssertUnwindSafe(testfn));
1411 if let Some((printio, panicio)) = oldio {
1412 io::set_print(printio);
1413 io::set_panic(panicio);
1416 let test_result = calc_result(&desc, result);
1417 let stdout = data.lock().unwrap().to_vec();
1419 .send((desc.clone(), test_result, stdout))
1423 // If the platform is single-threaded we're just going to run
1424 // the test synchronously, regardless of the concurrency
1426 let supports_threads = !cfg!(target_os = "emscripten") && !cfg!(target_arch = "wasm32");
1427 if supports_threads {
1428 let cfg = thread::Builder::new().name(name.as_slice().to_owned());
1429 cfg.spawn(runtest).unwrap();
1436 DynBenchFn(bencher) => {
1437 ::bench::benchmark(desc, monitor_ch, opts.nocapture, |harness| {
1438 bencher.run(harness)
1441 StaticBenchFn(benchfn) => {
1442 ::bench::benchmark(desc, monitor_ch, opts.nocapture, |harness| {
1443 (benchfn.clone())(harness)
1447 let cb = move || __rust_begin_short_backtrace(f);
1448 run_test_inner(desc, monitor_ch, opts.nocapture, Box::new(cb))
1450 StaticTestFn(f) => run_test_inner(
1454 Box::new(move || __rust_begin_short_backtrace(f)),
/// Fixed frame used to clean the backtrace with `RUST_BACKTRACE=1`.
#[inline(never)]
fn __rust_begin_short_backtrace<F: FnOnce()>(f: F) {
    f()
}
1465 fn calc_result(desc: &TestDesc, task_result: Result<(), Box<dyn Any + Send>>) -> TestResult {
1466 match (&desc.should_panic, task_result) {
1467 (&ShouldPanic::No, Ok(())) | (&ShouldPanic::Yes, Err(_)) => TrOk,
1468 (&ShouldPanic::YesWithMessage(msg), Err(ref err)) => {
1469 if err.downcast_ref::<String>()
1471 .or_else(|| err.downcast_ref::<&'static str>().map(|e| *e))
1472 .map(|e| e.contains(msg))
1477 if desc.allow_fail {
1480 TrFailedMsg(format!("Panic did not include expected string '{}'", msg))
1484 _ if desc.allow_fail => TrAllowedFail,
// Collected benchmark metrics keyed by metric/test name; BTreeMap keeps
// iteration (and thus formatted output) in a stable, sorted order.
1489 #[derive(Clone, PartialEq)]
1490 pub struct MetricMap(BTreeMap<String, Metric>);
1493 pub fn new() -> MetricMap {
1494 MetricMap(BTreeMap::new())
1497 /// Insert a named `value` (+/- `noise`) metric into the map. The value
1498 /// must be non-negative. The `noise` indicates the uncertainty of the
1499 /// metric, which doubles as the "noise range" of acceptable
1500 /// pairwise-regressions on this named value, when comparing from one
1501 /// metric to the next using `compare_to_old`.
1503 /// If `noise` is positive, then it means this metric is of a value
1504 /// you want to see grow smaller, so a change larger than `noise` in the
1505 /// positive direction represents a regression.
1507 /// If `noise` is negative, then it means this metric is of a value
1508 /// you want to see grow larger, so a change larger than `noise` in the
1509 /// negative direction represents a regression.
1510 pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
1511 let m = Metric { value, noise };
// BTreeMap::insert replaces any existing metric with the same name.
1512 self.0.insert(name.to_owned(), m);
// Renders every metric as a "name: value (+/- noise)" string.
1515 pub fn fmt_metrics(&self) -> String {
1518 .map(|(k, v)| format!("{}: {} (+/- {})", *k, v.value, v.noise))
1519 .collect::<Vec<_>>();
// NOTE(review): the iteration source before `.map` and the final join of
// the collected Vec into one String are not visible in this excerpt.
1526 /// A function that is opaque to the optimizer, to allow benchmarks to
1527 /// pretend to use outputs to assist in avoiding dead-code
1530 /// This function is a no-op, and does not even read from `dummy`.
1531 #[cfg(not(any(target_arch = "asmjs", target_arch = "wasm32")))]
1532 pub fn black_box<T>(dummy: T) -> T {
1533 // we need to "use" the argument in some way LLVM can't
// The empty asm statement takes `&dummy` as an "r" input operand, so
// LLVM must assume the value is observed and cannot dead-code it away;
// no instructions are actually emitted.
1535 unsafe { asm!("" : : "r"(&dummy)) }
// Fallback for targets without inline-asm support (asmjs/wasm32).
// NOTE(review): the body is not visible in this excerpt; presumably it
// just returns `dummy`, providing no optimizer barrier on these targets.
1538 #[cfg(any(target_arch = "asmjs", target_arch = "wasm32"))]
1540 pub fn black_box<T>(dummy: T) -> T {
1545 /// Callback for benchmark functions to run in their body.
1546 pub fn iter<T, F>(&mut self, mut inner: F)
// Single mode (used by `run_once`): time exactly one pass, no sampling.
1550 if self.mode == BenchMode::Single {
1551 ns_iter_inner(&mut inner, 1);
// Auto mode: run the full statistical sampling loop and record the summary.
1555 self.summary = Some(iter(&mut inner));
// Runs the benchmark function `f` with this `Bencher` and returns the
// summary recorded by `iter`, or `None` if `f` never called `iter`.
1558 pub fn bench<F>(&mut self, mut f: F) -> Option<stats::Summary>
1560 F: FnMut(&mut Bencher),
1563 return self.summary;
// Converts a `Duration` to whole nanoseconds. The multiply can overflow
// for durations over ~584 years, which is irrelevant for benchmark runs.
1567 fn ns_from_dur(dur: Duration) -> u64 {
1568 dur.as_secs() * 1_000_000_000 + (dur.subsec_nanos() as u64)
// Times `k` consecutive invocations of `inner`, returning the total
// elapsed wall-clock time in nanoseconds.
1571 fn ns_iter_inner<T, F>(inner: &mut F, k: u64) -> u64
1575 let start = Instant::now();
// NOTE(review): the loop running `inner` k times (through `black_box`)
// is not visible in this excerpt.
1579 return ns_from_dur(start.elapsed());
// Auto-benchmark driver: adaptively picks an iteration count, collects
// 50 per-iteration-cost samples at `n` and `5*n` iterations, and stops
// once the statistics converge (or hard time limits are hit).
1582 pub fn iter<T, F>(inner: &mut F) -> stats::Summary
1586 // Initial bench run to get ballpark figure.
1587 let ns_single = ns_iter_inner(inner, 1);
1589 // Try to estimate iter count for 1ms falling back to 1m
1590 // iterations if first run took < 1ns.
1591 let ns_target_total = 1_000_000; // 1ms
1592 let mut n = ns_target_total / cmp::max(1, ns_single);
1594 // if the first run took more than 1ms we don't want to just
1595 // be left doing 0 iterations on every loop. The unfortunate
1596 // side effect of not being able to do as many runs is
1597 // automatically handled by the statistical analysis below
1598 // (i.e. larger error bars).
// NOTE(review): the `n = cmp::max(1, n)` clamp implied by the comment
// above is in a line missing from this excerpt.
1601 let mut total_run = Duration::new(0, 0);
1602 let samples: &mut [f64] = &mut [0.0_f64; 50];
1604 let loop_start = Instant::now();
// First sample set: cost per iteration measured over `n` iterations.
1606 for p in &mut *samples {
1607 *p = ns_iter_inner(inner, n) as f64 / n as f64;
// Winsorize clips the extreme 5% tails so one outlier run cannot
// dominate the summary statistics.
1610 stats::winsorize(samples, 5.0);
1611 let summ = stats::Summary::new(samples);
// Second sample set at 5x the iteration count, used to detect whether
// the measured median has stabilized.
1613 for p in &mut *samples {
1614 let ns = ns_iter_inner(inner, 5 * n);
1615 *p = ns as f64 / (5 * n) as f64;
1618 stats::winsorize(samples, 5.0);
1619 let summ5 = stats::Summary::new(samples);
1621 let loop_run = loop_start.elapsed();
1623 // If we've run for 100ms and seem to have converged to a
// Converged: low relative deviation and the n / 5n medians agree to
// within the noise; return the 5n summary (line not visible here).
1625 if loop_run > Duration::from_millis(100) && summ.median_abs_dev_pct < 1.0
1626 && summ.median - summ5.median < summ5.median_abs_dev
1631 total_run = total_run + loop_run;
1632 // Longest we ever run for is 3s.
1633 if total_run > Duration::from_secs(3) {
1637 // If we overflow here just return the results so far. We check a
1638 // multiplier of 10 because we're about to multiply by 2 and the
1639 // next iteration of the loop will also multiply by 5 (to calculate
1640 // the summ5 result)
1641 n = match n.checked_mul(10) {
1651 use std::panic::{catch_unwind, AssertUnwindSafe};
1654 use std::sync::{Arc, Mutex};
1656 use super::{BenchMode, BenchSamples, Bencher, MonitorMsg, Sender, Sink, TestDesc, TestResult};
// Runs one benchmark: captures its output (unless `nocapture`), executes
// it under `catch_unwind`, converts the timing summary to a TestResult,
// and reports (desc, result, captured stdout) over `monitor_ch`.
1658 pub fn benchmark<F>(desc: TestDesc, monitor_ch: Sender<MonitorMsg>, nocapture: bool, f: F)
1660 F: FnMut(&mut Bencher),
1662 let mut bs = Bencher {
1663 mode: BenchMode::Auto,
// Shared buffer receiving everything the benchmark prints; `data2` is
// the clone handed to the print/panic sinks.
1668 let data = Arc::new(Mutex::new(Vec::new()));
1669 let data2 = data.clone();
// Redirect print and panic output into the capture buffer, remembering
// the previous handlers so they can be restored afterwards.
1671 let oldio = if !nocapture {
1673 io::set_print(Some(Box::new(Sink(data2.clone())))),
1674 io::set_panic(Some(Box::new(Sink(data2)))),
// AssertUnwindSafe: `bs` is only used again after the closure returns,
// so observing it post-panic is acceptable here.
1680 let result = catch_unwind(AssertUnwindSafe(|| bs.bench(f)));
1682 if let Some((printio, panicio)) = oldio {
1683 io::set_print(printio);
1684 io::set_panic(panicio);
1687 let test_result = match result {
1689 Ok(Some(ns_iter_summ)) => {
// Clamp the median to >= 1ns to avoid dividing by zero below.
1690 let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
1691 let mb_s = bs.bytes * 1000 / ns_iter;
1693 let bs = BenchSamples {
1695 mb_s: mb_s as usize,
1697 TestResult::TrBench(bs)
1700 // iter not called, so no data.
1701 // FIXME: error in this case?
// Report a dummy single-zero-sample summary instead of failing.
1702 let samples: &mut [f64] = &mut [0.0_f64; 1];
1703 let bs = BenchSamples {
1704 ns_iter_summ: stats::Summary::new(samples),
1707 TestResult::TrBench(bs)
// The benchmark panicked.
1709 Err(_) => TestResult::TrFailed,
1712 let stdout = data.lock().unwrap().to_vec();
1713 monitor_ch.send((desc, test_result, stdout)).unwrap();
// Runs a benchmark body exactly once (used when benches run as tests):
// Single mode makes `Bencher::iter` time one pass with no sampling.
1716 pub fn run_once<F>(f: F)
1718 F: FnMut(&mut Bencher),
1720 let mut bs = Bencher {
1721 mode: BenchMode::Single,
1731 use test::{filter_tests, parse_opts, run_test, DynTestFn, DynTestName, MetricMap, ShouldPanic,
1732 StaticTestName, TestDesc, TestDescAndFn, TestOpts, TrFailed, TrFailedMsg,
1734 use std::sync::mpsc::channel;
// An ignored test run with default opts must not report TrOk.
1739 pub fn do_not_run_ignored_tests() {
1743 let desc = TestDescAndFn {
1745 name: StaticTestName("whatever"),
1747 should_panic: ShouldPanic::No,
1750 testfn: DynTestFn(Box::new(f)),
1752 let (tx, rx) = channel();
1753 run_test(&TestOpts::new(), false, desc, tx);
1754 let (_, res, _) = rx.recv().unwrap();
1755 assert!(res != TrOk);
// An ignored test must be reported as TrIgnored, not run to completion.
1759 pub fn ignored_tests_result_in_ignored() {
1761 let desc = TestDescAndFn {
1763 name: StaticTestName("whatever"),
1765 should_panic: ShouldPanic::No,
1768 testfn: DynTestFn(Box::new(f)),
1770 let (tx, rx) = channel();
1771 run_test(&TestOpts::new(), false, desc, tx);
1772 let (_, res, _) = rx.recv().unwrap();
1773 assert!(res == TrIgnored);
// A panicking body with ShouldPanic::Yes counts as success (TrOk).
1777 fn test_should_panic() {
1781 let desc = TestDescAndFn {
1783 name: StaticTestName("whatever"),
1785 should_panic: ShouldPanic::Yes,
1788 testfn: DynTestFn(Box::new(f)),
1790 let (tx, rx) = channel();
1791 run_test(&TestOpts::new(), false, desc, tx);
1792 let (_, res, _) = rx.recv().unwrap();
1793 assert!(res == TrOk);
// YesWithMessage succeeds when the panic message contains the expected
// substring ("error message" is contained in "an error message").
1797 fn test_should_panic_good_message() {
1799 panic!("an error message");
1801 let desc = TestDescAndFn {
1803 name: StaticTestName("whatever"),
1805 should_panic: ShouldPanic::YesWithMessage("error message"),
1808 testfn: DynTestFn(Box::new(f)),
1810 let (tx, rx) = channel();
1811 run_test(&TestOpts::new(), false, desc, tx);
1812 let (_, res, _) = rx.recv().unwrap();
1813 assert!(res == TrOk);
// YesWithMessage fails with the explanatory TrFailedMsg when the panic
// message does not contain the expected substring.
1817 fn test_should_panic_bad_message() {
1819 panic!("an error message");
1821 let expected = "foobar";
// Must stay in sync with the message built in `calc_result`.
1822 let failed_msg = "Panic did not include expected string";
1823 let desc = TestDescAndFn {
1825 name: StaticTestName("whatever"),
1827 should_panic: ShouldPanic::YesWithMessage(expected),
1830 testfn: DynTestFn(Box::new(f)),
1832 let (tx, rx) = channel();
1833 run_test(&TestOpts::new(), false, desc, tx);
1834 let (_, res, _) = rx.recv().unwrap();
1835 assert!(res == TrFailedMsg(format!("{} '{}'", failed_msg, expected)));
// A non-panicking body with ShouldPanic::Yes must be reported TrFailed.
1839 fn test_should_panic_but_succeeds() {
1841 let desc = TestDescAndFn {
1843 name: StaticTestName("whatever"),
1845 should_panic: ShouldPanic::Yes,
1848 testfn: DynTestFn(Box::new(f)),
1850 let (tx, rx) = channel();
1851 run_test(&TestOpts::new(), false, desc, tx);
1852 let (_, res, _) = rx.recv().unwrap();
1853 assert!(res == TrFailed);
// `--ignored` on the command line must set `opts.run_ignored`.
1857 fn parse_ignored_flag() {
1859 "progname".to_string(),
1860 "filter".to_string(),
1861 "--ignored".to_string(),
1863 let opts = match parse_opts(&args) {
1865 _ => panic!("Malformed arg in parse_ignored_flag"),
1867 assert!((opts.run_ignored));
1871 pub fn filter_for_ignored_option() {
1872 // When we run ignored tests the test filter should filter out all the
1873 // unignored tests and flip the ignore flag on the rest to false
1875 let mut opts = TestOpts::new();
1876 opts.run_tests = true;
1877 opts.run_ignored = true;
// Two tests: "1" (presumably ignored, per the assertions below) and "2"
// (not ignored); the excerpt's missing lines carry the `ignore` flags.
1882 name: StaticTestName("1"),
1884 should_panic: ShouldPanic::No,
1887 testfn: DynTestFn(Box::new(move || {})),
1891 name: StaticTestName("2"),
1893 should_panic: ShouldPanic::No,
1896 testfn: DynTestFn(Box::new(move || {})),
1899 let filtered = filter_tests(&opts, tests);
// Only the originally-ignored test survives, with its flag cleared.
1901 assert_eq!(filtered.len(), 1);
1902 assert_eq!(filtered[0].desc.name.to_string(), "1");
1903 assert!(!filtered[0].desc.ignore);
// Contrast substring filtering (default) against exact-match filtering
// (presumably `filter_exact`, set in lines missing from this excerpt).
1907 pub fn exact_filter_match() {
1908 fn tests() -> Vec<TestDescAndFn> {
1909 vec!["base", "base::test", "base::test1", "base::test2"]
1911 .map(|name| TestDescAndFn {
1913 name: StaticTestName(name),
1915 should_panic: ShouldPanic::No,
1918 testfn: DynTestFn(Box::new(move || {})),
// Substring mode: "base" and "bas" match all four names.
1923 let substr = filter_tests(
1925 filter: Some("base".into()),
1930 assert_eq!(substr.len(), 4);
1932 let substr = filter_tests(
1934 filter: Some("bas".into()),
1939 assert_eq!(substr.len(), 4);
// "::test" and "base::test" match the three "base::test*" names.
1941 let substr = filter_tests(
1943 filter: Some("::test".into()),
1948 assert_eq!(substr.len(), 3);
1950 let substr = filter_tests(
1952 filter: Some("base::test".into()),
1957 assert_eq!(substr.len(), 3);
// Exact mode: only a whole-name match counts.
1959 let exact = filter_tests(
1961 filter: Some("base".into()),
1967 assert_eq!(exact.len(), 1);
1969 let exact = filter_tests(
1971 filter: Some("bas".into()),
1977 assert_eq!(exact.len(), 0);
1979 let exact = filter_tests(
1981 filter: Some("::test".into()),
1987 assert_eq!(exact.len(), 0);
1989 let exact = filter_tests(
1991 filter: Some("base::test".into()),
1997 assert_eq!(exact.len(), 1);
// `filter_tests` must return tests sorted by name (expected list below
// is the lexicographic ordering of the input names).
2001 pub fn sort_tests() {
2002 let mut opts = TestOpts::new();
2003 opts.run_tests = true;
// Deliberately unsorted input names.
2006 "sha1::test".to_string(),
2007 "isize::test_to_str".to_string(),
2008 "isize::test_pow".to_string(),
2009 "test::do_not_run_ignored_tests".to_string(),
2010 "test::ignored_tests_result_in_ignored".to_string(),
2011 "test::first_free_arg_should_be_a_filter".to_string(),
2012 "test::parse_ignored_flag".to_string(),
2013 "test::filter_for_ignored_option".to_string(),
2014 "test::sort_tests".to_string(),
2018 let mut tests = Vec::new();
2019 for name in &names {
2020 let test = TestDescAndFn {
2022 name: DynTestName((*name).clone()),
2024 should_panic: ShouldPanic::No,
2027 testfn: DynTestFn(Box::new(testfn)),
2033 let filtered = filter_tests(&opts, tests);
2035 let expected = vec![
2036 "isize::test_pow".to_string(),
2037 "isize::test_to_str".to_string(),
2038 "sha1::test".to_string(),
2039 "test::do_not_run_ignored_tests".to_string(),
2040 "test::filter_for_ignored_option".to_string(),
2041 "test::first_free_arg_should_be_a_filter".to_string(),
2042 "test::ignored_tests_result_in_ignored".to_string(),
2043 "test::parse_ignored_flag".to_string(),
2044 "test::sort_tests".to_string(),
2047 for (a, b) in expected.iter().zip(filtered) {
2048 assert!(*a == b.desc.name.to_string());
// Builds two MetricMaps exercising every compare scenario (within noise,
// only-in-one, regressed/improved in each wanted direction).
// NOTE(review): no assertions are visible in this excerpt; the comparison
// step itself appears to be in missing lines (or was removed upstream).
2053 pub fn test_metricmap_compare() {
2054 let mut m1 = MetricMap::new();
2055 let mut m2 = MetricMap::new();
// Change (1000 -> 1100) is within the +/-200 noise band.
2056 m1.insert_metric("in-both-noise", 1000.0, 200.0);
2057 m2.insert_metric("in-both-noise", 1100.0, 200.0);
// Metrics present in only one of the two maps.
2059 m1.insert_metric("in-first-noise", 1000.0, 2.0);
2060 m2.insert_metric("in-second-noise", 1000.0, 2.0);
// Positive noise => smaller is better; growth beyond noise = regression.
2062 m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
2063 m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);
2065 m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
2066 m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);
// Negative noise => larger is better; shrinkage beyond noise = regression.
2068 m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
2069 m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);
2071 m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
2072 m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);
// run_once with a body that never calls `iter` must complete cleanly.
2076 pub fn test_bench_once_no_iter() {
2077 fn f(_: &mut Bencher) {}
// NOTE(review): the `::bench::run_once(f)` call is in lines missing here.
// run_once with a body that does call `iter` must complete cleanly.
2082 pub fn test_bench_once_iter() {
2083 fn f(b: &mut Bencher) {
// NOTE(review): the `b.iter(..)` body and `run_once` call are in lines
// missing from this excerpt.
// benchmark() with a body that never calls `iter` must still send a
// result over the channel (the dummy-sample path in bench::benchmark).
2090 pub fn test_bench_no_iter() {
2091 fn f(_: &mut Bencher) {}
2093 let (tx, rx) = channel();
2095 let desc = TestDesc {
2096 name: StaticTestName("f"),
2098 should_panic: ShouldPanic::No,
// nocapture=true: keep this test's own output handling untouched.
2102 ::bench::benchmark(desc, tx, true, f);
2107 pub fn test_bench_iter() {
2108 fn f(b: &mut Bencher) {
2112 let (tx, rx) = channel();
2114 let desc = TestDesc {
2115 name: StaticTestName("f"),
2117 should_panic: ShouldPanic::No,
2121 ::bench::benchmark(desc, tx, true, f);