1 // Copyright 2012-2017 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 //! Support code for rustc's built in unit-test and micro-benchmarking
14 //! Almost all user code will only be interested in `Bencher` and
15 //! `black_box`. All other interactions (such as writing tests and
16 //! benchmarks themselves) should be done via the `#[test]` and
17 //! `#[bench]` attributes.
19 //! See the [Testing Chapter](../book/first-edition/testing.html) of the book for more details.
21 // Currently, not much of this is meant for users. It is intended to
22 // support the simplest interface possible for representing and
23 // running tests while providing a base that other test frameworks may
26 // NB: this is also specified in this crate's Cargo.toml, but libsyntax contains logic specific to
27 // this crate, which relies on this attribute (rather than the value of `--crate-name` passed by
28 // cargo) to detect this crate.
30 #![crate_name = "test"]
31 #![unstable(feature = "test", issue = "27812")]
32 #![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
33 html_favicon_url = "https://doc.rust-lang.org/favicon.ico",
34 html_root_url = "https://doc.rust-lang.org/nightly/", test(attr(deny(warnings))))]
37 #![cfg_attr(any(unix, target_os = "cloudabi"), feature(libc))]
38 #![cfg_attr(not(stage0), feature(nll))]
39 #![feature(set_stdio)]
40 #![feature(panic_unwind)]
41 #![feature(staged_api)]
42 #![feature(termination_trait_lib)]
45 #[cfg(any(unix, target_os = "cloudabi"))]
47 extern crate panic_unwind;
50 pub use self::TestFn::*;
51 pub use self::ColorConfig::*;
52 pub use self::TestResult::*;
53 pub use self::TestName::*;
54 use self::TestEvent::*;
55 use self::NamePadding::*;
56 use self::OutputLocation::*;
58 use std::panic::{catch_unwind, AssertUnwindSafe};
60 use std::boxed::FnBox;
62 use std::collections::BTreeMap;
66 use std::io::prelude::*;
68 use std::path::PathBuf;
69 use std::process::Termination;
70 use std::sync::mpsc::{channel, Sender};
71 use std::sync::{Arc, Mutex};
73 use std::time::{Duration, Instant};
77 const TEST_WARN_TIMEOUT_S: u64 = 60;
78 const QUIET_MODE_MAX_COLUMN: usize = 100; // insert a '\n' after 100 tests in quiet mode
80 // to be used by rustc to compile tests in libtest
82 pub use {assert_test_result, filter_tests, parse_opts, run_test, test_main, test_main_static,
83 Bencher, DynTestFn, DynTestName, Metric, MetricMap, Options, ShouldPanic,
84 StaticBenchFn, StaticTestFn, StaticTestName, TestDesc, TestDescAndFn, TestName,
85 TestOpts, TestResult, TrFailed, TrFailedMsg, TrIgnored, TrOk};
91 use formatters::{JsonFormatter, OutputFormatter, PrettyFormatter, TerseFormatter};
93 // The name of a test. By convention this follows the rules for rust
94 // paths; i.e. it should be a series of identifiers separated by double
95 // colons. This way if some test runner wants to arrange the tests
96 // hierarchically it may.
98 #[derive(Clone, PartialEq, Eq, Hash, Debug)]
100 StaticTestName(&'static str),
102 AlignedTestName(Cow<'static, str>, NamePadding),
105 fn as_slice(&self) -> &str {
107 StaticTestName(s) => s,
108 DynTestName(ref s) => s,
109 AlignedTestName(ref s, _) => &*s,
113 fn padding(&self) -> NamePadding {
115 &AlignedTestName(_, p) => p,
120 fn with_padding(&self, padding: NamePadding) -> TestName {
121 let name = match self {
122 &TestName::StaticTestName(name) => Cow::Borrowed(name),
123 &TestName::DynTestName(ref name) => Cow::Owned(name.clone()),
124 &TestName::AlignedTestName(ref name, _) => name.clone(),
127 TestName::AlignedTestName(name, padding)
130 impl fmt::Display for TestName {
131 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
132 fmt::Display::fmt(self.as_slice(), f)
136 #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
137 pub enum NamePadding {
143 fn padded_name(&self, column_count: usize, align: NamePadding) -> String {
144 let mut name = String::from(self.name.as_slice());
145 let fill = column_count.saturating_sub(name.len());
146 let pad = " ".repeat(fill);
157 /// Represents a benchmark function.
158 pub trait TDynBenchFn: Send {
159 fn run(&self, harness: &mut Bencher);
162 // A function that runs a test. If the function returns successfully,
163 // the test succeeds; if the function panics then the test fails. We
164 // may need to come up with a more clever definition of test in order
165 // to support isolation of tests into threads.
168 StaticBenchFn(fn(&mut Bencher)),
169 DynTestFn(Box<dyn FnBox() + Send>),
170 DynBenchFn(Box<dyn TDynBenchFn + 'static>),
174 fn padding(&self) -> NamePadding {
176 StaticTestFn(..) => PadNone,
177 StaticBenchFn(..) => PadOnRight,
178 DynTestFn(..) => PadNone,
179 DynBenchFn(..) => PadOnRight,
184 impl fmt::Debug for TestFn {
185 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
186 f.write_str(match *self {
187 StaticTestFn(..) => "StaticTestFn(..)",
188 StaticBenchFn(..) => "StaticBenchFn(..)",
189 DynTestFn(..) => "DynTestFn(..)",
190 DynBenchFn(..) => "DynBenchFn(..)",
195 /// Manager of the benchmarking runs.
197 /// This is fed into functions marked with `#[bench]` to allow for
198 /// set-up & tear-down before running a piece of code repeatedly via a
203 summary: Option<stats::Summary>,
207 #[derive(Clone, PartialEq, Eq)]
213 #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
214 pub enum ShouldPanic {
217 YesWithMessage(&'static str),
220 // The definition of a single test. A test runner will run a list of
222 #[derive(Clone, Debug, PartialEq, Eq, Hash)]
223 pub struct TestDesc {
226 pub should_panic: ShouldPanic,
227 pub allow_fail: bool,
231 pub struct TestDescAndFn {
236 #[derive(Clone, PartialEq, Debug, Copy)]
243 pub fn new(value: f64, noise: f64) -> Metric {
244 Metric { value, noise }
248 /// In case we want to add other options as well, just add them in this struct.
249 #[derive(Copy, Clone, Debug)]
251 display_output: bool,
255 pub fn new() -> Options {
257 display_output: false,
261 pub fn display_output(mut self, display_output: bool) -> Options {
262 self.display_output = display_output;
267 // The default console test runner. It accepts the command line
268 // arguments and a vector of test_descs.
269 pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>, options: Options) {
270 let mut opts = match parse_opts(args) {
273 eprintln!("error: {}", msg);
279 opts.options = options;
281 if let Err(e) = list_tests_console(&opts, tests) {
282 eprintln!("error: io error when listing tests: {:?}", e);
286 match run_tests_console(&opts, tests) {
288 Ok(false) => process::exit(101),
290 eprintln!("error: io error when listing tests: {:?}", e);
297 // A variant optimized for invocation with a static test vector.
298 // This will panic (intentionally) when fed any dynamic tests, because
299 // it is copying the static values out into a dynamic vector and cannot
300 // copy dynamic values. It is doing this because from this point on
301 // a Vec<TestDescAndFn> is used in order to effect ownership-transfer
302 // semantics into parallel test runners, which in turn requires a Vec<>
303 // rather than a &[].
304 pub fn test_main_static(tests: &[TestDescAndFn]) {
305 let args = env::args().collect::<Vec<_>>();
306 let owned_tests = tests
308 .map(|t| match t.testfn {
309 StaticTestFn(f) => TestDescAndFn {
310 testfn: StaticTestFn(f),
311 desc: t.desc.clone(),
313 StaticBenchFn(f) => TestDescAndFn {
314 testfn: StaticBenchFn(f),
315 desc: t.desc.clone(),
317 _ => panic!("non-static tests passed to test::test_main_static"),
320 test_main(&args, owned_tests, Options::new())
323 /// Invoked when unit tests terminate. Should panic if the unit
324 /// test is considered a failure. By default, invokes `report()`
325 /// and checks for a `0` result.
326 pub fn assert_test_result<T: Termination>(result: T) {
327 assert_eq!(result.report(), 0);
330 #[derive(Copy, Clone, Debug)]
331 pub enum ColorConfig {
337 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
338 pub enum OutputFormat {
345 pub struct TestOpts {
347 pub filter: Option<String>,
348 pub filter_exact: bool,
349 pub run_ignored: bool,
351 pub bench_benchmarks: bool,
352 pub logfile: Option<PathBuf>,
354 pub color: ColorConfig,
355 pub format: OutputFormat,
356 pub test_threads: Option<usize>,
357 pub skip: Vec<String>,
358 pub options: Options,
363 fn new() -> TestOpts {
370 bench_benchmarks: false,
374 format: OutputFormat::Pretty,
377 options: Options::new(),
382 /// Result of parsing the options.
383 pub type OptRes = Result<TestOpts, String>;
385 fn optgroups() -> getopts::Options {
386 let mut opts = getopts::Options::new();
387 opts.optflag("", "ignored", "Run ignored tests")
388 .optflag("", "test", "Run tests and not benchmarks")
389 .optflag("", "bench", "Run benchmarks instead of tests")
390 .optflag("", "list", "List all tests and benchmarks")
391 .optflag("h", "help", "Display this message (longer with --help)")
395 "Write logs to the specified file instead \
402 "don't capture stdout/stderr of each \
403 task, allow printing directly",
408 "Number of threads used for running tests \
415 "Skip tests whose names contain FILTER (this flag can \
416 be used multiple times)",
422 "Display one character per test instead of one line. \
423 Alias to --format=terse",
428 "Exactly match filters rather than by substring",
433 "Configure coloring of output:
434 auto = colorize if stdout is a tty and tests are run on serially (default);
435 always = always colorize output;
436 never = never colorize output;",
442 "Configure formatting of output:
443 pretty = Print verbose output;
444 terse = Display one character per test;
445 json = Output a json document",
451 "Enable nightly-only flags:
452 unstable-options = Allow use of experimental features",
458 fn usage(binary: &str, options: &getopts::Options) {
459 let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
463 The FILTER string is tested against the name of all tests, and only those
464 tests whose names contain the filter are run.
466 By default, all tests are run in parallel. This can be altered with the
467 --test-threads flag or the RUST_TEST_THREADS environment variable when running
470 All tests have their standard output and standard error captured by default.
471 This can be overridden with the --nocapture flag or setting RUST_TEST_NOCAPTURE
472 environment variable to a value other than "0". Logging is not captured by default.
476 #[test] - Indicates a function is a test to be run. This function
478 #[bench] - Indicates a function is a benchmark to be run. This
479 function takes one argument (test::Bencher).
480 #[should_panic] - This function (also labeled with #[test]) will only pass if
481 the code causes a panic (an assertion failure or panic!)
482 A message may be provided, which the failure string must
483 contain: #[should_panic(expected = "foo")].
484 #[ignore] - When applied to a function which is already attributed as a
485 test, then the test runner will ignore these tests during
486 normal test runs. Running with --ignored will run these
488 usage = options.usage(&message)
// FIXME: Copied from libsyntax until linkage errors are resolved. Issue #47566
//
// Returns true when unstable (nightly-only) flags such as `-Z` may be
// accepted: either this build has not staged off unstable features, or
// we are bootstrapping the compiler itself.
fn is_nightly() -> bool {
    // Whether this is a feature-staged build, i.e. on the beta or stable channel
    let disable_unstable_features = option_env!("CFG_DISABLE_UNSTABLE_FEATURES").is_some();
    // Whether we should enable unstable features for bootstrapping
    let bootstrap = env::var("RUSTC_BOOTSTRAP").is_ok();

    bootstrap || !disable_unstable_features
}
502 // Parses command line arguments into test options
503 pub fn parse_opts(args: &[String]) -> Option<OptRes> {
504 let mut allow_unstable = false;
505 let opts = optgroups();
506 let args = args.get(1..).unwrap_or(args);
507 let matches = match opts.parse(args) {
509 Err(f) => return Some(Err(f.to_string())),
512 if let Some(opt) = matches.opt_str("Z") {
515 "the option `Z` is only accepted on the nightly compiler".into(),
520 "unstable-options" => {
521 allow_unstable = true;
524 return Some(Err("Unrecognized option to `Z`".into()));
529 if matches.opt_present("h") {
530 usage(&args[0], &opts);
534 let filter = if !matches.free.is_empty() {
535 Some(matches.free[0].clone())
540 let run_ignored = matches.opt_present("ignored");
541 let quiet = matches.opt_present("quiet");
542 let exact = matches.opt_present("exact");
543 let list = matches.opt_present("list");
545 let logfile = matches.opt_str("logfile");
546 let logfile = logfile.map(|s| PathBuf::from(&s));
548 let bench_benchmarks = matches.opt_present("bench");
549 let run_tests = !bench_benchmarks || matches.opt_present("test");
551 let mut nocapture = matches.opt_present("nocapture");
553 nocapture = match env::var("RUST_TEST_NOCAPTURE") {
554 Ok(val) => &val != "0",
559 let test_threads = match matches.opt_str("test-threads") {
560 Some(n_str) => match n_str.parse::<usize>() {
561 Ok(0) => return Some(Err("argument for --test-threads must not be 0".to_string())),
564 return Some(Err(format!(
565 "argument for --test-threads must be a number > 0 \
574 let color = match matches.opt_str("color").as_ref().map(|s| &**s) {
575 Some("auto") | None => AutoColor,
576 Some("always") => AlwaysColor,
577 Some("never") => NeverColor,
580 return Some(Err(format!(
581 "argument for --color must be auto, always, or never (was \
588 let format = match matches.opt_str("format").as_ref().map(|s| &**s) {
589 None if quiet => OutputFormat::Terse,
590 Some("pretty") | None => OutputFormat::Pretty,
591 Some("terse") => OutputFormat::Terse,
595 "The \"json\" format is only accepted on the nightly compiler".into(),
602 return Some(Err(format!(
603 "argument for --format must be pretty, terse, or json (was \
610 let test_opts = TestOpts {
622 skip: matches.opt_strs("skip"),
623 options: Options::new(),
629 #[derive(Clone, PartialEq)]
630 pub struct BenchSamples {
631 ns_iter_summ: stats::Summary,
635 #[derive(Clone, PartialEq)]
636 pub enum TestResult {
642 TrBench(BenchSamples),
645 unsafe impl Send for TestResult {}
647 enum OutputLocation<T> {
648 Pretty(Box<term::StdoutTerminal>),
652 impl<T: Write> Write for OutputLocation<T> {
653 fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
655 Pretty(ref mut term) => term.write(buf),
656 Raw(ref mut stdout) => stdout.write(buf),
660 fn flush(&mut self) -> io::Result<()> {
662 Pretty(ref mut term) => term.flush(),
663 Raw(ref mut stdout) => stdout.flush(),
668 struct ConsoleTestState {
669 log_out: Option<File>,
678 failures: Vec<(TestDesc, Vec<u8>)>,
679 not_failures: Vec<(TestDesc, Vec<u8>)>,
683 impl ConsoleTestState {
684 pub fn new(opts: &TestOpts) -> io::Result<ConsoleTestState> {
685 let log_out = match opts.logfile {
686 Some(ref path) => Some(File::create(path)?),
690 Ok(ConsoleTestState {
699 metrics: MetricMap::new(),
700 failures: Vec::new(),
701 not_failures: Vec::new(),
702 options: opts.options,
706 pub fn write_log<S: AsRef<str>>(&mut self, msg: S) -> io::Result<()> {
707 let msg = msg.as_ref();
710 Some(ref mut o) => o.write_all(msg.as_bytes()),
714 pub fn write_log_result(&mut self, test: &TestDesc, result: &TestResult) -> io::Result<()> {
715 self.write_log(format!(
718 TrOk => "ok".to_owned(),
719 TrFailed => "failed".to_owned(),
720 TrFailedMsg(ref msg) => format!("failed: {}", msg),
721 TrIgnored => "ignored".to_owned(),
722 TrAllowedFail => "failed (allowed)".to_owned(),
723 TrBench(ref bs) => fmt_bench_samples(bs),
729 fn current_test_count(&self) -> usize {
730 self.passed + self.failed + self.ignored + self.measured + self.allowed_fail
// Format a number with thousands separators, e.g. 1234567 -> "1,234,567".
// Handles values up to (but not including) 10^12, which covers the
// nanosecond timings this is used for.
fn fmt_thousands_sep(mut n: usize, sep: char) -> String {
    use std::fmt::Write;
    let mut output = String::new();
    let mut trailing = false;
    for &pow in &[9, 6, 3, 0] {
        let base = 10_usize.pow(pow);
        // Emit a group once the first non-zero group has been seen, or
        // unconditionally for the units group so "0" still prints.
        if pow == 0 || trailing || n / base != 0 {
            if !trailing {
                // Leading group: no zero padding.
                output.write_fmt(format_args!("{}", n / base)).unwrap();
            } else {
                // Interior groups are always exactly three digits.
                output.write_fmt(format_args!("{:03}", n / base)).unwrap();
            }
            if pow != 0 {
                output.push(sep);
            }
            trailing = true;
        }
        n %= base;
    }

    output
}
758 pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
760 let mut output = String::new();
762 let median = bs.ns_iter_summ.median as usize;
763 let deviation = (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize;
766 .write_fmt(format_args!(
767 "{:>11} ns/iter (+/- {})",
768 fmt_thousands_sep(median, ','),
769 fmt_thousands_sep(deviation, ',')
774 .write_fmt(format_args!(" = {} MB/s", bs.mb_s))
780 // List the tests to console, and optionally to logfile. Filters are honored.
781 pub fn list_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<()> {
782 let mut output = match term::stdout() {
783 None => Raw(io::stdout()),
784 Some(t) => Pretty(t),
787 let quiet = opts.format == OutputFormat::Terse;
788 let mut st = ConsoleTestState::new(opts)?;
793 for test in filter_tests(&opts, tests) {
797 desc: TestDesc { name, .. },
801 let fntype = match testfn {
802 StaticTestFn(..) | DynTestFn(..) => {
806 StaticBenchFn(..) | DynBenchFn(..) => {
812 writeln!(output, "{}: {}", name, fntype)?;
813 st.write_log(format!("{} {}\n", fntype, name))?;
816 fn plural(count: u32, s: &str) -> String {
818 1 => format!("{} {}", 1, s),
819 n => format!("{} {}s", n, s),
824 if ntest != 0 || nbench != 0 {
825 writeln!(output, "")?;
831 plural(ntest, "test"),
832 plural(nbench, "benchmark")
839 // A simple console test runner
840 pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<bool> {
843 st: &mut ConsoleTestState,
844 out: &mut dyn OutputFormatter,
845 ) -> io::Result<()> {
846 match (*event).clone() {
847 TeFiltered(ref filtered_tests) => {
848 st.total = filtered_tests.len();
849 out.write_run_start(filtered_tests.len())
851 TeFilteredOut(filtered_out) => Ok(st.filtered_out = filtered_out),
852 TeWait(ref test) => out.write_test_start(test),
853 TeTimeout(ref test) => out.write_timeout(test),
854 TeResult(test, result, stdout) => {
855 st.write_log_result(&test, &result)?;
856 out.write_result(&test, &result, &*stdout)?;
860 st.not_failures.push((test, stdout));
862 TrIgnored => st.ignored += 1,
863 TrAllowedFail => st.allowed_fail += 1,
865 st.metrics.insert_metric(
866 test.name.as_slice(),
867 bs.ns_iter_summ.median,
868 bs.ns_iter_summ.max - bs.ns_iter_summ.min,
874 st.failures.push((test, stdout));
876 TrFailedMsg(msg) => {
878 let mut stdout = stdout;
879 stdout.extend_from_slice(format!("note: {}", msg).as_bytes());
880 st.failures.push((test, stdout));
888 let output = match term::stdout() {
889 None => Raw(io::stdout()),
890 Some(t) => Pretty(t),
893 let max_name_len = tests
895 .max_by_key(|t| len_if_padded(*t))
896 .map(|t| t.desc.name.as_slice().len())
899 let is_multithreaded = opts.test_threads.unwrap_or_else(get_concurrency) > 1;
901 let mut out: Box<dyn OutputFormatter> = match opts.format {
902 OutputFormat::Pretty => Box::new(PrettyFormatter::new(
908 OutputFormat::Terse => Box::new(TerseFormatter::new(
914 OutputFormat::Json => Box::new(JsonFormatter::new(output)),
916 let mut st = ConsoleTestState::new(opts)?;
917 fn len_if_padded(t: &TestDescAndFn) -> usize {
918 match t.testfn.padding() {
920 PadOnRight => t.desc.name.as_slice().len(),
924 run_tests(opts, tests, |x| callback(&x, &mut st, &mut *out))?;
926 assert!(st.current_test_count() == st.total);
928 return out.write_run_finish(&st);
932 fn should_sort_failures_before_printing_them() {
933 let test_a = TestDesc {
934 name: StaticTestName("a"),
936 should_panic: ShouldPanic::No,
940 let test_b = TestDesc {
941 name: StaticTestName("b"),
943 should_panic: ShouldPanic::No,
947 let mut out = PrettyFormatter::new(Raw(Vec::new()), false, 10, false);
949 let st = ConsoleTestState {
958 metrics: MetricMap::new(),
959 failures: vec![(test_b, Vec::new()), (test_a, Vec::new())],
960 options: Options::new(),
961 not_failures: Vec::new(),
964 out.write_failures(&st).unwrap();
965 let s = match out.output_location() {
966 &Raw(ref m) => String::from_utf8_lossy(&m[..]),
967 &Pretty(_) => unreachable!(),
970 let apos = s.find("a").unwrap();
971 let bpos = s.find("b").unwrap();
972 assert!(apos < bpos);
975 fn use_color(opts: &TestOpts) -> bool {
977 AutoColor => !opts.nocapture && stdout_isatty(),
983 #[cfg(any(target_os = "cloudabi", target_os = "redox",
984 all(target_arch = "wasm32", not(target_os = "emscripten"))))]
985 fn stdout_isatty() -> bool {
986 // FIXME: Implement isatty on Redox
990 fn stdout_isatty() -> bool {
991 unsafe { libc::isatty(libc::STDOUT_FILENO) != 0 }
994 fn stdout_isatty() -> bool {
997 type HANDLE = *mut u8;
998 type LPDWORD = *mut u32;
999 const STD_OUTPUT_HANDLE: DWORD = -11i32 as DWORD;
1001 fn GetStdHandle(which: DWORD) -> HANDLE;
1002 fn GetConsoleMode(hConsoleHandle: HANDLE, lpMode: LPDWORD) -> BOOL;
1005 let handle = GetStdHandle(STD_OUTPUT_HANDLE);
1007 GetConsoleMode(handle, &mut out) != 0
1012 pub enum TestEvent {
1013 TeFiltered(Vec<TestDesc>),
1015 TeResult(TestDesc, TestResult, Vec<u8>),
1016 TeTimeout(TestDesc),
1017 TeFilteredOut(usize),
1020 pub type MonitorMsg = (TestDesc, TestResult, Vec<u8>);
// Shared in-memory byte buffer used to capture a test's printed
// output; the Arc<Mutex<..>> lets the runner keep a handle while the
// test thread writes through `set_print`/`set_panic`.
struct Sink(Arc<Mutex<Vec<u8>>>);
impl Write for Sink {
    fn write(&mut self, data: &[u8]) -> io::Result<usize> {
        // Appends to the shared Vec; panics if the mutex was poisoned.
        Write::write(&mut *self.0.lock().unwrap(), data)
    }
    fn flush(&mut self) -> io::Result<()> {
        // Nothing buffered beyond the Vec itself.
        Ok(())
    }
}
1032 pub fn run_tests<F>(opts: &TestOpts, tests: Vec<TestDescAndFn>, mut callback: F) -> io::Result<()>
1034 F: FnMut(TestEvent) -> io::Result<()>,
1036 use std::collections::HashMap;
1037 use std::sync::mpsc::RecvTimeoutError;
1039 let tests_len = tests.len();
1041 let mut filtered_tests = filter_tests(opts, tests);
1042 if !opts.bench_benchmarks {
1043 filtered_tests = convert_benchmarks_to_tests(filtered_tests);
1046 let filtered_tests = {
1047 let mut filtered_tests = filtered_tests;
1048 for test in filtered_tests.iter_mut() {
1049 test.desc.name = test.desc.name.with_padding(test.testfn.padding());
1055 let filtered_out = tests_len - filtered_tests.len();
1056 callback(TeFilteredOut(filtered_out))?;
1058 let filtered_descs = filtered_tests.iter().map(|t| t.desc.clone()).collect();
1060 callback(TeFiltered(filtered_descs))?;
1062 let (filtered_tests, filtered_benchs): (Vec<_>, _) =
1063 filtered_tests.into_iter().partition(|e| match e.testfn {
1064 StaticTestFn(_) | DynTestFn(_) => true,
1068 let concurrency = opts.test_threads.unwrap_or_else(get_concurrency);
1070 let mut remaining = filtered_tests;
1071 remaining.reverse();
1072 let mut pending = 0;
1074 let (tx, rx) = channel::<MonitorMsg>();
1076 let mut running_tests: HashMap<TestDesc, Instant> = HashMap::new();
1078 fn get_timed_out_tests(running_tests: &mut HashMap<TestDesc, Instant>) -> Vec<TestDesc> {
1079 let now = Instant::now();
1080 let timed_out = running_tests
1082 .filter_map(|(desc, timeout)| {
1083 if &now >= timeout {
1090 for test in &timed_out {
1091 running_tests.remove(test);
1096 fn calc_timeout(running_tests: &HashMap<TestDesc, Instant>) -> Option<Duration> {
1097 running_tests.values().min().map(|next_timeout| {
1098 let now = Instant::now();
1099 if *next_timeout >= now {
1107 if concurrency == 1 {
1108 while !remaining.is_empty() {
1109 let test = remaining.pop().unwrap();
1110 callback(TeWait(test.desc.clone()))?;
1111 run_test(opts, !opts.run_tests, test, tx.clone());
1112 let (test, result, stdout) = rx.recv().unwrap();
1113 callback(TeResult(test, result, stdout))?;
1116 while pending > 0 || !remaining.is_empty() {
1117 while pending < concurrency && !remaining.is_empty() {
1118 let test = remaining.pop().unwrap();
1119 let timeout = Instant::now() + Duration::from_secs(TEST_WARN_TIMEOUT_S);
1120 running_tests.insert(test.desc.clone(), timeout);
1121 callback(TeWait(test.desc.clone()))?; //here no pad
1122 run_test(opts, !opts.run_tests, test, tx.clone());
1128 if let Some(timeout) = calc_timeout(&running_tests) {
1129 res = rx.recv_timeout(timeout);
1130 for test in get_timed_out_tests(&mut running_tests) {
1131 callback(TeTimeout(test))?;
1133 if res != Err(RecvTimeoutError::Timeout) {
1137 res = rx.recv().map_err(|_| RecvTimeoutError::Disconnected);
1142 let (desc, result, stdout) = res.unwrap();
1143 running_tests.remove(&desc);
1145 callback(TeResult(desc, result, stdout))?;
1150 if opts.bench_benchmarks {
1151 // All benchmarks run at the end, in serial.
1152 for b in filtered_benchs {
1153 callback(TeWait(b.desc.clone()))?;
1154 run_test(opts, false, b, tx.clone());
1155 let (test, result, stdout) = rx.recv().unwrap();
1156 callback(TeResult(test, result, stdout))?;
1162 #[allow(deprecated)]
1163 fn get_concurrency() -> usize {
1164 return match env::var("RUST_TEST_THREADS") {
1166 let opt_n: Option<usize> = s.parse().ok();
1168 Some(n) if n > 0 => n,
1170 "RUST_TEST_THREADS is `{}`, should be a positive integer.",
1175 Err(..) => num_cpus(),
1180 fn num_cpus() -> usize {
1182 struct SYSTEM_INFO {
1183 wProcessorArchitecture: u16,
1186 lpMinimumApplicationAddress: *mut u8,
1187 lpMaximumApplicationAddress: *mut u8,
1188 dwActiveProcessorMask: *mut u8,
1189 dwNumberOfProcessors: u32,
1190 dwProcessorType: u32,
1191 dwAllocationGranularity: u32,
1192 wProcessorLevel: u16,
1193 wProcessorRevision: u16,
1196 fn GetSystemInfo(info: *mut SYSTEM_INFO) -> i32;
1199 let mut sysinfo = std::mem::zeroed();
1200 GetSystemInfo(&mut sysinfo);
1201 sysinfo.dwNumberOfProcessors as usize
1205 #[cfg(target_os = "redox")]
1206 fn num_cpus() -> usize {
1207 // FIXME: Implement num_cpus on Redox
1211 #[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))]
1212 fn num_cpus() -> usize {
1216 #[cfg(any(target_os = "android", target_os = "cloudabi", target_os = "emscripten",
1217 target_os = "fuchsia", target_os = "ios", target_os = "linux",
1218 target_os = "macos", target_os = "solaris"))]
1219 fn num_cpus() -> usize {
1220 unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as usize }
1223 #[cfg(any(target_os = "freebsd", target_os = "dragonfly", target_os = "bitrig",
1224 target_os = "netbsd"))]
1225 fn num_cpus() -> usize {
1228 let mut cpus: libc::c_uint = 0;
1229 let mut cpus_size = std::mem::size_of_val(&cpus);
1232 cpus = libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as libc::c_uint;
1235 let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
1240 &mut cpus as *mut _ as *mut _,
1241 &mut cpus_size as *mut _ as *mut _,
1253 #[cfg(target_os = "openbsd")]
1254 fn num_cpus() -> usize {
1257 let mut cpus: libc::c_uint = 0;
1258 let mut cpus_size = std::mem::size_of_val(&cpus);
1259 let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
1265 &mut cpus as *mut _ as *mut _,
1266 &mut cpus_size as *mut _ as *mut _,
1277 #[cfg(target_os = "haiku")]
1278 fn num_cpus() -> usize {
1283 #[cfg(target_os = "l4re")]
1284 fn num_cpus() -> usize {
1290 pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
1291 let mut filtered = tests;
1292 // Remove tests that don't match the test filter
1293 filtered = match opts.filter {
1295 Some(ref filter) => filtered
1298 if opts.filter_exact {
1299 test.desc.name.as_slice() == &filter[..]
1301 test.desc.name.as_slice().contains(&filter[..])
1307 // Skip tests that match any of the skip filters
1311 !opts.skip.iter().any(|sf| {
1312 if opts.filter_exact {
1313 t.desc.name.as_slice() == &sf[..]
1315 t.desc.name.as_slice().contains(&sf[..])
1321 // Maybe pull out the ignored test and unignore them
1322 filtered = if !opts.run_ignored {
1325 fn filter(test: TestDescAndFn) -> Option<TestDescAndFn> {
1326 if test.desc.ignore {
1327 let TestDescAndFn { desc, testfn } = test;
1328 Some(TestDescAndFn {
1339 filtered.into_iter().filter_map(filter).collect()
1342 // Sort the tests alphabetically
1343 filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));
1348 pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
1349 // convert benchmarks to tests, if we're not benchmarking them
1353 let testfn = match x.testfn {
1354 DynBenchFn(bench) => DynTestFn(Box::new(move || {
1355 bench::run_once(|b| __rust_begin_short_backtrace(|| bench.run(b)))
1357 StaticBenchFn(benchfn) => DynTestFn(Box::new(move || {
1358 bench::run_once(|b| __rust_begin_short_backtrace(|| benchfn(b)))
1373 test: TestDescAndFn,
1374 monitor_ch: Sender<MonitorMsg>,
1376 let TestDescAndFn { desc, testfn } = test;
1378 let ignore_because_panic_abort = cfg!(target_arch = "wasm32") && !cfg!(target_os = "emscripten")
1379 && desc.should_panic != ShouldPanic::No;
1381 if force_ignore || desc.ignore || ignore_because_panic_abort {
1382 monitor_ch.send((desc, TrIgnored, Vec::new())).unwrap();
1388 monitor_ch: Sender<MonitorMsg>,
1390 testfn: Box<dyn FnBox() + Send>,
1392 // Buffer for capturing standard I/O
1393 let data = Arc::new(Mutex::new(Vec::new()));
1394 let data2 = data.clone();
1396 let name = desc.name.clone();
1397 let runtest = move || {
1398 let oldio = if !nocapture {
1400 io::set_print(Some(Box::new(Sink(data2.clone())))),
1401 io::set_panic(Some(Box::new(Sink(data2)))),
1407 let result = catch_unwind(AssertUnwindSafe(testfn));
1409 if let Some((printio, panicio)) = oldio {
1410 io::set_print(printio);
1411 io::set_panic(panicio);
1414 let test_result = calc_result(&desc, result);
1415 let stdout = data.lock().unwrap().to_vec();
1417 .send((desc.clone(), test_result, stdout))
1421 // If the platform is single-threaded we're just going to run
1422 // the test synchronously, regardless of the concurrency
1424 let supports_threads = !cfg!(target_os = "emscripten") && !cfg!(target_arch = "wasm32");
1425 if supports_threads {
1426 let cfg = thread::Builder::new().name(name.as_slice().to_owned());
1427 cfg.spawn(runtest).unwrap();
1434 DynBenchFn(bencher) => {
1435 ::bench::benchmark(desc, monitor_ch, opts.nocapture, |harness| {
1436 bencher.run(harness)
1439 StaticBenchFn(benchfn) => {
1440 ::bench::benchmark(desc, monitor_ch, opts.nocapture, |harness| {
1441 (benchfn.clone())(harness)
1445 let cb = move || __rust_begin_short_backtrace(f);
1446 run_test_inner(desc, monitor_ch, opts.nocapture, Box::new(cb))
1448 StaticTestFn(f) => run_test_inner(
1452 Box::new(move || __rust_begin_short_backtrace(f)),
1457 /// Fixed frame used to clean the backtrace with `RUST_BACKTRACE=1`.
1459 fn __rust_begin_short_backtrace<F: FnOnce()>(f: F) {
1463 fn calc_result(desc: &TestDesc, task_result: Result<(), Box<dyn Any + Send>>) -> TestResult {
1464 match (&desc.should_panic, task_result) {
1465 (&ShouldPanic::No, Ok(())) | (&ShouldPanic::Yes, Err(_)) => TrOk,
1466 (&ShouldPanic::YesWithMessage(msg), Err(ref err)) => {
1467 if err.downcast_ref::<String>()
1469 .or_else(|| err.downcast_ref::<&'static str>().map(|e| *e))
1470 .map(|e| e.contains(msg))
1475 if desc.allow_fail {
1478 TrFailedMsg(format!("Panic did not include expected string '{}'", msg))
1482 _ if desc.allow_fail => TrAllowedFail,
1487 #[derive(Clone, PartialEq)]
1488 pub struct MetricMap(BTreeMap<String, Metric>);
1491 pub fn new() -> MetricMap {
1492 MetricMap(BTreeMap::new())
1495 /// Insert a named `value` (+/- `noise`) metric into the map. The value
1496 /// must be non-negative. The `noise` indicates the uncertainty of the
1497 /// metric, which doubles as the "noise range" of acceptable
1498 /// pairwise-regressions on this named value, when comparing from one
1499 /// metric to the next using `compare_to_old`.
1501 /// If `noise` is positive, then it means this metric is of a value
1502 /// you want to see grow smaller, so a change larger than `noise` in the
1503 /// positive direction represents a regression.
1505 /// If `noise` is negative, then it means this metric is of a value
1506 /// you want to see grow larger, so a change larger than `noise` in the
1507 /// negative direction represents a regression.
1508 pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
1509 let m = Metric { value, noise };
// `BTreeMap::insert` silently replaces any existing entry of this name.
1510 self.0.insert(name.to_owned(), m);
// Renders all metrics as "name: value (+/- noise)" entries, ordered by
// name (the backing map is a BTreeMap). Joining of the entries is on a
// line elided in this listing.
1513 pub fn fmt_metrics(&self) -> String {
1516 .map(|(k, v)| format!("{}: {} (+/- {})", *k, v.value, v.noise))
1517 .collect::<Vec<_>>();
1524 /// A function that is opaque to the optimizer, to allow benchmarks to
1525 /// pretend to use outputs to assist in avoiding dead-code
1528 /// This function is a no-op, and does not even read from `dummy`.
1529 #[cfg(not(any(target_arch = "asmjs", target_arch = "wasm32")))]
1530 pub fn black_box<T>(dummy: T) -> T {
1531 // we need to "use" the argument in some way LLVM can't
// SAFETY: empty inline asm — executes no instructions; it only names
// `&dummy` as an input operand so LLVM must consider it used.
1533 unsafe { asm!("" : : "r"(&dummy)) }
// Fallback for asmjs/wasm32, where inline asm is unavailable; the body
// (elided in this listing) presumably returns `dummy` unchanged.
1536 #[cfg(any(target_arch = "asmjs", target_arch = "wasm32"))]
1538 pub fn black_box<T>(dummy: T) -> T {
1543 /// Callback for benchmark functions to run in their body.
1544 pub fn iter<T, F>(&mut self, mut inner: F)
// `Single` mode (used by `run_once` below) executes exactly one pass
// and records no summary; otherwise full statistics are collected via
// the free function `iter`.
1548 if self.mode == BenchMode::Single {
1549 ns_iter_inner(&mut inner, 1);
1553 self.summary = Some(iter(&mut inner));
// Runs the benchmark closure and returns the timing summary recorded by
// `Bencher::iter` (`None` if the closure never called `iter`).
1556 pub fn bench<F>(&mut self, mut f: F) -> Option<stats::Summary>
1558 F: FnMut(&mut Bencher),
1561 return self.summary;
// Converts a `Duration` to whole nanoseconds as a `u64`.
1565 fn ns_from_dur(dur: Duration) -> u64 {
1566 dur.as_secs() * 1_000_000_000 + (dur.subsec_nanos() as u64)
// Times `k` invocations of `inner` (the loop itself is elided in this
// listing) and returns the total elapsed nanoseconds.
1569 fn ns_iter_inner<T, F>(inner: &mut F, k: u64) -> u64
1573 let start = Instant::now();
1577 return ns_from_dur(start.elapsed());
// Adaptive benchmark driver: repeatedly samples `inner`, growing the
// per-sample iteration count until timings converge or a time cap hits.
1580 pub fn iter<T, F>(inner: &mut F) -> stats::Summary
1584 // Initial bench run to get ballpark figure.
1585 let ns_single = ns_iter_inner(inner, 1);
1587 // Try to estimate iter count for 1ms falling back to 1m
1588 // iterations if first run took < 1ns.
1589 let ns_target_total = 1_000_000; // 1ms
1590 let mut n = ns_target_total / cmp::max(1, ns_single);
1592 // if the first run took more than 1ms we don't want to just
1593 // be left doing 0 iterations on every loop. The unfortunate
1594 // side effect of not being able to do as many runs is
1595 // automatically handled by the statistical analysis below
1596 // (i.e. larger error bars).
1599 let mut total_run = Duration::new(0, 0);
// Fixed batch of 50 per-iteration timings (ns/iter), reused each pass.
1600 let samples: &mut [f64] = &mut [0.0_f64; 50];
1602 let loop_start = Instant::now();
// First pass: each sample times `n` iterations.
1604 for p in &mut *samples {
1605 *p = ns_iter_inner(inner, n) as f64 / n as f64;
// Clamp the extreme 5% outliers before summarizing.
1608 stats::winsorize(samples, 5.0);
1609 let summ = stats::Summary::new(samples);
// Second pass over the same buffer with 5*n iterations per sample.
1611 for p in &mut *samples {
1612 let ns = ns_iter_inner(inner, 5 * n);
1613 *p = ns as f64 / (5 * n) as f64;
1616 stats::winsorize(samples, 5.0);
1617 let summ5 = stats::Summary::new(samples);
1619 let loop_run = loop_start.elapsed();
1621 // If we've run for 100ms and seem to have converged to a
// stable median (low dispersion, and the n and 5*n medians agreeing
// within the 5*n pass's median absolute deviation), stop early.
1623 if loop_run > Duration::from_millis(100) && summ.median_abs_dev_pct < 1.0
1624 && summ.median - summ5.median < summ5.median_abs_dev
1629 total_run = total_run + loop_run;
1630 // Longest we ever run for is 3s.
1631 if total_run > Duration::from_secs(3) {
1635 // If we overflow here just return the results so far. We check a
1636 // multiplier of 10 because we're about to multiply by 2 and the
1637 // next iteration of the loop will also multiply by 5 (to calculate
1638 // the summ5 result)
1639 n = match n.checked_mul(10) {
1649 use std::panic::{catch_unwind, AssertUnwindSafe};
1652 use std::sync::{Arc, Mutex};
1654 use super::{BenchMode, BenchSamples, Bencher, MonitorMsg, Sender, Sink, TestDesc, TestResult};
// Runs a benchmark function to completion and reports the result (and
// any captured output) over `monitor_ch`. (Excerpt; some lines elided.)
1656 pub fn benchmark<F>(desc: TestDesc, monitor_ch: Sender<MonitorMsg>, nocapture: bool, f: F)
1658 F: FnMut(&mut Bencher),
1660 let mut bs = Bencher {
1661 mode: BenchMode::Auto,
// Shared buffer collecting everything the benchmark prints.
1666 let data = Arc::new(Mutex::new(Vec::new()));
1667 let data2 = data.clone();
// Unless --nocapture was given, redirect print/panic output into the
// shared sink, remembering the previous handles so they can be restored.
1669 let oldio = if !nocapture {
1671 io::set_print(Some(Box::new(Sink(data2.clone())))),
1672 io::set_panic(Some(Box::new(Sink(data2)))),
// Catch panics so a crashing benchmark is reported as a failure rather
// than tearing down the harness thread.
1678 let result = catch_unwind(AssertUnwindSafe(|| bs.bench(f)));
1680 if let Some((printio, panicio)) = oldio {
1681 io::set_print(printio);
1682 io::set_panic(panicio);
1685 let test_result = match result {
1687 Ok(Some(ns_iter_summ)) => {
// Median ns/iter, clamped to at least 1 so the MB/s division below
// cannot divide by zero.
1688 let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
1689 let mb_s = bs.bytes * 1000 / ns_iter;
1691 let bs = BenchSamples {
1693 mb_s: mb_s as usize,
1695 TestResult::TrBench(bs)
1698 // iter not called, so no data.
1699 // FIXME: error in this case?
// Synthesize a single zero sample so a summary can still be built.
1700 let samples: &mut [f64] = &mut [0.0_f64; 1];
1701 let bs = BenchSamples {
1702 ns_iter_summ: stats::Summary::new(samples),
1705 TestResult::TrBench(bs)
1707 Err(_) => TestResult::TrFailed,
// Forward the outcome plus captured stdout to the monitor channel.
1710 let stdout = data.lock().unwrap().to_vec();
1711 monitor_ch.send((desc, test_result, stdout)).unwrap();
// Runs a benchmark body exactly once using `BenchMode::Single` (no
// statistics collected; see `Bencher::iter`).
1714 pub fn run_once<F>(f: F)
1716 F: FnMut(&mut Bencher),
1718 let mut bs = Bencher {
1719 mode: BenchMode::Single,
1729 use test::{filter_tests, parse_opts, run_test, DynTestFn, DynTestName, MetricMap, ShouldPanic,
1730 StaticTestName, TestDesc, TestDescAndFn, TestOpts, TrFailed, TrFailedMsg,
1732 use std::sync::mpsc::channel;
// An ignored test run without --ignored must not report TrOk.
1737 pub fn do_not_run_ignored_tests() {
1741 let desc = TestDescAndFn {
1743 name: StaticTestName("whatever"),
1745 should_panic: ShouldPanic::No,
1748 testfn: DynTestFn(Box::new(f)),
1750 let (tx, rx) = channel();
1751 run_test(&TestOpts::new(), false, desc, tx);
1752 let (_, res, _) = rx.recv().unwrap();
1753 assert!(res != TrOk);
// An ignored test must be reported as TrIgnored.
1757 pub fn ignored_tests_result_in_ignored() {
1759 let desc = TestDescAndFn {
1761 name: StaticTestName("whatever"),
1763 should_panic: ShouldPanic::No,
1766 testfn: DynTestFn(Box::new(f)),
1768 let (tx, rx) = channel();
1769 run_test(&TestOpts::new(), false, desc, tx);
1770 let (_, res, _) = rx.recv().unwrap();
1771 assert!(res == TrIgnored);
// A should_panic test whose body panics is reported as TrOk.
1775 fn test_should_panic() {
1779 let desc = TestDescAndFn {
1781 name: StaticTestName("whatever"),
1783 should_panic: ShouldPanic::Yes,
1786 testfn: DynTestFn(Box::new(f)),
1788 let (tx, rx) = channel();
1789 run_test(&TestOpts::new(), false, desc, tx);
1790 let (_, res, _) = rx.recv().unwrap();
1791 assert!(res == TrOk);
// should_panic with an expected substring that matches the panic
// payload ("error message" within "an error message") yields TrOk.
1795 fn test_should_panic_good_message() {
1797 panic!("an error message");
1799 let desc = TestDescAndFn {
1801 name: StaticTestName("whatever"),
1803 should_panic: ShouldPanic::YesWithMessage("error message"),
1806 testfn: DynTestFn(Box::new(f)),
1808 let (tx, rx) = channel();
1809 run_test(&TestOpts::new(), false, desc, tx);
1810 let (_, res, _) = rx.recv().unwrap();
1811 assert!(res == TrOk);
// should_panic with a non-matching expected substring yields a
// TrFailedMsg naming the expected string (see `calc_result`).
1815 fn test_should_panic_bad_message() {
1817 panic!("an error message");
1819 let expected = "foobar";
1820 let failed_msg = "Panic did not include expected string";
1821 let desc = TestDescAndFn {
1823 name: StaticTestName("whatever"),
1825 should_panic: ShouldPanic::YesWithMessage(expected),
1828 testfn: DynTestFn(Box::new(f)),
1830 let (tx, rx) = channel();
1831 run_test(&TestOpts::new(), false, desc, tx);
1832 let (_, res, _) = rx.recv().unwrap();
1833 assert!(res == TrFailedMsg(format!("{} '{}'", failed_msg, expected)));
// A should_panic test that completes without panicking is TrFailed.
1837 fn test_should_panic_but_succeeds() {
1839 let desc = TestDescAndFn {
1841 name: StaticTestName("whatever"),
1843 should_panic: ShouldPanic::Yes,
1846 testfn: DynTestFn(Box::new(f)),
1848 let (tx, rx) = channel();
1849 run_test(&TestOpts::new(), false, desc, tx);
1850 let (_, res, _) = rx.recv().unwrap();
1851 assert!(res == TrFailed);
// Passing --ignored on the command line must set `opts.run_ignored`.
1855 fn parse_ignored_flag() {
1857 "progname".to_string(),
1858 "filter".to_string(),
1859 "--ignored".to_string(),
1861 let opts = match parse_opts(&args) {
1863 _ => panic!("Malformed arg in parse_ignored_flag"),
1865 assert!((opts.run_ignored));
1869 pub fn filter_for_ignored_option() {
1870 // When we run ignored tests the test filter should filter out all the
1871 // unignored tests and flip the ignore flag on the rest to false
1873 let mut opts = TestOpts::new();
1874 opts.run_tests = true;
1875 opts.run_ignored = true;
// Two tests: "1" and "2" (their ignore flags are set on lines elided
// in this listing; the assertions imply "1" is the ignored one).
1880 name: StaticTestName("1"),
1882 should_panic: ShouldPanic::No,
1885 testfn: DynTestFn(Box::new(move || {})),
1889 name: StaticTestName("2"),
1891 should_panic: ShouldPanic::No,
1894 testfn: DynTestFn(Box::new(move || {})),
1897 let filtered = filter_tests(&opts, tests);
// Only the previously-ignored test survives, with ignore flipped off.
1899 assert_eq!(filtered.len(), 1);
1900 assert_eq!(filtered[0].desc.name.to_string(), "1");
1901 assert!(!filtered[0].desc.ignore);
// Contrasts substring filtering (default) with exact-name filtering:
// substring filters match any test containing the filter text; exact
// filters must equal the full test name.
1905 pub fn exact_filter_match() {
1906 fn tests() -> Vec<TestDescAndFn> {
1907 vec!["base", "base::test", "base::test1", "base::test2"]
1909 .map(|name| TestDescAndFn {
1911 name: StaticTestName(name),
1913 should_panic: ShouldPanic::No,
1916 testfn: DynTestFn(Box::new(move || {})),
// Substring mode: "base" and "bas" match all four names...
1921 let substr = filter_tests(
1923 filter: Some("base".into()),
1928 assert_eq!(substr.len(), 4);
1930 let substr = filter_tests(
1932 filter: Some("bas".into()),
1937 assert_eq!(substr.len(), 4);
// ...while "::test" and "base::test" match the three namespaced ones.
1939 let substr = filter_tests(
1941 filter: Some("::test".into()),
1946 assert_eq!(substr.len(), 3);
1948 let substr = filter_tests(
1950 filter: Some("base::test".into()),
1955 assert_eq!(substr.len(), 3);
// Exact mode: only a filter equal to a full test name matches.
1957 let exact = filter_tests(
1959 filter: Some("base".into()),
1965 assert_eq!(exact.len(), 1);
1967 let exact = filter_tests(
1969 filter: Some("bas".into()),
1975 assert_eq!(exact.len(), 0);
1977 let exact = filter_tests(
1979 filter: Some("::test".into()),
1985 assert_eq!(exact.len(), 0);
1987 let exact = filter_tests(
1989 filter: Some("base::test".into()),
1995 assert_eq!(exact.len(), 1);
// `filter_tests` must return the surviving tests sorted by name:
// the shuffled input list below comes back in lexicographic order.
1999 pub fn sort_tests() {
2000 let mut opts = TestOpts::new();
2001 opts.run_tests = true;
// Deliberately unsorted input names.
2004 "sha1::test".to_string(),
2005 "isize::test_to_str".to_string(),
2006 "isize::test_pow".to_string(),
2007 "test::do_not_run_ignored_tests".to_string(),
2008 "test::ignored_tests_result_in_ignored".to_string(),
2009 "test::first_free_arg_should_be_a_filter".to_string(),
2010 "test::parse_ignored_flag".to_string(),
2011 "test::filter_for_ignored_option".to_string(),
2012 "test::sort_tests".to_string(),
2016 let mut tests = Vec::new();
2017 for name in &names {
2018 let test = TestDescAndFn {
2020 name: DynTestName((*name).clone()),
2022 should_panic: ShouldPanic::No,
2025 testfn: DynTestFn(Box::new(testfn)),
2031 let filtered = filter_tests(&opts, tests);
// Same names, now in sorted order.
2033 let expected = vec![
2034 "isize::test_pow".to_string(),
2035 "isize::test_to_str".to_string(),
2036 "sha1::test".to_string(),
2037 "test::do_not_run_ignored_tests".to_string(),
2038 "test::filter_for_ignored_option".to_string(),
2039 "test::first_free_arg_should_be_a_filter".to_string(),
2040 "test::ignored_tests_result_in_ignored".to_string(),
2041 "test::parse_ignored_flag".to_string(),
2042 "test::sort_tests".to_string(),
2045 for (a, b) in expected.iter().zip(filtered) {
2046 assert!(*a == b.desc.name.to_string());
// Builds two metric maps covering every comparison case the key names
// describe: shared noisy metrics, one-sided metrics, and regressions/
// improvements in both noise-sign directions (the comparison itself is
// on lines elided in this listing).
2051 pub fn test_metricmap_compare() {
2052 let mut m1 = MetricMap::new();
2053 let mut m2 = MetricMap::new();
2054 m1.insert_metric("in-both-noise", 1000.0, 200.0);
2055 m2.insert_metric("in-both-noise", 1100.0, 200.0);
2057 m1.insert_metric("in-first-noise", 1000.0, 2.0);
2058 m2.insert_metric("in-second-noise", 1000.0, 2.0);
2060 m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
2061 m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);
2063 m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
2064 m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);
2066 m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
2067 m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);
2069 m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
2070 m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);
// `run_once` with a bench body that never calls `iter` must still
// complete (the `run_once` call is on a line elided in this listing).
2074 pub fn test_bench_once_no_iter() {
2075 fn f(_: &mut Bencher) {}
// `run_once` with a bench body that does call `iter` must complete
// (the remainder of the test is elided in this listing).
2080 pub fn test_bench_once_iter() {
2081 fn f(b: &mut Bencher) {
// A full `::bench::benchmark` run with a body that never calls `iter`
// must still send a result on the channel (nocapture = true).
2088 pub fn test_bench_no_iter() {
2089 fn f(_: &mut Bencher) {}
2091 let (tx, rx) = channel();
2093 let desc = TestDesc {
2094 name: StaticTestName("f"),
2096 should_panic: ShouldPanic::No,
2100 ::bench::benchmark(desc, tx, true, f);
// A full `::bench::benchmark` run with a body that calls `iter` must
// send a result on the channel (nocapture = true).
2105 pub fn test_bench_iter() {
2106 fn f(b: &mut Bencher) {
2110 let (tx, rx) = channel();
2112 let desc = TestDesc {
2113 name: StaticTestName("f"),
2115 should_panic: ShouldPanic::No,
2119 ::bench::benchmark(desc, tx, true, f);