1 // Copyright 2012-2017 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 //! Support code for rustc's built-in unit-test and micro-benchmarking
14 //! Almost all user code will only be interested in `Bencher` and
15 //! `black_box`. All other interactions (such as writing tests and
16 //! benchmarks themselves) should be done via the `#[test]` and
17 //! `#[bench]` attributes.
19 //! See the [Testing Chapter](../book/first-edition/testing.html) of the book for more details.
21 // Currently, not much of this is meant for users. It is intended to
22 // support the simplest interface possible for representing and
23 // running tests while providing a base that other test frameworks may build on.
26 // NB: this is also specified in this crate's Cargo.toml, but libsyntax contains logic specific to
27 // this crate, which relies on this attribute (rather than the value of `--crate-name` passed by
28 // cargo) to detect this crate.
30 #![crate_name = "test"]
31 #![unstable(feature = "test", issue = "27812")]
32 #![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
33 html_favicon_url = "https://doc.rust-lang.org/favicon.ico",
34 html_root_url = "https://doc.rust-lang.org/nightly/", test(attr(deny(warnings))))]
37 #![cfg_attr(any(unix, target_os = "cloudabi"), feature(libc))]
38 #![cfg_attr(not(stage0), feature(nll))]
39 #![cfg_attr(not(stage0), feature(infer_outlives_requirements))]
40 #![feature(set_stdio)]
41 #![feature(panic_unwind)]
42 #![feature(staged_api)]
43 #![feature(termination_trait_lib)]
46 #[cfg(any(unix, target_os = "cloudabi"))]
48 extern crate panic_unwind;
51 pub use self::TestFn::*;
52 pub use self::ColorConfig::*;
53 pub use self::TestResult::*;
54 pub use self::TestName::*;
55 use self::TestEvent::*;
56 use self::NamePadding::*;
57 use self::OutputLocation::*;
59 use std::panic::{catch_unwind, AssertUnwindSafe};
61 use std::boxed::FnBox;
63 use std::collections::BTreeMap;
67 use std::io::prelude::*;
69 use std::path::PathBuf;
70 use std::process::Termination;
71 use std::sync::mpsc::{channel, Sender};
72 use std::sync::{Arc, Mutex};
74 use std::time::{Duration, Instant};
78 const TEST_WARN_TIMEOUT_S: u64 = 60;
79 const QUIET_MODE_MAX_COLUMN: usize = 100; // insert a '\n' after 100 tests in quiet mode
81 // to be used by rustc to compile tests in libtest
83 pub use {assert_test_result, filter_tests, parse_opts, run_test, test_main, test_main_static,
84 Bencher, DynTestFn, DynTestName, Metric, MetricMap, Options, ShouldPanic,
85 StaticBenchFn, StaticTestFn, StaticTestName, TestDesc, TestDescAndFn, TestName,
86 TestOpts, TestResult, TrFailed, TrFailedMsg, TrIgnored, TrOk};
92 use formatters::{JsonFormatter, OutputFormatter, PrettyFormatter, TerseFormatter};
94 // The name of a test. By convention this follows the rules for rust
95 // paths; i.e. it should be a series of identifiers separated by double
96 // colons. This way if some test runner wants to arrange the tests
97 // hierarchically it may.
99 #[derive(Clone, PartialEq, Eq, Hash, Debug)]
101 StaticTestName(&'static str),
103 AlignedTestName(Cow<'static, str>, NamePadding),
106 fn as_slice(&self) -> &str {
108 StaticTestName(s) => s,
109 DynTestName(ref s) => s,
110 AlignedTestName(ref s, _) => &*s,
114 fn padding(&self) -> NamePadding {
116 &AlignedTestName(_, p) => p,
121 fn with_padding(&self, padding: NamePadding) -> TestName {
122 let name = match self {
123 &TestName::StaticTestName(name) => Cow::Borrowed(name),
124 &TestName::DynTestName(ref name) => Cow::Owned(name.clone()),
125 &TestName::AlignedTestName(ref name, _) => name.clone(),
128 TestName::AlignedTestName(name, padding)
131 impl fmt::Display for TestName {
132 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
133 fmt::Display::fmt(self.as_slice(), f)
137 #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)]
138 pub enum NamePadding {
144 fn padded_name(&self, column_count: usize, align: NamePadding) -> String {
145 let mut name = String::from(self.name.as_slice());
146 let fill = column_count.saturating_sub(name.len());
147 let pad = " ".repeat(fill);
158 /// Represents a benchmark function.
159 pub trait TDynBenchFn: Send {
160 fn run(&self, harness: &mut Bencher);
163 // A function that runs a test. If the function returns successfully,
164 // the test succeeds; if the function panics then the test fails. We
165 // may need to come up with a more clever definition of test in order
166 // to support isolation of tests into threads.
169 StaticBenchFn(fn(&mut Bencher)),
170 DynTestFn(Box<dyn FnBox() + Send>),
171 DynBenchFn(Box<dyn TDynBenchFn + 'static>),
175 fn padding(&self) -> NamePadding {
177 StaticTestFn(..) => PadNone,
178 StaticBenchFn(..) => PadOnRight,
179 DynTestFn(..) => PadNone,
180 DynBenchFn(..) => PadOnRight,
185 impl fmt::Debug for TestFn {
186 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
187 f.write_str(match *self {
188 StaticTestFn(..) => "StaticTestFn(..)",
189 StaticBenchFn(..) => "StaticBenchFn(..)",
190 DynTestFn(..) => "DynTestFn(..)",
191 DynBenchFn(..) => "DynBenchFn(..)",
196 /// Manager of the benchmarking runs.
198 /// This is fed into functions marked with `#[bench]` to allow for
199 /// set-up & tear-down before running a piece of code repeatedly via a
204 summary: Option<stats::Summary>,
208 #[derive(Clone, PartialEq, Eq)]
214 #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
215 pub enum ShouldPanic {
218 YesWithMessage(&'static str),
221 // The definition of a single test. A test runner will run a list of
223 #[derive(Clone, Debug, PartialEq, Eq, Hash)]
224 pub struct TestDesc {
227 pub should_panic: ShouldPanic,
228 pub allow_fail: bool,
232 pub struct TestDescAndFn {
237 #[derive(Clone, PartialEq, Debug, Copy)]
244 pub fn new(value: f64, noise: f64) -> Metric {
245 Metric { value, noise }
249 /// In case we want to add other options as well, just add them in this struct.
250 #[derive(Copy, Clone, Debug)]
252 display_output: bool,
256 pub fn new() -> Options {
258 display_output: false,
262 pub fn display_output(mut self, display_output: bool) -> Options {
263 self.display_output = display_output;
268 // The default console test runner. It accepts the command line
269 // arguments and a vector of test_descs.
270 pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>, options: Options) {
271 let mut opts = match parse_opts(args) {
274 eprintln!("error: {}", msg);
280 opts.options = options;
282 if let Err(e) = list_tests_console(&opts, tests) {
283 eprintln!("error: io error when listing tests: {:?}", e);
287 match run_tests_console(&opts, tests) {
289 Ok(false) => process::exit(101),
291 eprintln!("error: io error when listing tests: {:?}", e);
298 // A variant optimized for invocation with a static test vector.
299 // This will panic (intentionally) when fed any dynamic tests, because
300 // it is copying the static values out into a dynamic vector and cannot
301 // copy dynamic values. It is doing this because from this point on
302 // a Vec<TestDescAndFn> is used in order to effect ownership-transfer
303 // semantics into parallel test runners, which in turn requires a Vec<>
304 // rather than a &[].
305 pub fn test_main_static(tests: &[TestDescAndFn]) {
306 let args = env::args().collect::<Vec<_>>();
307 let owned_tests = tests
309 .map(|t| match t.testfn {
310 StaticTestFn(f) => TestDescAndFn {
311 testfn: StaticTestFn(f),
312 desc: t.desc.clone(),
314 StaticBenchFn(f) => TestDescAndFn {
315 testfn: StaticBenchFn(f),
316 desc: t.desc.clone(),
318 _ => panic!("non-static tests passed to test::test_main_static"),
321 test_main(&args, owned_tests, Options::new())
324 /// Invoked when unit tests terminate. Should panic if the unit
325 /// test is considered a failure. By default, invokes `report()`
326 /// and checks for a `0` result.
327 pub fn assert_test_result<T: Termination>(result: T) {
328 let code = result.report();
332 "the test returned a termination value with a non-zero status code ({}) \
333 which indicates a failure",
338 #[derive(Copy, Clone, Debug)]
339 pub enum ColorConfig {
345 #[derive(Copy, Clone, Debug, PartialEq, Eq)]
346 pub enum OutputFormat {
353 pub struct TestOpts {
355 pub filter: Option<String>,
356 pub filter_exact: bool,
357 pub run_ignored: bool,
359 pub bench_benchmarks: bool,
360 pub logfile: Option<PathBuf>,
362 pub color: ColorConfig,
363 pub format: OutputFormat,
364 pub test_threads: Option<usize>,
365 pub skip: Vec<String>,
366 pub options: Options,
371 fn new() -> TestOpts {
378 bench_benchmarks: false,
382 format: OutputFormat::Pretty,
385 options: Options::new(),
390 /// Result of parsing the options.
391 pub type OptRes = Result<TestOpts, String>;
393 fn optgroups() -> getopts::Options {
394 let mut opts = getopts::Options::new();
395 opts.optflag("", "ignored", "Run ignored tests")
396 .optflag("", "test", "Run tests and not benchmarks")
397 .optflag("", "bench", "Run benchmarks instead of tests")
398 .optflag("", "list", "List all tests and benchmarks")
399 .optflag("h", "help", "Display this message (longer with --help)")
403 "Write logs to the specified file instead \
410 "don't capture stdout/stderr of each \
411 task, allow printing directly",
416 "Number of threads used for running tests \
423 "Skip tests whose names contain FILTER (this flag can \
424 be used multiple times)",
430 "Display one character per test instead of one line. \
431 Alias to --format=terse",
436 "Exactly match filters rather than by substring",
441 "Configure coloring of output:
442 auto = colorize if stdout is a tty and tests are run on serially (default);
443 always = always colorize output;
444 never = never colorize output;",
450 "Configure formatting of output:
451 pretty = Print verbose output;
452 terse = Display one character per test;
453 json = Output a json document",
459 "Enable nightly-only flags:
460 unstable-options = Allow use of experimental features",
466 fn usage(binary: &str, options: &getopts::Options) {
467 let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
471 The FILTER string is tested against the name of all tests, and only those
472 tests whose names contain the filter are run.
474 By default, all tests are run in parallel. This can be altered with the
475 --test-threads flag or the RUST_TEST_THREADS environment variable when running
478 All tests have their standard output and standard error captured by default.
479 This can be overridden with the --nocapture flag or setting RUST_TEST_NOCAPTURE
480 environment variable to a value other than "0". Logging is not captured by default.
484 #[test] - Indicates a function is a test to be run. This function
486 #[bench] - Indicates a function is a benchmark to be run. This
487 function takes one argument (test::Bencher).
488 #[should_panic] - This function (also labeled with #[test]) will only pass if
489 the code causes a panic (an assertion failure or panic!)
490 A message may be provided, which the failure string must
491 contain: #[should_panic(expected = "foo")].
492 #[ignore] - When applied to a function which is already attributed as a
493 test, then the test runner will ignore these tests during
494 normal test runs. Running with --ignored will run these
496 usage = options.usage(&message)
500 // FIXME: Copied from libsyntax until linkage errors are resolved. Issue #47566
501 fn is_nightly() -> bool {
502 // Whether this is a feature-staged build, i.e. on the beta or stable channel
503 let disable_unstable_features = option_env!("CFG_DISABLE_UNSTABLE_FEATURES").is_some();
504 // Whether we should enable unstable features for bootstrapping
505 let bootstrap = env::var("RUSTC_BOOTSTRAP").is_ok();
507 bootstrap || !disable_unstable_features
510 // Parses command line arguments into test options
511 pub fn parse_opts(args: &[String]) -> Option<OptRes> {
512 let mut allow_unstable = false;
513 let opts = optgroups();
514 let args = args.get(1..).unwrap_or(args);
515 let matches = match opts.parse(args) {
517 Err(f) => return Some(Err(f.to_string())),
520 if let Some(opt) = matches.opt_str("Z") {
523 "the option `Z` is only accepted on the nightly compiler".into(),
528 "unstable-options" => {
529 allow_unstable = true;
532 return Some(Err("Unrecognized option to `Z`".into()));
537 if matches.opt_present("h") {
538 usage(&args[0], &opts);
542 let filter = if !matches.free.is_empty() {
543 Some(matches.free[0].clone())
548 let run_ignored = matches.opt_present("ignored");
549 let quiet = matches.opt_present("quiet");
550 let exact = matches.opt_present("exact");
551 let list = matches.opt_present("list");
553 let logfile = matches.opt_str("logfile");
554 let logfile = logfile.map(|s| PathBuf::from(&s));
556 let bench_benchmarks = matches.opt_present("bench");
557 let run_tests = !bench_benchmarks || matches.opt_present("test");
559 let mut nocapture = matches.opt_present("nocapture");
561 nocapture = match env::var("RUST_TEST_NOCAPTURE") {
562 Ok(val) => &val != "0",
567 let test_threads = match matches.opt_str("test-threads") {
568 Some(n_str) => match n_str.parse::<usize>() {
569 Ok(0) => return Some(Err("argument for --test-threads must not be 0".to_string())),
572 return Some(Err(format!(
573 "argument for --test-threads must be a number > 0 \
582 let color = match matches.opt_str("color").as_ref().map(|s| &**s) {
583 Some("auto") | None => AutoColor,
584 Some("always") => AlwaysColor,
585 Some("never") => NeverColor,
588 return Some(Err(format!(
589 "argument for --color must be auto, always, or never (was \
596 let format = match matches.opt_str("format").as_ref().map(|s| &**s) {
597 None if quiet => OutputFormat::Terse,
598 Some("pretty") | None => OutputFormat::Pretty,
599 Some("terse") => OutputFormat::Terse,
603 "The \"json\" format is only accepted on the nightly compiler".into(),
610 return Some(Err(format!(
611 "argument for --format must be pretty, terse, or json (was \
618 let test_opts = TestOpts {
630 skip: matches.opt_strs("skip"),
631 options: Options::new(),
637 #[derive(Clone, PartialEq)]
638 pub struct BenchSamples {
639 ns_iter_summ: stats::Summary,
643 #[derive(Clone, PartialEq)]
644 pub enum TestResult {
650 TrBench(BenchSamples),
653 unsafe impl Send for TestResult {}
655 enum OutputLocation<T> {
656 Pretty(Box<term::StdoutTerminal>),
660 impl<T: Write> Write for OutputLocation<T> {
661 fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
663 Pretty(ref mut term) => term.write(buf),
664 Raw(ref mut stdout) => stdout.write(buf),
668 fn flush(&mut self) -> io::Result<()> {
670 Pretty(ref mut term) => term.flush(),
671 Raw(ref mut stdout) => stdout.flush(),
676 struct ConsoleTestState {
677 log_out: Option<File>,
686 failures: Vec<(TestDesc, Vec<u8>)>,
687 not_failures: Vec<(TestDesc, Vec<u8>)>,
691 impl ConsoleTestState {
692 pub fn new(opts: &TestOpts) -> io::Result<ConsoleTestState> {
693 let log_out = match opts.logfile {
694 Some(ref path) => Some(File::create(path)?),
698 Ok(ConsoleTestState {
707 metrics: MetricMap::new(),
708 failures: Vec::new(),
709 not_failures: Vec::new(),
710 options: opts.options,
714 pub fn write_log<S: AsRef<str>>(&mut self, msg: S) -> io::Result<()> {
715 let msg = msg.as_ref();
718 Some(ref mut o) => o.write_all(msg.as_bytes()),
722 pub fn write_log_result(&mut self, test: &TestDesc, result: &TestResult) -> io::Result<()> {
723 self.write_log(format!(
726 TrOk => "ok".to_owned(),
727 TrFailed => "failed".to_owned(),
728 TrFailedMsg(ref msg) => format!("failed: {}", msg),
729 TrIgnored => "ignored".to_owned(),
730 TrAllowedFail => "failed (allowed)".to_owned(),
731 TrBench(ref bs) => fmt_bench_samples(bs),
737 fn current_test_count(&self) -> usize {
738 self.passed + self.failed + self.ignored + self.measured + self.allowed_fail
742 // Format a number with thousands separators
743 fn fmt_thousands_sep(mut n: usize, sep: char) -> String {
745 let mut output = String::new();
746 let mut trailing = false;
747 for &pow in &[9, 6, 3, 0] {
748 let base = 10_usize.pow(pow);
749 if pow == 0 || trailing || n / base != 0 {
751 output.write_fmt(format_args!("{}", n / base)).unwrap();
753 output.write_fmt(format_args!("{:03}", n / base)).unwrap();
766 pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
768 let mut output = String::new();
770 let median = bs.ns_iter_summ.median as usize;
771 let deviation = (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize;
774 .write_fmt(format_args!(
775 "{:>11} ns/iter (+/- {})",
776 fmt_thousands_sep(median, ','),
777 fmt_thousands_sep(deviation, ',')
782 .write_fmt(format_args!(" = {} MB/s", bs.mb_s))
788 // List the tests to console, and optionally to logfile. Filters are honored.
789 pub fn list_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<()> {
790 let mut output = match term::stdout() {
791 None => Raw(io::stdout()),
792 Some(t) => Pretty(t),
795 let quiet = opts.format == OutputFormat::Terse;
796 let mut st = ConsoleTestState::new(opts)?;
801 for test in filter_tests(&opts, tests) {
805 desc: TestDesc { name, .. },
809 let fntype = match testfn {
810 StaticTestFn(..) | DynTestFn(..) => {
814 StaticBenchFn(..) | DynBenchFn(..) => {
820 writeln!(output, "{}: {}", name, fntype)?;
821 st.write_log(format!("{} {}\n", fntype, name))?;
824 fn plural(count: u32, s: &str) -> String {
826 1 => format!("{} {}", 1, s),
827 n => format!("{} {}s", n, s),
832 if ntest != 0 || nbench != 0 {
833 writeln!(output, "")?;
839 plural(ntest, "test"),
840 plural(nbench, "benchmark")
847 // A simple console test runner
848 pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<bool> {
851 st: &mut ConsoleTestState,
852 out: &mut dyn OutputFormatter,
853 ) -> io::Result<()> {
854 match (*event).clone() {
855 TeFiltered(ref filtered_tests) => {
856 st.total = filtered_tests.len();
857 out.write_run_start(filtered_tests.len())
859 TeFilteredOut(filtered_out) => Ok(st.filtered_out = filtered_out),
860 TeWait(ref test) => out.write_test_start(test),
861 TeTimeout(ref test) => out.write_timeout(test),
862 TeResult(test, result, stdout) => {
863 st.write_log_result(&test, &result)?;
864 out.write_result(&test, &result, &*stdout)?;
868 st.not_failures.push((test, stdout));
870 TrIgnored => st.ignored += 1,
871 TrAllowedFail => st.allowed_fail += 1,
873 st.metrics.insert_metric(
874 test.name.as_slice(),
875 bs.ns_iter_summ.median,
876 bs.ns_iter_summ.max - bs.ns_iter_summ.min,
882 st.failures.push((test, stdout));
884 TrFailedMsg(msg) => {
886 let mut stdout = stdout;
887 stdout.extend_from_slice(format!("note: {}", msg).as_bytes());
888 st.failures.push((test, stdout));
896 let output = match term::stdout() {
897 None => Raw(io::stdout()),
898 Some(t) => Pretty(t),
901 let max_name_len = tests
903 .max_by_key(|t| len_if_padded(*t))
904 .map(|t| t.desc.name.as_slice().len())
907 let is_multithreaded = opts.test_threads.unwrap_or_else(get_concurrency) > 1;
909 let mut out: Box<dyn OutputFormatter> = match opts.format {
910 OutputFormat::Pretty => Box::new(PrettyFormatter::new(
916 OutputFormat::Terse => Box::new(TerseFormatter::new(
922 OutputFormat::Json => Box::new(JsonFormatter::new(output)),
924 let mut st = ConsoleTestState::new(opts)?;
925 fn len_if_padded(t: &TestDescAndFn) -> usize {
926 match t.testfn.padding() {
928 PadOnRight => t.desc.name.as_slice().len(),
932 run_tests(opts, tests, |x| callback(&x, &mut st, &mut *out))?;
934 assert!(st.current_test_count() == st.total);
936 return out.write_run_finish(&st);
940 fn should_sort_failures_before_printing_them() {
941 let test_a = TestDesc {
942 name: StaticTestName("a"),
944 should_panic: ShouldPanic::No,
948 let test_b = TestDesc {
949 name: StaticTestName("b"),
951 should_panic: ShouldPanic::No,
955 let mut out = PrettyFormatter::new(Raw(Vec::new()), false, 10, false);
957 let st = ConsoleTestState {
966 metrics: MetricMap::new(),
967 failures: vec![(test_b, Vec::new()), (test_a, Vec::new())],
968 options: Options::new(),
969 not_failures: Vec::new(),
972 out.write_failures(&st).unwrap();
973 let s = match out.output_location() {
974 &Raw(ref m) => String::from_utf8_lossy(&m[..]),
975 &Pretty(_) => unreachable!(),
978 let apos = s.find("a").unwrap();
979 let bpos = s.find("b").unwrap();
980 assert!(apos < bpos);
983 fn use_color(opts: &TestOpts) -> bool {
985 AutoColor => !opts.nocapture && stdout_isatty(),
991 #[cfg(any(target_os = "cloudabi", target_os = "redox",
992 all(target_arch = "wasm32", not(target_os = "emscripten"))))]
993 fn stdout_isatty() -> bool {
994 // FIXME: Implement isatty on Redox
998 fn stdout_isatty() -> bool {
999 unsafe { libc::isatty(libc::STDOUT_FILENO) != 0 }
1002 fn stdout_isatty() -> bool {
1005 type HANDLE = *mut u8;
1006 type LPDWORD = *mut u32;
1007 const STD_OUTPUT_HANDLE: DWORD = -11i32 as DWORD;
1009 fn GetStdHandle(which: DWORD) -> HANDLE;
1010 fn GetConsoleMode(hConsoleHandle: HANDLE, lpMode: LPDWORD) -> BOOL;
1013 let handle = GetStdHandle(STD_OUTPUT_HANDLE);
1015 GetConsoleMode(handle, &mut out) != 0
1020 pub enum TestEvent {
1021 TeFiltered(Vec<TestDesc>),
1023 TeResult(TestDesc, TestResult, Vec<u8>),
1024 TeTimeout(TestDesc),
1025 TeFilteredOut(usize),
1028 pub type MonitorMsg = (TestDesc, TestResult, Vec<u8>);
1030 struct Sink(Arc<Mutex<Vec<u8>>>);
1031 impl Write for Sink {
1032 fn write(&mut self, data: &[u8]) -> io::Result<usize> {
1033 Write::write(&mut *self.0.lock().unwrap(), data)
1035 fn flush(&mut self) -> io::Result<()> {
1040 pub fn run_tests<F>(opts: &TestOpts, tests: Vec<TestDescAndFn>, mut callback: F) -> io::Result<()>
1042 F: FnMut(TestEvent) -> io::Result<()>,
1044 use std::collections::HashMap;
1045 use std::sync::mpsc::RecvTimeoutError;
1047 let tests_len = tests.len();
1049 let mut filtered_tests = filter_tests(opts, tests);
1050 if !opts.bench_benchmarks {
1051 filtered_tests = convert_benchmarks_to_tests(filtered_tests);
1054 let filtered_tests = {
1055 let mut filtered_tests = filtered_tests;
1056 for test in filtered_tests.iter_mut() {
1057 test.desc.name = test.desc.name.with_padding(test.testfn.padding());
1063 let filtered_out = tests_len - filtered_tests.len();
1064 callback(TeFilteredOut(filtered_out))?;
1066 let filtered_descs = filtered_tests.iter().map(|t| t.desc.clone()).collect();
1068 callback(TeFiltered(filtered_descs))?;
1070 let (filtered_tests, filtered_benchs): (Vec<_>, _) =
1071 filtered_tests.into_iter().partition(|e| match e.testfn {
1072 StaticTestFn(_) | DynTestFn(_) => true,
1076 let concurrency = opts.test_threads.unwrap_or_else(get_concurrency);
1078 let mut remaining = filtered_tests;
1079 remaining.reverse();
1080 let mut pending = 0;
1082 let (tx, rx) = channel::<MonitorMsg>();
1084 let mut running_tests: HashMap<TestDesc, Instant> = HashMap::new();
1086 fn get_timed_out_tests(running_tests: &mut HashMap<TestDesc, Instant>) -> Vec<TestDesc> {
1087 let now = Instant::now();
1088 let timed_out = running_tests
1090 .filter_map(|(desc, timeout)| {
1091 if &now >= timeout {
1098 for test in &timed_out {
1099 running_tests.remove(test);
1104 fn calc_timeout(running_tests: &HashMap<TestDesc, Instant>) -> Option<Duration> {
1105 running_tests.values().min().map(|next_timeout| {
1106 let now = Instant::now();
1107 if *next_timeout >= now {
1115 if concurrency == 1 {
1116 while !remaining.is_empty() {
1117 let test = remaining.pop().unwrap();
1118 callback(TeWait(test.desc.clone()))?;
1119 run_test(opts, !opts.run_tests, test, tx.clone());
1120 let (test, result, stdout) = rx.recv().unwrap();
1121 callback(TeResult(test, result, stdout))?;
1124 while pending > 0 || !remaining.is_empty() {
1125 while pending < concurrency && !remaining.is_empty() {
1126 let test = remaining.pop().unwrap();
1127 let timeout = Instant::now() + Duration::from_secs(TEST_WARN_TIMEOUT_S);
1128 running_tests.insert(test.desc.clone(), timeout);
1129 callback(TeWait(test.desc.clone()))?; //here no pad
1130 run_test(opts, !opts.run_tests, test, tx.clone());
1136 if let Some(timeout) = calc_timeout(&running_tests) {
1137 res = rx.recv_timeout(timeout);
1138 for test in get_timed_out_tests(&mut running_tests) {
1139 callback(TeTimeout(test))?;
1141 if res != Err(RecvTimeoutError::Timeout) {
1145 res = rx.recv().map_err(|_| RecvTimeoutError::Disconnected);
1150 let (desc, result, stdout) = res.unwrap();
1151 running_tests.remove(&desc);
1153 callback(TeResult(desc, result, stdout))?;
1158 if opts.bench_benchmarks {
1159 // All benchmarks run at the end, in serial.
1160 for b in filtered_benchs {
1161 callback(TeWait(b.desc.clone()))?;
1162 run_test(opts, false, b, tx.clone());
1163 let (test, result, stdout) = rx.recv().unwrap();
1164 callback(TeResult(test, result, stdout))?;
1170 #[allow(deprecated)]
1171 fn get_concurrency() -> usize {
1172 return match env::var("RUST_TEST_THREADS") {
1174 let opt_n: Option<usize> = s.parse().ok();
1176 Some(n) if n > 0 => n,
1178 "RUST_TEST_THREADS is `{}`, should be a positive integer.",
1183 Err(..) => num_cpus(),
1188 fn num_cpus() -> usize {
1190 struct SYSTEM_INFO {
1191 wProcessorArchitecture: u16,
1194 lpMinimumApplicationAddress: *mut u8,
1195 lpMaximumApplicationAddress: *mut u8,
1196 dwActiveProcessorMask: *mut u8,
1197 dwNumberOfProcessors: u32,
1198 dwProcessorType: u32,
1199 dwAllocationGranularity: u32,
1200 wProcessorLevel: u16,
1201 wProcessorRevision: u16,
1204 fn GetSystemInfo(info: *mut SYSTEM_INFO) -> i32;
1207 let mut sysinfo = std::mem::zeroed();
1208 GetSystemInfo(&mut sysinfo);
1209 sysinfo.dwNumberOfProcessors as usize
1213 #[cfg(target_os = "redox")]
1214 fn num_cpus() -> usize {
1215 // FIXME: Implement num_cpus on Redox
1219 #[cfg(all(target_arch = "wasm32", not(target_os = "emscripten")))]
1220 fn num_cpus() -> usize {
1224 #[cfg(any(target_os = "android", target_os = "cloudabi", target_os = "emscripten",
1225 target_os = "fuchsia", target_os = "ios", target_os = "linux",
1226 target_os = "macos", target_os = "solaris"))]
1227 fn num_cpus() -> usize {
1228 unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as usize }
1231 #[cfg(any(target_os = "freebsd", target_os = "dragonfly", target_os = "bitrig",
1232 target_os = "netbsd"))]
1233 fn num_cpus() -> usize {
1236 let mut cpus: libc::c_uint = 0;
1237 let mut cpus_size = std::mem::size_of_val(&cpus);
1240 cpus = libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as libc::c_uint;
1243 let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
1248 &mut cpus as *mut _ as *mut _,
1249 &mut cpus_size as *mut _ as *mut _,
1261 #[cfg(target_os = "openbsd")]
1262 fn num_cpus() -> usize {
1265 let mut cpus: libc::c_uint = 0;
1266 let mut cpus_size = std::mem::size_of_val(&cpus);
1267 let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
1273 &mut cpus as *mut _ as *mut _,
1274 &mut cpus_size as *mut _ as *mut _,
1285 #[cfg(target_os = "haiku")]
1286 fn num_cpus() -> usize {
1291 #[cfg(target_os = "l4re")]
1292 fn num_cpus() -> usize {
1298 pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
1299 let mut filtered = tests;
1300 // Remove tests that don't match the test filter
1301 filtered = match opts.filter {
1303 Some(ref filter) => filtered
1306 if opts.filter_exact {
1307 test.desc.name.as_slice() == &filter[..]
1309 test.desc.name.as_slice().contains(&filter[..])
1315 // Skip tests that match any of the skip filters
1319 !opts.skip.iter().any(|sf| {
1320 if opts.filter_exact {
1321 t.desc.name.as_slice() == &sf[..]
1323 t.desc.name.as_slice().contains(&sf[..])
1329 // Maybe pull out the ignored test and unignore them
1330 filtered = if !opts.run_ignored {
1333 fn filter(test: TestDescAndFn) -> Option<TestDescAndFn> {
1334 if test.desc.ignore {
1335 let TestDescAndFn { desc, testfn } = test;
1336 Some(TestDescAndFn {
1347 filtered.into_iter().filter_map(filter).collect()
1350 // Sort the tests alphabetically
1351 filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));
1356 pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
1357 // convert benchmarks to tests, if we're not benchmarking them
1361 let testfn = match x.testfn {
1362 DynBenchFn(bench) => DynTestFn(Box::new(move || {
1363 bench::run_once(|b| __rust_begin_short_backtrace(|| bench.run(b)))
1365 StaticBenchFn(benchfn) => DynTestFn(Box::new(move || {
1366 bench::run_once(|b| __rust_begin_short_backtrace(|| benchfn(b)))
1381 test: TestDescAndFn,
1382 monitor_ch: Sender<MonitorMsg>,
1384 let TestDescAndFn { desc, testfn } = test;
1386 let ignore_because_panic_abort = cfg!(target_arch = "wasm32") && !cfg!(target_os = "emscripten")
1387 && desc.should_panic != ShouldPanic::No;
1389 if force_ignore || desc.ignore || ignore_because_panic_abort {
1390 monitor_ch.send((desc, TrIgnored, Vec::new())).unwrap();
1396 monitor_ch: Sender<MonitorMsg>,
1398 testfn: Box<dyn FnBox() + Send>,
1400 // Buffer for capturing standard I/O
1401 let data = Arc::new(Mutex::new(Vec::new()));
1402 let data2 = data.clone();
1404 let name = desc.name.clone();
1405 let runtest = move || {
1406 let oldio = if !nocapture {
1408 io::set_print(Some(Box::new(Sink(data2.clone())))),
1409 io::set_panic(Some(Box::new(Sink(data2)))),
1415 let result = catch_unwind(AssertUnwindSafe(testfn));
1417 if let Some((printio, panicio)) = oldio {
1418 io::set_print(printio);
1419 io::set_panic(panicio);
1422 let test_result = calc_result(&desc, result);
1423 let stdout = data.lock().unwrap().to_vec();
1425 .send((desc.clone(), test_result, stdout))
1429 // If the platform is single-threaded we're just going to run
1430 // the test synchronously, regardless of the concurrency
1432 let supports_threads = !cfg!(target_os = "emscripten") && !cfg!(target_arch = "wasm32");
1433 if supports_threads {
1434 let cfg = thread::Builder::new().name(name.as_slice().to_owned());
1435 cfg.spawn(runtest).unwrap();
1442 DynBenchFn(bencher) => {
1443 ::bench::benchmark(desc, monitor_ch, opts.nocapture, |harness| {
1444 bencher.run(harness)
1447 StaticBenchFn(benchfn) => {
1448 ::bench::benchmark(desc, monitor_ch, opts.nocapture, |harness| {
1449 (benchfn.clone())(harness)
1453 let cb = move || __rust_begin_short_backtrace(f);
1454 run_test_inner(desc, monitor_ch, opts.nocapture, Box::new(cb))
1456 StaticTestFn(f) => run_test_inner(
1460 Box::new(move || __rust_begin_short_backtrace(f)),
1465 /// Fixed frame used to clean the backtrace with `RUST_BACKTRACE=1`.
1467 fn __rust_begin_short_backtrace<F: FnOnce()>(f: F) {
1471 fn calc_result(desc: &TestDesc, task_result: Result<(), Box<dyn Any + Send>>) -> TestResult {
1472 match (&desc.should_panic, task_result) {
1473 (&ShouldPanic::No, Ok(())) | (&ShouldPanic::Yes, Err(_)) => TrOk,
1474 (&ShouldPanic::YesWithMessage(msg), Err(ref err)) => {
1475 if err.downcast_ref::<String>()
1477 .or_else(|| err.downcast_ref::<&'static str>().map(|e| *e))
1478 .map(|e| e.contains(msg))
1483 if desc.allow_fail {
1486 TrFailedMsg(format!("Panic did not include expected string '{}'", msg))
1490 _ if desc.allow_fail => TrAllowedFail,
// Map from metric name to `Metric { value, noise }`, kept in a `BTreeMap`
// so iteration (and thus `fmt_metrics` output) is ordered by name.
// NOTE(review): fragment — closing braces and some interior lines
// (e.g. 1501-1502, 1519-1520, 1522-1523) are elided in this excerpt.
1495 #[derive(Clone, PartialEq)]
1496 pub struct MetricMap(BTreeMap<String, Metric>);
// Construct an empty metric map.
1499 pub fn new() -> MetricMap {
1500 MetricMap(BTreeMap::new())
1503 /// Insert a named `value` (+/- `noise`) metric into the map. The value
1504 /// must be non-negative. The `noise` indicates the uncertainty of the
1505 /// metric, which doubles as the "noise range" of acceptable
1506 /// pairwise-regressions on this named value, when comparing from one
1507 /// metric to the next using `compare_to_old`.
1509 /// If `noise` is positive, then it means this metric is of a value
1510 /// you want to see grow smaller, so a change larger than `noise` in the
1511 /// positive direction represents a regression.
1513 /// If `noise` is negative, then it means this metric is of a value
1514 /// you want to see grow larger, so a change larger than `noise` in the
1515 /// negative direction represents a regression.
1516 pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
1517 let m = Metric { value, noise };
// Insert-or-overwrite keyed by owned name.
1518 self.0.insert(name.to_owned(), m);
// Render all metrics as "name: value (+/- noise)" lines; the lines are
// collected into a Vec (the join step is elided from this excerpt).
1521 pub fn fmt_metrics(&self) -> String {
1524 .map(|(k, v)| format!("{}: {} (+/- {})", *k, v.value, v.noise))
1525 .collect::<Vec<_>>();
// Optimizer barrier used by benchmarks: the inline-asm "r" constraint forces
// LLVM to treat `&dummy` as observed, preventing dead-code elimination of
// the benchmarked computation.
// NOTE(review): fragment — the return of `dummy` and the wasm/asmjs fallback
// body (which has no inline asm available) are elided from this excerpt.
1532 /// A function that is opaque to the optimizer, to allow benchmarks to
1533 /// pretend to use outputs to assist in avoiding dead-code
1536 /// This function is a no-op, and does not even read from `dummy`.
1537 #[cfg(not(any(target_arch = "asmjs", target_arch = "wasm32")))]
1538 pub fn black_box<T>(dummy: T) -> T {
1539 // we need to "use" the argument in some way LLVM can't
1541 unsafe { asm!("" : : "r"(&dummy)) }
// asmjs/wasm32 have no inline asm; this variant's body is elided here.
1544 #[cfg(any(target_arch = "asmjs", target_arch = "wasm32"))]
1546 pub fn black_box<T>(dummy: T) -> T {
// Bencher entry points (impl fragment — where-clauses, braces and several
// interior lines are elided in this excerpt).
1551 /// Callback for benchmark functions to run in their body.
1552 pub fn iter<T, F>(&mut self, mut inner: F)
// Single mode (used by `run_once`) executes the closure exactly once with
// no statistics; otherwise the full sampling loop in the free `iter`
// function produces a Summary.
1556 if self.mode == BenchMode::Single {
1557 ns_iter_inner(&mut inner, 1);
1561 self.summary = Some(iter(&mut inner));
// Run the user's bench fn once against this Bencher, then report whatever
// summary `Bencher::iter` recorded (None if the bench fn never called
// `iter` — see the handling in `bench::benchmark`).
1564 pub fn bench<F>(&mut self, mut f: F) -> Option<stats::Summary>
1566 F: FnMut(&mut Bencher),
1569 return self.summary;
// Timing helpers and the adaptive benchmark loop.
// NOTE(review): fragment — loop headers, closing braces and several interior
// lines (e.g. 1575-1576, 1578-1580, 1582-1584) are elided in this excerpt.
// Convert a Duration to whole nanoseconds (may overflow for very long
// durations; fine for benchmark-scale timings).
1573 fn ns_from_dur(dur: Duration) -> u64 {
1574 dur.as_secs() * 1_000_000_000 + (dur.subsec_nanos() as u64)
// Time `k` invocations of `inner` and return the elapsed nanoseconds.
1577 fn ns_iter_inner<T, F>(inner: &mut F, k: u64) -> u64
1581 let start = Instant::now();
1585 return ns_from_dur(start.elapsed());
// Adaptive sampling: grow the per-sample iteration count until timings
// converge or a time budget is exhausted, returning summary statistics.
1588 pub fn iter<T, F>(inner: &mut F) -> stats::Summary
1592 // Initial bench run to get ballpark figure.
1593 let ns_single = ns_iter_inner(inner, 1);
1595 // Try to estimate iter count for 1ms falling back to 1m
1596 // iterations if first run took < 1ns.
1597 let ns_target_total = 1_000_000; // 1ms
// max(1, ..) guards against division by zero when the single run
// measured as 0ns.
1598 let mut n = ns_target_total / cmp::max(1, ns_single);
1600 // if the first run took more than 1ms we don't want to just
1601 // be left doing 0 iterations on every loop. The unfortunate
1602 // side effect of not being able to do as many runs is
1603 // automatically handled by the statistical analysis below
1604 // (i.e. larger error bars).
1607 let mut total_run = Duration::new(0, 0);
// 50 samples per pass; each sample is mean ns-per-iteration.
1608 let samples: &mut [f64] = &mut [0.0_f64; 50];
1610 let loop_start = Instant::now();
1612 for p in &mut *samples {
1613 *p = ns_iter_inner(inner, n) as f64 / n as f64;
// Winsorize at the 5th/95th percentiles to tame outliers before
// summarizing.
1616 stats::winsorize(samples, 5.0);
1617 let summ = stats::Summary::new(samples);
// Second pass at 5x the iteration count, to compare medians for
// convergence.
1619 for p in &mut *samples {
1620 let ns = ns_iter_inner(inner, 5 * n);
1621 *p = ns as f64 / (5 * n) as f64;
1624 stats::winsorize(samples, 5.0);
1625 let summ5 = stats::Summary::new(samples);
1627 let loop_run = loop_start.elapsed();
1629 // If we've run for 100ms and seem to have converged to a
// Converged: low relative MAD and the n / 5n medians agree within noise.
1631 if loop_run > Duration::from_millis(100) && summ.median_abs_dev_pct < 1.0
1632 && summ.median - summ5.median < summ5.median_abs_dev
1637 total_run = total_run + loop_run;
1638 // Longest we ever run for is 3s.
1639 if total_run > Duration::from_secs(3) {
1643 // If we overflow here just return the results so far. We check a
1644 // multiplier of 10 because we're about to multiply by 2 and the
1645 // next iteration of the loop will also multiply by 5 (to calculate
1646 // the summ5 result)
1647 n = match n.checked_mul(10) {
1657 use std::panic::{catch_unwind, AssertUnwindSafe};
1660 use std::sync::{Arc, Mutex};
1662 use super::{BenchMode, BenchSamples, Bencher, MonitorMsg, Sender, Sink, TestDesc, TestResult};
// Run one benchmark: capture stdio (unless nocapture), run the sampling
// loop under catch_unwind, and send (desc, result, captured-output) to the
// monitor channel.
// NOTE(review): fragment — struct-literal tails, match arms and closing
// braces (e.g. 1670-1673, 1681-1685, 1700-1702) are elided in this excerpt.
1664 pub fn benchmark<F>(desc: TestDesc, monitor_ch: Sender<MonitorMsg>, nocapture: bool, f: F)
1666 F: FnMut(&mut Bencher),
1668 let mut bs = Bencher {
1669 mode: BenchMode::Auto,
// Shared buffer for captured print/panic output; `data2` clones feed the
// two Sinks, `data` is read back after the run.
1674 let data = Arc::new(Mutex::new(Vec::new()));
1675 let data2 = data.clone();
// Redirect print and panic output into the sink, remembering the old
// handlers so they can be restored below.
1677 let oldio = if !nocapture {
1679 io::set_print(Some(Box::new(Sink(data2.clone())))),
1680 io::set_panic(Some(Box::new(Sink(data2)))),
// AssertUnwindSafe: the closure borrows `bs` mutably across the unwind
// boundary; a panic abandons the bench anyway.
1686 let result = catch_unwind(AssertUnwindSafe(|| bs.bench(f)));
1688 if let Some((printio, panicio)) = oldio {
1689 io::set_print(printio);
1690 io::set_panic(panicio);
1693 let test_result = match result {
1695 Ok(Some(ns_iter_summ)) => {
// Clamp to >= 1ns so the MB/s division below can't divide by zero.
1696 let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
1697 let mb_s = bs.bytes * 1000 / ns_iter;
1699 let bs = BenchSamples {
1701 mb_s: mb_s as usize,
1703 TestResult::TrBench(bs)
1706 // iter not called, so no data.
1707 // FIXME: error in this case?
// Report a degenerate one-sample (all-zero) summary rather than failing.
1708 let samples: &mut [f64] = &mut [0.0_f64; 1];
1709 let bs = BenchSamples {
1710 ns_iter_summ: stats::Summary::new(samples),
1713 TestResult::TrBench(bs)
// A panic inside the bench fn is a plain failure.
1715 Err(_) => TestResult::TrFailed,
1718 let stdout = data.lock().unwrap().to_vec();
1719 monitor_ch.send((desc, test_result, stdout)).unwrap();
// Run a bench fn exactly once with no timing statistics (Single mode);
// used to smoke-test benches when `--test` is passed.
1722 pub fn run_once<F>(f: F)
1724 F: FnMut(&mut Bencher),
1726 let mut bs = Bencher {
1727 mode: BenchMode::Single,
1737 use test::{filter_tests, parse_opts, run_test, DynTestFn, DynTestName, MetricMap, ShouldPanic,
1738 StaticTestName, TestDesc, TestDescAndFn, TestOpts, TrFailed, TrFailedMsg,
1740 use std::sync::mpsc::channel;
// Unit tests for the harness itself (fragment — test bodies, struct-literal
// tails and closing braces are elided throughout this excerpt).
// Ignored tests must not run (result must not be TrOk) when run_ignored is
// off (TestOpts::new() defaults).
1745 pub fn do_not_run_ignored_tests() {
1749 let desc = TestDescAndFn {
1751 name: StaticTestName("whatever"),
1753 should_panic: ShouldPanic::No,
1756 testfn: DynTestFn(Box::new(f)),
1758 let (tx, rx) = channel();
1759 run_test(&TestOpts::new(), false, desc, tx);
1760 let (_, res, _) = rx.recv().unwrap();
1761 assert!(res != TrOk);
// An ignored test reports TrIgnored.
1765 pub fn ignored_tests_result_in_ignored() {
1767 let desc = TestDescAndFn {
1769 name: StaticTestName("whatever"),
1771 should_panic: ShouldPanic::No,
1774 testfn: DynTestFn(Box::new(f)),
1776 let (tx, rx) = channel();
1777 run_test(&TestOpts::new(), false, desc, tx);
1778 let (_, res, _) = rx.recv().unwrap();
1779 assert!(res == TrIgnored);
// should_panic + a panicking body => TrOk.
1783 fn test_should_panic() {
1787 let desc = TestDescAndFn {
1789 name: StaticTestName("whatever"),
1791 should_panic: ShouldPanic::Yes,
1794 testfn: DynTestFn(Box::new(f)),
1796 let (tx, rx) = channel();
1797 run_test(&TestOpts::new(), false, desc, tx);
1798 let (_, res, _) = rx.recv().unwrap();
1799 assert!(res == TrOk);
// should_panic(expected = ...) tests (fragment — interior lines elided).
// Panic message containing the expected substring => TrOk.
1803 fn test_should_panic_good_message() {
1805 panic!("an error message");
1807 let desc = TestDescAndFn {
1809 name: StaticTestName("whatever"),
1811 should_panic: ShouldPanic::YesWithMessage("error message"),
1814 testfn: DynTestFn(Box::new(f)),
1816 let (tx, rx) = channel();
1817 run_test(&TestOpts::new(), false, desc, tx);
1818 let (_, res, _) = rx.recv().unwrap();
1819 assert!(res == TrOk);
// Panic message missing the expected substring => TrFailedMsg with the
// exact wording produced by calc_result.
1823 fn test_should_panic_bad_message() {
1825 panic!("an error message");
1827 let expected = "foobar";
1828 let failed_msg = "Panic did not include expected string";
1829 let desc = TestDescAndFn {
1831 name: StaticTestName("whatever"),
1833 should_panic: ShouldPanic::YesWithMessage(expected),
1836 testfn: DynTestFn(Box::new(f)),
1838 let (tx, rx) = channel();
1839 run_test(&TestOpts::new(), false, desc, tx);
1840 let (_, res, _) = rx.recv().unwrap();
1841 assert!(res == TrFailedMsg(format!("{} '{}'", failed_msg, expected)));
// should_panic but the body returns normally => TrFailed.
1845 fn test_should_panic_but_succeeds() {
1847 let desc = TestDescAndFn {
1849 name: StaticTestName("whatever"),
1851 should_panic: ShouldPanic::Yes,
1854 testfn: DynTestFn(Box::new(f)),
1856 let (tx, rx) = channel();
1857 run_test(&TestOpts::new(), false, desc, tx);
1858 let (_, res, _) = rx.recv().unwrap();
1859 assert!(res == TrFailed);
// CLI-flag and filtering tests (fragment — interior lines elided).
// `--ignored` on the command line sets opts.run_ignored.
1863 fn parse_ignored_flag() {
1865 "progname".to_string(),
1866 "filter".to_string(),
1867 "--ignored".to_string(),
1869 let opts = match parse_opts(&args) {
1871 _ => panic!("Malformed arg in parse_ignored_flag"),
1873 assert!((opts.run_ignored));
1877 pub fn filter_for_ignored_option() {
1878 // When we run ignored tests the test filter should filter out all the
1879 // unignored tests and flip the ignore flag on the rest to false
1881 let mut opts = TestOpts::new();
1882 opts.run_tests = true;
1883 opts.run_ignored = true;
// Two tests, "1" and "2"; per the comment above, only the ignored one
// ("1", judging by the assertions below) should survive filtering, with
// its ignore flag cleared.
1888 name: StaticTestName("1"),
1890 should_panic: ShouldPanic::No,
1893 testfn: DynTestFn(Box::new(move || {})),
1897 name: StaticTestName("2"),
1899 should_panic: ShouldPanic::No,
1902 testfn: DynTestFn(Box::new(move || {})),
1905 let filtered = filter_tests(&opts, tests);
1907 assert_eq!(filtered.len(), 1);
1908 assert_eq!(filtered[0].desc.name.to_string(), "1");
1909 assert!(!filtered[0].desc.ignore);
// Filtering semantics: substring match vs --exact (fragment — the TestOpts
// struct-literal tails selecting filter_exact are elided in this excerpt).
1913 pub fn exact_filter_match() {
// Fixture: four tests named "base", "base::test", "base::test1",
// "base::test2".
1914 fn tests() -> Vec<TestDescAndFn> {
1915 vec!["base", "base::test", "base::test1", "base::test2"]
1917 .map(|name| TestDescAndFn {
1919 name: StaticTestName(name),
1921 should_panic: ShouldPanic::No,
1924 testfn: DynTestFn(Box::new(move || {})),
// Substring mode: "base" and "bas" match all 4; "::test" and
// "base::test" match the 3 names containing that substring.
1929 let substr = filter_tests(
1931 filter: Some("base".into()),
1936 assert_eq!(substr.len(), 4);
1938 let substr = filter_tests(
1940 filter: Some("bas".into()),
1945 assert_eq!(substr.len(), 4);
1947 let substr = filter_tests(
1949 filter: Some("::test".into()),
1954 assert_eq!(substr.len(), 3);
1956 let substr = filter_tests(
1958 filter: Some("base::test".into()),
1963 assert_eq!(substr.len(), 3);
// Exact mode (presumably filter_exact=true in the elided opts — the
// assertions only make sense that way): only a name equal to the filter
// matches, so "base" and "base::test" each match 1, the rest 0.
1965 let exact = filter_tests(
1967 filter: Some("base".into()),
1973 assert_eq!(exact.len(), 1);
1975 let exact = filter_tests(
1977 filter: Some("bas".into()),
1983 assert_eq!(exact.len(), 0);
1985 let exact = filter_tests(
1987 filter: Some("::test".into()),
1993 assert_eq!(exact.len(), 0);
1995 let exact = filter_tests(
1997 filter: Some("base::test".into()),
2003 assert_eq!(exact.len(), 1);
// filter_tests must return tests sorted by name (fragment).
2007 pub fn sort_tests() {
2008 let mut opts = TestOpts::new();
2009 opts.run_tests = true;
// Deliberately unsorted input names...
2012 "sha1::test".to_string(),
2013 "isize::test_to_str".to_string(),
2014 "isize::test_pow".to_string(),
2015 "test::do_not_run_ignored_tests".to_string(),
2016 "test::ignored_tests_result_in_ignored".to_string(),
2017 "test::first_free_arg_should_be_a_filter".to_string(),
2018 "test::parse_ignored_flag".to_string(),
2019 "test::filter_for_ignored_option".to_string(),
2020 "test::sort_tests".to_string(),
2024 let mut tests = Vec::new();
2025 for name in &names {
2026 let test = TestDescAndFn {
2028 name: DynTestName((*name).clone()),
2030 should_panic: ShouldPanic::No,
2033 testfn: DynTestFn(Box::new(testfn)),
2039 let filtered = filter_tests(&opts, tests);
// ...must come back in lexicographic order.
2041 let expected = vec![
2042 "isize::test_pow".to_string(),
2043 "isize::test_to_str".to_string(),
2044 "sha1::test".to_string(),
2045 "test::do_not_run_ignored_tests".to_string(),
2046 "test::filter_for_ignored_option".to_string(),
2047 "test::first_free_arg_should_be_a_filter".to_string(),
2048 "test::ignored_tests_result_in_ignored".to_string(),
2049 "test::parse_ignored_flag".to_string(),
2050 "test::sort_tests".to_string(),
2053 for (a, b) in expected.iter().zip(filtered) {
2054 assert!(*a == b.desc.name.to_string());
// MetricMap comparison fixture and bench-harness tests (fragment — the
// actual compare assertions and several test bodies are elided in this
// excerpt; only the metric setup and bench plumbing are visible).
2059 pub fn test_metricmap_compare() {
2060 let mut m1 = MetricMap::new();
2061 let mut m2 = MetricMap::new();
// Change (1000 -> 1100) within the +/-200 noise band: not a regression.
2062 m1.insert_metric("in-both-noise", 1000.0, 200.0);
2063 m2.insert_metric("in-both-noise", 1100.0, 200.0);
// Metrics present in only one of the two maps.
2065 m1.insert_metric("in-first-noise", 1000.0, 2.0);
2066 m2.insert_metric("in-second-noise", 1000.0, 2.0);
// Positive noise => smaller is better (see insert_metric docs).
2068 m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
2069 m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);
2071 m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
2072 m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);
// Negative noise => larger is better.
2074 m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
2075 m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);
2077 m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
2078 m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);
// run_once with a bench fn that never calls iter (body elided).
2082 pub fn test_bench_once_no_iter() {
2083 fn f(_: &mut Bencher) {}
// run_once with a bench fn that does call iter (body elided).
2088 pub fn test_bench_once_iter() {
2089 fn f(b: &mut Bencher) {
// Full benchmark() run where iter is never called — exercises the
// "iter not called" fallback path in bench::benchmark.
2096 pub fn test_bench_no_iter() {
2097 fn f(_: &mut Bencher) {}
2099 let (tx, rx) = channel();
2101 let desc = TestDesc {
2102 name: StaticTestName("f"),
2104 should_panic: ShouldPanic::No,
// nocapture=true: no stdio redirection during the bench.
2108 ::bench::benchmark(desc, tx, true, f);
// Full benchmark() run with a real iter call.
2113 pub fn test_bench_iter() {
2114 fn f(b: &mut Bencher) {
2118 let (tx, rx) = channel();
2120 let desc = TestDesc {
2121 name: StaticTestName("f"),
2123 should_panic: ShouldPanic::No,
2127 ::bench::benchmark(desc, tx, true, f);