1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 //! Support code for rustc's built in unit-test and micro-benchmarking
14 //! Almost all user code will only be interested in `Bencher` and
15 //! `black_box`. All other interactions (such as writing tests and
16 //! benchmarks themselves) should be done via the `#[test]` and
17 //! `#[bench]` attributes.
19 //! See the [Testing Chapter](../book/testing.html) of the book for more details.
21 // Currently, not much of this is meant for users. It is intended to
22 // support the simplest interface possible for representing and
23 // running tests while providing a base that other test frameworks may
26 #![crate_name = "test"]
27 #![unstable(feature = "test", issue = "27812")]
28 #![crate_type = "rlib"]
29 #![crate_type = "dylib"]
30 #![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
31 html_favicon_url = "https://doc.rust-lang.org/favicon.ico",
32 html_root_url = "https://doc.rust-lang.org/nightly/",
33 test(attr(deny(warnings))))]
38 #![feature(rustc_private)]
39 #![feature(set_stdio)]
40 #![feature(staged_api)]
41 #![feature(panic_unwind)]
46 extern crate panic_unwind;
48 pub use self::TestFn::*;
49 pub use self::ColorConfig::*;
50 pub use self::TestResult::*;
51 pub use self::TestName::*;
52 use self::TestEvent::*;
53 use self::NamePadding::*;
54 use self::OutputLocation::*;
56 use std::panic::{catch_unwind, AssertUnwindSafe};
59 use std::collections::BTreeMap;
63 use std::io::prelude::*;
65 use std::iter::repeat;
66 use std::path::PathBuf;
67 use std::sync::mpsc::{channel, Sender};
68 use std::sync::{Arc, Mutex};
70 use std::time::{Instant, Duration};
72 const TEST_WARN_TIMEOUT_S: u64 = 60;
74 // to be used by rustc to compile tests in libtest
76 pub use {Bencher, TestName, TestResult, TestDesc, TestDescAndFn, TestOpts, TrFailed,
77 TrFailedMsg, TrIgnored, TrOk, Metric, MetricMap, StaticTestFn, StaticTestName,
78 DynTestName, DynTestFn, run_test, test_main, test_main_static, filter_tests,
79 parse_opts, StaticBenchFn, ShouldPanic, Options};
84 // The name of a test. By convention this follows the rules for rust
85 // paths; i.e. it should be a series of identifiers separated by double
86 // colons. This way if some test runner wants to arrange the tests
87 // hierarchically it may.
89 #[derive(Clone, PartialEq, Eq, Hash, Debug)]
// Borrowed variant for names known at compile time; a Dyn (owned String)
// variant also exists — its declaration is elided in this excerpt.
91 StaticTestName(&'static str),
// View the name as a plain &str regardless of which variant it is.
95 fn as_slice(&self) -> &str {
97 StaticTestName(s) => s,
98 DynTestName(ref s) => s,
// Display a TestName by delegating straight to the underlying &str.
102 impl fmt::Display for TestName {
103 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
104 fmt::Display::fmt(self.as_slice(), f)
// How a test's name should be padded when printed in an aligned column.
108 #[derive(Clone, Copy, PartialEq, Eq)]
109 pub enum NamePadding {
// Build the display name padded with trailing spaces out to
// `column_count` columns (padding side chosen by `align`).
115 fn padded_name(&self, column_count: usize, align: NamePadding) -> String {
116 let mut name = String::from(self.name.as_slice());
// saturating_sub: no underflow when the name is already wider than the column.
117 let fill = column_count.saturating_sub(name.len());
118 let pad = repeat(" ").take(fill).collect::<String>();
129 /// Represents a benchmark function.
130 pub trait TDynBenchFn: Send {
// Run the benchmark body against the supplied Bencher harness.
131 fn run(&self, harness: &mut Bencher);
// Object-safe wrapper for one-shot closures: lets a boxed FnOnce be
// invoked through a trait object (pre-`Box<FnOnce>`-callable era).
134 pub trait FnBox<T>: Send + 'static {
135 fn call_box(self: Box<Self>, t: T);
// Blanket impl: any Send + 'static FnOnce(T) is usable as a FnBox<T>.
138 impl<T, F: FnOnce(T) + Send + 'static> FnBox<T> for F {
139 fn call_box(self: Box<F>, t: T) {
144 // A function that runs a test. If the function returns successfully,
145 // the test succeeds; if the function panics then the test fails. We
146 // may need to come up with a more clever definition of test in order
147 // to support isolation of tests into threads.
// Variants of the TestFn enum (the enum header and StaticTestFn variant
// are elided in this excerpt): static fn pointers vs boxed dynamic closures.
150 StaticBenchFn(fn(&mut Bencher)),
151 StaticMetricFn(fn(&mut MetricMap)),
152 DynTestFn(Box<FnBox<()>>),
153 DynMetricFn(Box<for<'a> FnBox<&'a mut MetricMap>>),
154 DynBenchFn(Box<TDynBenchFn + 'static>),
// Choose name padding per kind: benches/metrics print a trailing value,
// so they are right-padded; plain tests need no padding.
158 fn padding(&self) -> NamePadding {
160 StaticTestFn(..) => PadNone,
161 StaticBenchFn(..) => PadOnRight,
162 StaticMetricFn(..) => PadOnRight,
163 DynTestFn(..) => PadNone,
164 DynMetricFn(..) => PadOnRight,
165 DynBenchFn(..) => PadOnRight,
// Manual Debug impl: the contained fns/closures are not Debug, so print
// only the variant name.
170 impl fmt::Debug for TestFn {
171 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
172 f.write_str(match *self {
173 StaticTestFn(..) => "StaticTestFn(..)",
174 StaticBenchFn(..) => "StaticBenchFn(..)",
175 StaticMetricFn(..) => "StaticMetricFn(..)",
176 DynTestFn(..) => "DynTestFn(..)",
177 DynMetricFn(..) => "DynMetricFn(..)",
178 DynBenchFn(..) => "DynBenchFn(..)",
183 /// Manager of the benchmarking runs.
185 /// This is fed into functions marked with `#[bench]` to allow for
186 /// set-up & tear-down before running a piece of code repeatedly via a
// Field of the Bencher struct (struct header elided in this excerpt):
// the statistical summary of the most recent run, if any.
191 summary: Option<stats::Summary>,
195 #[derive(Clone, PartialEq, Eq)]
// Whether a test is expected to panic, and optionally what substring the
// panic message must contain.
201 #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
202 pub enum ShouldPanic {
205 YesWithMessage(&'static str),
208 // The definition of a single test. A test runner will run a list of
210 #[derive(Clone, Debug, PartialEq, Eq, Hash)]
211 pub struct TestDesc {
214 pub should_panic: ShouldPanic,
// Filesystem locations associated with a compiletest-style test.
218 pub struct TestPaths {
219 pub file: PathBuf, // e.g., compile-test/foo/bar/baz.rs
220 pub base: PathBuf, // e.g., compile-test, auxiliary
221 pub relative_dir: PathBuf, // e.g., foo/bar
// A test description paired with the function that runs it.
225 pub struct TestDescAndFn {
230 #[derive(Clone, PartialEq, Debug, Copy)]
// Construct a Metric from a measured value and its noise band.
237 pub fn new(value: f64, noise: f64) -> Metric {
// Named metrics, kept sorted by name via BTreeMap.
246 pub struct MetricMap(BTreeMap<String, Metric>);
// Manual Clone: destructure the newtype and clone the inner map.
248 impl Clone for MetricMap {
249 fn clone(&self) -> MetricMap {
250 let MetricMap(ref map) = *self;
251 MetricMap(map.clone())
255 /// In case we want to add other options as well, just add them in this struct.
256 #[derive(Copy, Clone, Debug)]
258 display_output: bool,
// Default Options: successful-test output is not displayed.
262 pub fn new() -> Options {
264 display_output: false,
// Builder-style setter; consumes and returns self (return elided here).
268 pub fn display_output(mut self, display_output: bool) -> Options {
269 self.display_output = display_output;
274 // The default console test runner. It accepts the command line
275 // arguments and a vector of test_descs.
276 pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>, options: Options) {
// Parse CLI args; a parse error aborts the process via panic.
277 let mut opts = match parse_opts(args) {
279 Some(Err(msg)) => panic!("{:?}", msg),
// Caller-supplied options override whatever parse_opts defaulted.
282 opts.options = options;
// --list mode: print test names and exit instead of running them
// (the surrounding branch condition is elided in this excerpt).
284 if let Err(e) = list_tests_console(&opts, tests) {
285 panic!("io error when listing tests: {:?}", e);
// Exit code 101 signals test failure; I/O errors panic.
288 match run_tests_console(&opts, tests) {
290 Ok(false) => std::process::exit(101),
291 Err(e) => panic!("io error when running tests: {:?}", e),
296 // A variant optimized for invocation with a static test vector.
297 // This will panic (intentionally) when fed any dynamic tests, because
298 // it is copying the static values out into a dynamic vector and cannot
299 // copy dynamic values. It is doing this because from this point on
300 // a Vec<TestDescAndFn> is used in order to effect ownership-transfer
301 // semantics into parallel test runners, which in turn requires a Vec<>
302 // rather than a &[].
303 pub fn test_main_static(tests: &[TestDescAndFn]) {
304 let args = env::args().collect::<Vec<_>>();
// Re-create owned TestDescAndFn values from the static slice; the match
// over testfn variants is partially elided here.
305 let owned_tests = tests.iter()
310 testfn: StaticTestFn(f),
311 desc: t.desc.clone(),
314 StaticBenchFn(f) => {
316 testfn: StaticBenchFn(f),
317 desc: t.desc.clone(),
// Any Dyn* variant cannot be copied out of a shared slice — hard error.
320 _ => panic!("non-static tests passed to test::test_main_static"),
324 test_main(&args, owned_tests, Options::new())
// When/whether to colorize console output.
327 #[derive(Copy, Clone, Debug)]
328 pub enum ColorConfig {
// Fully parsed command-line options controlling a test run.
335 pub struct TestOpts {
337 pub filter: Option<String>,
338 pub filter_exact: bool,
339 pub run_ignored: bool,
341 pub bench_benchmarks: bool,
342 pub logfile: Option<PathBuf>,
344 pub color: ColorConfig,
346 pub test_threads: Option<usize>,
347 pub skip: Vec<String>,
348 pub options: Options,
// Defaults used by tests of this module (several field initializers are
// elided in this excerpt).
353 fn new() -> TestOpts {
360 bench_benchmarks: false,
367 options: Options::new(),
372 /// Result of parsing the options.
373 pub type OptRes = Result<TestOpts, String>;
// Declares every command-line flag/option the test harness understands;
// getopts later both parses against this list and renders --help from it.
375 #[cfg_attr(rustfmt, rustfmt_skip)]
376 fn optgroups() -> Vec<getopts::OptGroup> {
377 vec![getopts::optflag("", "ignored", "Run ignored tests"),
378 getopts::optflag("", "test", "Run tests and not benchmarks"),
379 getopts::optflag("", "bench", "Run benchmarks instead of tests"),
380 getopts::optflag("", "list", "List all tests and benchmarks"),
381 getopts::optflag("h", "help", "Display this message (longer with --help)"),
382 getopts::optopt("", "logfile", "Write logs to the specified file instead \
384 getopts::optflag("", "nocapture", "don't capture stdout/stderr of each \
385 task, allow printing directly"),
386 getopts::optopt("", "test-threads", "Number of threads used for running tests \
387 in parallel", "n_threads"),
388 getopts::optmulti("", "skip", "Skip tests whose names contain FILTER (this flag can \
389 be used multiple times)","FILTER"),
390 getopts::optflag("q", "quiet", "Display one character per test instead of one line"),
391 getopts::optflag("", "exact", "Exactly match filters rather than by substring"),
392 getopts::optopt("", "color", "Configure coloring of output:
393 auto = colorize if stdout is a tty and tests are run on serially (default);
394 always = always colorize output;
395 never = never colorize output;", "auto|always|never")]
// Print the long usage/help text. Everything from line 402 through 425
// below is part of one multi-line string literal, so no comments may be
// interleaved there.
398 fn usage(binary: &str) {
399 let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
402 The FILTER string is tested against the name of all tests, and only those
403 tests whose names contain the filter are run.
405 By default, all tests are run in parallel. This can be altered with the
406 --test-threads flag or the RUST_TEST_THREADS environment variable when running
409 All tests have their standard output and standard error captured by default.
410 This can be overridden with the --nocapture flag or setting RUST_TEST_NOCAPTURE
411 environment variable to a value other than "0". Logging is not captured by default.
415 #[test] - Indicates a function is a test to be run. This function
417 #[bench] - Indicates a function is a benchmark to be run. This
418 function takes one argument (test::Bencher).
419 #[should_panic] - This function (also labeled with #[test]) will only pass if
420 the code causes a panic (an assertion failure or panic!)
421 A message may be provided, which the failure string must
422 contain: #[should_panic(expected = "foo")].
423 #[ignore] - When applied to a function which is already attributed as a
424 test, then the test runner will ignore these tests during
425 normal test runs. Running with --ignored will run these
427 usage = getopts::usage(&message, &optgroups()));
430 // Parses command line arguments into test options
// Returns None when nothing should run (e.g. --help was printed),
// Some(Err) on bad input, Some(Ok(TestOpts)) otherwise.
431 pub fn parse_opts(args: &[String]) -> Option<OptRes> {
// args[0] is the binary name; getopts only sees the rest.
432 let args_ = &args[1..];
433 let matches = match getopts::getopts(args_, &optgroups()) {
435 Err(f) => return Some(Err(f.to_string())),
438 if matches.opt_present("h") {
// First free (non-flag) argument is the test-name filter.
443 let filter = if !matches.free.is_empty() {
444 Some(matches.free[0].clone())
449 let run_ignored = matches.opt_present("ignored");
450 let quiet = matches.opt_present("quiet");
451 let exact = matches.opt_present("exact");
452 let list = matches.opt_present("list");
454 let logfile = matches.opt_str("logfile");
455 let logfile = logfile.map(|s| PathBuf::from(&s));
457 let bench_benchmarks = matches.opt_present("bench");
// --bench alone suppresses plain tests unless --test is also given.
458 let run_tests = !bench_benchmarks || matches.opt_present("test");
460 let mut nocapture = matches.opt_present("nocapture");
// Environment variable can force nocapture; any value but "0" enables it.
462 nocapture = match env::var("RUST_TEST_NOCAPTURE") {
463 Ok(val) => &val != "0",
// --test-threads must parse as a positive integer; 0 and non-numbers
// are reported as errors (error-message continuation lines elided).
468 let test_threads = match matches.opt_str("test-threads") {
470 match n_str.parse::<usize>() {
472 return Some(Err(format!("argument for --test-threads must not be 0"))),
475 return Some(Err(format!("argument for --test-threads must be a number > 0 \
482 let color = match matches.opt_str("color").as_ref().map(|s| &**s) {
483 Some("auto") | None => AutoColor,
484 Some("always") => AlwaysColor,
485 Some("never") => NeverColor,
488 return Some(Err(format!("argument for --color must be auto, always, or never (was \
// Assemble the final TestOpts (some field initializers elided here).
494 let test_opts = TestOpts {
498 run_ignored: run_ignored,
499 run_tests: run_tests,
500 bench_benchmarks: bench_benchmarks,
502 nocapture: nocapture,
505 test_threads: test_threads,
506 skip: matches.opt_strs("skip"),
507 options: Options::new(),
// Timing statistics produced by one benchmark run.
513 #[derive(Clone, PartialEq)]
514 pub struct BenchSamples {
515 ns_iter_summ: stats::Summary,
// Outcome of a single test (pass/fail variants are elided in this excerpt).
519 #[derive(Clone, PartialEq)]
520 pub enum TestResult {
525 TrMetrics(MetricMap),
526 TrBench(BenchSamples),
// NOTE(review): asserts TestResult is Send despite auto-impl not applying;
// soundness depends on the elided variants' contents — confirm upstream.
529 unsafe impl Send for TestResult {}
// Where console output goes: a color-capable terminal or a raw writer.
531 enum OutputLocation<T> {
532 Pretty(Box<term::StdoutTerminal>),
// Mutable state threaded through the console runner: output sinks,
// counters, and collected per-test stdout (several fields elided).
536 struct ConsoleTestState<T> {
537 log_out: Option<File>,
538 out: OutputLocation<T>,
547 failures: Vec<(TestDesc, Vec<u8>)>,
548 not_failures: Vec<(TestDesc, Vec<u8>)>,
549 max_name_len: usize, // number of columns to fill when aligning names
553 impl<T: Write> ConsoleTestState<T> {
// Build the console state: open the logfile if requested, and pick
// Pretty (term-capable) vs Raw stdout output.
554 pub fn new(opts: &TestOpts, _: Option<T>) -> io::Result<ConsoleTestState<io::Stdout>> {
555 let log_out = match opts.logfile {
556 Some(ref path) => Some(File::create(path)?),
559 let out = match term::stdout() {
560 None => Raw(io::stdout()),
561 Some(t) => Pretty(t),
564 Ok(ConsoleTestState {
567 use_color: use_color(opts),
574 metrics: MetricMap::new(),
575 failures: Vec::new(),
576 not_failures: Vec::new(),
578 options: opts.options,
// Per-result one-liners: verbose word in normal mode, single char in quiet.
582 pub fn write_ok(&mut self) -> io::Result<()> {
583 self.write_short_result("ok", ".", term::color::GREEN)
586 pub fn write_failed(&mut self) -> io::Result<()> {
587 self.write_short_result("FAILED", "F", term::color::RED)
590 pub fn write_ignored(&mut self) -> io::Result<()> {
591 self.write_short_result("ignored", "i", term::color::YELLOW)
594 pub fn write_metric(&mut self) -> io::Result<()> {
595 self.write_pretty("metric", term::color::CYAN)
598 pub fn write_bench(&mut self) -> io::Result<()> {
599 self.write_pretty("bench", term::color::CYAN)
// Emit either the quiet or the verbose form (the quiet-mode branch
// condition is elided here); verbose form gets a trailing newline.
602 pub fn write_short_result(&mut self, verbose: &str, quiet: &str, color: term::color::Color)
605 self.write_pretty(quiet, color)
607 self.write_pretty(verbose, color)?;
608 self.write_plain("\n")
// Write a word with color when the sink supports it (color set-up/reset
// calls around write_all are elided in this excerpt).
612 pub fn write_pretty(&mut self, word: &str, color: term::color::Color) -> io::Result<()> {
614 Pretty(ref mut term) => {
618 term.write_all(word.as_bytes())?;
624 Raw(ref mut stdout) => {
625 stdout.write_all(word.as_bytes())?;
// Write uncolored text to whichever sink is active.
631 pub fn write_plain<S: AsRef<str>>(&mut self, s: S) -> io::Result<()> {
634 Pretty(ref mut term) => {
635 term.write_all(s.as_bytes())?;
638 Raw(ref mut stdout) => {
639 stdout.write_all(s.as_bytes())?;
// Header line: "running N test(s)" with singular/plural noun.
645 pub fn write_run_start(&mut self, len: usize) -> io::Result<()> {
647 let noun = if len != 1 {
652 self.write_plain(&format!("\nrunning {} {}\n", len, noun))
// "test NAME ... " prefix; suppressed in quiet mode for non-right-padded
// (i.e. plain test) entries.
655 pub fn write_test_start(&mut self, test: &TestDesc, align: NamePadding) -> io::Result<()> {
656 if self.quiet && align != PadOnRight {
659 let name = test.padded_name(self.max_name_len, align);
660 self.write_plain(&format!("test {} ... ", name))
// Dispatch one result to the matching writer; metric/bench results also
// print their formatted values.
664 pub fn write_result(&mut self, result: &TestResult) -> io::Result<()> {
666 TrOk => self.write_ok(),
667 TrFailed | TrFailedMsg(_) => self.write_failed(),
668 TrIgnored => self.write_ignored(),
669 TrMetrics(ref mm) => {
670 self.write_metric()?;
671 self.write_plain(&format!(": {}\n", mm.fmt_metrics()))
675 self.write_plain(&format!(": {}\n", fmt_bench_samples(bs)))
// Warn that a test has exceeded TEST_WARN_TIMEOUT_S (60s) of wall time.
680 pub fn write_timeout(&mut self, desc: &TestDesc) -> io::Result<()> {
681 self.write_plain(&format!("test {} has been running for over {} seconds\n",
683 TEST_WARN_TIMEOUT_S))
// Append to the logfile, if one was opened; a no-op otherwise.
686 pub fn write_log<S: AsRef<str>>(&mut self, msg: S) -> io::Result<()> {
687 let msg = msg.as_ref();
690 Some(ref mut o) => o.write_all(msg.as_bytes()),
// Log one line per finished test: the textual result plus its name.
694 pub fn write_log_result(&mut self, test: &TestDesc, result: &TestResult) -> io::Result<()> {
698 TrOk => "ok".to_owned(),
699 TrFailed => "failed".to_owned(),
700 TrFailedMsg(ref msg) => format!("failed: {}", msg),
701 TrIgnored => "ignored".to_owned(),
702 TrMetrics(ref mm) => mm.fmt_metrics(),
703 TrBench(ref bs) => fmt_bench_samples(bs),
// Print captured stdout of each failed test, then the sorted list of
// failed test names (the sort call is elided in this excerpt).
708 pub fn write_failures(&mut self) -> io::Result<()> {
709 self.write_plain("\nfailures:\n")?;
710 let mut failures = Vec::new();
711 let mut fail_out = String::new();
712 for &(ref f, ref stdout) in &self.failures {
713 failures.push(f.name.to_string());
714 if !stdout.is_empty() {
715 fail_out.push_str(&format!("---- {} stdout ----\n\t", f.name));
716 let output = String::from_utf8_lossy(stdout);
717 fail_out.push_str(&output);
718 fail_out.push_str("\n");
721 if !fail_out.is_empty() {
722 self.write_plain("\n")?;
723 self.write_plain(&fail_out)?;
726 self.write_plain("\nfailures:\n")?;
728 for name in &failures {
729 self.write_plain(&format!(" {}\n", name))?;
// Mirror of write_failures for successful tests, used when
// options.display_output is set.
734 pub fn write_outputs(&mut self) -> io::Result<()> {
735 self.write_plain("\nsuccesses:\n")?;
736 let mut successes = Vec::new();
737 let mut stdouts = String::new();
738 for &(ref f, ref stdout) in &self.not_failures {
739 successes.push(f.name.to_string());
740 if !stdout.is_empty() {
741 stdouts.push_str(&format!("---- {} stdout ----\n\t", f.name));
742 let output = String::from_utf8_lossy(stdout);
743 stdouts.push_str(&output);
744 stdouts.push_str("\n");
747 if !stdouts.is_empty() {
748 self.write_plain("\n")?;
749 self.write_plain(&stdouts)?;
752 self.write_plain("\nsuccesses:\n")?;
754 for name in &successes {
755 self.write_plain(&format!(" {}\n", name))?;
// Final summary line; returns whether the whole run passed.
760 pub fn write_run_finish(&mut self) -> io::Result<bool> {
// Invariant: every filtered test is accounted for in exactly one counter.
761 assert!(self.passed + self.failed + self.ignored + self.measured == self.total);
763 if self.options.display_output {
764 self.write_outputs()?;
766 let success = self.failed == 0;
768 self.write_failures()?;
771 self.write_plain("\ntest result: ")?;
773 // There's no parallelism at this point so it's safe to use color
774 self.write_pretty("ok", term::color::GREEN)?;
776 self.write_pretty("FAILED", term::color::RED)?;
778 let s = format!(". {} passed; {} failed; {} ignored; {} measured\n\n",
783 self.write_plain(&s)?;
788 // Format a number with thousands separators
// Walks digit groups from most significant (10^9) down; `trailing` means
// a higher group was already printed, so lower groups are zero-padded
// to three digits ("{:03}").
789 fn fmt_thousands_sep(mut n: usize, sep: char) -> String {
791 let mut output = String::new();
792 let mut trailing = false;
793 for &pow in &[9, 6, 3, 0] {
794 let base = 10_usize.pow(pow);
795 if pow == 0 || trailing || n / base != 0 {
797 output.write_fmt(format_args!("{}", n / base)).unwrap();
799 output.write_fmt(format_args!("{:03}", n / base)).unwrap();
// Render one benchmark's samples: right-aligned median ns/iter plus the
// max-min spread, and throughput when available (mb_s branch elided).
812 pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
814 let mut output = String::new();
816 let median = bs.ns_iter_summ.median as usize;
817 let deviation = (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize;
819 output.write_fmt(format_args!("{:>11} ns/iter (+/- {})",
820 fmt_thousands_sep(median, ','),
821 fmt_thousands_sep(deviation, ',')))
824 output.write_fmt(format_args!(" = {} MB/s", bs.mb_s)).unwrap();
829 // List the tests to console, and optionally to logfile. Filters are honored.
830 pub fn list_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<()> {
831 let mut st = ConsoleTestState::new(opts, None::<io::Stdout>)?;
// Count each kind while printing "name: kind" per entry.
837 for test in filter_tests(&opts, tests) {
840 let TestDescAndFn { desc: TestDesc { name, .. }, testfn } = test;
842 let fntype = match testfn {
843 StaticTestFn(..) | DynTestFn(..) => { ntest += 1; "test" },
844 StaticBenchFn(..) | DynBenchFn(..) => { nbench += 1; "benchmark" },
845 StaticMetricFn(..) | DynMetricFn(..) => { nmetric += 1; "metric" },
848 st.write_plain(format!("{}: {}\n", name, fntype))?;
849 st.write_log(format!("{} {}\n", fntype, name))?;
// Local helper: naive English pluralization ("1 test" / "2 tests").
852 fn plural(count: u32, s: &str) -> String {
854 1 => format!("{} {}", 1, s),
855 n => format!("{} {}s", n, s),
// Trailing totals line, only when something was listed.
860 if ntest != 0 || nbench != 0 || nmetric != 0 {
861 st.write_plain("\n")?;
863 st.write_plain(format!("{}, {}, {}\n",
864 plural(ntest, "test"),
865 plural(nbench, "benchmark"),
866 plural(nmetric, "metric")))?;
872 // A simple console test runner
// Drives run_tests() and translates each TestEvent into console output
// and counter updates; returns Ok(true) iff no test failed.
873 pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<bool> {
875 fn callback<T: Write>(event: &TestEvent, st: &mut ConsoleTestState<T>) -> io::Result<()> {
876 match (*event).clone() {
877 TeFiltered(ref filtered_tests) => st.write_run_start(filtered_tests.len()),
878 TeWait(ref test, padding) => st.write_test_start(test, padding),
879 TeTimeout(ref test) => st.write_timeout(test),
880 TeResult(test, result, stdout) => {
881 st.write_log_result(&test, &result)?;
882 st.write_result(&result)?;
// Inner match on the result updates counters; several arms (TrOk,
// counter increments) are elided in this excerpt.
886 st.not_failures.push((test, stdout));
888 TrIgnored => st.ignored += 1,
// Metric results get folded into st.metrics, namespaced by test name.
890 let tname = test.name;
891 let MetricMap(mm) = mm;
894 .insert_metric(&format!("{}.{}", tname, k), v.value, v.noise);
// Bench results are recorded as a single metric: median +/- spread.
899 st.metrics.insert_metric(test.name.as_slice(),
900 bs.ns_iter_summ.median,
901 bs.ns_iter_summ.max - bs.ns_iter_summ.min);
906 st.failures.push((test, stdout));
// A failure message is appended to the captured stdout as a note.
908 TrFailedMsg(msg) => {
910 let mut stdout = stdout;
911 stdout.extend_from_slice(
912 format!("note: {}", msg).as_bytes()
914 st.failures.push((test, stdout));
922 let mut st = ConsoleTestState::new(opts, None::<io::Stdout>)?;
// Column width = longest name among entries whose padding is PadOnRight.
923 fn len_if_padded(t: &TestDescAndFn) -> usize {
924 match t.testfn.padding() {
926 PadOnRight => t.desc.name.as_slice().len(),
929 if let Some(t) = tests.iter().max_by_key(|t| len_if_padded(*t)) {
930 let n = t.desc.name.as_slice();
931 st.max_name_len = n.len();
933 run_tests(opts, tests, |x| callback(&x, &mut st))?;
934 return st.write_run_finish();
// Unit test: write_failures must list failed test names in sorted order
// even though they were pushed out of order (b before a).
938 fn should_sort_failures_before_printing_them() {
939 let test_a = TestDesc {
940 name: StaticTestName("a"),
942 should_panic: ShouldPanic::No,
945 let test_b = TestDesc {
946 name: StaticTestName("b"),
948 should_panic: ShouldPanic::No,
// Raw(Vec<u8>) output sink lets the test inspect what was written.
951 let mut st = ConsoleTestState {
953 out: Raw(Vec::new()),
962 metrics: MetricMap::new(),
963 failures: vec![(test_b, Vec::new()), (test_a, Vec::new())],
964 options: Options::new(),
965 not_failures: Vec::new(),
968 st.write_failures().unwrap();
969 let s = match st.out {
970 Raw(ref m) => String::from_utf8_lossy(&m[..]),
971 Pretty(_) => unreachable!(),
// "a" must appear before "b" in the rendered output.
974 let apos = s.find("a").unwrap();
975 let bpos = s.find("b").unwrap();
976 assert!(apos < bpos);
// Decide whether colored output should be used, honoring the --color
// setting (Always/Never arms elided in this excerpt).
979 fn use_color(opts: &TestOpts) -> bool {
981 AutoColor => !opts.nocapture && stdout_isatty(),
// Platform-specific "is stdout a terminal" probes, selected via #[cfg].
987 #[cfg(target_os = "redox")]
988 fn stdout_isatty() -> bool {
989 // FIXME: Implement isatty on Redox
// Unix-family variant (its #[cfg] attribute is elided in this excerpt).
993 fn stdout_isatty() -> bool {
994 unsafe { libc::isatty(libc::STDOUT_FILENO) != 0 }
// Windows variant: a console handle answers GetConsoleMode successfully.
997 fn stdout_isatty() -> bool {
1000 type HANDLE = *mut u8;
1001 type LPDWORD = *mut u32;
1002 const STD_OUTPUT_HANDLE: DWORD = -11i32 as DWORD;
1004 fn GetStdHandle(which: DWORD) -> HANDLE;
1005 fn GetConsoleMode(hConsoleHandle: HANDLE, lpMode: LPDWORD) -> BOOL;
1008 let handle = GetStdHandle(STD_OUTPUT_HANDLE);
1010 GetConsoleMode(handle, &mut out) != 0
// Events emitted by run_tests() to its callback, in run order:
// the filtered set, per-test start, per-test result, and timeout warnings.
1015 pub enum TestEvent {
1016 TeFiltered(Vec<TestDesc>),
1017 TeWait(TestDesc, NamePadding),
1018 TeResult(TestDesc, TestResult, Vec<u8>),
1019 TeTimeout(TestDesc),
// Message sent from a test's worker thread back to the scheduler:
// (description, outcome, captured stdout bytes).
1022 pub type MonitorMsg = (TestDesc, TestResult, Vec<u8>);
// Core scheduler: runs tests up to `concurrency` at a time, reporting
// progress through `callback`; benchmarks/metrics run serially afterward.
1025 pub fn run_tests<F>(opts: &TestOpts, tests: Vec<TestDescAndFn>, mut callback: F) -> io::Result<()>
1026 where F: FnMut(TestEvent) -> io::Result<()>
1028 use std::collections::HashMap;
1029 use std::sync::mpsc::RecvTimeoutError;
1031 let mut filtered_tests = filter_tests(opts, tests);
// Without --bench, benchmarks are demoted to single-iteration tests.
1032 if !opts.bench_benchmarks {
1033 filtered_tests = convert_benchmarks_to_tests(filtered_tests);
1036 let filtered_descs = filtered_tests.iter()
1037 .map(|t| t.desc.clone())
1040 callback(TeFiltered(filtered_descs))?;
// Split plain tests (run concurrently) from benches/metrics (run last).
1042 let (filtered_tests, filtered_benchs_and_metrics): (Vec<_>, _) =
1043 filtered_tests.into_iter().partition(|e| {
1045 StaticTestFn(_) | DynTestFn(_) => true,
1050 let concurrency = match opts.test_threads {
1052 None => get_concurrency(),
// Reverse so pop() takes tests in their original order.
1055 let mut remaining = filtered_tests;
1056 remaining.reverse();
1057 let mut pending = 0;
1059 let (tx, rx) = channel::<MonitorMsg>();
// Deadline per in-flight test, used for the slow-test warning.
1061 let mut running_tests: HashMap<TestDesc, Instant> = HashMap::new();
// Drain and return every test whose warning deadline has passed.
1063 fn get_timed_out_tests(running_tests: &mut HashMap<TestDesc, Instant>) -> Vec<TestDesc> {
1064 let now = Instant::now();
1065 let timed_out = running_tests.iter()
1066 .filter_map(|(desc, timeout)| if &now >= timeout { Some(desc.clone())} else { None })
1068 for test in &timed_out {
1069 running_tests.remove(test);
// Time until the earliest pending deadline, None when nothing is running.
1074 fn calc_timeout(running_tests: &HashMap<TestDesc, Instant>) -> Option<Duration> {
1075 running_tests.values().min().map(|next_timeout| {
1076 let now = Instant::now();
1077 if *next_timeout >= now {
// Main loop: keep the pipeline full, then wait for one completion.
1084 while pending > 0 || !remaining.is_empty() {
1085 while pending < concurrency && !remaining.is_empty() {
1086 let test = remaining.pop().unwrap();
1087 if concurrency == 1 {
1088 // We are doing one test at a time so we can print the name
1089 // of the test before we run it. Useful for debugging tests
1090 // that hang forever.
1091 callback(TeWait(test.desc.clone(), test.testfn.padding()))?;
1093 let timeout = Instant::now() + Duration::from_secs(TEST_WARN_TIMEOUT_S);
1094 running_tests.insert(test.desc.clone(), timeout);
// !opts.run_tests forces TrIgnored (used in --bench-only mode).
1095 run_test(opts, !opts.run_tests, test, tx.clone());
// Wait with a timeout so slow-test warnings fire even while blocked;
// loop again after a pure timeout (surrounding loop lines elided).
1101 if let Some(timeout) = calc_timeout(&running_tests) {
1102 res = rx.recv_timeout(timeout);
1103 for test in get_timed_out_tests(&mut running_tests) {
1104 callback(TeTimeout(test))?;
1106 if res != Err(RecvTimeoutError::Timeout) {
1110 res = rx.recv().map_err(|_| RecvTimeoutError::Disconnected);
1115 let (desc, result, stdout) = res.unwrap();
1116 running_tests.remove(&desc);
// With concurrency > 1 the TeWait wasn't sent before the run; send it
// now so every TeResult is preceded by its TeWait.
1118 if concurrency != 1 {
1119 callback(TeWait(desc.clone(), PadNone))?;
1121 callback(TeResult(desc, result, stdout))?;
1125 if opts.bench_benchmarks {
1126 // All benchmarks run at the end, in serial.
1127 // (this includes metric fns)
1128 for b in filtered_benchs_and_metrics {
1129 callback(TeWait(b.desc.clone(), b.testfn.padding()))?;
1130 run_test(opts, false, b, tx.clone());
1131 let (test, result, stdout) = rx.recv().unwrap();
1132 callback(TeResult(test, result, stdout))?;
1138 #[allow(deprecated)]
// Number of tests to run concurrently: RUST_TEST_THREADS when set to a
// positive integer (panics otherwise), else the machine's CPU count.
1139 fn get_concurrency() -> usize {
1140 return match env::var("RUST_TEST_THREADS") {
1142 let opt_n: Option<usize> = s.parse().ok();
1144 Some(n) if n > 0 => n,
1146 panic!("RUST_TEST_THREADS is `{}`, should be a positive integer.",
1151 Err(..) => num_cpus(),
// Platform-specific CPU counters; each variant is gated by a #[cfg]
// (the Windows variant's cfg attribute is elided in this excerpt).
// Windows: read dwNumberOfProcessors from GetSystemInfo.
1156 fn num_cpus() -> usize {
1158 struct SYSTEM_INFO {
1159 wProcessorArchitecture: u16,
1162 lpMinimumApplicationAddress: *mut u8,
1163 lpMaximumApplicationAddress: *mut u8,
1164 dwActiveProcessorMask: *mut u8,
1165 dwNumberOfProcessors: u32,
1166 dwProcessorType: u32,
1167 dwAllocationGranularity: u32,
1168 wProcessorLevel: u16,
1169 wProcessorRevision: u16,
1172 fn GetSystemInfo(info: *mut SYSTEM_INFO) -> i32;
1175 let mut sysinfo = std::mem::zeroed();
1176 GetSystemInfo(&mut sysinfo);
1177 sysinfo.dwNumberOfProcessors as usize
1181 #[cfg(target_os = "redox")]
1182 fn num_cpus() -> usize {
1183 // FIXME: Implement num_cpus on Redox
// Unix family: sysconf(_SC_NPROCESSORS_ONLN).
1187 #[cfg(any(target_os = "linux",
1188 target_os = "macos",
1190 target_os = "android",
1191 target_os = "solaris",
1192 target_os = "emscripten",
1193 target_os = "fuchsia"))]
1194 fn num_cpus() -> usize {
1195 unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as usize }
// BSDs: try sysconf, fall back to the CTL_HW/HW_NCPU sysctl.
1198 #[cfg(any(target_os = "freebsd",
1199 target_os = "dragonfly",
1200 target_os = "bitrig",
1201 target_os = "netbsd"))]
1202 fn num_cpus() -> usize {
1205 let mut cpus: libc::c_uint = 0;
1206 let mut cpus_size = std::mem::size_of_val(&cpus);
1209 cpus = libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as libc::c_uint;
1212 let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
1214 libc::sysctl(mib.as_mut_ptr(),
1216 &mut cpus as *mut _ as *mut _,
1217 &mut cpus_size as *mut _ as *mut _,
// OpenBSD: CTL_HW/HW_NCPU sysctl only (no sysconf fallback here).
1228 #[cfg(target_os = "openbsd")]
1229 fn num_cpus() -> usize {
1232 let mut cpus: libc::c_uint = 0;
1233 let mut cpus_size = std::mem::size_of_val(&cpus);
1234 let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
1237 libc::sysctl(mib.as_mut_ptr(),
1239 &mut cpus as *mut _ as *mut _,
1240 &mut cpus_size as *mut _ as *mut _,
1250 #[cfg(target_os = "haiku")]
1251 fn num_cpus() -> usize {
// Apply the name filter, the --skip filters, and the ignored-test policy,
// then return the surviving tests sorted by name.
1257 pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
1258 let mut filtered = tests;
1260 // Remove tests that don't match the test filter
1261 filtered = match opts.filter {
1263 Some(ref filter) => {
1264 filtered.into_iter()
// --exact compares whole names; otherwise substring match.
1266 if opts.filter_exact {
1267 test.desc.name.as_slice() == &filter[..]
1269 test.desc.name.as_slice().contains(&filter[..])
1276 // Skip tests that match any of the skip filters
1277 filtered = filtered.into_iter()
1278 .filter(|t| !opts.skip.iter().any(|sf| {
1279 if opts.filter_exact {
1280 t.desc.name.as_slice() == &sf[..]
1282 t.desc.name.as_slice().contains(&sf[..])
1287 // Maybe pull out the ignored test and unignore them
// NOTE(review): the branch bodies are partially elided here — which arm
// applies `filter` vs which drops ignored tests should be confirmed
// against the full file.
1288 filtered = if !opts.run_ignored {
1291 fn filter(test: TestDescAndFn) -> Option<TestDescAndFn> {
// Keep only ignored tests, clearing their ignore flag so they run.
1292 if test.desc.ignore {
1293 let TestDescAndFn {desc, testfn} = test;
1294 Some(TestDescAndFn {
1295 desc: TestDesc { ignore: false, ..desc },
1302 filtered.into_iter().filter_map(filter).collect()
1305 // Sort the tests alphabetically
1306 filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));
// Rewrap each benchmark's function as a test that runs the bench body
// exactly once (via bench::run_once); non-bench entries pass through.
1311 pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
1312 // convert benchmarks to tests, if we're not benchmarking them
1313 tests.into_iter().map(|x| {
1314 let testfn = match x.testfn {
1315 DynBenchFn(bench) => {
1316 DynTestFn(Box::new(move |()| {
1317 bench::run_once(|b| {
// Trim the backtrace to frames below this marker on panic.
1318 __rust_begin_short_backtrace(|| bench.run(b))
1322 StaticBenchFn(benchfn) => {
1323 DynTestFn(Box::new(move |()| {
1324 bench::run_once(|b| {
1325 __rust_begin_short_backtrace(|| benchfn(b))
1338 pub fn run_test(opts: &TestOpts,
1340 test: TestDescAndFn,
1341 monitor_ch: Sender<MonitorMsg>) {
1343 let TestDescAndFn {desc, testfn} = test;
1345 if force_ignore || desc.ignore {
1346 monitor_ch.send((desc, TrIgnored, Vec::new())).unwrap();
1350 fn run_test_inner(desc: TestDesc,
1351 monitor_ch: Sender<MonitorMsg>,
1353 testfn: Box<FnBox<()>>) {
1354 struct Sink(Arc<Mutex<Vec<u8>>>);
1355 impl Write for Sink {
1356 fn write(&mut self, data: &[u8]) -> io::Result<usize> {
1357 Write::write(&mut *self.0.lock().unwrap(), data)
1359 fn flush(&mut self) -> io::Result<()> {
1364 // Buffer for capturing standard I/O
1365 let data = Arc::new(Mutex::new(Vec::new()));
1366 let data2 = data.clone();
1368 let name = desc.name.clone();
1369 let runtest = move || {
1370 let oldio = if !nocapture {
1372 io::set_print(Some(Box::new(Sink(data2.clone())))),
1373 io::set_panic(Some(Box::new(Sink(data2))))
1379 let result = catch_unwind(AssertUnwindSafe(|| {
1383 if let Some((printio, panicio)) = oldio {
1384 io::set_print(printio);
1385 io::set_panic(panicio);
1388 let test_result = calc_result(&desc, result);
1389 let stdout = data.lock().unwrap().to_vec();
1390 monitor_ch.send((desc.clone(), test_result, stdout)).unwrap();
1394 // If the platform is single-threaded we're just going to run
1395 // the test synchronously, regardless of the concurrency
1397 let supports_threads = !cfg!(target_os = "emscripten");
1398 if supports_threads {
1399 let cfg = thread::Builder::new().name(match name {
1400 DynTestName(ref name) => name.clone(),
1401 StaticTestName(name) => name.to_owned(),
1403 cfg.spawn(runtest).unwrap();
// NOTE(review): fragment of `run_test`'s dispatch on the test-function kind;
// the `fn run_test(...)` header and the `match testfn {` opener are on lines
// missing from this excerpt, as are several arm/brace closers.
// Bench and metric variants report directly on the monitor channel (with no
// captured output); plain test functions are forwarded to `run_test_inner`.
1410 DynBenchFn(bencher) => {
1411 let bs = ::bench::benchmark(|harness| bencher.run(harness));
1412 monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
1415 StaticBenchFn(benchfn) => {
1416 let bs = ::bench::benchmark(|harness| (benchfn.clone())(harness));
1417 monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
// Metric variants populate a fresh MetricMap and report it as TrMetrics.
1421 let mut mm = MetricMap::new();
1422 f.call_box(&mut mm);
1423 monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
1426 StaticMetricFn(f) => {
1427 let mut mm = MetricMap::new();
1429 monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
// Plain test functions are wrapped in __rust_begin_short_backtrace (a fixed
// frame used to trim backtraces) and handed to run_test_inner.
1433 let cb = move |()| {
1434 __rust_begin_short_backtrace(|| f.call_box(()))
1436 run_test_inner(desc, monitor_ch, opts.nocapture, Box::new(cb))
1439 run_test_inner(desc, monitor_ch, opts.nocapture,
1440 Box::new(move |()| __rust_begin_short_backtrace(f))),
1444 /// Fixed frame used to clean the backtrace with `RUST_BACKTRACE=1`.
// NOTE(review): the function body (presumably just `f()`) is on lines missing
// from this excerpt; only the signature is visible here.
1446 fn __rust_begin_short_backtrace<F: FnOnce()>(f: F) {
// Maps a test's unwind outcome to a TestResult according to its
// `should_panic` expectation. Visible arms: (No, Ok) and (Yes, Err) are TrOk;
// YesWithMessage checks that the panic payload (a String or &'static str)
// contains the expected substring, producing TrFailedMsg otherwise. The
// remaining arms (e.g. unexpected Ok/Err combinations) are on lines missing
// from this excerpt.
1450 fn calc_result(desc: &TestDesc, task_result: Result<(), Box<Any + Send>>) -> TestResult {
1451 match (&desc.should_panic, task_result) {
1452 (&ShouldPanic::No, Ok(())) |
1453 (&ShouldPanic::Yes, Err(_)) => TrOk,
1454 (&ShouldPanic::YesWithMessage(msg), Err(ref err)) =>
// Panic payloads from panic!("...") are String or &'static str; try both
// downcasts before testing for the expected substring.
1455 if err.downcast_ref::<String>()
1457 .or_else(|| err.downcast_ref::<&'static str>().map(|e| *e))
1458 .map(|e| e.contains(msg))
1462 TrFailedMsg(format!("Panic did not include expected string '{}'", msg))
// Creates an empty MetricMap (a newtype over a BTreeMap, so iteration order
// of metric names is sorted and deterministic).
1469 pub fn new() -> MetricMap {
1470 MetricMap(BTreeMap::new())
1473 /// Insert a named `value` (+/- `noise`) metric into the map. The value
1474 /// must be non-negative. The `noise` indicates the uncertainty of the
1475 /// metric, which doubles as the "noise range" of acceptable
1476 /// pairwise-regressions on this named value, when comparing from one
1477 /// metric to the next using `compare_to_old`.
1479 /// If `noise` is positive, then it means this metric is of a value
1480 /// you want to see grow smaller, so a change larger than `noise` in the
1481 /// positive direction represents a regression.
1483 /// If `noise` is negative, then it means this metric is of a value
1484 /// you want to see grow larger, so a change larger than `noise` in the
1485 /// negative direction represents a regression.
1486 pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
// NOTE(review): the construction of `m` (presumably a Metric { value, noise })
// is on lines missing from this excerpt; the map stores it under an owned
// copy of `name`, replacing any existing entry.
1491 let MetricMap(ref mut map) = *self;
1492 map.insert(name.to_owned(), m);
// Renders all metrics as "name: value (+/- noise)" strings, in the BTreeMap's
// sorted key order. The final joining of `v` into one String is on lines
// missing from this excerpt.
1495 pub fn fmt_metrics(&self) -> String {
1496 let MetricMap(ref mm) = *self;
1497 let v: Vec<String> = mm.iter()
1498 .map(|(k, v)| format!("{}: {} (+/- {})", *k, v.value, v.noise))
1507 /// A function that is opaque to the optimizer, to allow benchmarks to
1508 /// pretend to use outputs to assist in avoiding dead-code
1511 /// This function is a no-op, and does not even read from `dummy`.
1512 #[cfg(not(any(all(target_os = "nacl", target_arch = "le32"),
1513 target_arch = "asmjs", target_arch = "wasm32")))]
1514 pub fn black_box<T>(dummy: T) -> T {
1515 // we need to "use" the argument in some way LLVM can't
// An empty asm block that claims to read &dummy: LLVM must assume the value
// escapes, so it cannot const-fold or dead-code-eliminate the benchmark work.
1517 unsafe { asm!("" : : "r"(&dummy)) }
// Fallback for targets without inline-asm support (NaCl/asmjs/wasm32): the
// identity function. NOTE(review): the `dummy` return is on a line missing
// from this excerpt; on these targets the optimizer barrier is best-effort.
1520 #[cfg(any(all(target_os = "nacl", target_arch = "le32"),
1521 target_arch = "asmjs", target_arch = "wasm32"))]
1523 pub fn black_box<T>(dummy: T) -> T {
1529 /// Callback for benchmark functions to run in their body.
// In Single mode (used by `run_once`) the closure is executed exactly once;
// otherwise the full adaptive sampling loop (`iter`, below in this file)
// produces a statistical Summary that is stored on the Bencher.
1530 pub fn iter<T, F>(&mut self, mut inner: F)
1531 where F: FnMut() -> T
1533 if self.mode == BenchMode::Single {
1534 ns_iter_inner(&mut inner, 1);
// NOTE(review): the early-return for Single mode sits on a missing line
// between these two branches.
1538 self.summary = Some(iter(&mut inner));
// Runs a benchmark function against this Bencher and returns the Summary the
// function's `iter` call recorded (None if it never called `iter`). The
// invocation `f(self)` is on a line missing from this excerpt.
1541 pub fn bench<F>(&mut self, mut f: F) -> Option<stats::Summary>
1542 where F: FnMut(&mut Bencher)
1545 return self.summary;
// Converts a Duration to whole nanoseconds as u64 (saturation/overflow is not
// handled; benchmark runs are bounded to a few seconds elsewhere).
1549 fn ns_from_dur(dur: Duration) -> u64 {
1550 dur.as_secs() * 1_000_000_000 + (dur.subsec_nanos() as u64)
// Times `k` consecutive invocations of `inner`, returning the total elapsed
// wall-clock time in nanoseconds. The loop over 0..k (wrapped in black_box on
// missing lines) executes between `start` and the elapsed() read.
1553 fn ns_iter_inner<T, F>(inner: &mut F, k: u64) -> u64
1554 where F: FnMut() -> T
1556 let start = Instant::now();
1560 return ns_from_dur(start.elapsed());
// Adaptive benchmark driver: repeatedly samples `inner`, growing the batch
// size `n` until timings converge or time budgets are exhausted, and returns
// a winsorized statistical Summary of per-iteration nanoseconds.
// NOTE(review): the enclosing `loop {` opener, the convergence `return summ5`,
// and the `checked_mul` fallback arms are on lines missing from this excerpt.
1564 pub fn iter<T, F>(inner: &mut F) -> stats::Summary
1565 where F: FnMut() -> T
1567 // Initial bench run to get ballpark figure.
1568 let ns_single = ns_iter_inner(inner, 1);
1570 // Try to estimate iter count for 1ms falling back to 1m
1571 // iterations if first run took < 1ns.
1572 let ns_target_total = 1_000_000; // 1ms
1573 let mut n = ns_target_total / cmp::max(1, ns_single);
1575 // if the first run took more than 1ms we don't want to just
1576 // be left doing 0 iterations on every loop. The unfortunate
1577 // side effect of not being able to do as many runs is
1578 // automatically handled by the statistical analysis below
1579 // (i.e. larger error bars).
1582 let mut total_run = Duration::new(0, 0);
1583 let samples: &mut [f64] = &mut [0.0_f64; 50];
1585 let loop_start = Instant::now();
// First sample set: 50 batches of n iterations, per-iteration ns each.
1587 for p in &mut *samples {
1588 *p = ns_iter_inner(inner, n) as f64 / n as f64;
// Winsorize at the 5th/95th percentiles to damp outliers before summarizing.
1591 stats::winsorize(samples, 5.0);
1592 let summ = stats::Summary::new(samples);
// Second sample set at 5x the batch size, used as the convergence reference.
1594 for p in &mut *samples {
1595 let ns = ns_iter_inner(inner, 5 * n);
1596 *p = ns as f64 / (5 * n) as f64;
1599 stats::winsorize(samples, 5.0);
1600 let summ5 = stats::Summary::new(samples);
1602 let loop_run = loop_start.elapsed();
1604 // If we've run for 100ms and seem to have converged to a
// Converged: low relative deviation and the two batch sizes agree to within
// the larger batch's median absolute deviation.
1606 if loop_run > Duration::from_millis(100) && summ.median_abs_dev_pct < 1.0 &&
1607 summ.median - summ5.median < summ5.median_abs_dev {
1611 total_run = total_run + loop_run;
1612 // Longest we ever run for is 3s.
1613 if total_run > Duration::from_secs(3) {
1617 // If we overflow here just return the results so far. We check a
1618 // multiplier of 10 because we're about to multiply by 2 and the
1619 // next iteration of the loop will also multiply by 5 (to calculate
1620 // the summ5 result)
1621 n = match n.checked_mul(10) {
1633 use super::{Bencher, BenchSamples, BenchMode};
// Runs `f` under a fresh Auto-mode Bencher and packages the result as
// BenchSamples: median ns/iter (clamped to >= 1) plus derived MB/s throughput
// when `bs.bytes` was set. If `f` never called `iter`, a zeroed single-sample
// Summary is returned instead (see FIXME below).
// NOTE(review): several struct-literal fields and closing braces are on lines
// missing from this excerpt.
1635 pub fn benchmark<F>(f: F) -> BenchSamples
1636 where F: FnMut(&mut Bencher)
1638 let mut bs = Bencher {
1639 mode: BenchMode::Auto,
1644 return match bs.bench(f) {
1645 Some(ns_iter_summ) => {
// Clamp to 1 ns to avoid divide-by-zero in the MB/s computation below.
1646 let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
1647 let mb_s = bs.bytes * 1000 / ns_iter;
1650 ns_iter_summ: ns_iter_summ,
1651 mb_s: mb_s as usize,
1655 // iter not called, so no data.
1656 // FIXME: error in this case?
1657 let samples: &mut [f64] = &mut [0.0_f64; 1];
1659 ns_iter_summ: stats::Summary::new(samples),
// Executes a benchmark function exactly once (Single mode), discarding timing
// data — used to smoke-test `#[bench]` functions under `--test`. The call
// `bs.bench(f)` is on a line missing from this excerpt.
1666 pub fn run_once<F>(f: F)
1667 where F: FnMut(&mut Bencher)
1669 let mut bs = Bencher {
1670 mode: BenchMode::Single,
1680 use test::{TrFailed, TrFailedMsg, TrIgnored, TrOk, filter_tests, parse_opts, TestDesc,
1681 TestDescAndFn, TestOpts, run_test, MetricMap, StaticTestName, DynTestName,
1682 DynTestFn, ShouldPanic};
1683 use std::sync::mpsc::channel;
// A test marked ignore (the `ignore: true` field and the panicking body `f`
// are on missing lines — TODO confirm) must not report TrOk when run with
// run_ignored == false.
1688 pub fn do_not_run_ignored_tests() {
1692 let desc = TestDescAndFn {
1694 name: StaticTestName("whatever"),
1696 should_panic: ShouldPanic::No,
1698 testfn: DynTestFn(Box::new(move |()| f())),
1700 let (tx, rx) = channel();
1701 run_test(&TestOpts::new(), false, desc, tx);
1702 let (_, res, _) = rx.recv().unwrap();
1703 assert!(res != TrOk);
// An ignored test that is nevertheless executed must report TrIgnored rather
// than running its body. (The `ignore: true` field sits on a missing line —
// TODO confirm.)
1707 pub fn ignored_tests_result_in_ignored() {
1709 let desc = TestDescAndFn {
1711 name: StaticTestName("whatever"),
1713 should_panic: ShouldPanic::No,
1715 testfn: DynTestFn(Box::new(move |()| f())),
1717 let (tx, rx) = channel();
1718 run_test(&TestOpts::new(), false, desc, tx);
1719 let (_, res, _) = rx.recv().unwrap();
1720 assert!(res == TrIgnored);
// ShouldPanic::Yes + a body that panics (the panicking `f` is on missing
// lines) must be reported as TrOk.
1724 fn test_should_panic() {
1728 let desc = TestDescAndFn {
1730 name: StaticTestName("whatever"),
1732 should_panic: ShouldPanic::Yes,
1734 testfn: DynTestFn(Box::new(move |()| f())),
1736 let (tx, rx) = channel();
1737 run_test(&TestOpts::new(), false, desc, tx);
1738 let (_, res, _) = rx.recv().unwrap();
1739 assert!(res == TrOk);
// YesWithMessage succeeds when the panic payload contains the expected
// substring ("error message" is a substring of "an error message").
1743 fn test_should_panic_good_message() {
1745 panic!("an error message");
1747 let desc = TestDescAndFn {
1749 name: StaticTestName("whatever"),
1751 should_panic: ShouldPanic::YesWithMessage("error message"),
1753 testfn: DynTestFn(Box::new(move |()| f())),
1755 let (tx, rx) = channel();
1756 run_test(&TestOpts::new(), false, desc, tx);
1757 let (_, res, _) = rx.recv().unwrap();
1758 assert!(res == TrOk);
// YesWithMessage with a non-matching substring ("foobar") must fail with the
// exact TrFailedMsg text produced by calc_result.
1762 fn test_should_panic_bad_message() {
1764 panic!("an error message");
1766 let expected = "foobar";
1767 let failed_msg = "Panic did not include expected string";
1768 let desc = TestDescAndFn {
1770 name: StaticTestName("whatever"),
1772 should_panic: ShouldPanic::YesWithMessage(expected),
1774 testfn: DynTestFn(Box::new(move |()| f())),
1776 let (tx, rx) = channel();
1777 run_test(&TestOpts::new(), false, desc, tx);
1778 let (_, res, _) = rx.recv().unwrap();
// Must match calc_result's format string exactly.
1779 assert!(res == TrFailedMsg(format!("{} '{}'", failed_msg, expected)));
// ShouldPanic::Yes with a body that completes normally (the non-panicking `f`
// is on missing lines) must be reported as TrFailed.
1783 fn test_should_panic_but_succeeds() {
1785 let desc = TestDescAndFn {
1787 name: StaticTestName("whatever"),
1789 should_panic: ShouldPanic::Yes,
1791 testfn: DynTestFn(Box::new(move |()| f())),
1793 let (tx, rx) = channel();
1794 run_test(&TestOpts::new(), false, desc, tx);
1795 let (_, res, _) = rx.recv().unwrap();
1796 assert!(res == TrFailed);
// parse_opts must set run_ignored when "--ignored" appears on the command
// line. (The successful match arm binding `opts` is on a missing line.)
1800 fn parse_ignored_flag() {
1801 let args = vec!["progname".to_string(), "filter".to_string(), "--ignored".to_string()];
1802 let opts = match parse_opts(&args) {
1804 _ => panic!("Malformed arg in parse_ignored_flag"),
1806 assert!((opts.run_ignored));
1810 pub fn filter_for_ignored_option() {
1811 // When we run ignored tests the test filter should filter out all the
1812 // unignored tests and flip the ignore flag on the rest to false
1814 let mut opts = TestOpts::new();
1815 opts.run_tests = true;
1816 opts.run_ignored = true;
// Two tests are built; their `ignore` fields (presumably one true, one false
// — the lines are missing from this excerpt) drive the filtering below.
1818 let tests = vec![TestDescAndFn {
1820 name: StaticTestName("1"),
1822 should_panic: ShouldPanic::No,
1824 testfn: DynTestFn(Box::new(move |()| {})),
1828 name: StaticTestName("2"),
1830 should_panic: ShouldPanic::No,
1832 testfn: DynTestFn(Box::new(move |()| {})),
1834 let filtered = filter_tests(&opts, tests);
// Only the ignored test survives, and its ignore flag is flipped to false so
// it will actually run.
1836 assert_eq!(filtered.len(), 1);
1837 assert_eq!(filtered[0].desc.name.to_string(), "1");
1838 assert!(!filtered[0].desc.ignore);
// Exercises substring vs. exact filtering. `tests()` builds fixtures from a
// name list (the list itself — apparently "base" plus three "base::test*"
// variants, giving 4 total — is on missing lines; TODO confirm).
1842 pub fn exact_filter_match() {
1843 fn tests() -> Vec<TestDescAndFn> {
1849 .map(|name| TestDescAndFn {
1851 name: StaticTestName(name),
1853 should_panic: ShouldPanic::No,
1855 testfn: DynTestFn(Box::new(move |()| {}))
// Substring mode: every name containing the filter matches.
1860 let substr = filter_tests(&TestOpts {
1861 filter: Some("base".into()),
1864 assert_eq!(substr.len(), 4);
1866 let substr = filter_tests(&TestOpts {
1867 filter: Some("bas".into()),
1870 assert_eq!(substr.len(), 4);
1872 let substr = filter_tests(&TestOpts {
1873 filter: Some("::test".into()),
1876 assert_eq!(substr.len(), 3);
1878 let substr = filter_tests(&TestOpts {
1879 filter: Some("base::test".into()),
1882 assert_eq!(substr.len(), 3);
// Exact mode (filter_exact: true): only names equal to the filter match, so
// partial strings like "bas" and "::test" match nothing.
1884 let exact = filter_tests(&TestOpts {
1885 filter: Some("base".into()),
1886 filter_exact: true, ..TestOpts::new()
1888 assert_eq!(exact.len(), 1);
1890 let exact = filter_tests(&TestOpts {
1891 filter: Some("bas".into()),
1895 assert_eq!(exact.len(), 0);
1897 let exact = filter_tests(&TestOpts {
1898 filter: Some("::test".into()),
1902 assert_eq!(exact.len(), 0);
1904 let exact = filter_tests(&TestOpts {
1905 filter: Some("base::test".into()),
1909 assert_eq!(exact.len(), 1);
// filter_tests must return tests sorted by name: the deliberately shuffled
// `names` list below must come back in the lexicographic order of `expected`.
1913 pub fn sort_tests() {
1914 let mut opts = TestOpts::new();
1915 opts.run_tests = true;
1917 let names = vec!["sha1::test".to_string(),
1918 "isize::test_to_str".to_string(),
1919 "isize::test_pow".to_string(),
1920 "test::do_not_run_ignored_tests".to_string(),
1921 "test::ignored_tests_result_in_ignored".to_string(),
1922 "test::first_free_arg_should_be_a_filter".to_string(),
1923 "test::parse_ignored_flag".to_string(),
1924 "test::filter_for_ignored_option".to_string(),
1925 "test::sort_tests".to_string()];
// Build a no-op DynTestFn fixture for each name (the TestDesc fields between
// these lines are missing from this excerpt).
1928 let mut tests = Vec::new();
1929 for name in &names {
1930 let test = TestDescAndFn {
1932 name: DynTestName((*name).clone()),
1934 should_panic: ShouldPanic::No,
1936 testfn: DynTestFn(Box::new(move |()| testfn())),
1942 let filtered = filter_tests(&opts, tests);
1944 let expected = vec!["isize::test_pow".to_string(),
1945 "isize::test_to_str".to_string(),
1946 "sha1::test".to_string(),
1947 "test::do_not_run_ignored_tests".to_string(),
1948 "test::filter_for_ignored_option".to_string(),
1949 "test::first_free_arg_should_be_a_filter".to_string(),
1950 "test::ignored_tests_result_in_ignored".to_string(),
1951 "test::parse_ignored_flag".to_string(),
1952 "test::sort_tests".to_string()];
// Pairwise comparison: filtered order must equal the sorted expectation.
1954 for (a, b) in expected.iter().zip(filtered) {
1955 assert!(*a == b.desc.name.to_string());
// Builds two MetricMaps covering the regression/improvement cases described
// by insert_metric's noise-sign convention (positive noise = smaller is
// better, negative = larger is better). NOTE(review): the assertions that
// actually compare m1 against m2 are on lines missing from this excerpt.
1960 pub fn test_metricmap_compare() {
1961 let mut m1 = MetricMap::new();
1962 let mut m2 = MetricMap::new();
// Within noise range: 1000 -> 1100 with +/-200 noise is not a regression.
1963 m1.insert_metric("in-both-noise", 1000.0, 200.0);
1964 m2.insert_metric("in-both-noise", 1100.0, 200.0);
// Keys present in only one map.
1966 m1.insert_metric("in-first-noise", 1000.0, 2.0);
1967 m2.insert_metric("in-second-noise", 1000.0, 2.0);
// Positive noise (want smaller): value doubled = regression; halved = improved.
1969 m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
1970 m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);
1972 m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
1973 m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);
// Negative noise (want larger): value halved = regression; doubled = improved.
1975 m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
1976 m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);
1978 m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
1979 m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);
// Smoke test: run_once on a bench fn that never calls iter must not panic.
// (The `bench::run_once(f)` call is on a missing line.)
1983 pub fn test_bench_once_no_iter() {
1984 fn f(_: &mut Bencher) {}
// Smoke test: run_once on a bench fn that does call iter. (The iter call and
// the run_once invocation are on missing lines.)
1989 pub fn test_bench_once_iter() {
1990 fn f(b: &mut Bencher) {
// Smoke test: full benchmark() on a bench fn that never calls iter must take
// the zeroed-Summary fallback path without panicking.
1998 pub fn test_bench_no_iter() {
1999 fn f(_: &mut Bencher) {}
2000 bench::benchmark(f);
2004 pub fn test_bench_iter() {
2005 fn f(b: &mut Bencher) {
2009 bench::benchmark(f);