1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 //! Support code for rustc's built in unit-test and micro-benchmarking
14 //! Almost all user code will only be interested in `Bencher` and
15 //! `black_box`. All other interactions (such as writing tests and
16 //! benchmarks themselves) should be done via the `#[test]` and
17 //! `#[bench]` attributes.
19 //! See the [Testing Chapter](../book/testing.html) of the book for more details.
21 // Currently, not much of this is meant for users. It is intended to
22 // support the simplest interface possible for representing and
23 // running tests while providing a base that other test frameworks may
26 #![crate_name = "test"]
27 #![unstable(feature = "test", issue = "27812")]
28 #![crate_type = "rlib"]
29 #![crate_type = "dylib"]
30 #![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
31 html_favicon_url = "https://doc.rust-lang.org/favicon.ico",
32 html_root_url = "https://doc.rust-lang.org/nightly/",
33 test(attr(deny(warnings))))]
38 #![feature(rustc_private)]
39 #![feature(set_stdio)]
40 #![feature(staged_api)]
41 #![feature(panic_unwind)]
46 extern crate panic_unwind;
48 pub use self::TestFn::*;
49 pub use self::ColorConfig::*;
50 pub use self::TestResult::*;
51 pub use self::TestName::*;
52 use self::TestEvent::*;
53 use self::NamePadding::*;
54 use self::OutputLocation::*;
56 use std::panic::{catch_unwind, AssertUnwindSafe};
59 use std::collections::BTreeMap;
63 use std::io::prelude::*;
65 use std::iter::repeat;
66 use std::path::PathBuf;
67 use std::sync::mpsc::{channel, Sender};
68 use std::sync::{Arc, Mutex};
70 use std::time::{Instant, Duration};
// Number of seconds a test may run before the runner emits a
// "has been running for over {} seconds" warning (see write_timeout and the
// Duration::from_secs(TEST_WARN_TIMEOUT_S) deadline in run_tests below).
72 const TEST_WARN_TIMEOUT_S: u64 = 60;
74 // to be used by rustc to compile tests in libtest
// NOTE(review): self re-export of this crate's public items so rustc-compiled
// test harnesses can name them; lines are missing around here (73, 75 absent).
76 pub use {Bencher, TestName, TestResult, TestDesc, TestDescAndFn, TestOpts, TrFailed,
77 TrFailedMsg, TrIgnored, TrOk, Metric, MetricMap, StaticTestFn, StaticTestName,
78 DynTestName, DynTestFn, run_test, test_main, test_main_static, filter_tests,
79 parse_opts, StaticBenchFn, ShouldPanic, Options};
// NOTE(review): this excerpt is a line-numbered fragment; the embedded
// numbering jumps (87->89, 91->95, 98->102, ...), so enum headers, match
// headers, and closing braces are missing. Code preserved byte-for-byte;
// only comments added.
84 // The name of a test. By convention this follows the rules for rust
85 // paths; i.e. it should be a series of identifiers separated by double
86 // colons. This way if some test runner wants to arrange the tests
87 // hierarchically it may.
89 #[derive(Clone, PartialEq, Eq, Hash, Debug)]
// Enum header (`pub enum TestName {`) is not visible; a DynTestName(String)
// variant presumably follows StaticTestName (it is matched below) — TODO
// confirm against the full file.
91 StaticTestName(&'static str),
// Borrow the name as &str regardless of static/dynamic variant.
95 fn as_slice(&self) -> &str {
97 StaticTestName(s) => s,
98 DynTestName(ref s) => s,
// Display delegates to as_slice(), so a TestName prints as its path string.
102 impl fmt::Display for TestName {
103 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
104 fmt::Display::fmt(self.as_slice(), f)
108 #[derive(Clone, Copy, PartialEq, Eq)]
// Variants (PadNone / PadOnRight, used elsewhere in this file) are missing
// from this view.
109 pub enum NamePadding {
// Right-pads the test name with spaces to `column_count` columns;
// saturating_sub avoids underflow when the name is already wider. The rest
// of the body (and the enclosing impl header) is not visible here.
115 fn padded_name(&self, column_count: usize, align: NamePadding) -> String {
116 let mut name = String::from(self.name.as_slice());
117 let fill = column_count.saturating_sub(name.len());
118 let pad = repeat(" ").take(fill).collect::<String>();
// NOTE(review): fragment — interior lines missing (132-133, 136-137, 140-143,
// 148-149, 155-157, 159, 166-169, 179-182 absent per embedded numbering).
129 /// Represents a benchmark function.
130 pub trait TDynBenchFn: Send {
131 fn run(&self, harness: &mut Bencher);
// Object-safe stand-in for FnOnce(T): pre-`Box<FnOnce>`-era trick allowing a
// boxed closure to be consumed by value via call_box.
134 pub trait FnBox<T>: Send + 'static {
135 fn call_box(self: Box<Self>, t: T);
138 impl<T, F: FnOnce(T) + Send + 'static> FnBox<T> for F {
139 fn call_box(self: Box<F>, t: T) {
144 // A function that runs a test. If the function returns successfully,
145 // the test succeeds; if the function panics then the test fails. We
146 // may need to come up with a more clever definition of test in order
147 // to support isolation of tests into threads.
// Enum header (`pub enum TestFn {`) and the StaticTestFn variant are missing;
// variants visible below cover static/dynamic test, bench, and metric fns.
150 StaticBenchFn(fn(&mut Bencher)),
151 StaticMetricFn(fn(&mut MetricMap)),
152 DynTestFn(Box<FnBox<()>>),
153 DynMetricFn(Box<for<'a> FnBox<&'a mut MetricMap>>),
154 DynBenchFn(Box<TDynBenchFn + 'static>),
// Benchmarks/metrics pad their names on the right so timing columns line up;
// plain tests are unpadded.
158 fn padding(&self) -> NamePadding {
160 StaticTestFn(..) => PadNone,
161 StaticBenchFn(..) => PadOnRight,
162 StaticMetricFn(..) => PadOnRight,
163 DynTestFn(..) => PadNone,
164 DynMetricFn(..) => PadOnRight,
165 DynBenchFn(..) => PadOnRight,
// Manual Debug: closures aren't Debug, so only the variant name is printed.
170 impl fmt::Debug for TestFn {
171 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
172 f.write_str(match *self {
173 StaticTestFn(..) => "StaticTestFn(..)",
174 StaticBenchFn(..) => "StaticBenchFn(..)",
175 StaticMetricFn(..) => "StaticMetricFn(..)",
176 DynTestFn(..) => "DynTestFn(..)",
177 DynMetricFn(..) => "DynMetricFn(..)",
178 DynBenchFn(..) => "DynBenchFn(..)",
// NOTE(review): fragment covering several type definitions; struct headers and
// most fields are missing (embedded numbering jumps 186->191, 195->201,
// 205->208, 211->214, 225->230, 237->246, 251->255, ...).
183 /// Manager of the benchmarking runs.
185 /// This is fed into functions marked with `#[bench]` to allow for
186 /// set-up & tear-down before running a piece of code repeatedly via a
// Only this field of `Bencher` is visible; `summary` caches the stats of the
// most recent benchmark run (see bench::benchmark usage below).
191 summary: Option<stats::Summary>,
195 #[derive(Clone, PartialEq, Eq)]
201 #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
// Variants No and Yes are missing from this view; YesWithMessage carries the
// substring the panic payload must contain (checked in calc_result).
202 pub enum ShouldPanic {
205 YesWithMessage(&'static str),
208 // The definition of a single test. A test runner will run a list of
210 #[derive(Clone, Debug, PartialEq, Eq, Hash)]
// Fields `name` and `ignore` are referenced elsewhere in this file but not
// visible here.
211 pub struct TestDesc {
214 pub should_panic: ShouldPanic,
// Locations of a compiletest-style test on disk.
218 pub struct TestPaths {
219 pub file: PathBuf, // e.g., compile-test/foo/bar/baz.rs
220 pub base: PathBuf, // e.g., compile-test, auxiliary
221 pub relative_dir: PathBuf, // e.g., foo/bar
// Pairs a TestDesc with the TestFn to execute (fields `desc`/`testfn` per
// destructuring elsewhere in this file).
225 pub struct TestDescAndFn {
230 #[derive(Clone, PartialEq, Debug, Copy)]
// Metric constructor; struct definition itself is missing from this view.
237 pub fn new(value: f64, noise: f64) -> Metric {
// Newtype over an ordered map so metric output is deterministic by name.
246 pub struct MetricMap(BTreeMap<String, Metric>);
// Manual Clone (review note: a derive would normally suffice; presumably
// historical — do not change in this truncated view).
248 impl Clone for MetricMap {
249 fn clone(&self) -> MetricMap {
250 let MetricMap(ref map) = *self;
251 MetricMap(map.clone())
255 /// In case we want to add other options as well, just add them in this struct.
256 #[derive(Copy, Clone, Debug)]
// `Options` struct header is missing; display_output controls whether
// successful tests' captured stdout is printed (see write_run_finish).
258 display_output: bool,
262 pub fn new() -> Options {
264 display_output: false,
// Builder-style setter consuming self.
268 pub fn display_output(mut self, display_output: bool) -> Options {
269 self.display_output = display_output;
// NOTE(review): fragment — lines 278, 280-281, 283, 286-287, 289, 292-295
// missing (Ok arms, None arm, closing braces).
274 // The default console test runner. It accepts the command line
275 // arguments and a vector of test_descs.
// Parses CLI options, then either lists tests or runs them; exits with
// status 101 when any test failed. Panics on option-parse or I/O errors.
276 pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>, options: Options) {
277 let mut opts = match parse_opts(args) {
279 Some(Err(msg)) => panic!("{:?}", msg),
// Caller-supplied Options override whatever parse_opts produced.
282 opts.options = options;
// Presumably guarded by `if opts.list` on a missing line — TODO confirm.
284 if let Err(e) = list_tests_console(&opts, tests) {
285 panic!("io error when listing tests: {:?}", e);
288 match run_tests_console(&opts, tests) {
290 Ok(false) => std::process::exit(101),
291 Err(e) => panic!("io error when running tests: {:?}", e),
296 // A variant optimized for invocation with a static test vector.
297 // This will panic (intentionally) when fed any dynamic tests, because
298 // it is copying the static values out into a dynamic vector and cannot
299 // copy dynamic values. It is doing this because from this point on
300 // a Vec<TestDescAndFn> is used in order to effect ownership-transfer
301 // semantics into parallel test runners, which in turn requires a Vec<>
302 // rather than a &[].
// NOTE(review): fragment — the map closure header, StaticTestFn arm header,
// struct literal braces, and collect() are on missing lines (306-309,
// 312-313, 315, 318-319, 321-323 absent).
303 pub fn test_main_static(tests: &[TestDescAndFn]) {
304 let args = env::args().collect::<Vec<_>>();
// Clone each static test's desc and copy its fn pointer into an owned vec.
305 let owned_tests = tests.iter()
310 testfn: StaticTestFn(f),
311 desc: t.desc.clone(),
314 StaticBenchFn(f) => {
316 testfn: StaticBenchFn(f),
317 desc: t.desc.clone(),
// Dynamic tests cannot be copied out of a shared slice, hence the panic.
320 _ => panic!("non-static tests passed to test::test_main_static"),
324 test_main(&args, owned_tests, Options::new())
// NOTE(review): fragment — ColorConfig variants (AutoColor/AlwaysColor/
// NeverColor, referenced in parse_opts and use_color) and several TestOpts
// fields/defaults are on missing lines.
327 #[derive(Copy, Clone, Debug)]
328 pub enum ColorConfig {
// Parsed command-line configuration for a test run.
335 pub struct TestOpts {
337 pub filter: Option<String>,
338 pub filter_exact: bool,
339 pub run_ignored: bool,
341 pub bench_benchmarks: bool,
342 pub logfile: Option<PathBuf>,
344 pub color: ColorConfig,
346 pub test_threads: Option<usize>,
347 pub skip: Vec<String>,
348 pub options: Options,
// Default construction (visible subset); enclosing `impl TestOpts` header and
// the remaining field initializers are missing.
353 fn new() -> TestOpts {
360 bench_benchmarks: false,
367 options: Options::new(),
372 /// Result of parsing the options.
373 pub type OptRes = Result<TestOpts, String>;
// Declares all CLI flags understood by the harness via getopts.
// NOTE(review): line 383 (the logfile help continuation and its "PATH" hint)
// is missing, so the vec! literal is incomplete as shown.
375 #[cfg_attr(rustfmt, rustfmt_skip)]
376 fn optgroups() -> Vec<getopts::OptGroup> {
377 vec![getopts::optflag("", "ignored", "Run ignored tests"),
378 getopts::optflag("", "test", "Run tests and not benchmarks"),
379 getopts::optflag("", "bench", "Run benchmarks instead of tests"),
380 getopts::optflag("", "list", "List all tests and benchmarks"),
381 getopts::optflag("h", "help", "Display this message (longer with --help)"),
382 getopts::optopt("", "logfile", "Write logs to the specified file instead \
384 getopts::optflag("", "nocapture", "don't capture stdout/stderr of each \
385 task, allow printing directly"),
386 getopts::optopt("", "test-threads", "Number of threads used for running tests \
387 in parallel", "n_threads"),
388 getopts::optmulti("", "skip", "Skip tests whose names contain FILTER (this flag can \
389 be used multiple times)","FILTER"),
390 getopts::optflag("q", "quiet", "Display one character per test instead of one line"),
391 getopts::optflag("", "exact", "Exactly match filters rather than by substring"),
392 getopts::optopt("", "color", "Configure coloring of output:
393 auto = colorize if stdout is a tty and tests are run on serially (default);
394 always = always colorize output;
395 never = never colorize output;", "auto|always|never")]
// Prints the long --help text. NOTE(review): the surrounding println!/format
// machinery is on missing lines (400-401, 404, 407-408, 412-414, 416, 426);
// what remains is mostly the raw help-string body, which must not be edited
// (it is runtime output).
398 fn usage(binary: &str) {
399 let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
402 The FILTER string is tested against the name of all tests, and only those
403 tests whose names contain the filter are run.
405 By default, all tests are run in parallel. This can be altered with the
406 --test-threads flag or the RUST_TEST_THREADS environment variable when running
409 All tests have their standard output and standard error captured by default.
410 This can be overridden with the --nocapture flag or setting RUST_TEST_NOCAPTURE
411 environment variable to a value other than "0". Logging is not captured by default.
415 #[test] - Indicates a function is a test to be run. This function
417 #[bench] - Indicates a function is a benchmark to be run. This
418 function takes one argument (test::Bencher).
419 #[should_panic] - This function (also labeled with #[test]) will only pass if
420 the code causes a panic (an assertion failure or panic!)
421 A message may be provided, which the failure string must
422 contain: #[should_panic(expected = "foo")].
423 #[ignore] - When applied to a function which is already attributed as a
424 test, then the test runner will ignore these tests during
425 normal test runs. Running with --ignored will run these
427 usage = getopts::usage(&message, &optgroups()));
430 // Parses command line arguments into test options
// Returns None when --help was requested (usage printed instead), otherwise
// Some(Ok(opts)) or Some(Err(message)).
// NOTE(review): fragment — Ok arms, None/else branches, and several struct
// fields are on missing lines (434, 436-437, 439-442, 445-448, 453, 456,
// 459, 461, 464-467, 469, 471, 473-481, 486-487, 489-493, 495-497, 501,
// 503-504, 508-512 absent).
431 pub fn parse_opts(args: &[String]) -> Option<OptRes> {
// Skip argv[0] (the binary name) before handing to getopts.
432 let args_ = &args[1..];
433 let matches = match getopts::getopts(args_, &optgroups()) {
435 Err(f) => return Some(Err(f.to_string())),
438 if matches.opt_present("h") {
// First free argument, if any, is the name filter.
443 let filter = if !matches.free.is_empty() {
444 Some(matches.free[0].clone())
449 let run_ignored = matches.opt_present("ignored");
450 let quiet = matches.opt_present("quiet");
451 let exact = matches.opt_present("exact");
452 let list = matches.opt_present("list");
454 let logfile = matches.opt_str("logfile");
455 let logfile = logfile.map(|s| PathBuf::from(&s));
457 let bench_benchmarks = matches.opt_present("bench");
458 let run_tests = !bench_benchmarks || matches.opt_present("test");
// The env var overrides the flag; any value other than "0" enables nocapture.
460 let mut nocapture = matches.opt_present("nocapture");
462 nocapture = match env::var("RUST_TEST_NOCAPTURE") {
463 Ok(val) => &val != "0",
// --test-threads must parse as a usize > 0; both failure modes return Err.
468 let test_threads = match matches.opt_str("test-threads") {
470 match n_str.parse::<usize>() {
472 return Some(Err(format!("argument for --test-threads must not be 0"))),
475 return Some(Err(format!("argument for --test-threads must be a number > 0 \
482 let color = match matches.opt_str("color").as_ref().map(|s| &**s) {
483 Some("auto") | None => AutoColor,
484 Some("always") => AlwaysColor,
485 Some("never") => NeverColor,
488 return Some(Err(format!("argument for --color must be auto, always, or never (was \
494 let test_opts = TestOpts {
498 run_ignored: run_ignored,
499 run_tests: run_tests,
500 bench_benchmarks: bench_benchmarks,
502 nocapture: nocapture,
505 test_threads: test_threads,
506 skip: matches.opt_strs("skip"),
507 options: Options::new(),
// NOTE(review): fragment — BenchSamples' mb_s field, TestResult's TrOk/
// TrFailed/TrFailedMsg/TrIgnored variants, OutputLocation's Raw variant, and
// most ConsoleTestState counters (passed/failed/ignored/measured/total/
// filtered_out/quiet/use_color/metrics/options) are on missing lines; they
// are all referenced elsewhere in this file.
513 #[derive(Clone, PartialEq)]
514 pub struct BenchSamples {
515 ns_iter_summ: stats::Summary,
519 #[derive(Clone, PartialEq)]
520 pub enum TestResult {
525 TrMetrics(MetricMap),
526 TrBench(BenchSamples),
// NOTE(review): unsafe Send claim — presumably sound because the non-Send
// payloads are only moved, not shared; cannot verify from this fragment.
529 unsafe impl Send for TestResult {}
// Where console output goes: a color-capable terminal or a raw writer.
531 enum OutputLocation<T> {
532 Pretty(Box<term::StdoutTerminal>),
// Mutable state accumulated while printing a test run.
536 struct ConsoleTestState<T> {
537 log_out: Option<File>,
538 out: OutputLocation<T>,
548 failures: Vec<(TestDesc, Vec<u8>)>,
549 not_failures: Vec<(TestDesc, Vec<u8>)>,
550 max_name_len: usize, // number of columns to fill when aligning names
// NOTE(review): large fragment of the ConsoleTestState printing impl. Interior
// lines are missing throughout (match headers, None arms, closing braces,
// counter fields in the struct literal, color-reset calls, etc.). Code kept
// byte-for-byte; comments only.
554 impl<T: Write> ConsoleTestState<T> {
// Open the logfile if requested and pick Pretty (term) vs Raw (stdout)
// output. The `_: Option<T>` parameter only pins the type parameter.
555 pub fn new(opts: &TestOpts, _: Option<T>) -> io::Result<ConsoleTestState<io::Stdout>> {
556 let log_out = match opts.logfile {
557 Some(ref path) => Some(File::create(path)?),
560 let out = match term::stdout() {
561 None => Raw(io::stdout()),
562 Some(t) => Pretty(t),
565 Ok(ConsoleTestState {
568 use_color: use_color(opts),
576 metrics: MetricMap::new(),
577 failures: Vec::new(),
578 not_failures: Vec::new(),
580 options: opts.options,
// Per-result one-liners: verbose word in normal mode, single char in --quiet.
584 pub fn write_ok(&mut self) -> io::Result<()> {
585 self.write_short_result("ok", ".", term::color::GREEN)
588 pub fn write_failed(&mut self) -> io::Result<()> {
589 self.write_short_result("FAILED", "F", term::color::RED)
592 pub fn write_ignored(&mut self) -> io::Result<()> {
593 self.write_short_result("ignored", "i", term::color::YELLOW)
596 pub fn write_metric(&mut self) -> io::Result<()> {
597 self.write_pretty("metric", term::color::CYAN)
600 pub fn write_bench(&mut self) -> io::Result<()> {
601 self.write_pretty("bench", term::color::CYAN)
// Chooses quiet vs verbose form; presumably branches on self.quiet on a
// missing line — TODO confirm.
604 pub fn write_short_result(&mut self, verbose: &str, quiet: &str, color: term::color::Color)
607 self.write_pretty(quiet, color)
609 self.write_pretty(verbose, color)?;
610 self.write_plain("\n")
// Colored write: sets fg color on a Pretty terminal (guarded by use_color on
// missing lines), plain write_all on Raw.
614 pub fn write_pretty(&mut self, word: &str, color: term::color::Color) -> io::Result<()> {
616 Pretty(ref mut term) => {
620 term.write_all(word.as_bytes())?;
626 Raw(ref mut stdout) => {
627 stdout.write_all(word.as_bytes())?;
// Uncolored write to whichever output is active.
633 pub fn write_plain<S: AsRef<str>>(&mut self, s: S) -> io::Result<()> {
636 Pretty(ref mut term) => {
637 term.write_all(s.as_bytes())?;
640 Raw(ref mut stdout) => {
641 stdout.write_all(s.as_bytes())?;
// "running N test(s)" banner; noun is pluralized when len != 1.
647 pub fn write_run_start(&mut self, len: usize) -> io::Result<()> {
649 let noun = if len != 1 {
654 self.write_plain(&format!("\nrunning {} {}\n", len, noun))
// In quiet mode only right-padded (bench/metric) names print a start line.
657 pub fn write_test_start(&mut self, test: &TestDesc, align: NamePadding) -> io::Result<()> {
658 if self.quiet && align != PadOnRight {
661 let name = test.padded_name(self.max_name_len, align);
662 self.write_plain(&format!("test {} ... ", name))
// Dispatch one result to the matching writer; metrics/benches also print
// their formatted summary.
666 pub fn write_result(&mut self, result: &TestResult) -> io::Result<()> {
668 TrOk => self.write_ok(),
669 TrFailed | TrFailedMsg(_) => self.write_failed(),
670 TrIgnored => self.write_ignored(),
671 TrMetrics(ref mm) => {
672 self.write_metric()?;
673 self.write_plain(&format!(": {}\n", mm.fmt_metrics()))
677 self.write_plain(&format!(": {}\n", fmt_bench_samples(bs)))
// Warning emitted when a test exceeds TEST_WARN_TIMEOUT_S.
682 pub fn write_timeout(&mut self, desc: &TestDesc) -> io::Result<()> {
683 self.write_plain(&format!("test {} has been running for over {} seconds\n",
685 TEST_WARN_TIMEOUT_S))
// Append to the logfile if one was opened; otherwise a no-op (missing arm).
688 pub fn write_log<S: AsRef<str>>(&mut self, msg: S) -> io::Result<()> {
689 let msg = msg.as_ref();
692 Some(ref mut o) => o.write_all(msg.as_bytes()),
// One log line per test: "<status> <name>" (format on missing lines).
696 pub fn write_log_result(&mut self, test: &TestDesc, result: &TestResult) -> io::Result<()> {
700 TrOk => "ok".to_owned(),
701 TrFailed => "failed".to_owned(),
702 TrFailedMsg(ref msg) => format!("failed: {}", msg),
703 TrIgnored => "ignored".to_owned(),
704 TrMetrics(ref mm) => mm.fmt_metrics(),
705 TrBench(ref bs) => fmt_bench_samples(bs),
// Failure report: captured stdout blocks first, then the list of failed
// names. Note line 729 (a sort of `failures` before printing, per the unit
// test below) is missing from this view.
710 pub fn write_failures(&mut self) -> io::Result<()> {
711 self.write_plain("\nfailures:\n")?;
712 let mut failures = Vec::new();
713 let mut fail_out = String::new();
714 for &(ref f, ref stdout) in &self.failures {
715 failures.push(f.name.to_string());
716 if !stdout.is_empty() {
717 fail_out.push_str(&format!("---- {} stdout ----\n\t", f.name));
718 let output = String::from_utf8_lossy(stdout);
719 fail_out.push_str(&output);
720 fail_out.push_str("\n");
723 if !fail_out.is_empty() {
724 self.write_plain("\n")?;
725 self.write_plain(&fail_out)?;
728 self.write_plain("\nfailures:\n")?;
730 for name in &failures {
731 self.write_plain(&format!(" {}\n", name))?;
// Mirror of write_failures for passing tests (used with --display-output).
736 pub fn write_outputs(&mut self) -> io::Result<()> {
737 self.write_plain("\nsuccesses:\n")?;
738 let mut successes = Vec::new();
739 let mut stdouts = String::new();
740 for &(ref f, ref stdout) in &self.not_failures {
741 successes.push(f.name.to_string());
742 if !stdout.is_empty() {
743 stdouts.push_str(&format!("---- {} stdout ----\n\t", f.name));
744 let output = String::from_utf8_lossy(stdout);
745 stdouts.push_str(&output);
746 stdouts.push_str("\n");
749 if !stdouts.is_empty() {
750 self.write_plain("\n")?;
751 self.write_plain(&stdouts)?;
754 self.write_plain("\nsuccesses:\n")?;
756 for name in &successes {
757 self.write_plain(&format!(" {}\n", name))?;
// Final summary line; returns whether the whole run passed (failed == 0).
// The counter invariant is asserted before printing.
762 pub fn write_run_finish(&mut self) -> io::Result<bool> {
763 assert!(self.passed + self.failed + self.ignored + self.measured == self.total);
765 if self.options.display_output {
766 self.write_outputs()?;
768 let success = self.failed == 0;
770 self.write_failures()?;
773 self.write_plain("\ntest result: ")?;
775 // There's no parallelism at this point so it's safe to use color
776 self.write_pretty("ok", term::color::GREEN)?;
778 self.write_pretty("FAILED", term::color::RED)?;
780 let s = format!(". {} passed; {} failed; {} ignored; {} measured; {} filtered out\n\n",
786 self.write_plain(&s)?;
791 // Format a number with thousands separators
// NOTE(review): fragment — the separator push, `n % base` reduction, and
// trailing-flag update are on missing lines (799, 801, 803-814 absent).
// Walks the 10^9/10^6/10^3/10^0 groups; the first printed group is unpadded,
// later groups are zero-padded to 3 digits.
792 fn fmt_thousands_sep(mut n: usize, sep: char) -> String {
794 let mut output = String::new();
795 let mut trailing = false;
796 for &pow in &[9, 6, 3, 0] {
797 let base = 10_usize.pow(pow);
798 if pow == 0 || trailing || n / base != 0 {
800 output.write_fmt(format_args!("{}", n / base)).unwrap();
802 output.write_fmt(format_args!("{:03}", n / base)).unwrap();
// "median ns/iter (+/- max-min spread)", plus MB/s when a throughput was
// recorded (the mb_s guard is on a missing line).
815 pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
817 let mut output = String::new();
819 let median = bs.ns_iter_summ.median as usize;
820 let deviation = (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize;
822 output.write_fmt(format_args!("{:>11} ns/iter (+/- {})",
823 fmt_thousands_sep(median, ','),
824 fmt_thousands_sep(deviation, ',')))
827 output.write_fmt(format_args!(" = {} MB/s", bs.mb_s)).unwrap();
832 // List the tests to console, and optionally to logfile. Filters are honored.
// NOTE(review): fragment — counter declarations (ntest/nbench/nmetric),
// the quiet guard, and the final Ok(()) are on missing lines.
833 pub fn list_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<()> {
834 let mut st = ConsoleTestState::new(opts, None::<io::Stdout>)?;
// Only tests surviving filter_tests are listed; each prints "name: kind".
840 for test in filter_tests(&opts, tests) {
843 let TestDescAndFn { desc: TestDesc { name, .. }, testfn } = test;
845 let fntype = match testfn {
846 StaticTestFn(..) | DynTestFn(..) => { ntest += 1; "test" },
847 StaticBenchFn(..) | DynBenchFn(..) => { nbench += 1; "benchmark" },
848 StaticMetricFn(..) | DynMetricFn(..) => { nmetric += 1; "metric" },
851 st.write_plain(format!("{}: {}\n", name, fntype))?;
852 st.write_log(format!("{} {}\n", fntype, name))?;
// Local helper: "1 test" vs "N tests".
855 fn plural(count: u32, s: &str) -> String {
857 1 => format!("{} {}", 1, s),
858 n => format!("{} {}s", n, s),
// Summary footer, skipped when nothing was listed.
863 if ntest != 0 || nbench != 0 || nmetric != 0 {
864 st.write_plain("\n")?;
866 st.write_plain(format!("{}, {}, {}\n",
867 plural(ntest, "test"),
868 plural(nbench, "benchmark"),
869 plural(nmetric, "metric")))?;
875 // A simple console test runner
// NOTE(review): fragment — the TeResult match arms for TrOk/TrFailed and
// the pass/fail counter increments are on missing lines (887-889, 891, 893,
// 896-897, 899-902, 906-909, 911, 913, 917, 919-925, 929, 931-932, 936
// absent).
876 pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<bool> {
// Event callback: translates runner events into console output and updates
// the accumulated ConsoleTestState counters/metrics.
878 fn callback<T: Write>(event: &TestEvent, st: &mut ConsoleTestState<T>) -> io::Result<()> {
879 match (*event).clone() {
880 TeFiltered(ref filtered_tests) => st.write_run_start(filtered_tests.len()),
881 TeFilteredOut(filtered_out) => Ok(st.filtered_out = filtered_out),
882 TeWait(ref test, padding) => st.write_test_start(test, padding),
883 TeTimeout(ref test) => st.write_timeout(test),
884 TeResult(test, result, stdout) => {
885 st.write_log_result(&test, &result)?;
886 st.write_result(&result)?;
890 st.not_failures.push((test, stdout));
892 TrIgnored => st.ignored += 1,
// Per-test metrics are namespaced as "<testname>.<key>" in st.metrics.
894 let tname = test.name;
895 let MetricMap(mm) = mm;
898 .insert_metric(&format!("{}.{}", tname, k), v.value, v.noise);
// Bench results record median and (max - min) spread as the metric.
903 st.metrics.insert_metric(test.name.as_slice(),
904 bs.ns_iter_summ.median,
905 bs.ns_iter_summ.max - bs.ns_iter_summ.min);
910 st.failures.push((test, stdout));
912 TrFailedMsg(msg) => {
// The failure note is appended to the captured stdout before recording.
914 let mut stdout = stdout;
915 stdout.extend_from_slice(
916 format!("note: {}", msg).as_bytes()
918 st.failures.push((test, stdout));
926 let mut st = ConsoleTestState::new(opts, None::<io::Stdout>)?;
// Only padded (bench/metric) names count toward column width.
927 fn len_if_padded(t: &TestDescAndFn) -> usize {
928 match t.testfn.padding() {
930 PadOnRight => t.desc.name.as_slice().len(),
933 if let Some(t) = tests.iter().max_by_key(|t| len_if_padded(*t)) {
934 let n = t.desc.name.as_slice();
935 st.max_name_len = n.len();
937 run_tests(opts, tests, |x| callback(&x, &mut st))?;
938 return st.write_run_finish();
// Unit test: failures pushed out of order (b then a) must be printed sorted
// (a before b). NOTE(review): the #[test] attribute and most struct-literal
// fields are on missing lines (939-941, 945, 947-948, 951, 953-954, 956,
// 958-966, 971-972, 977-978, 982-983 absent).
942 fn should_sort_failures_before_printing_them() {
943 let test_a = TestDesc {
944 name: StaticTestName("a"),
946 should_panic: ShouldPanic::No,
949 let test_b = TestDesc {
950 name: StaticTestName("b"),
952 should_panic: ShouldPanic::No,
// State is built over a Raw(Vec<u8>) sink so output can be inspected.
955 let mut st = ConsoleTestState {
957 out: Raw(Vec::new()),
967 metrics: MetricMap::new(),
968 failures: vec![(test_b, Vec::new()), (test_a, Vec::new())],
969 options: Options::new(),
970 not_failures: Vec::new(),
973 st.write_failures().unwrap();
974 let s = match st.out {
975 Raw(ref m) => String::from_utf8_lossy(&m[..]),
976 Pretty(_) => unreachable!(),
// "a" must appear before "b" in the rendered failure list.
979 let apos = s.find("a").unwrap();
980 let bpos = s.find("b").unwrap();
981 assert!(apos < bpos);
// Decide whether colored output is enabled: auto = tty and not --nocapture;
// Always/Never arms are on missing lines (985, 987-991 absent).
984 fn use_color(opts: &TestOpts) -> bool {
986 AutoColor => !opts.nocapture && stdout_isatty(),
// Platform-specific tty detection. The unix cfg attribute for the libc
// variant and the windows cfg for the WinAPI variant are on missing lines.
992 #[cfg(target_os = "redox")]
993 fn stdout_isatty() -> bool {
994 // FIXME: Implement isatty on Redox
998 fn stdout_isatty() -> bool {
999 unsafe { libc::isatty(libc::STDOUT_FILENO) != 0 }
1002 fn stdout_isatty() -> bool {
// Windows: a handle is a console iff GetConsoleMode succeeds on it.
1005 type HANDLE = *mut u8;
1006 type LPDWORD = *mut u32;
1007 const STD_OUTPUT_HANDLE: DWORD = -11i32 as DWORD;
1009 fn GetStdHandle(which: DWORD) -> HANDLE;
1010 fn GetConsoleMode(hConsoleHandle: HANDLE, lpMode: LPDWORD) -> BOOL;
1013 let handle = GetStdHandle(STD_OUTPUT_HANDLE);
1015 GetConsoleMode(handle, &mut out) != 0
// Events emitted by run_tests to its callback (consumed by
// run_tests_console::callback above). Derive line(s) are missing.
1020 pub enum TestEvent {
1021 TeFiltered(Vec<TestDesc>),
1022 TeWait(TestDesc, NamePadding),
1023 TeResult(TestDesc, TestResult, Vec<u8>),
1024 TeTimeout(TestDesc),
1025 TeFilteredOut(usize),
// Message sent back from a test thread: (desc, result, captured stdout).
1028 pub type MonitorMsg = (TestDesc, TestResult, Vec<u8>);
// Core scheduler: filters tests, runs them up to `concurrency` at a time via
// channel-connected worker threads, reports timeouts, then runs benchmarks
// and metrics serially. NOTE(review): fragment — many lines are missing
// (closing braces, Some arms, `pending` bookkeeping at 1107/1128, `let res`,
// sorts/collects at 1049-1050, partition arm at 1057-1060, timeout math at
// 1089-1094, etc.).
1031 pub fn run_tests<F>(opts: &TestOpts, tests: Vec<TestDescAndFn>, mut callback: F) -> io::Result<()>
1032 where F: FnMut(TestEvent) -> io::Result<()>
1034 use std::collections::HashMap;
1035 use std::sync::mpsc::RecvTimeoutError;
1037 let tests_len = tests.len();
1039 let mut filtered_tests = filter_tests(opts, tests);
// Unless benchmarking, benches are downgraded to single-iteration tests.
1040 if !opts.bench_benchmarks {
1041 filtered_tests = convert_benchmarks_to_tests(filtered_tests);
1044 let filtered_out = tests_len - filtered_tests.len();
1045 callback(TeFilteredOut(filtered_out))?;
1047 let filtered_descs = filtered_tests.iter()
1048 .map(|t| t.desc.clone())
1051 callback(TeFiltered(filtered_descs))?;
// Split plain tests (run concurrently) from benches/metrics (run serially
// at the end).
1053 let (filtered_tests, filtered_benchs_and_metrics): (Vec<_>, _) =
1054 filtered_tests.into_iter().partition(|e| {
1056 StaticTestFn(_) | DynTestFn(_) => true,
1061 let concurrency = match opts.test_threads {
1063 None => get_concurrency(),
// Reverse so pop() takes tests in original order.
1066 let mut remaining = filtered_tests;
1067 remaining.reverse();
1068 let mut pending = 0;
1070 let (tx, rx) = channel::<MonitorMsg>();
// Deadline per in-flight test, for the TEST_WARN_TIMEOUT_S warning.
1072 let mut running_tests: HashMap<TestDesc, Instant> = HashMap::new();
// Remove and return every test whose warning deadline has passed.
1074 fn get_timed_out_tests(running_tests: &mut HashMap<TestDesc, Instant>) -> Vec<TestDesc> {
1075 let now = Instant::now();
1076 let timed_out = running_tests.iter()
1077 .filter_map(|(desc, timeout)| if &now >= timeout { Some(desc.clone())} else { None })
1079 for test in &timed_out {
1080 running_tests.remove(test);
// Time until the earliest in-flight deadline; None when nothing is running.
1085 fn calc_timeout(running_tests: &HashMap<TestDesc, Instant>) -> Option<Duration> {
1086 running_tests.values().min().map(|next_timeout| {
1087 let now = Instant::now();
1088 if *next_timeout >= now {
// Main loop: keep up to `concurrency` tests in flight, harvest results.
1095 while pending > 0 || !remaining.is_empty() {
1096 while pending < concurrency && !remaining.is_empty() {
1097 let test = remaining.pop().unwrap();
1098 if concurrency == 1 {
1099 // We are doing one test at a time so we can print the name
1100 // of the test before we run it. Useful for debugging tests
1101 // that hang forever.
1102 callback(TeWait(test.desc.clone(), test.testfn.padding()))?;
1104 let timeout = Instant::now() + Duration::from_secs(TEST_WARN_TIMEOUT_S);
1105 running_tests.insert(test.desc.clone(), timeout);
1106 run_test(opts, !opts.run_tests, test, tx.clone());
// Wait for a result, surfacing timeout warnings while waiting; loops until
// a real message or disconnect arrives (retry-on-Timeout structure is on
// missing lines).
1112 if let Some(timeout) = calc_timeout(&running_tests) {
1113 res = rx.recv_timeout(timeout);
1114 for test in get_timed_out_tests(&mut running_tests) {
1115 callback(TeTimeout(test))?;
1117 if res != Err(RecvTimeoutError::Timeout) {
1121 res = rx.recv().map_err(|_| RecvTimeoutError::Disconnected);
1126 let (desc, result, stdout) = res.unwrap();
1127 running_tests.remove(&desc);
// With concurrency the name couldn't be printed up front; print it now.
1129 if concurrency != 1 {
1130 callback(TeWait(desc.clone(), PadNone))?;
1132 callback(TeResult(desc, result, stdout))?;
1136 if opts.bench_benchmarks {
1137 // All benchmarks run at the end, in serial.
1138 // (this includes metric fns)
1139 for b in filtered_benchs_and_metrics {
1140 callback(TeWait(b.desc.clone(), b.testfn.padding()))?;
1141 run_test(opts, false, b, tx.clone());
1142 let (test, result, stdout) = rx.recv().unwrap();
1143 callback(TeResult(test, result, stdout))?;
// Worker-thread count: RUST_TEST_THREADS if set (must parse as a positive
// integer, else panic), otherwise the detected CPU count. Fragment — Ok arm
// header and inner match lines (1152, 1154, 1156, 1158-1161, 1163-1165) are
// missing.
1149 #[allow(deprecated)]
1150 fn get_concurrency() -> usize {
1151 return match env::var("RUST_TEST_THREADS") {
1153 let opt_n: Option<usize> = s.parse().ok();
1155 Some(n) if n > 0 => n,
1157 panic!("RUST_TEST_THREADS is `{}`, should be a positive integer.",
1162 Err(..) => num_cpus(),
// Per-platform CPU-count detection (no num_cpus crate in-tree). Fragment —
// cfg(windows) attribute, #[repr] / extern blocks, sysctl argument lists, and
// returns are partly on missing lines.
1167 fn num_cpus() -> usize {
// Windows: read dwNumberOfProcessors from GetSystemInfo.
1169 struct SYSTEM_INFO {
1170 wProcessorArchitecture: u16,
1173 lpMinimumApplicationAddress: *mut u8,
1174 lpMaximumApplicationAddress: *mut u8,
1175 dwActiveProcessorMask: *mut u8,
1176 dwNumberOfProcessors: u32,
1177 dwProcessorType: u32,
1178 dwAllocationGranularity: u32,
1179 wProcessorLevel: u16,
1180 wProcessorRevision: u16,
1183 fn GetSystemInfo(info: *mut SYSTEM_INFO) -> i32;
1186 let mut sysinfo = std::mem::zeroed();
1187 GetSystemInfo(&mut sysinfo);
1188 sysinfo.dwNumberOfProcessors as usize
1192 #[cfg(target_os = "redox")]
1193 fn num_cpus() -> usize {
1194 // FIXME: Implement num_cpus on Redox
1198 #[cfg(any(target_os = "linux",
1199 target_os = "macos",
1201 target_os = "android",
1202 target_os = "solaris",
1203 target_os = "emscripten",
1204 target_os = "fuchsia"))]
// POSIX-ish: online processor count via sysconf.
1205 fn num_cpus() -> usize {
1206 unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as usize }
1209 #[cfg(any(target_os = "freebsd",
1210 target_os = "dragonfly",
1211 target_os = "bitrig",
1212 target_os = "netbsd"))]
// BSDs: try sysconf first (per the visible flow), fall back to
// sysctl(CTL_HW, HW_NCPU).
1213 fn num_cpus() -> usize {
1216 let mut cpus: libc::c_uint = 0;
1217 let mut cpus_size = std::mem::size_of_val(&cpus);
1220 cpus = libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as libc::c_uint;
1223 let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
1225 libc::sysctl(mib.as_mut_ptr(),
1227 &mut cpus as *mut _ as *mut _,
1228 &mut cpus_size as *mut _ as *mut _,
1239 #[cfg(target_os = "openbsd")]
// OpenBSD: sysctl(CTL_HW, HW_NCPU) only.
1240 fn num_cpus() -> usize {
1243 let mut cpus: libc::c_uint = 0;
1244 let mut cpus_size = std::mem::size_of_val(&cpus);
1245 let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
1248 libc::sysctl(mib.as_mut_ptr(),
1250 &mut cpus as *mut _ as *mut _,
1251 &mut cpus_size as *mut _ as *mut _,
1261 #[cfg(target_os = "haiku")]
1262 fn num_cpus() -> usize {
// Applies, in order: the positive name filter (--exact or substring), the
// --skip filters, the --ignored flip (unignore ignored tests and drop the
// rest), then sorts by name. Fragment — None arm, .filter()/.collect()
// plumbing, and the else branch returning `filtered` unchanged are on
// missing lines.
1268 pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
1269 let mut filtered = tests;
1271 // Remove tests that don't match the test filter
1272 filtered = match opts.filter {
1274 Some(ref filter) => {
1275 filtered.into_iter()
1277 if opts.filter_exact {
1278 test.desc.name.as_slice() == &filter[..]
1280 test.desc.name.as_slice().contains(&filter[..])
1287 // Skip tests that match any of the skip filters
1288 filtered = filtered.into_iter()
1289 .filter(|t| !opts.skip.iter().any(|sf| {
1290 if opts.filter_exact {
1291 t.desc.name.as_slice() == &sf[..]
1293 t.desc.name.as_slice().contains(&sf[..])
1298 // Maybe pull out the ignored test and unignore them
// NOTE(review): given the `!` here, when --ignored is NOT passed this branch
// runs `filter`, which keeps only ignored tests with ignore cleared — that
// reads inverted relative to the comment; presumably the missing lines
// (1299-1301, 1307-1312, 1314-1315) restructure this. TODO verify against
// the full file before drawing conclusions.
1299 filtered = if !opts.run_ignored {
1302 fn filter(test: TestDescAndFn) -> Option<TestDescAndFn> {
1303 if test.desc.ignore {
1304 let TestDescAndFn {desc, testfn} = test;
1305 Some(TestDescAndFn {
1306 desc: TestDesc { ignore: false, ..desc },
1313 filtered.into_iter().filter_map(filter).collect()
1316 // Sort the tests alphabetically
1317 filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));
// Rewraps each bench fn as a DynTestFn that runs the bench body exactly once
// (bench::run_once), so `cargo test` exercises benches without timing them.
// Fragment — closure/brace closers, the passthrough arm for non-bench fns,
// and the TestDescAndFn reconstruction are on missing lines (1330-1332,
// 1337-1348 absent).
1322 pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
1323 // convert benchmarks to tests, if we're not benchmarking them
1324 tests.into_iter().map(|x| {
1325 let testfn = match x.testfn {
1326 DynBenchFn(bench) => {
1327 DynTestFn(Box::new(move |()| {
1328 bench::run_once(|b| {
// Short-backtrace frame keeps user-visible panics tidy.
1329 __rust_begin_short_backtrace(|| bench.run(b))
1333 StaticBenchFn(benchfn) => {
1334 DynTestFn(Box::new(move |()| {
1335 bench::run_once(|b| {
1336 __rust_begin_short_backtrace(|| benchfn(b))
// Executes one test/bench/metric and sends (desc, result, captured stdout)
// over monitor_ch. Fragment — the force_ignore parameter line (1350), early
// return, match header on testfn, and several braces are missing.
1349 pub fn run_test(opts: &TestOpts,
1351 test: TestDescAndFn,
1352 monitor_ch: Sender<MonitorMsg>) {
1354 let TestDescAndFn {desc, testfn} = test;
// Ignored (or force-ignored) tests report TrIgnored without running.
1356 if force_ignore || desc.ignore {
1357 monitor_ch.send((desc, TrIgnored, Vec::new())).unwrap();
// Runs a plain test fn, capturing panics and (unless nocapture) stdout/
// stderr into a shared buffer.
1361 fn run_test_inner(desc: TestDesc,
1362 monitor_ch: Sender<MonitorMsg>,
1364 testfn: Box<FnBox<()>>) {
// Sink forwards writes into the shared capture buffer.
1365 struct Sink(Arc<Mutex<Vec<u8>>>);
1366 impl Write for Sink {
1367 fn write(&mut self, data: &[u8]) -> io::Result<usize> {
1368 Write::write(&mut *self.0.lock().unwrap(), data)
1370 fn flush(&mut self) -> io::Result<()> {
1375 // Buffer for capturing standard I/O
1376 let data = Arc::new(Mutex::new(Vec::new()));
1377 let data2 = data.clone();
1379 let name = desc.name.clone();
1380 let runtest = move || {
// Swap in capture sinks, remembering the old handles to restore after.
1381 let oldio = if !nocapture {
1383 io::set_print(Some(Box::new(Sink(data2.clone())))),
1384 io::set_panic(Some(Box::new(Sink(data2))))
// A panic in the test is caught here and classified by calc_result.
1390 let result = catch_unwind(AssertUnwindSafe(|| {
1394 if let Some((printio, panicio)) = oldio {
1395 io::set_print(printio);
1396 io::set_panic(panicio);
1399 let test_result = calc_result(&desc, result);
1400 let stdout = data.lock().unwrap().to_vec();
1401 monitor_ch.send((desc.clone(), test_result, stdout)).unwrap();
1405 // If the platform is single-threaded we're just going to run
1406 // the test synchronously, regardless of the concurrency
1408 let supports_threads = !cfg!(target_os = "emscripten");
// Thread is named after the test for nicer panic messages.
1409 if supports_threads {
1410 let cfg = thread::Builder::new().name(match name {
1411 DynTestName(ref name) => name.clone(),
1412 StaticTestName(name) => name.to_owned(),
1414 cfg.spawn(runtest).unwrap();
// Bench/metric variants run inline and report synchronously.
1421 DynBenchFn(bencher) => {
1422 let bs = ::bench::benchmark(|harness| bencher.run(harness));
1423 monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
1426 StaticBenchFn(benchfn) => {
1427 let bs = ::bench::benchmark(|harness| (benchfn.clone())(harness));
1428 monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
1432 let mut mm = MetricMap::new();
1433 f.call_box(&mut mm);
1434 monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
1437 StaticMetricFn(f) => {
1438 let mut mm = MetricMap::new();
1440 monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
1444 let cb = move |()| {
1445 __rust_begin_short_backtrace(|| f.call_box(()))
1447 run_test_inner(desc, monitor_ch, opts.nocapture, Box::new(cb))
1450 run_test_inner(desc, monitor_ch, opts.nocapture,
1451 Box::new(move |()| __rust_begin_short_backtrace(f))),
1455 /// Fixed frame used to clean the backtrace with `RUST_BACKTRACE=1`.
// (fragment: the one-line body that invokes `f` is elided in this excerpt)
1457 fn __rust_begin_short_backtrace<F: FnOnce()>(f: F) {
// Map a `catch_unwind` outcome plus the test's `should_panic` expectation to
// a TestResult. (fragment: some match arms / closing braces elided here.)
1461 fn calc_result(desc: &TestDesc, task_result: Result<(), Box<Any + Send>>) -> TestResult {
1462 match (&desc.should_panic, task_result) {
// Success: either the test wasn't supposed to panic and returned normally,
// or it was supposed to panic and did.
1463 (&ShouldPanic::No, Ok(())) |
1464 (&ShouldPanic::Yes, Err(_)) => TrOk,
// Expected panic with a specific message: the payload is downcast as either
// a String or a &'static str (the two payload types `panic!` produces) and
// checked for the expected substring.
1465 (&ShouldPanic::YesWithMessage(msg), Err(ref err)) =>
1466 if err.downcast_ref::<String>()
1468 .or_else(|| err.downcast_ref::<&'static str>().map(|e| *e))
1469 .map(|e| e.contains(msg))
// Panicked, but the payload did not contain the expected string.
1473 TrFailedMsg(format!("Panic did not include expected string '{}'", msg))
// Construct an empty MetricMap backed by a BTreeMap (keeps metric names in
// sorted order for deterministic formatting).
1480 pub fn new() -> MetricMap {
1481 MetricMap(BTreeMap::new())
1484 /// Insert a named `value` (+/- `noise`) metric into the map. The value
1485 /// must be non-negative. The `noise` indicates the uncertainty of the
1486 /// metric, which doubles as the "noise range" of acceptable
1487 /// pairwise-regressions on this named value, when comparing from one
1488 /// metric to the next using `compare_to_old`.
1490 /// If `noise` is positive, then it means this metric is of a value
1491 /// you want to see grow smaller, so a change larger than `noise` in the
1492 /// positive direction represents a regression.
1494 /// If `noise` is negative, then it means this metric is of a value
1495 /// you want to see grow larger, so a change larger than `noise` in the
1496 /// negative direction represents a regression.
1497 pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
// (fragment: construction of the Metric value `m` is elided in this excerpt)
// Last insert for a given name wins — BTreeMap::insert replaces any
// existing entry.
1502 let MetricMap(ref mut map) = *self;
1503 map.insert(name.to_owned(), m);
// Render all metrics as "name: value (+/- noise)" entries; iteration order is
// the BTreeMap's sorted key order. (fragment: the final join/return line is
// elided in this excerpt.)
1506 pub fn fmt_metrics(&self) -> String {
1507 let MetricMap(ref mm) = *self;
1508 let v: Vec<String> = mm.iter()
1509 .map(|(k, v)| format!("{}: {} (+/- {})", *k, v.value, v.noise))
1518 /// A function that is opaque to the optimizer, to allow benchmarks to
1519 /// pretend to use outputs to assist in avoiding dead-code
1522 /// This function is a no-op, and does not even read from `dummy`.
// Compiled only on targets with inline-asm support; asm-less targets use the
// fallback definition below.
1523 #[cfg(not(any(all(target_os = "nacl", target_arch = "le32"),
1524 target_arch = "asmjs", target_arch = "wasm32")))]
1525 pub fn black_box<T>(dummy: T) -> T {
1526 // we need to "use" the argument in some way LLVM can't
// An empty asm block taking `&dummy` as an input forces LLVM to assume the
// value escapes, preventing it from optimizing the computation away.
1528 unsafe { asm!("" : : "r"(&dummy)) }
// Fallback for targets without inline asm (NaCl/asm.js/wasm): presumably just
// returns `dummy` unchanged, providing no optimizer barrier — the body is
// elided in this excerpt, so confirm against the full source.
1531 #[cfg(any(all(target_os = "nacl", target_arch = "le32"),
1532 target_arch = "asmjs", target_arch = "wasm32"))]
1534 pub fn black_box<T>(dummy: T) -> T {
1540 /// Callback for benchmark functions to run in their body.
// In Single mode (used by `run_once`) the closure is executed exactly once;
// otherwise the adaptive sampling loop in `iter` (free function) produces a
// statistical Summary which is stored on the Bencher.
1541 pub fn iter<T, F>(&mut self, mut inner: F)
1542 where F: FnMut() -> T
1544 if self.mode == BenchMode::Single {
1545 ns_iter_inner(&mut inner, 1);
1549 self.summary = Some(iter(&mut inner));
// Run a benchmark function against this Bencher and hand back whatever
// Summary `Bencher::iter` recorded — None if the function never called
// `iter`. (fragment: the call to `f` is elided in this excerpt.)
1552 pub fn bench<F>(&mut self, mut f: F) -> Option<stats::Summary>
1553 where F: FnMut(&mut Bencher)
1556 return self.summary;
// Convert a Duration to whole nanoseconds as u64. Note: overflows for
// durations longer than ~584 years, which is irrelevant for benchmark timing.
1560 fn ns_from_dur(dur: Duration) -> u64 {
1561 dur.as_secs() * 1_000_000_000 + (dur.subsec_nanos() as u64)
// Time `k` consecutive invocations of `inner` and return the total elapsed
// nanoseconds. (fragment: the `for _ in 0..k` loop body is elided in this
// excerpt.)
1564 fn ns_iter_inner<T, F>(inner: &mut F, k: u64) -> u64
1565 where F: FnMut() -> T
1567 let start = Instant::now();
1571 return ns_from_dur(start.elapsed());
// Adaptive benchmark sampling: grows the per-sample iteration count until
// timings converge or a time budget is exhausted, returning a statistical
// Summary. (fragment: several loop-control lines are elided in this excerpt.)
1575 pub fn iter<T, F>(inner: &mut F) -> stats::Summary
1576 where F: FnMut() -> T
1578 // Initial bench run to get ballpark figure.
1579 let ns_single = ns_iter_inner(inner, 1);
1581 // Try to estimate iter count for 1ms falling back to 1m
1582 // iterations if first run took < 1ns.
1583 let ns_target_total = 1_000_000; // 1ms
// max(1, ..) guards against division by zero when the first run measured 0ns.
1584 let mut n = ns_target_total / cmp::max(1, ns_single);
1586 // if the first run took more than 1ms we don't want to just
1587 // be left doing 0 iterations on every loop. The unfortunate
1588 // side effect of not being able to do as many runs is
1589 // automatically handled by the statistical analysis below
1590 // (i.e. larger error bars).
1593 let mut total_run = Duration::new(0, 0);
// Fixed 50-sample buffer, reused for both the n-iteration and 5n-iteration
// passes below.
1594 let samples: &mut [f64] = &mut [0.0_f64; 50];
1596 let loop_start = Instant::now();
// Pass 1: 50 samples of mean ns-per-iteration at n iterations each.
1598 for p in &mut *samples {
1599 *p = ns_iter_inner(inner, n) as f64 / n as f64;
// Winsorize (clamp the extreme 5% tails) to tame outliers before summarizing.
1602 stats::winsorize(samples, 5.0);
1603 let summ = stats::Summary::new(samples);
// Pass 2: same thing at 5n iterations, to detect per-iteration cost drift.
1605 for p in &mut *samples {
1606 let ns = ns_iter_inner(inner, 5 * n);
1607 *p = ns as f64 / (5 * n) as f64;
1610 stats::winsorize(samples, 5.0);
1611 let summ5 = stats::Summary::new(samples);
1613 let loop_run = loop_start.elapsed();
1615 // If we've run for 100ms and seem to have converged to a
// Converged when: spent >=100ms, low relative deviation, and the n- and
// 5n-medians agree to within the 5n pass's median absolute deviation.
1617 if loop_run > Duration::from_millis(100) && summ.median_abs_dev_pct < 1.0 &&
1618 summ.median - summ5.median < summ5.median_abs_dev {
1622 total_run = total_run + loop_run;
1623 // Longest we ever run for is 3s.
1624 if total_run > Duration::from_secs(3) {
1628 // If we overflow here just return the results so far. We check a
1629 // multiplier of 10 because we're about to multiply by 2 and the
1630 // next iteration of the loop will also multiply by 5 (to calculate
1631 // the summ5 result)
1632 n = match n.checked_mul(10) {
1644 use super::{Bencher, BenchSamples, BenchMode};
// Run a benchmark function under a fresh Auto-mode Bencher and package the
// result as BenchSamples. (fragment: struct-literal fields and closing braces
// are elided in this excerpt.)
1646 pub fn benchmark<F>(f: F) -> BenchSamples
1647 where F: FnMut(&mut Bencher)
1649 let mut bs = Bencher {
1650 mode: BenchMode::Auto,
1655 return match bs.bench(f) {
1656 Some(ns_iter_summ) => {
// Clamp the median to at least 1ns so the MB/s division below can't divide
// by zero.
1657 let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
1658 let mb_s = bs.bytes * 1000 / ns_iter;
1661 ns_iter_summ: ns_iter_summ,
1662 mb_s: mb_s as usize,
1666 // iter not called, so no data.
1667 // FIXME: error in this case?
// Fabricate a single zero sample so a Summary can still be produced.
1668 let samples: &mut [f64] = &mut [0.0_f64; 1];
1670 ns_iter_summ: stats::Summary::new(samples),
// Execute a benchmark function exactly once (Single mode), with no sampling
// or statistics — used for `--test`-style smoke runs of benches. (fragment:
// remaining fields and the call to `f` are elided in this excerpt.)
1677 pub fn run_once<F>(f: F)
1678 where F: FnMut(&mut Bencher)
1680 let mut bs = Bencher {
1681 mode: BenchMode::Single,
1691 use test::{TrFailed, TrFailedMsg, TrIgnored, TrOk, filter_tests, parse_opts, TestDesc,
1692 TestDescAndFn, TestOpts, run_test, MetricMap, StaticTestName, DynTestName,
1693 DynTestFn, ShouldPanic};
1694 use std::sync::mpsc::channel;
// With run_ignored off (TestOpts::new defaults), an `ignore`d test must not
// be reported as passed. (fragment: the `ignore: true` field and the body of
// `f` are elided in this excerpt.)
1699 pub fn do_not_run_ignored_tests() {
1703 let desc = TestDescAndFn {
1705 name: StaticTestName("whatever"),
1707 should_panic: ShouldPanic::No,
1709 testfn: DynTestFn(Box::new(move |()| f())),
1711 let (tx, rx) = channel();
1712 run_test(&TestOpts::new(), false, desc, tx);
1713 let (_, res, _) = rx.recv().unwrap();
1714 assert!(res != TrOk);
// An `ignore`d test must be reported as TrIgnored, not merely skipped
// silently. (fragment: descriptor fields are elided in this excerpt.)
1718 pub fn ignored_tests_result_in_ignored() {
1720 let desc = TestDescAndFn {
1722 name: StaticTestName("whatever"),
1724 should_panic: ShouldPanic::No,
1726 testfn: DynTestFn(Box::new(move |()| f())),
1728 let (tx, rx) = channel();
1729 run_test(&TestOpts::new(), false, desc, tx);
1730 let (_, res, _) = rx.recv().unwrap();
1731 assert!(res == TrIgnored);
// ShouldPanic::Yes + a panicking body => TrOk. (fragment: `f`'s panicking
// body is elided in this excerpt.)
1735 fn test_should_panic() {
1739 let desc = TestDescAndFn {
1741 name: StaticTestName("whatever"),
1743 should_panic: ShouldPanic::Yes,
1745 testfn: DynTestFn(Box::new(move |()| f())),
1747 let (tx, rx) = channel();
1748 run_test(&TestOpts::new(), false, desc, tx);
1749 let (_, res, _) = rx.recv().unwrap();
1750 assert!(res == TrOk);
// YesWithMessage matches by substring: the panic message "an error message"
// contains the expected "error message", so the result is TrOk.
1754 fn test_should_panic_good_message() {
1756 panic!("an error message");
1758 let desc = TestDescAndFn {
1760 name: StaticTestName("whatever"),
1762 should_panic: ShouldPanic::YesWithMessage("error message"),
1764 testfn: DynTestFn(Box::new(move |()| f())),
1766 let (tx, rx) = channel();
1767 run_test(&TestOpts::new(), false, desc, tx);
1768 let (_, res, _) = rx.recv().unwrap();
1769 assert!(res == TrOk);
// When the panic message lacks the expected substring, the runner must
// produce the exact TrFailedMsg text built by calc_result.
1773 fn test_should_panic_bad_message() {
1775 panic!("an error message");
1777 let expected = "foobar";
// Mirrors the format string in calc_result; keep the two in sync.
1778 let failed_msg = "Panic did not include expected string";
1779 let desc = TestDescAndFn {
1781 name: StaticTestName("whatever"),
1783 should_panic: ShouldPanic::YesWithMessage(expected),
1785 testfn: DynTestFn(Box::new(move |()| f())),
1787 let (tx, rx) = channel();
1788 run_test(&TestOpts::new(), false, desc, tx);
1789 let (_, res, _) = rx.recv().unwrap();
1790 assert!(res == TrFailedMsg(format!("{} '{}'", failed_msg, expected)));
// ShouldPanic::Yes + a body that returns normally => TrFailed. (fragment:
// `f`'s empty body is elided in this excerpt.)
1794 fn test_should_panic_but_succeeds() {
1796 let desc = TestDescAndFn {
1798 name: StaticTestName("whatever"),
1800 should_panic: ShouldPanic::Yes,
1802 testfn: DynTestFn(Box::new(move |()| f())),
1804 let (tx, rx) = channel();
1805 run_test(&TestOpts::new(), false, desc, tx);
1806 let (_, res, _) = rx.recv().unwrap();
1807 assert!(res == TrFailed);
// `--ignored` on the command line must set opts.run_ignored. (fragment: the
// Some(Ok(o)) match arm is elided in this excerpt.)
1811 fn parse_ignored_flag() {
1812 let args = vec!["progname".to_string(), "filter".to_string(), "--ignored".to_string()];
1813 let opts = match parse_opts(&args) {
1815 _ => panic!("Malformed arg in parse_ignored_flag"),
1817 assert!((opts.run_ignored));
1821 pub fn filter_for_ignored_option() {
1822 // When we run ignored tests the test filter should filter out all the
1823 // unignored tests and flip the ignore flag on the rest to false
1825 let mut opts = TestOpts::new();
1826 opts.run_tests = true;
1827 opts.run_ignored = true;
// Two tests: "1" is presumably marked ignore and "2" not (the `ignore:`
// fields are elided in this excerpt — confirm against the full source).
1829 let tests = vec![TestDescAndFn {
1831 name: StaticTestName("1"),
1833 should_panic: ShouldPanic::No,
1835 testfn: DynTestFn(Box::new(move |()| {})),
1839 name: StaticTestName("2"),
1841 should_panic: ShouldPanic::No,
1843 testfn: DynTestFn(Box::new(move |()| {})),
1845 let filtered = filter_tests(&opts, tests);
// Only the originally-ignored test survives, with its ignore flag cleared so
// it will actually run.
1847 assert_eq!(filtered.len(), 1);
1848 assert_eq!(filtered[0].desc.name.to_string(), "1");
1849 assert!(!filtered[0].desc.ignore);
// Substring filtering vs exact filtering (`filter_exact`): the same filter
// strings must match broadly in substring mode and only whole names in exact
// mode. (fragment: the test-name list built by `tests()` is elided in this
// excerpt; the expected counts imply names like "base", "base::test", etc.)
1853 pub fn exact_filter_match() {
1854 fn tests() -> Vec<TestDescAndFn> {
1860 .map(|name| TestDescAndFn {
1862 name: StaticTestName(name),
1864 should_panic: ShouldPanic::No,
1866 testfn: DynTestFn(Box::new(move |()| {}))
// Substring mode: "base" and the prefix "bas" both match all 4 names.
1871 let substr = filter_tests(&TestOpts {
1872 filter: Some("base".into()),
1875 assert_eq!(substr.len(), 4);
1877 let substr = filter_tests(&TestOpts {
1878 filter: Some("bas".into()),
1881 assert_eq!(substr.len(), 4);
// "::test" and "base::test" match the 3 namespaced names as substrings.
1883 let substr = filter_tests(&TestOpts {
1884 filter: Some("::test".into()),
1887 assert_eq!(substr.len(), 3);
1889 let substr = filter_tests(&TestOpts {
1890 filter: Some("base::test".into()),
1893 assert_eq!(substr.len(), 3);
// Exact mode: only a complete name match counts.
1895 let exact = filter_tests(&TestOpts {
1896 filter: Some("base".into()),
1897 filter_exact: true, ..TestOpts::new()
1899 assert_eq!(exact.len(), 1);
// A prefix ("bas") or suffix ("::test") is not an exact name -> 0 matches.
1901 let exact = filter_tests(&TestOpts {
1902 filter: Some("bas".into()),
1906 assert_eq!(exact.len(), 0);
1908 let exact = filter_tests(&TestOpts {
1909 filter: Some("::test".into()),
1913 assert_eq!(exact.len(), 0);
1915 let exact = filter_tests(&TestOpts {
1916 filter: Some("base::test".into()),
1920 assert_eq!(exact.len(), 1);
// filter_tests must also sort: feed names in arbitrary order and expect them
// back in lexicographic order. (fragment: descriptor fields and the push into
// `tests` are elided in this excerpt.)
1924 pub fn sort_tests() {
1925 let mut opts = TestOpts::new();
1926 opts.run_tests = true;
1928 let names = vec!["sha1::test".to_string(),
1929 "isize::test_to_str".to_string(),
1930 "isize::test_pow".to_string(),
1931 "test::do_not_run_ignored_tests".to_string(),
1932 "test::ignored_tests_result_in_ignored".to_string(),
1933 "test::first_free_arg_should_be_a_filter".to_string(),
1934 "test::parse_ignored_flag".to_string(),
1935 "test::filter_for_ignored_option".to_string(),
1936 "test::sort_tests".to_string()];
1939 let mut tests = Vec::new();
1940 for name in &names {
1941 let test = TestDescAndFn {
1943 name: DynTestName((*name).clone()),
1945 should_panic: ShouldPanic::No,
1947 testfn: DynTestFn(Box::new(move |()| testfn())),
1953 let filtered = filter_tests(&opts, tests);
// `expected` is `names` in sorted order.
1955 let expected = vec!["isize::test_pow".to_string(),
1956 "isize::test_to_str".to_string(),
1957 "sha1::test".to_string(),
1958 "test::do_not_run_ignored_tests".to_string(),
1959 "test::filter_for_ignored_option".to_string(),
1960 "test::first_free_arg_should_be_a_filter".to_string(),
1961 "test::ignored_tests_result_in_ignored".to_string(),
1962 "test::parse_ignored_flag".to_string(),
1963 "test::sort_tests".to_string()];
1965 for (a, b) in expected.iter().zip(filtered) {
1966 assert!(*a == b.desc.name.to_string());
// Builds two MetricMaps covering every compare case: shared noisy metric,
// metric present in only one map, and regress/improve in both noise
// directions. (fragment: the actual comparison/assertions are elided in this
// excerpt — only the setup is visible here.)
1971 pub fn test_metricmap_compare() {
1972 let mut m1 = MetricMap::new();
1973 let mut m2 = MetricMap::new();
1974 m1.insert_metric("in-both-noise", 1000.0, 200.0);
1975 m2.insert_metric("in-both-noise", 1100.0, 200.0);
1977 m1.insert_metric("in-first-noise", 1000.0, 2.0);
1978 m2.insert_metric("in-second-noise", 1000.0, 2.0);
// Positive noise => smaller-is-better; growing 1000 -> 2000 is a regression.
1980 m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
1981 m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);
1983 m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
1984 m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);
// Negative noise => larger-is-better; shrinking 2000 -> 1000 is a regression.
1986 m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
1987 m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);
1989 m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
1990 m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);
// run_once with a bench fn that never calls iter() must not panic. (fragment:
// the call into bench::run_once is elided in this excerpt.)
1994 pub fn test_bench_once_no_iter() {
1995 fn f(_: &mut Bencher) {}
// run_once with a bench fn that does call iter() runs the body exactly once.
// (fragment: `b.iter(...)` and the run_once call are elided in this excerpt.)
2000 pub fn test_bench_once_iter() {
2001 fn f(b: &mut Bencher) {
// benchmark() with a bench fn that never calls iter() must still return a
// BenchSamples (see the None arm of bench::benchmark) without panicking.
2009 pub fn test_bench_no_iter() {
2010 fn f(_: &mut Bencher) {}
2011 bench::benchmark(f);
// benchmark() with a bench fn that calls iter() exercises the full adaptive
// sampling path. (fragment: the `b.iter(...)` body is elided in this
// excerpt.)
2015 pub fn test_bench_iter() {
2016 fn f(b: &mut Bencher) {
2020 bench::benchmark(f);