1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
//! Support code for rustc's built-in unit-test and micro-benchmarking framework.
14 //! Almost all user code will only be interested in `Bencher` and
15 //! `black_box`. All other interactions (such as writing tests and
16 //! benchmarks themselves) should be done via the `#[test]` and
17 //! `#[bench]` attributes.
19 //! See the [Testing Chapter](../book/testing.html) of the book for more details.
21 // Currently, not much of this is meant for users. It is intended to
22 // support the simplest interface possible for representing and
// running tests while providing a base that other test frameworks may build on.
26 #![crate_name = "test"]
27 #![unstable(feature = "test", issue = "27812")]
28 #![crate_type = "rlib"]
29 #![crate_type = "dylib"]
30 #![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
31 html_favicon_url = "https://doc.rust-lang.org/favicon.ico",
32 html_root_url = "https://doc.rust-lang.org/nightly/",
33 test(attr(deny(warnings))))]
34 #![cfg_attr(not(stage0), deny(warnings))]
37 #![feature(box_syntax)]
40 #![feature(rustc_private)]
41 #![feature(set_stdio)]
42 #![feature(staged_api)]
43 #![feature(question_mark)]
44 #![feature(panic_unwind)]
45 #![feature(mpsc_recv_timeout)]
50 extern crate panic_unwind;
52 pub use self::TestFn::*;
53 pub use self::ColorConfig::*;
54 pub use self::TestResult::*;
55 pub use self::TestName::*;
56 use self::TestEvent::*;
57 use self::NamePadding::*;
58 use self::OutputLocation::*;
60 use std::boxed::FnBox;
64 use std::collections::BTreeMap;
68 use std::io::prelude::*;
70 use std::iter::repeat;
71 use std::path::PathBuf;
72 use std::sync::mpsc::{channel, Sender};
73 use std::sync::{Arc, Mutex};
75 use std::time::{Instant, Duration};
// Number of seconds a test may run before the console runner emits a
// "has been running for over N seconds" warning (see `write_timeout`);
// each test arms this timeout when it is spawned.
const TEST_WARN_TIMEOUT_S: u64 = 60;
79 // to be used by rustc to compile tests in libtest
81 pub use {Bencher, TestName, TestResult, TestDesc, TestDescAndFn, TestOpts, TrFailed,
82 TrIgnored, TrOk, Metric, MetricMap, StaticTestFn, StaticTestName, DynTestName,
83 DynTestFn, run_test, test_main, test_main_static, filter_tests, parse_opts,
84 StaticBenchFn, ShouldPanic};
89 // The name of a test. By convention this follows the rules for rust
90 // paths; i.e. it should be a series of identifiers separated by double
91 // colons. This way if some test runner wants to arrange the tests
92 // hierarchically it may.
94 #[derive(Clone, PartialEq, Eq, Hash, Debug)]
96 StaticTestName(&'static str),
100 fn as_slice(&self) -> &str {
102 StaticTestName(s) => s,
103 DynTestName(ref s) => s,
107 impl fmt::Display for TestName {
108 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
109 fmt::Display::fmt(self.as_slice(), f)
113 #[derive(Clone, Copy, PartialEq, Eq)]
120 fn padded_name(&self, column_count: usize, align: NamePadding) -> String {
121 let mut name = String::from(self.name.as_slice());
122 let fill = column_count.saturating_sub(name.len());
123 let pad = repeat(" ").take(fill).collect::<String>();
134 /// Represents a benchmark function.
135 pub trait TDynBenchFn: Send {
136 fn run(&self, harness: &mut Bencher);
139 // A function that runs a test. If the function returns successfully,
140 // the test succeeds; if the function panics then the test fails. We
141 // may need to come up with a more clever definition of test in order
142 // to support isolation of tests into threads.
145 StaticBenchFn(fn(&mut Bencher)),
146 StaticMetricFn(fn(&mut MetricMap)),
147 DynTestFn(Box<FnBox() + Send>),
148 DynMetricFn(Box<FnBox(&mut MetricMap) + Send>),
149 DynBenchFn(Box<TDynBenchFn + 'static>),
153 fn padding(&self) -> NamePadding {
155 StaticTestFn(..) => PadNone,
156 StaticBenchFn(..) => PadOnRight,
157 StaticMetricFn(..) => PadOnRight,
158 DynTestFn(..) => PadNone,
159 DynMetricFn(..) => PadOnRight,
160 DynBenchFn(..) => PadOnRight,
165 impl fmt::Debug for TestFn {
166 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
167 f.write_str(match *self {
168 StaticTestFn(..) => "StaticTestFn(..)",
169 StaticBenchFn(..) => "StaticBenchFn(..)",
170 StaticMetricFn(..) => "StaticMetricFn(..)",
171 DynTestFn(..) => "DynTestFn(..)",
172 DynMetricFn(..) => "DynMetricFn(..)",
173 DynBenchFn(..) => "DynBenchFn(..)",
178 /// Manager of the benchmarking runs.
180 /// This is fed into functions marked with `#[bench]` to allow for
181 /// set-up & tear-down before running a piece of code repeatedly via a
183 #[derive(Copy, Clone)]
190 #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
191 pub enum ShouldPanic {
194 YesWithMessage(&'static str),
197 // The definition of a single test. A test runner will run a list of
199 #[derive(Clone, Debug, PartialEq, Eq, Hash)]
200 pub struct TestDesc {
203 pub should_panic: ShouldPanic,
207 pub struct TestPaths {
208 pub file: PathBuf, // e.g., compile-test/foo/bar/baz.rs
209 pub base: PathBuf, // e.g., compile-test, auxiliary
210 pub relative_dir: PathBuf, // e.g., foo/bar
214 pub struct TestDescAndFn {
219 #[derive(Clone, PartialEq, Debug, Copy)]
226 pub fn new(value: f64, noise: f64) -> Metric {
235 pub struct MetricMap(BTreeMap<String, Metric>);
237 impl Clone for MetricMap {
238 fn clone(&self) -> MetricMap {
239 let MetricMap(ref map) = *self;
240 MetricMap(map.clone())
244 // The default console test runner. It accepts the command line
245 // arguments and a vector of test_descs.
246 pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>) {
247 let opts = match parse_opts(args) {
249 Some(Err(msg)) => panic!("{:?}", msg),
252 match run_tests_console(&opts, tests) {
254 Ok(false) => std::process::exit(101),
255 Err(e) => panic!("io error when running tests: {:?}", e),
259 // A variant optimized for invocation with a static test vector.
260 // This will panic (intentionally) when fed any dynamic tests, because
261 // it is copying the static values out into a dynamic vector and cannot
262 // copy dynamic values. It is doing this because from this point on
263 // a Vec<TestDescAndFn> is used in order to effect ownership-transfer
264 // semantics into parallel test runners, which in turn requires a Vec<>
265 // rather than a &[].
266 pub fn test_main_static(tests: &[TestDescAndFn]) {
267 let args = env::args().collect::<Vec<_>>();
268 let owned_tests = tests.iter()
273 testfn: StaticTestFn(f),
274 desc: t.desc.clone(),
277 StaticBenchFn(f) => {
279 testfn: StaticBenchFn(f),
280 desc: t.desc.clone(),
283 _ => panic!("non-static tests passed to test::test_main_static"),
287 test_main(&args, owned_tests)
290 #[derive(Copy, Clone)]
291 pub enum ColorConfig {
297 pub struct TestOpts {
298 pub filter: Option<String>,
299 pub run_ignored: bool,
301 pub bench_benchmarks: bool,
302 pub logfile: Option<PathBuf>,
304 pub color: ColorConfig,
306 pub test_threads: Option<usize>,
311 fn new() -> TestOpts {
316 bench_benchmarks: false,
/// Result of parsing the command-line options: the parsed `TestOpts` on
/// success, or a human-readable error message when the arguments could
/// not be parsed (e.g. an unknown flag or a bad `--test-threads` value).
pub type OptRes = Result<TestOpts, String>;
329 #[cfg_attr(rustfmt, rustfmt_skip)]
330 fn optgroups() -> Vec<getopts::OptGroup> {
331 vec!(getopts::optflag("", "ignored", "Run ignored tests"),
332 getopts::optflag("", "test", "Run tests and not benchmarks"),
333 getopts::optflag("", "bench", "Run benchmarks instead of tests"),
334 getopts::optflag("h", "help", "Display this message (longer with --help)"),
335 getopts::optopt("", "logfile", "Write logs to the specified file instead \
337 getopts::optflag("", "nocapture", "don't capture stdout/stderr of each \
338 task, allow printing directly"),
339 getopts::optopt("", "test-threads", "Number of threads used for running tests \
340 in parallel", "n_threads"),
341 getopts::optflag("q", "quiet", "Display one character per test instead of one line"),
342 getopts::optopt("", "color", "Configure coloring of output:
343 auto = colorize if stdout is a tty and tests are run on serially (default);
344 always = always colorize output;
345 never = never colorize output;", "auto|always|never"))
348 fn usage(binary: &str) {
349 let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
352 The FILTER string is tested against the name of all tests, and only those
353 tests whose names contain the filter are run.
355 By default, all tests are run in parallel. This can be altered with the
356 --test-threads flag or the RUST_TEST_THREADS environment variable when running
359 All tests have their standard output and standard error captured by default.
360 This can be overridden with the --nocapture flag or setting RUST_TEST_NOCAPTURE
361 environment variable to a value other than "0". Logging is not captured by default.
365 #[test] - Indicates a function is a test to be run. This function
367 #[bench] - Indicates a function is a benchmark to be run. This
368 function takes one argument (test::Bencher).
369 #[should_panic] - This function (also labeled with #[test]) will only pass if
370 the code causes a panic (an assertion failure or panic!)
371 A message may be provided, which the failure string must
372 contain: #[should_panic(expected = "foo")].
373 #[ignore] - When applied to a function which is already attributed as a
374 test, then the test runner will ignore these tests during
375 normal test runs. Running with --ignored will run these
377 usage = getopts::usage(&message, &optgroups()));
380 // Parses command line arguments into test options
381 pub fn parse_opts(args: &[String]) -> Option<OptRes> {
382 let args_ = &args[1..];
383 let matches = match getopts::getopts(args_, &optgroups()) {
385 Err(f) => return Some(Err(f.to_string())),
388 if matches.opt_present("h") {
393 let filter = if !matches.free.is_empty() {
394 Some(matches.free[0].clone())
399 let run_ignored = matches.opt_present("ignored");
400 let quiet = matches.opt_present("quiet");
402 let logfile = matches.opt_str("logfile");
403 let logfile = logfile.map(|s| PathBuf::from(&s));
405 let bench_benchmarks = matches.opt_present("bench");
406 let run_tests = !bench_benchmarks || matches.opt_present("test");
408 let mut nocapture = matches.opt_present("nocapture");
410 nocapture = match env::var("RUST_TEST_NOCAPTURE") {
411 Ok(val) => &val != "0",
416 let test_threads = match matches.opt_str("test-threads") {
418 match n_str.parse::<usize>() {
421 return Some(Err(format!("argument for --test-threads must be a number > 0 \
428 let color = match matches.opt_str("color").as_ref().map(|s| &**s) {
429 Some("auto") | None => AutoColor,
430 Some("always") => AlwaysColor,
431 Some("never") => NeverColor,
434 return Some(Err(format!("argument for --color must be auto, always, or never (was \
440 let test_opts = TestOpts {
442 run_ignored: run_ignored,
443 run_tests: run_tests,
444 bench_benchmarks: bench_benchmarks,
446 nocapture: nocapture,
449 test_threads: test_threads,
455 #[derive(Clone, PartialEq)]
456 pub struct BenchSamples {
457 ns_iter_summ: stats::Summary,
461 #[derive(Clone, PartialEq)]
462 pub enum TestResult {
466 TrMetrics(MetricMap),
467 TrBench(BenchSamples),
// NOTE(review): `Send` is asserted manually with an `unsafe impl` rather
// than left to the auto trait — presumably some payload reachable from a
// variant (e.g. inside `BenchSamples` or `MetricMap`) blocks the automatic
// impl. Confirm this is still required and sound; an unnecessary unsafe
// impl would mask a future non-`Send` field being added.
unsafe impl Send for TestResult {}
472 enum OutputLocation<T> {
473 Pretty(Box<term::StdoutTerminal>),
477 struct ConsoleTestState<T> {
478 log_out: Option<File>,
479 out: OutputLocation<T>,
488 failures: Vec<(TestDesc, Vec<u8>)>,
489 max_name_len: usize, // number of columns to fill when aligning names
492 impl<T: Write> ConsoleTestState<T> {
493 pub fn new(opts: &TestOpts, _: Option<T>) -> io::Result<ConsoleTestState<io::Stdout>> {
494 let log_out = match opts.logfile {
495 Some(ref path) => Some(File::create(path)?),
498 let out = match term::stdout() {
499 None => Raw(io::stdout()),
500 Some(t) => Pretty(t),
503 Ok(ConsoleTestState {
506 use_color: use_color(opts),
513 metrics: MetricMap::new(),
514 failures: Vec::new(),
519 pub fn write_ok(&mut self) -> io::Result<()> {
520 self.write_short_result("ok", ".", term::color::GREEN)
523 pub fn write_failed(&mut self) -> io::Result<()> {
524 self.write_short_result("FAILED", "F", term::color::RED)
527 pub fn write_ignored(&mut self) -> io::Result<()> {
528 self.write_short_result("ignored", "i", term::color::YELLOW)
531 pub fn write_metric(&mut self) -> io::Result<()> {
532 self.write_pretty("metric", term::color::CYAN)
535 pub fn write_bench(&mut self) -> io::Result<()> {
536 self.write_pretty("bench", term::color::CYAN)
539 pub fn write_short_result(&mut self, verbose: &str, quiet: &str, color: term::color::Color)
542 self.write_pretty(quiet, color)
544 self.write_pretty(verbose, color)?;
545 self.write_plain("\n")
549 pub fn write_pretty(&mut self, word: &str, color: term::color::Color) -> io::Result<()> {
551 Pretty(ref mut term) => {
555 term.write_all(word.as_bytes())?;
561 Raw(ref mut stdout) => {
562 stdout.write_all(word.as_bytes())?;
568 pub fn write_plain(&mut self, s: &str) -> io::Result<()> {
570 Pretty(ref mut term) => {
571 term.write_all(s.as_bytes())?;
574 Raw(ref mut stdout) => {
575 stdout.write_all(s.as_bytes())?;
581 pub fn write_run_start(&mut self, len: usize) -> io::Result<()> {
583 let noun = if len != 1 {
588 self.write_plain(&format!("\nrunning {} {}\n", len, noun))
591 pub fn write_test_start(&mut self, test: &TestDesc, align: NamePadding) -> io::Result<()> {
592 if self.quiet && align != PadOnRight {
595 let name = test.padded_name(self.max_name_len, align);
596 self.write_plain(&format!("test {} ... ", name))
600 pub fn write_result(&mut self, result: &TestResult) -> io::Result<()> {
602 TrOk => self.write_ok(),
603 TrFailed => self.write_failed(),
604 TrIgnored => self.write_ignored(),
605 TrMetrics(ref mm) => {
606 self.write_metric()?;
607 self.write_plain(&format!(": {}\n", mm.fmt_metrics()))
611 self.write_plain(&format!(": {}\n", fmt_bench_samples(bs)))
616 pub fn write_timeout(&mut self, desc: &TestDesc) -> io::Result<()> {
617 self.write_plain(&format!("test {} has been running for over {} seconds\n",
619 TEST_WARN_TIMEOUT_S))
622 pub fn write_log(&mut self, test: &TestDesc, result: &TestResult) -> io::Result<()> {
626 let s = format!("{} {}\n",
628 TrOk => "ok".to_owned(),
629 TrFailed => "failed".to_owned(),
630 TrIgnored => "ignored".to_owned(),
631 TrMetrics(ref mm) => mm.fmt_metrics(),
632 TrBench(ref bs) => fmt_bench_samples(bs),
635 o.write_all(s.as_bytes())
640 pub fn write_failures(&mut self) -> io::Result<()> {
641 self.write_plain("\nfailures:\n")?;
642 let mut failures = Vec::new();
643 let mut fail_out = String::new();
644 for &(ref f, ref stdout) in &self.failures {
645 failures.push(f.name.to_string());
646 if !stdout.is_empty() {
647 fail_out.push_str(&format!("---- {} stdout ----\n\t", f.name));
648 let output = String::from_utf8_lossy(stdout);
649 fail_out.push_str(&output);
650 fail_out.push_str("\n");
653 if !fail_out.is_empty() {
654 self.write_plain("\n")?;
655 self.write_plain(&fail_out)?;
658 self.write_plain("\nfailures:\n")?;
660 for name in &failures {
661 self.write_plain(&format!(" {}\n", name))?;
666 pub fn write_run_finish(&mut self) -> io::Result<bool> {
667 assert!(self.passed + self.failed + self.ignored + self.measured == self.total);
669 let success = self.failed == 0;
671 self.write_failures()?;
674 self.write_plain("\ntest result: ")?;
676 // There's no parallelism at this point so it's safe to use color
677 self.write_pretty("ok", term::color::GREEN)?;
679 self.write_pretty("FAILED", term::color::RED)?;
681 let s = format!(". {} passed; {} failed; {} ignored; {} measured\n\n",
686 self.write_plain(&s)?;
691 // Format a number with thousands separators
692 fn fmt_thousands_sep(mut n: usize, sep: char) -> String {
694 let mut output = String::new();
695 let mut trailing = false;
696 for &pow in &[9, 6, 3, 0] {
697 let base = 10_usize.pow(pow);
698 if pow == 0 || trailing || n / base != 0 {
700 output.write_fmt(format_args!("{}", n / base)).unwrap();
702 output.write_fmt(format_args!("{:03}", n / base)).unwrap();
715 pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
717 let mut output = String::new();
719 let median = bs.ns_iter_summ.median as usize;
720 let deviation = (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize;
722 output.write_fmt(format_args!("{:>11} ns/iter (+/- {})",
723 fmt_thousands_sep(median, ','),
724 fmt_thousands_sep(deviation, ',')))
727 output.write_fmt(format_args!(" = {} MB/s", bs.mb_s)).unwrap();
732 // A simple console test runner
733 pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<bool> {
735 fn callback<T: Write>(event: &TestEvent, st: &mut ConsoleTestState<T>) -> io::Result<()> {
736 match (*event).clone() {
737 TeFiltered(ref filtered_tests) => st.write_run_start(filtered_tests.len()),
738 TeWait(ref test, padding) => st.write_test_start(test, padding),
739 TeTimeout(ref test) => st.write_timeout(test),
740 TeResult(test, result, stdout) => {
741 st.write_log(&test, &result)?;
742 st.write_result(&result)?;
744 TrOk => st.passed += 1,
745 TrIgnored => st.ignored += 1,
747 let tname = test.name;
748 let MetricMap(mm) = mm;
751 .insert_metric(&format!("{}.{}", tname, k), v.value, v.noise);
756 st.metrics.insert_metric(test.name.as_slice(),
757 bs.ns_iter_summ.median,
758 bs.ns_iter_summ.max - bs.ns_iter_summ.min);
763 st.failures.push((test, stdout));
771 let mut st = ConsoleTestState::new(opts, None::<io::Stdout>)?;
772 fn len_if_padded(t: &TestDescAndFn) -> usize {
773 match t.testfn.padding() {
775 PadOnRight => t.desc.name.as_slice().len(),
778 if let Some(t) = tests.iter().max_by_key(|t| len_if_padded(*t)) {
779 let n = t.desc.name.as_slice();
780 st.max_name_len = n.len();
782 run_tests(opts, tests, |x| callback(&x, &mut st))?;
783 return st.write_run_finish();
787 fn should_sort_failures_before_printing_them() {
788 let test_a = TestDesc {
789 name: StaticTestName("a"),
791 should_panic: ShouldPanic::No,
794 let test_b = TestDesc {
795 name: StaticTestName("b"),
797 should_panic: ShouldPanic::No,
800 let mut st = ConsoleTestState {
802 out: Raw(Vec::new()),
811 metrics: MetricMap::new(),
812 failures: vec![(test_b, Vec::new()), (test_a, Vec::new())],
815 st.write_failures().unwrap();
816 let s = match st.out {
817 Raw(ref m) => String::from_utf8_lossy(&m[..]),
818 Pretty(_) => unreachable!(),
821 let apos = s.find("a").unwrap();
822 let bpos = s.find("b").unwrap();
823 assert!(apos < bpos);
826 fn use_color(opts: &TestOpts) -> bool {
828 AutoColor => !opts.nocapture && stdout_isatty(),
835 fn stdout_isatty() -> bool {
836 unsafe { libc::isatty(libc::STDOUT_FILENO) != 0 }
839 fn stdout_isatty() -> bool {
842 type HANDLE = *mut u8;
843 type LPDWORD = *mut u32;
844 const STD_OUTPUT_HANDLE: DWORD = -11i32 as DWORD;
846 fn GetStdHandle(which: DWORD) -> HANDLE;
847 fn GetConsoleMode(hConsoleHandle: HANDLE, lpMode: LPDWORD) -> BOOL;
850 let handle = GetStdHandle(STD_OUTPUT_HANDLE);
852 GetConsoleMode(handle, &mut out) != 0
858 TeFiltered(Vec<TestDesc>),
859 TeWait(TestDesc, NamePadding),
860 TeResult(TestDesc, TestResult, Vec<u8>),
// Message sent from a running test back over the monitor channel: the
// test's descriptor, its outcome, and the raw bytes captured from the
// test's stdout/stderr while it ran (see the `Sink` writer in
// `run_test_inner`, which feeds this buffer).
pub type MonitorMsg = (TestDesc, TestResult, Vec<u8>);
867 fn run_tests<F>(opts: &TestOpts, tests: Vec<TestDescAndFn>, mut callback: F) -> io::Result<()>
868 where F: FnMut(TestEvent) -> io::Result<()>
870 use std::collections::HashMap;
871 use std::sync::mpsc::RecvTimeoutError;
873 let mut filtered_tests = filter_tests(opts, tests);
874 if !opts.bench_benchmarks {
875 filtered_tests = convert_benchmarks_to_tests(filtered_tests);
878 let filtered_descs = filtered_tests.iter()
879 .map(|t| t.desc.clone())
882 callback(TeFiltered(filtered_descs))?;
884 let (filtered_tests, filtered_benchs_and_metrics): (Vec<_>, _) =
885 filtered_tests.into_iter().partition(|e| {
887 StaticTestFn(_) | DynTestFn(_) => true,
892 let concurrency = match opts.test_threads {
894 None => get_concurrency(),
897 let mut remaining = filtered_tests;
901 let (tx, rx) = channel::<MonitorMsg>();
903 let mut running_tests: HashMap<TestDesc, Instant> = HashMap::new();
905 fn get_timed_out_tests(running_tests: &mut HashMap<TestDesc, Instant>) -> Vec<TestDesc> {
906 let now = Instant::now();
907 let timed_out = running_tests.iter()
908 .filter_map(|(desc, timeout)| if &now >= timeout { Some(desc.clone())} else { None })
910 for test in &timed_out {
911 running_tests.remove(test);
916 fn calc_timeout(running_tests: &HashMap<TestDesc, Instant>) -> Option<Duration> {
917 running_tests.values().min().map(|next_timeout| {
918 let now = Instant::now();
919 if *next_timeout >= now {
926 while pending > 0 || !remaining.is_empty() {
927 while pending < concurrency && !remaining.is_empty() {
928 let test = remaining.pop().unwrap();
929 if concurrency == 1 {
930 // We are doing one test at a time so we can print the name
931 // of the test before we run it. Useful for debugging tests
932 // that hang forever.
933 callback(TeWait(test.desc.clone(), test.testfn.padding()))?;
935 let timeout = Instant::now() + Duration::from_secs(TEST_WARN_TIMEOUT_S);
936 running_tests.insert(test.desc.clone(), timeout);
937 run_test(opts, !opts.run_tests, test, tx.clone());
943 if let Some(timeout) = calc_timeout(&running_tests) {
944 res = rx.recv_timeout(timeout);
945 for test in get_timed_out_tests(&mut running_tests) {
946 callback(TeTimeout(test))?;
948 if res != Err(RecvTimeoutError::Timeout) {
952 res = rx.recv().map_err(|_| RecvTimeoutError::Disconnected);
957 let (desc, result, stdout) = res.unwrap();
958 running_tests.remove(&desc);
960 if concurrency != 1 {
961 callback(TeWait(desc.clone(), PadNone))?;
963 callback(TeResult(desc, result, stdout))?;
967 if opts.bench_benchmarks {
968 // All benchmarks run at the end, in serial.
969 // (this includes metric fns)
970 for b in filtered_benchs_and_metrics {
971 callback(TeWait(b.desc.clone(), b.testfn.padding()))?;
972 run_test(opts, false, b, tx.clone());
973 let (test, result, stdout) = rx.recv().unwrap();
974 callback(TeResult(test, result, stdout))?;
981 fn get_concurrency() -> usize {
982 return match env::var("RUST_TEST_THREADS") {
984 let opt_n: Option<usize> = s.parse().ok();
986 Some(n) if n > 0 => n,
988 panic!("RUST_TEST_THREADS is `{}`, should be a positive integer.",
993 Err(..) => num_cpus(),
998 fn num_cpus() -> usize {
1000 struct SYSTEM_INFO {
1001 wProcessorArchitecture: u16,
1004 lpMinimumApplicationAddress: *mut u8,
1005 lpMaximumApplicationAddress: *mut u8,
1006 dwActiveProcessorMask: *mut u8,
1007 dwNumberOfProcessors: u32,
1008 dwProcessorType: u32,
1009 dwAllocationGranularity: u32,
1010 wProcessorLevel: u16,
1011 wProcessorRevision: u16,
1014 fn GetSystemInfo(info: *mut SYSTEM_INFO) -> i32;
1017 let mut sysinfo = std::mem::zeroed();
1018 GetSystemInfo(&mut sysinfo);
1019 sysinfo.dwNumberOfProcessors as usize
1023 #[cfg(any(target_os = "linux",
1024 target_os = "macos",
1026 target_os = "android",
1027 target_os = "solaris",
1028 target_os = "emscripten"))]
1029 fn num_cpus() -> usize {
1030 unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as usize }
1033 #[cfg(any(target_os = "freebsd",
1034 target_os = "dragonfly",
1035 target_os = "bitrig",
1036 target_os = "netbsd"))]
1037 fn num_cpus() -> usize {
1040 let mut cpus: libc::c_uint = 0;
1041 let mut cpus_size = std::mem::size_of_val(&cpus);
1044 cpus = libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as libc::c_uint;
1047 let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
1049 libc::sysctl(mib.as_mut_ptr(),
1051 &mut cpus as *mut _ as *mut _,
1052 &mut cpus_size as *mut _ as *mut _,
1063 #[cfg(target_os = "openbsd")]
1064 fn num_cpus() -> usize {
1067 let mut cpus: libc::c_uint = 0;
1068 let mut cpus_size = std::mem::size_of_val(&cpus);
1069 let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
1072 libc::sysctl(mib.as_mut_ptr(),
1074 &mut cpus as *mut _ as *mut _,
1075 &mut cpus_size as *mut _ as *mut _,
1086 pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
1087 let mut filtered = tests;
1089 // Remove tests that don't match the test filter
1090 filtered = match opts.filter {
1092 Some(ref filter) => {
1093 filtered.into_iter()
1094 .filter(|test| test.desc.name.as_slice().contains(&filter[..]))
1099 // Maybe pull out the ignored test and unignore them
1100 filtered = if !opts.run_ignored {
1103 fn filter(test: TestDescAndFn) -> Option<TestDescAndFn> {
1104 if test.desc.ignore {
1105 let TestDescAndFn {desc, testfn} = test;
1106 Some(TestDescAndFn {
1107 desc: TestDesc { ignore: false, ..desc },
1114 filtered.into_iter().filter_map(filter).collect()
1117 // Sort the tests alphabetically
1118 filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));
1123 pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
1124 // convert benchmarks to tests, if we're not benchmarking them
1127 let testfn = match x.testfn {
1128 DynBenchFn(bench) => {
1129 DynTestFn(Box::new(move || bench::run_once(|b| bench.run(b))))
1131 StaticBenchFn(benchfn) => {
1132 DynTestFn(Box::new(move || bench::run_once(|b| benchfn(b))))
1144 pub fn run_test(opts: &TestOpts,
1146 test: TestDescAndFn,
1147 monitor_ch: Sender<MonitorMsg>) {
1149 let TestDescAndFn {desc, testfn} = test;
1151 if force_ignore || desc.ignore {
1152 monitor_ch.send((desc, TrIgnored, Vec::new())).unwrap();
1156 fn run_test_inner(desc: TestDesc,
1157 monitor_ch: Sender<MonitorMsg>,
1159 testfn: Box<FnBox() + Send>) {
1160 struct Sink(Arc<Mutex<Vec<u8>>>);
1161 impl Write for Sink {
1162 fn write(&mut self, data: &[u8]) -> io::Result<usize> {
1163 Write::write(&mut *self.0.lock().unwrap(), data)
1165 fn flush(&mut self) -> io::Result<()> {
1170 thread::spawn(move || {
1171 let data = Arc::new(Mutex::new(Vec::new()));
1172 let data2 = data.clone();
1173 let cfg = thread::Builder::new().name(match desc.name {
1174 DynTestName(ref name) => name.clone(),
1175 StaticTestName(name) => name.to_owned(),
1178 let result_guard = cfg.spawn(move || {
1180 io::set_print(box Sink(data2.clone()));
1181 io::set_panic(box Sink(data2));
1186 let test_result = calc_result(&desc, result_guard.join());
1187 let stdout = data.lock().unwrap().to_vec();
1188 monitor_ch.send((desc.clone(), test_result, stdout)).unwrap();
1193 DynBenchFn(bencher) => {
1194 let bs = ::bench::benchmark(|harness| bencher.run(harness));
1195 monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
1198 StaticBenchFn(benchfn) => {
1199 let bs = ::bench::benchmark(|harness| (benchfn.clone())(harness));
1200 monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
1204 let mut mm = MetricMap::new();
1205 f.call_box((&mut mm,));
1206 monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
1209 StaticMetricFn(f) => {
1210 let mut mm = MetricMap::new();
1212 monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
1215 DynTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture, f),
1216 StaticTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture, Box::new(f)),
1220 fn calc_result(desc: &TestDesc, task_result: Result<(), Box<Any + Send>>) -> TestResult {
1221 match (&desc.should_panic, task_result) {
1222 (&ShouldPanic::No, Ok(())) |
1223 (&ShouldPanic::Yes, Err(_)) => TrOk,
1224 (&ShouldPanic::YesWithMessage(msg), Err(ref err))
1225 if err.downcast_ref::<String>()
1227 .or_else(|| err.downcast_ref::<&'static str>().map(|e| *e))
1228 .map(|e| e.contains(msg))
1229 .unwrap_or(false) => TrOk,
1235 pub fn new() -> MetricMap {
1236 MetricMap(BTreeMap::new())
1239 /// Insert a named `value` (+/- `noise`) metric into the map. The value
1240 /// must be non-negative. The `noise` indicates the uncertainty of the
1241 /// metric, which doubles as the "noise range" of acceptable
1242 /// pairwise-regressions on this named value, when comparing from one
1243 /// metric to the next using `compare_to_old`.
1245 /// If `noise` is positive, then it means this metric is of a value
1246 /// you want to see grow smaller, so a change larger than `noise` in the
1247 /// positive direction represents a regression.
1249 /// If `noise` is negative, then it means this metric is of a value
1250 /// you want to see grow larger, so a change larger than `noise` in the
1251 /// negative direction represents a regression.
1252 pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
1257 let MetricMap(ref mut map) = *self;
1258 map.insert(name.to_owned(), m);
1261 pub fn fmt_metrics(&self) -> String {
1262 let MetricMap(ref mm) = *self;
1263 let v: Vec<String> = mm.iter()
1264 .map(|(k, v)| format!("{}: {} (+/- {})", *k, v.value, v.noise))
1273 /// A function that is opaque to the optimizer, to allow benchmarks to
1274 /// pretend to use outputs to assist in avoiding dead-code
1277 /// This function is a no-op, and does not even read from `dummy`.
1278 #[cfg(not(any(all(target_os = "nacl", target_arch = "le32"),
1279 target_arch = "asmjs")))]
1280 pub fn black_box<T>(dummy: T) -> T {
1281 // we need to "use" the argument in some way LLVM can't
1283 unsafe { asm!("" : : "r"(&dummy)) }
1286 #[cfg(any(all(target_os = "nacl", target_arch = "le32"),
1287 target_arch = "asmjs"))]
1289 pub fn black_box<T>(dummy: T) -> T {
1295 /// Callback for benchmark functions to run in their body.
1296 pub fn iter<T, F>(&mut self, mut inner: F)
1297 where F: FnMut() -> T
1299 let start = Instant::now();
1300 let k = self.iterations;
1304 self.dur = start.elapsed();
1307 pub fn ns_elapsed(&mut self) -> u64 {
1308 self.dur.as_secs() * 1_000_000_000 + (self.dur.subsec_nanos() as u64)
1311 pub fn ns_per_iter(&mut self) -> u64 {
1312 if self.iterations == 0 {
1315 self.ns_elapsed() / cmp::max(self.iterations, 1)
1319 pub fn bench_n<F>(&mut self, n: u64, f: F)
1320 where F: FnOnce(&mut Bencher)
1322 self.iterations = n;
1326 // This is a more statistics-driven benchmark algorithm
1327 pub fn auto_bench<F>(&mut self, mut f: F) -> stats::Summary
1328 where F: FnMut(&mut Bencher)
1330 // Initial bench run to get ballpark figure.
1332 self.bench_n(n, |x| f(x));
1334 // Try to estimate iter count for 1ms falling back to 1m
1335 // iterations if first run took < 1ns.
1336 if self.ns_per_iter() == 0 {
1339 n = 1_000_000 / cmp::max(self.ns_per_iter(), 1);
1341 // if the first run took more than 1ms we don't want to just
1342 // be left doing 0 iterations on every loop. The unfortunate
1343 // side effect of not being able to do as many runs is
1344 // automatically handled by the statistical analysis below
1345 // (i.e. larger error bars).
1350 let mut total_run = Duration::new(0, 0);
1351 let samples: &mut [f64] = &mut [0.0_f64; 50];
1353 let loop_start = Instant::now();
1355 for p in &mut *samples {
1356 self.bench_n(n, |x| f(x));
1357 *p = self.ns_per_iter() as f64;
1360 stats::winsorize(samples, 5.0);
1361 let summ = stats::Summary::new(samples);
1363 for p in &mut *samples {
1364 self.bench_n(5 * n, |x| f(x));
1365 *p = self.ns_per_iter() as f64;
1368 stats::winsorize(samples, 5.0);
1369 let summ5 = stats::Summary::new(samples);
1370 let loop_run = loop_start.elapsed();
1372 // If we've run for 100ms and seem to have converged to a
1374 if loop_run > Duration::from_millis(100) && summ.median_abs_dev_pct < 1.0 &&
1375 summ.median - summ5.median < summ5.median_abs_dev {
1379 total_run = total_run + loop_run;
1380 // Longest we ever run for is 3s.
1381 if total_run > Duration::from_secs(3) {
1385 // If we overflow here just return the results so far. We check a
1386 // multiplier of 10 because we're about to multiply by 2 and the
1387 // next iteration of the loop will also multiply by 5 (to calculate
1388 // the summ5 result)
1389 n = match n.checked_mul(10) {
1391 None => return summ5,
1399 use std::time::Duration;
1400 use super::{Bencher, BenchSamples};
// Benchmarks `f` and packages the timing statistics as a `BenchSamples`.
// NOTE(review): the embedded original line numbers (1402, 1403, 1405, ...)
// are non-contiguous — opening/closing braces and some `Bencher` /
// `BenchSamples` field initializers are missing from this fragment, so only
// comments are added here.
1402 pub fn benchmark<F>(f: F) -> BenchSamples
1403 where F: FnMut(&mut Bencher)
// Fresh bencher with a zeroed duration; the remaining field initializers
// (presumably iteration/byte counters) are not visible in this fragment.
1405 let mut bs = Bencher {
1407 dur: Duration::new(0, 0),
// Run the adaptive benchmark loop; yields a statistical summary of
// nanoseconds-per-iteration.
1411 let ns_iter_summ = bs.auto_bench(f);
// Clamp the median to at least 1 ns so the throughput division below can
// never divide by zero.
1413 let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
// bytes-per-iteration * 1000 / ns-per-iteration == megabytes per second.
1414 let mb_s = bs.bytes * 1000 / ns_iter;
// Build the returned BenchSamples (surrounding braces fall in a gap).
1417 ns_iter_summ: ns_iter_summ,
1418 mb_s: mb_s as usize,
// Runs `f` exactly once against a fresh `Bencher`, with no timing loop or
// statistics (the path used when a benchmark is executed as a plain test).
// NOTE(review): this fragment ends before the statement that actually invokes
// `f` (original lines ~1428+ are missing); only comments are added here.
1422 pub fn run_once<F>(f: F)
1423 where F: FnOnce(&mut Bencher)
// Same zeroed-`Bencher` construction as `benchmark` above; the remaining
// field initializers fall in a gap of this fragment.
1425 let mut bs = Bencher {
1427 dur: Duration::new(0, 0),
1436 use test::{TrFailed, TrIgnored, TrOk, filter_tests, parse_opts, TestDesc, TestDescAndFn,
1437 TestOpts, run_test, MetricMap, StaticTestName, DynTestName, DynTestFn, ShouldPanic};
1438 use std::sync::mpsc::channel;
// With default `TestOpts` (so ignored tests are NOT run), running a test
// must not report `TrOk` — the final assert only checks `!= TrOk`.
// NOTE(review): the local `fn f()` body and the `ignore:` initializer
// (original lines 1442-1444, 1446, 1448, 1450) are missing from this
// fragment; presumably `ignore` is set to true here — confirm upstream.
1441 pub fn do_not_run_ignored_tests() {
1445 let desc = TestDescAndFn {
1447 name: StaticTestName("whatever"),
1449 should_panic: ShouldPanic::No,
1451 testfn: DynTestFn(Box::new(move || f())),
// run_test reports its result over this channel as (name, result, stdout).
1453 let (tx, rx) = channel();
1454 run_test(&TestOpts::new(), false, desc, tx);
1455 let (_, res, _) = rx.recv().unwrap();
1456 assert!(res != TrOk);
// Companion to `do_not_run_ignored_tests`: the skipped test must be reported
// specifically as `TrIgnored`, not merely "not ok".
// NOTE(review): the local `fn f()` and the `ignore:` field initializer fall
// in gaps of this fragment (original lines 1461, 1463, 1465, 1467, 1469).
1460 pub fn ignored_tests_result_in_ignored() {
1462 let desc = TestDescAndFn {
1464 name: StaticTestName("whatever"),
1466 should_panic: ShouldPanic::No,
1468 testfn: DynTestFn(Box::new(move || f())),
1470 let (tx, rx) = channel();
1471 run_test(&TestOpts::new(), false, desc, tx);
1472 let (_, res, _) = rx.recv().unwrap();
1473 assert!(res == TrIgnored);
// A test marked `ShouldPanic::Yes` that does panic must be reported `TrOk`.
// NOTE(review): the local `fn f()` body (original lines ~1478-1480) is
// missing from this fragment; presumably it calls `panic!()` — confirm.
1477 fn test_should_panic() {
1481 let desc = TestDescAndFn {
1483 name: StaticTestName("whatever"),
// The panic is expected, with no particular message required.
1485 should_panic: ShouldPanic::Yes,
1487 testfn: DynTestFn(Box::new(move || f())),
1489 let (tx, rx) = channel();
1490 run_test(&TestOpts::new(), false, desc, tx);
1491 let (_, res, _) = rx.recv().unwrap();
1492 assert!(res == TrOk);
// `ShouldPanic::YesWithMessage` succeeds when the actual panic message
// contains the expected fragment: "an error message" contains
// "error message", so the result is `TrOk`.
// NOTE(review): the `fn f()` wrapper around the `panic!` and the `ignore:`
// initializer fall in gaps of this fragment.
1496 fn test_should_panic_good_message() {
1498 panic!("an error message");
1500 let desc = TestDescAndFn {
1502 name: StaticTestName("whatever"),
1504 should_panic: ShouldPanic::YesWithMessage("error message"),
1506 testfn: DynTestFn(Box::new(move || f())),
1508 let (tx, rx) = channel();
1509 run_test(&TestOpts::new(), false, desc, tx);
1510 let (_, res, _) = rx.recv().unwrap();
1511 assert!(res == TrOk);
// Negative counterpart of the test above: the panic message
// "an error message" does NOT contain the expected "foobar", so the
// mismatch must be reported as `TrFailed`.
// NOTE(review): the `fn f()` wrapper and the `ignore:` initializer fall in
// gaps of this fragment.
1515 fn test_should_panic_bad_message() {
1517 panic!("an error message");
1519 let desc = TestDescAndFn {
1521 name: StaticTestName("whatever"),
1523 should_panic: ShouldPanic::YesWithMessage("foobar"),
1525 testfn: DynTestFn(Box::new(move || f())),
1527 let (tx, rx) = channel();
1528 run_test(&TestOpts::new(), false, desc, tx);
1529 let (_, res, _) = rx.recv().unwrap();
1530 assert!(res == TrFailed);
// A `ShouldPanic::Yes` test whose body returns normally (without panicking)
// must be reported as `TrFailed`.
// NOTE(review): the local `fn f()` (original lines ~1535) is missing from
// this fragment; presumably it is an empty, non-panicking body — confirm.
1534 fn test_should_panic_but_succeeds() {
1536 let desc = TestDescAndFn {
1538 name: StaticTestName("whatever"),
1540 should_panic: ShouldPanic::Yes,
1542 testfn: DynTestFn(Box::new(move || f())),
1544 let (tx, rx) = channel();
1545 run_test(&TestOpts::new(), false, desc, tx);
1546 let (_, res, _) = rx.recv().unwrap();
1547 assert!(res == TrFailed);
// Passing `--ignored` on the command line must set `TestOpts::run_ignored`.
// NOTE(review): the success arm of the `match` (original line 1554, which
// presumably extracts the parsed options) is missing from this fragment;
// only the catch-all error arm is visible.
1551 fn parse_ignored_flag() {
1552 let args = vec!["progname".to_string(), "filter".to_string(), "--ignored".to_string()];
1553 let opts = match parse_opts(&args) {
1555 _ => panic!("Malformed arg in parse_ignored_flag"),
1557 assert!((opts.run_ignored));
// With `run_ignored` set, `filter_tests` must drop the non-ignored tests and
// clear the `ignore` flag on the survivors (see the comment kept below).
// NOTE(review): the `ignore:` initializers for tests "1" and "2" (original
// lines ~1572 and ~1580) fall in gaps of this fragment; from the assertions,
// presumably "1" is the ignored one — confirm upstream.
1561 pub fn filter_for_ignored_option() {
1562 // When we run ignored tests the test filter should filter out all the
1563 // unignored tests and flip the ignore flag on the rest to false
1565 let mut opts = TestOpts::new();
1566 opts.run_tests = true;
1567 opts.run_ignored = true;
// Two tests with no-op bodies; only the name/should_panic/testfn fields are
// visible here, the vec!'s inner braces fall in gaps.
1569 let tests = vec![TestDescAndFn {
1571 name: StaticTestName("1"),
1573 should_panic: ShouldPanic::No,
1575 testfn: DynTestFn(Box::new(move || {})),
1579 name: StaticTestName("2"),
1581 should_panic: ShouldPanic::No,
1583 testfn: DynTestFn(Box::new(move || {})),
1585 let filtered = filter_tests(&opts, tests);
// Exactly one test survives, it is "1", and its ignore flag was flipped off.
1587 assert_eq!(filtered.len(), 1);
1588 assert_eq!(filtered[0].desc.name.to_string(), "1");
1589 assert!(!filtered[0].desc.ignore);
// `filter_tests` must return the tests sorted lexicographically by name:
// `names` below is deliberately unsorted and `expected` is the same set in
// sorted order; the final zip/assert checks them pairwise.
// NOTE(review): the `testfn` definition, the `ignore:` field, the
// `tests.push(..)` call, and several closing braces (original lines
// ~1606-1621) are missing from this fragment; only comments are added here.
1593 pub fn sort_tests() {
1594 let mut opts = TestOpts::new();
1595 opts.run_tests = true;
// Input names, intentionally NOT in sorted order.
1597 let names = vec!["sha1::test".to_string(),
1598 "isize::test_to_str".to_string(),
1599 "isize::test_pow".to_string(),
1600 "test::do_not_run_ignored_tests".to_string(),
1601 "test::ignored_tests_result_in_ignored".to_string(),
1602 "test::first_free_arg_should_be_a_filter".to_string(),
1603 "test::parse_ignored_flag".to_string(),
1604 "test::filter_for_ignored_option".to_string(),
1605 "test::sort_tests".to_string()];
// Wrap every name in a TestDescAndFn (the push into `tests` falls in a gap).
1608 let mut tests = Vec::new();
1609 for name in &names {
1610 let test = TestDescAndFn {
1612 name: DynTestName((*name).clone()),
1614 should_panic: ShouldPanic::No,
1616 testfn: DynTestFn(Box::new(testfn)),
1622 let filtered = filter_tests(&opts, tests);
// The same names as above, in lexicographic order.
1624 let expected = vec!["isize::test_pow".to_string(),
1625 "isize::test_to_str".to_string(),
1626 "sha1::test".to_string(),
1627 "test::do_not_run_ignored_tests".to_string(),
1628 "test::filter_for_ignored_option".to_string(),
1629 "test::first_free_arg_should_be_a_filter".to_string(),
1630 "test::ignored_tests_result_in_ignored".to_string(),
1631 "test::parse_ignored_flag".to_string(),
1632 "test::sort_tests".to_string()];
// Pairwise comparison: filtered output must match the sorted expectation.
1634 for (a, b) in expected.iter().zip(filtered) {
1635 assert!(*a == b.desc.name.to_string());
1640 pub fn test_metricmap_compare() {
1641 let mut m1 = MetricMap::new();
1642 let mut m2 = MetricMap::new();
1643 m1.insert_metric("in-both-noise", 1000.0, 200.0);
1644 m2.insert_metric("in-both-noise", 1100.0, 200.0);
1646 m1.insert_metric("in-first-noise", 1000.0, 2.0);
1647 m2.insert_metric("in-second-noise", 1000.0, 2.0);
1649 m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
1650 m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);
1652 m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
1653 m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);
1655 m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
1656 m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);
1658 m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
1659 m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);