1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 //! Support code for rustc's built in unit-test and micro-benchmarking
14 //! Almost all user code will only be interested in `Bencher` and
15 //! `black_box`. All other interactions (such as writing tests and
16 //! benchmarks themselves) should be done via the `#[test]` and
17 //! `#[bench]` attributes.
19 //! See the [Testing Chapter](../book/testing.html) of the book for more details.
21 // Currently, not much of this is meant for users. It is intended to
22 // support the simplest interface possible for representing and
23 // running tests while providing a base that other test frameworks may
26 #![crate_name = "test"]
27 #![unstable(feature = "test", issue = "27812")]
28 #![crate_type = "rlib"]
29 #![crate_type = "dylib"]
30 #![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
31 html_favicon_url = "https://doc.rust-lang.org/favicon.ico",
32 html_root_url = "https://doc.rust-lang.org/nightly/",
33 test(attr(deny(warnings))))]
34 #![cfg_attr(not(stage0), deny(warnings))]
37 #![feature(box_syntax)]
40 #![feature(rustc_private)]
41 #![feature(set_stdio)]
42 #![feature(staged_api)]
43 #![feature(question_mark)]
44 #![feature(panic_unwind)]
49 extern crate panic_unwind;
51 pub use self::TestFn::*;
52 pub use self::ColorConfig::*;
53 pub use self::TestResult::*;
54 pub use self::TestName::*;
55 use self::TestEvent::*;
56 use self::NamePadding::*;
57 use self::OutputLocation::*;
59 use std::boxed::FnBox;
63 use std::collections::BTreeMap;
67 use std::io::prelude::*;
69 use std::iter::repeat;
70 use std::path::PathBuf;
71 use std::sync::mpsc::{channel, Sender};
72 use std::sync::{Arc, Mutex};
74 use std::time::{Instant, Duration};
76 // to be used by rustc to compile tests in libtest
78 pub use {Bencher, TestName, TestResult, TestDesc, TestDescAndFn, TestOpts, TrFailed,
79 TrIgnored, TrOk, Metric, MetricMap, StaticTestFn, StaticTestName, DynTestName,
80 DynTestFn, run_test, test_main, test_main_static, filter_tests, parse_opts,
81 StaticBenchFn, ShouldPanic};
86 // The name of a test. By convention this follows the rules for rust
87 // paths; i.e. it should be a series of identifiers separated by double
88 // colons. This way if some test runner wants to arrange the tests
89 // hierarchically it may.
// NOTE(review): lossily-sampled listing — each line begins with its original
// line number; gaps in those numbers mean dropped lines. Code left byte-identical.
// Fragment of `enum TestName` (the enum header and DynTestName variant fell in
// a dropped range — TODO confirm against the full file).
91 #[derive(Clone, PartialEq, Eq, Hash, Debug)]
93 StaticTestName(&'static str),
// Borrow the test name as &str regardless of variant.
97 fn as_slice(&self) -> &str {
99 StaticTestName(s) => s,
100 DynTestName(ref s) => s,
// Display delegates to the borrowed &str form above.
104 impl fmt::Display for TestName {
105 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
106 fmt::Display::fmt(self.as_slice(), f)
110 #[derive(Clone, Copy, PartialEq, Eq)]
// Pad the test name to `column_count` columns; which side is padded presumably
// depends on `align` in dropped lines — TODO confirm.
117 fn padded_name(&self, column_count: usize, align: NamePadding) -> String {
118 let mut name = String::from(self.name.as_slice());
// saturating_sub: zero fill (no underflow) when the name is already wider.
119 let fill = column_count.saturating_sub(name.len());
120 let pad = repeat(" ").take(fill).collect::<String>();
// NOTE(review): sampled fragment — embedded numbers are original line numbers;
// gaps are dropped lines. Code left byte-identical.
131 /// Represents a benchmark function.
132 pub trait TDynBenchFn: Send {
133 fn run(&self, harness: &mut Bencher);
136 // A function that runs a test. If the function returns successfully,
137 // the test succeeds; if the function panics then the test fails. We
138 // may need to come up with a more clever definition of test in order
139 // to support isolation of tests into threads.
// Variants of `enum TestFn` (enum header and StaticTestFn variant dropped):
// static fn pointers vs. boxed dynamic closures, for tests/benches/metrics.
142 StaticBenchFn(fn(&mut Bencher)),
143 StaticMetricFn(fn(&mut MetricMap)),
144 DynTestFn(Box<FnBox() + Send>),
145 DynMetricFn(Box<FnBox(&mut MetricMap) + Send>),
146 DynBenchFn(Box<TDynBenchFn + 'static>),
// Padding policy per variant: benches/metrics pad on the right so their
// timing columns line up; plain tests are unpadded.
150 fn padding(&self) -> NamePadding {
152 StaticTestFn(..) => PadNone,
153 StaticBenchFn(..) => PadOnRight,
154 StaticMetricFn(..) => PadOnRight,
155 DynTestFn(..) => PadNone,
156 DynMetricFn(..) => PadOnRight,
157 DynBenchFn(..) => PadOnRight,
// Debug prints only the variant name — the contained fns/closures are opaque.
162 impl fmt::Debug for TestFn {
163 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
164 f.write_str(match *self {
165 StaticTestFn(..) => "StaticTestFn(..)",
166 StaticBenchFn(..) => "StaticBenchFn(..)",
167 StaticMetricFn(..) => "StaticMetricFn(..)",
168 DynTestFn(..) => "DynTestFn(..)",
169 DynMetricFn(..) => "DynMetricFn(..)",
170 DynBenchFn(..) => "DynBenchFn(..)",
// NOTE(review): sampled fragment (original line numbers embedded, gaps =
// dropped lines). Declaration fragments for Bencher, ShouldPanic, TestDesc,
// TestPaths, TestDescAndFn, Metric, MetricMap. Code left byte-identical.
175 /// Manager of the benchmarking runs.
177 /// This is fed into functions marked with `#[bench]` to allow for
178 /// set-up & tear-down before running a piece of code repeatedly via a
180 #[derive(Copy, Clone)]
187 #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
// Whether a test is expected to panic; the No/Yes variants fell in dropped lines.
188 pub enum ShouldPanic {
191 YesWithMessage(&'static str),
194 // The definition of a single test. A test runner will run a list of
196 #[derive(Clone, Debug, PartialEq, Eq, Hash)]
197 pub struct TestDesc {
200 pub should_panic: ShouldPanic,
204 pub struct TestPaths {
205 pub file: PathBuf, // e.g., compile-test/foo/bar/baz.rs
206 pub base: PathBuf, // e.g., compile-test, auxiliary
207 pub relative_dir: PathBuf, // e.g., foo/bar
211 pub struct TestDescAndFn {
216 #[derive(Clone, PartialEq, Debug, Copy)]
223 pub fn new(value: f64, noise: f64) -> Metric {
// Newtype over a BTreeMap so metrics iterate in sorted (name) order.
232 pub struct MetricMap(BTreeMap<String, Metric>);
// Manual Clone (rather than derive) — presumably historical; clones the map.
234 impl Clone for MetricMap {
235 fn clone(&self) -> MetricMap {
236 let MetricMap(ref map) = *self;
237 MetricMap(map.clone())
// NOTE(review): sampled fragment (embedded original line numbers; gaps =
// dropped lines). Code left byte-identical.
241 // The default console test runner. It accepts the command line
242 // arguments and a vector of test_descs.
243 pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>) {
244 let opts = match parse_opts(args) {
246 Some(Err(msg)) => panic!("{:?}", msg),
249 match run_tests_console(&opts, tests) {
// Exit code 101 signals test failure to the harness caller.
251 Ok(false) => std::process::exit(101),
252 Err(e) => panic!("io error when running tests: {:?}", e),
256 // A variant optimized for invocation with a static test vector.
257 // This will panic (intentionally) when fed any dynamic tests, because
258 // it is copying the static values out into a dynamic vector and cannot
259 // copy dynamic values. It is doing this because from this point on
260 // a Vec<TestDescAndFn> is used in order to effect ownership-transfer
261 // semantics into parallel test runners, which in turn requires a Vec<>
262 // rather than a &[].
263 pub fn test_main_static(tests: &[TestDescAndFn]) {
264 let args = env::args().collect::<Vec<_>>();
// Copy out static fn pointers into owned TestDescAndFn values; the dropped
// lines presumably contain the StaticTestFn match arm — TODO confirm.
265 let owned_tests = tests.iter()
270 testfn: StaticTestFn(f),
271 desc: t.desc.clone(),
274 StaticBenchFn(f) => {
276 testfn: StaticBenchFn(f),
277 desc: t.desc.clone(),
280 _ => panic!("non-static tests passed to test::test_main_static"),
284 test_main(&args, owned_tests)
// NOTE(review): sampled fragment (embedded original line numbers; gaps =
// dropped lines). Code left byte-identical.
287 #[derive(Copy, Clone)]
// Output-coloring mode; Auto/Always/Never variants fell in dropped lines.
288 pub enum ColorConfig {
// Parsed command-line options for a test run (some fields dropped from view).
294 pub struct TestOpts {
295 pub filter: Option<String>,
296 pub run_ignored: bool,
298 pub bench_benchmarks: bool,
299 pub logfile: Option<PathBuf>,
301 pub color: ColorConfig,
// Default construction used by unit tests below; full field list dropped.
307 fn new() -> TestOpts {
312 bench_benchmarks: false,
321 /// Result of parsing the options.
322 pub type OptRes = Result<TestOpts, String>;
// NOTE(review): sampled fragment (embedded original line numbers; gaps =
// dropped lines). Code left byte-identical.
324 #[cfg_attr(rustfmt, rustfmt_skip)]
// getopts option table consumed by parse_opts() and usage().
325 fn optgroups() -> Vec<getopts::OptGroup> {
326 vec!(getopts::optflag("", "ignored", "Run ignored tests"),
327 getopts::optflag("", "test", "Run tests and not benchmarks"),
328 getopts::optflag("", "bench", "Run benchmarks instead of tests"),
329 getopts::optflag("h", "help", "Display this message (longer with --help)"),
330 getopts::optopt("", "logfile", "Write logs to the specified file instead \
332 getopts::optflag("", "nocapture", "don't capture stdout/stderr of each \
333 task, allow printing directly"),
334 getopts::optflag("q", "quiet", "Display one character per test instead of one line"),
335 getopts::optopt("", "color", "Configure coloring of output:
336 auto = colorize if stdout is a tty and tests are run on serially (default);
337 always = always colorize output;
338 never = never colorize output;", "auto|always|never"))
// Long-form --help text; the surrounding println!/format! scaffolding fell in
// dropped lines — TODO confirm.
341 fn usage(binary: &str) {
342 let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
345 The FILTER string is tested against the name of all tests, and only those
346 tests whose names contain the filter are run.
348 By default, all tests are run in parallel. This can be altered with the
349 RUST_TEST_THREADS environment variable when running tests (set it to 1).
351 All tests have their standard output and standard error captured by default.
352 This can be overridden with the --nocapture flag or setting RUST_TEST_NOCAPTURE
353 environment variable to a value other than "0". Logging is not captured by default.
357 #[test] - Indicates a function is a test to be run. This function
359 #[bench] - Indicates a function is a benchmark to be run. This
360 function takes one argument (test::Bencher).
361 #[should_panic] - This function (also labeled with #[test]) will only pass if
362 the code causes a panic (an assertion failure or panic!)
363 A message may be provided, which the failure string must
364 contain: #[should_panic(expected = "foo")].
365 #[ignore] - When applied to a function which is already attributed as a
366 test, then the test runner will ignore these tests during
367 normal test runs. Running with --ignored will run these
369 usage = getopts::usage(&message, &optgroups()));
// NOTE(review): sampled fragment (embedded original line numbers; gaps =
// dropped lines). Code left byte-identical.
372 // Parses command line arguments into test options
// Returns None (help requested — inferred, TODO confirm), Some(Err) on bad
// args, Some(Ok(TestOpts)) otherwise.
373 pub fn parse_opts(args: &[String]) -> Option<OptRes> {
// Skip argv[0] (program name).
374 let args_ = &args[1..];
375 let matches = match getopts::getopts(args_, &optgroups()) {
377 Err(f) => return Some(Err(f.to_string())),
380 if matches.opt_present("h") {
// First free argument, if any, is the test-name filter.
385 let filter = if !matches.free.is_empty() {
386 Some(matches.free[0].clone())
391 let run_ignored = matches.opt_present("ignored");
392 let quiet = matches.opt_present("quiet");
394 let logfile = matches.opt_str("logfile");
395 let logfile = logfile.map(|s| PathBuf::from(&s));
397 let bench_benchmarks = matches.opt_present("bench");
398 let run_tests = !bench_benchmarks || matches.opt_present("test");
// RUST_TEST_NOCAPTURE overrides the flag; any value other than "0" enables it.
400 let mut nocapture = matches.opt_present("nocapture");
402 nocapture = match env::var("RUST_TEST_NOCAPTURE") {
403 Ok(val) => &val != "0",
408 let color = match matches.opt_str("color").as_ref().map(|s| &**s) {
409 Some("auto") | None => AutoColor,
410 Some("always") => AlwaysColor,
411 Some("never") => NeverColor,
414 return Some(Err(format!("argument for --color must be auto, always, or never (was \
420 let test_opts = TestOpts {
422 run_ignored: run_ignored,
423 run_tests: run_tests,
424 bench_benchmarks: bench_benchmarks,
426 nocapture: nocapture,
// NOTE(review): sampled fragment (embedded original line numbers; gaps =
// dropped lines). Code left byte-identical.
434 #[derive(Clone, PartialEq)]
// Statistics gathered for one benchmark (mb_s field dropped from view).
435 pub struct BenchSamples {
436 ns_iter_summ: stats::Summary,
440 #[derive(Clone, PartialEq)]
// Outcome of a single test; TrOk/TrFailed/TrIgnored variants dropped.
441 pub enum TestResult {
445 TrMetrics(MetricMap),
446 TrBench(BenchSamples),
// SAFETY claim is in the original: Send is asserted manually here — the
// justification lives in dropped lines; TODO confirm before relying on it.
449 unsafe impl Send for TestResult {}
// Either a terminal (colored) or a raw Write sink.
451 enum OutputLocation<T> {
452 Pretty(Box<term::StdoutTerminal>),
// Mutable state for the console runner: counters, log file, failure buffer.
456 struct ConsoleTestState<T> {
457 log_out: Option<File>,
458 out: OutputLocation<T>,
467 failures: Vec<(TestDesc, Vec<u8>)>,
468 max_name_len: usize, // number of columns to fill when aligning names
// NOTE(review): sampled fragment of the ConsoleTestState impl (embedded
// original line numbers; gaps = dropped lines). Code left byte-identical.
471 impl<T: Write> ConsoleTestState<T> {
// Open the optional logfile and pick terminal vs. raw stdout output.
472 pub fn new(opts: &TestOpts, _: Option<T>) -> io::Result<ConsoleTestState<io::Stdout>> {
473 let log_out = match opts.logfile {
474 Some(ref path) => Some(File::create(path)?),
477 let out = match term::stdout() {
478 None => Raw(io::stdout()),
479 Some(t) => Pretty(t),
482 Ok(ConsoleTestState {
485 use_color: use_color(opts),
492 metrics: MetricMap::new(),
493 failures: Vec::new(),
// Per-result writers: verbose word + single-char quiet form + color.
498 pub fn write_ok(&mut self) -> io::Result<()> {
499 self.write_short_result("ok", ".", term::color::GREEN)
502 pub fn write_failed(&mut self) -> io::Result<()> {
503 self.write_short_result("FAILED", "F", term::color::RED)
506 pub fn write_ignored(&mut self) -> io::Result<()> {
507 self.write_short_result("ignored", "i", term::color::YELLOW)
510 pub fn write_metric(&mut self) -> io::Result<()> {
511 self.write_pretty("metric", term::color::CYAN)
514 pub fn write_bench(&mut self) -> io::Result<()> {
515 self.write_pretty("bench", term::color::CYAN)
// Quiet mode prints the one-char form; otherwise word + newline (the branch
// condition fell in dropped lines — TODO confirm it tests self.quiet).
518 pub fn write_short_result(&mut self, verbose: &str, quiet: &str, color: term::color::Color)
521 self.write_pretty(quiet, color)
523 self.write_pretty(verbose, color)?;
524 self.write_plain("\n")
// Colored write when the sink is a terminal; plain bytes otherwise.
528 pub fn write_pretty(&mut self, word: &str, color: term::color::Color) -> io::Result<()> {
530 Pretty(ref mut term) => {
534 term.write_all(word.as_bytes())?;
540 Raw(ref mut stdout) => {
541 stdout.write_all(word.as_bytes())?;
547 pub fn write_plain(&mut self, s: &str) -> io::Result<()> {
549 Pretty(ref mut term) => {
550 term.write_all(s.as_bytes())?;
553 Raw(ref mut stdout) => {
554 stdout.write_all(s.as_bytes())?;
// "running N test(s)" banner; singular/plural noun chosen from len.
560 pub fn write_run_start(&mut self, len: usize) -> io::Result<()> {
562 let noun = if len != 1 {
567 self.write_plain(&format!("\nrunning {} {}\n", len, noun))
570 pub fn write_test_start(&mut self, test: &TestDesc, align: NamePadding) -> io::Result<()> {
// Quiet mode skips the per-test header except for padded (bench) entries.
571 if self.quiet && align != PadOnRight {
574 let name = test.padded_name(self.max_name_len, align);
575 self.write_plain(&format!("test {} ... ", name))
579 pub fn write_result(&mut self, result: &TestResult) -> io::Result<()> {
581 TrOk => self.write_ok(),
582 TrFailed => self.write_failed(),
583 TrIgnored => self.write_ignored(),
584 TrMetrics(ref mm) => {
585 self.write_metric()?;
586 self.write_plain(&format!(": {}\n", mm.fmt_metrics()))
590 self.write_plain(&format!(": {}\n", fmt_bench_samples(bs)))
// Append "name result" to the logfile, if one was configured.
595 pub fn write_log(&mut self, test: &TestDesc, result: &TestResult) -> io::Result<()> {
599 let s = format!("{} {}\n",
601 TrOk => "ok".to_owned(),
602 TrFailed => "failed".to_owned(),
603 TrIgnored => "ignored".to_owned(),
604 TrMetrics(ref mm) => mm.fmt_metrics(),
605 TrBench(ref bs) => fmt_bench_samples(bs),
608 o.write_all(s.as_bytes())
// Dump captured stdout of each failed test, then the sorted name list
// (sorting happens in a dropped line — see the unit test below).
613 pub fn write_failures(&mut self) -> io::Result<()> {
614 self.write_plain("\nfailures:\n")?;
615 let mut failures = Vec::new();
616 let mut fail_out = String::new();
617 for &(ref f, ref stdout) in &self.failures {
618 failures.push(f.name.to_string());
619 if !stdout.is_empty() {
620 fail_out.push_str(&format!("---- {} stdout ----\n\t", f.name));
621 let output = String::from_utf8_lossy(stdout);
622 fail_out.push_str(&output);
623 fail_out.push_str("\n");
626 if !fail_out.is_empty() {
627 self.write_plain("\n")?;
628 self.write_plain(&fail_out)?;
631 self.write_plain("\nfailures:\n")?;
633 for name in &failures {
634 self.write_plain(&format!(" {}\n", name))?;
// Final summary line; returns Ok(true) iff no test failed.
639 pub fn write_run_finish(&mut self) -> io::Result<bool> {
// Invariant: every filtered test was accounted for exactly once.
640 assert!(self.passed + self.failed + self.ignored + self.measured == self.total);
642 let success = self.failed == 0;
644 self.write_failures()?;
647 self.write_plain("\ntest result: ")?;
649 // There's no parallelism at this point so it's safe to use color
650 self.write_pretty("ok", term::color::GREEN)?;
652 self.write_pretty("FAILED", term::color::RED)?;
654 let s = format!(". {} passed; {} failed; {} ignored; {} measured\n\n",
659 self.write_plain(&s)?;
// NOTE(review): sampled fragment (embedded original line numbers; gaps =
// dropped lines). Code left byte-identical.
664 // Format a number with thousands separators
// Walks power-of-ten groups (billions..units); `trailing` forces zero-padded
// "{:03}" groups once the leading group has been emitted. The separator push
// fell in dropped lines. Note: `sep` is unused in the visible lines.
665 fn fmt_thousands_sep(mut n: usize, sep: char) -> String {
667 let mut output = String::new();
668 let mut trailing = false;
669 for &pow in &[9, 6, 3, 0] {
670 let base = 10_usize.pow(pow);
671 if pow == 0 || trailing || n / base != 0 {
673 output.write_fmt(format_args!("{}", n / base)).unwrap();
675 output.write_fmt(format_args!("{:03}", n / base)).unwrap();
// Render "median ns/iter (+/- spread)" and, when bytes were set, MB/s.
688 pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
690 let mut output = String::new();
692 let median = bs.ns_iter_summ.median as usize;
// Spread = max - min of the per-sample ns/iter summary.
693 let deviation = (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize;
695 output.write_fmt(format_args!("{:>11} ns/iter (+/- {})",
696 fmt_thousands_sep(median, ','),
697 fmt_thousands_sep(deviation, ',')))
700 output.write_fmt(format_args!(" = {} MB/s", bs.mb_s)).unwrap();
// NOTE(review): sampled fragment (embedded original line numbers; gaps =
// dropped lines). Code left byte-identical.
705 // A simple console test runner
// Drives run_tests() and renders each TestEvent into the console state;
// returns Ok(true) iff all tests passed.
706 pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<bool> {
708 fn callback<T: Write>(event: &TestEvent, st: &mut ConsoleTestState<T>) -> io::Result<()> {
709 match (*event).clone() {
710 TeFiltered(ref filtered_tests) => st.write_run_start(filtered_tests.len()),
711 TeWait(ref test, padding) => st.write_test_start(test, padding),
712 TeResult(test, result, stdout) => {
713 st.write_log(&test, &result)?;
714 st.write_result(&result)?;
// Counter updates per result; TrFailed/TrBench arms fell in dropped lines.
716 TrOk => st.passed += 1,
717 TrIgnored => st.ignored += 1,
// Metric results get folded into st.metrics under "testname.key".
719 let tname = test.name;
720 let MetricMap(mm) = mm;
723 .insert_metric(&format!("{}.{}", tname, k), v.value, v.noise);
728 st.metrics.insert_metric(test.name.as_slice(),
729 bs.ns_iter_summ.median,
730 bs.ns_iter_summ.max - bs.ns_iter_summ.min);
735 st.failures.push((test, stdout));
743 let mut st = ConsoleTestState::new(opts, None::<io::Stdout>)?;
// Column width comes from the longest name among padded (bench) entries.
744 fn len_if_padded(t: &TestDescAndFn) -> usize {
745 match t.testfn.padding() {
747 PadOnRight => t.desc.name.as_slice().len(),
750 if let Some(t) = tests.iter().max_by_key(|t| len_if_padded(*t)) {
751 let n = t.desc.name.as_slice();
752 st.max_name_len = n.len();
754 run_tests(opts, tests, |x| callback(&x, &mut st))?;
755 return st.write_run_finish();
// NOTE(review): sampled unit-test fragment (embedded original line numbers;
// gaps = dropped lines). Code left byte-identical.
// Verifies write_failures() lists failing tests in sorted name order: pushes
// (b, a) out of order, then asserts "a" prints before "b".
759 fn should_sort_failures_before_printing_them() {
760 let test_a = TestDesc {
761 name: StaticTestName("a"),
763 should_panic: ShouldPanic::No,
766 let test_b = TestDesc {
767 name: StaticTestName("b"),
769 should_panic: ShouldPanic::No,
772 let mut st = ConsoleTestState {
// Raw(Vec) captures output in memory so the test can inspect it.
774 out: Raw(Vec::new()),
783 metrics: MetricMap::new(),
// Deliberately inserted in reverse order (b before a).
784 failures: vec![(test_b, Vec::new()), (test_a, Vec::new())],
787 st.write_failures().unwrap();
788 let s = match st.out {
789 Raw(ref m) => String::from_utf8_lossy(&m[..]),
790 Pretty(_) => unreachable!(),
793 let apos = s.find("a").unwrap();
794 let bpos = s.find("b").unwrap();
795 assert!(apos < bpos);
// NOTE(review): sampled fragment (embedded original line numbers; gaps =
// dropped lines). Code left byte-identical.
// Auto color only when output isn't captured and stdout is a tty; the
// Always/Never arms fell in dropped lines.
798 fn use_color(opts: &TestOpts) -> bool {
800 AutoColor => !opts.nocapture && stdout_isatty(),
// Unix variant (cfg attribute dropped from view — TODO confirm): libc isatty.
807 fn stdout_isatty() -> bool {
808 unsafe { libc::isatty(libc::STDOUT_FILENO) != 0 }
// Windows variant: a console handle answers GetConsoleMode; redirected
// handles do not.
811 fn stdout_isatty() -> bool {
814 type HANDLE = *mut u8;
815 type LPDWORD = *mut u32;
816 const STD_OUTPUT_HANDLE: DWORD = -11i32 as DWORD;
818 fn GetStdHandle(which: DWORD) -> HANDLE;
819 fn GetConsoleMode(hConsoleHandle: HANDLE, lpMode: LPDWORD) -> BOOL;
822 let handle = GetStdHandle(STD_OUTPUT_HANDLE);
824 GetConsoleMode(handle, &mut out) != 0
// Events emitted by run_tests() to its callback (enum header dropped).
830 TeFiltered(Vec<TestDesc>),
831 TeWait(TestDesc, NamePadding),
832 TeResult(TestDesc, TestResult, Vec<u8>),
// (desc, result, captured stdout) — the channel message type for test workers.
835 pub type MonitorMsg = (TestDesc, TestResult, Vec<u8>);
// NOTE(review): sampled fragment (embedded original line numbers; gaps =
// dropped lines). Code left byte-identical.
// Core scheduler: runs tests with bounded concurrency, then benches/metrics
// serially, reporting progress through `callback`.
838 fn run_tests<F>(opts: &TestOpts, tests: Vec<TestDescAndFn>, mut callback: F) -> io::Result<()>
839 where F: FnMut(TestEvent) -> io::Result<()>
841 let mut filtered_tests = filter_tests(opts, tests);
// Without --bench, benchmarks are downgraded to single-shot tests.
842 if !opts.bench_benchmarks {
843 filtered_tests = convert_benchmarks_to_tests(filtered_tests);
846 let filtered_descs = filtered_tests.iter()
847 .map(|t| t.desc.clone())
850 callback(TeFiltered(filtered_descs))?;
// Split plain tests (run concurrently) from benches/metrics (run serially).
852 let (filtered_tests, filtered_benchs_and_metrics): (Vec<_>, _) =
853 filtered_tests.into_iter().partition(|e| {
855 StaticTestFn(_) | DynTestFn(_) => true,
860 // It's tempting to just spawn all the tests at once, but since we have
861 // many tests that run in other processes we would be making a big mess.
862 let concurrency = get_concurrency();
864 let mut remaining = filtered_tests;
868 let (tx, rx) = channel::<MonitorMsg>();
// Keep up to `concurrency` tests in flight; reap one result per outer loop.
870 while pending > 0 || !remaining.is_empty() {
871 while pending < concurrency && !remaining.is_empty() {
872 let test = remaining.pop().unwrap();
873 if concurrency == 1 {
874 // We are doing one test at a time so we can print the name
875 // of the test before we run it. Useful for debugging tests
876 // that hang forever.
877 callback(TeWait(test.desc.clone(), test.testfn.padding()))?;
879 run_test(opts, !opts.run_tests, test, tx.clone());
883 let (desc, result, stdout) = rx.recv().unwrap();
// With >1 workers the name is printed only when the result arrives.
884 if concurrency != 1 {
885 callback(TeWait(desc.clone(), PadNone))?;
887 callback(TeResult(desc, result, stdout))?;
891 if opts.bench_benchmarks {
892 // All benchmarks run at the end, in serial.
893 // (this includes metric fns)
894 for b in filtered_benchs_and_metrics {
895 callback(TeWait(b.desc.clone(), b.testfn.padding()))?;
896 run_test(opts, false, b, tx.clone());
897 let (test, result, stdout) = rx.recv().unwrap();
898 callback(TeResult(test, result, stdout))?;
// NOTE(review): sampled fragment (embedded original line numbers; gaps =
// dropped lines). Code left byte-identical.
// Worker count: RUST_TEST_THREADS (must be a positive integer) or num_cpus().
905 fn get_concurrency() -> usize {
906 return match env::var("RUST_TEST_THREADS") {
908 let opt_n: Option<usize> = s.parse().ok();
910 Some(n) if n > 0 => n,
912 panic!("RUST_TEST_THREADS is `{}`, should be a positive integer.",
917 Err(..) => num_cpus(),
// Windows variant: read dwNumberOfProcessors from GetSystemInfo.
922 fn num_cpus() -> usize {
925 wProcessorArchitecture: u16,
928 lpMinimumApplicationAddress: *mut u8,
929 lpMaximumApplicationAddress: *mut u8,
930 dwActiveProcessorMask: *mut u8,
931 dwNumberOfProcessors: u32,
932 dwProcessorType: u32,
933 dwAllocationGranularity: u32,
934 wProcessorLevel: u16,
935 wProcessorRevision: u16,
938 fn GetSystemInfo(info: *mut SYSTEM_INFO) -> i32;
// SAFETY (original): SYSTEM_INFO is plain-old-data, zeroed init is valid.
941 let mut sysinfo = std::mem::zeroed();
942 GetSystemInfo(&mut sysinfo);
943 sysinfo.dwNumberOfProcessors as usize
947 #[cfg(any(target_os = "linux",
950 target_os = "android",
951 target_os = "solaris",
952 target_os = "emscripten"))]
// sysconf-based variant for Linux-like systems.
953 fn num_cpus() -> usize {
954 unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as usize }
957 #[cfg(any(target_os = "freebsd",
958 target_os = "dragonfly",
959 target_os = "bitrig",
960 target_os = "netbsd"))]
// BSD variant: sysctl(CTL_HW, HW_NCPU) with a sysconf fallback path visible.
961 fn num_cpus() -> usize {
962 let mut cpus: libc::c_uint = 0;
963 let mut cpus_size = std::mem::size_of_val(&cpus);
966 cpus = libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as libc::c_uint;
969 let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
971 libc::sysctl(mib.as_mut_ptr(),
973 &mut cpus as *mut _ as *mut _,
974 &mut cpus_size as *mut _ as *mut _,
985 #[cfg(target_os = "openbsd")]
// OpenBSD variant: sysctl only (no _SC_NPROCESSORS_ONLN fallback visible).
986 fn num_cpus() -> usize {
987 let mut cpus: libc::c_uint = 0;
988 let mut cpus_size = std::mem::size_of_val(&cpus);
989 let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
992 libc::sysctl(mib.as_mut_ptr(),
994 &mut cpus as *mut _ as *mut _,
995 &mut cpus_size as *mut _ as *mut _,
// NOTE(review): sampled fragment (embedded original line numbers; gaps =
// dropped lines). Code left byte-identical.
// Applies the name filter, the --ignored selection, and a final sort by name.
1006 pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
1007 let mut filtered = tests;
1009 // Remove tests that don't match the test filter
1010 filtered = match opts.filter {
1012 Some(ref filter) => {
// Substring match against the test's full path-like name.
1013 filtered.into_iter()
1014 .filter(|test| test.desc.name.as_slice().contains(&filter[..]))
1019 // Maybe pull out the ignored test and unignore them
1020 filtered = if !opts.run_ignored {
// With --ignored: keep only ignored tests and flip their flag to false
// so the runner will actually execute them.
1023 fn filter(test: TestDescAndFn) -> Option<TestDescAndFn> {
1024 if test.desc.ignore {
1025 let TestDescAndFn {desc, testfn} = test;
1026 Some(TestDescAndFn {
1027 desc: TestDesc { ignore: false, ..desc },
1034 filtered.into_iter().filter_map(filter).collect()
1037 // Sort the tests alphabetically
1038 filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));
// Wrap each bench in a one-iteration test closure; plain tests pass through
// (the pass-through arm fell in dropped lines — TODO confirm).
1043 pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
1044 // convert benchmarks to tests, if we're not benchmarking them
1047 let testfn = match x.testfn {
1048 DynBenchFn(bench) => {
1049 DynTestFn(Box::new(move || bench::run_once(|b| bench.run(b))))
1051 StaticBenchFn(benchfn) => {
1052 DynTestFn(Box::new(move || bench::run_once(|b| benchfn(b))))
// NOTE(review): sampled fragment (embedded original line numbers; gaps =
// dropped lines). Code left byte-identical.
// Executes one test/bench/metric and sends (desc, result, stdout) on the
// channel. `force_ignore` short-circuits to TrIgnored.
1064 pub fn run_test(opts: &TestOpts,
1066 test: TestDescAndFn,
1067 monitor_ch: Sender<MonitorMsg>) {
1069 let TestDescAndFn {desc, testfn} = test;
1071 if force_ignore || desc.ignore {
1072 monitor_ch.send((desc, TrIgnored, Vec::new())).unwrap();
// Spawns the test closure in its own named thread, redirecting its
// print/panic output into a shared buffer so failures can show stdout.
1076 fn run_test_inner(desc: TestDesc,
1077 monitor_ch: Sender<MonitorMsg>,
1079 testfn: Box<FnBox() + Send>) {
// Write adapter that appends into an Arc<Mutex<Vec<u8>>> capture buffer.
1080 struct Sink(Arc<Mutex<Vec<u8>>>);
1081 impl Write for Sink {
1082 fn write(&mut self, data: &[u8]) -> io::Result<usize> {
1083 Write::write(&mut *self.0.lock().unwrap(), data)
1085 fn flush(&mut self) -> io::Result<()> {
1090 thread::spawn(move || {
1091 let data = Arc::new(Mutex::new(Vec::new()));
1092 let data2 = data.clone();
// Thread is named after the test so panic messages identify it.
1093 let cfg = thread::Builder::new().name(match desc.name {
1094 DynTestName(ref name) => name.clone(),
1095 StaticTestName(name) => name.to_owned(),
1098 let result_guard = cfg.spawn(move || {
// Capture the child thread's print/panic output (gated on nocapture in
// dropped lines — TODO confirm).
1100 io::set_print(box Sink(data2.clone()));
1101 io::set_panic(box Sink(data2));
// join() yields Err on panic; calc_result maps that against should_panic.
1106 let test_result = calc_result(&desc, result_guard.join());
1107 let stdout = data.lock().unwrap().to_vec();
1108 monitor_ch.send((desc.clone(), test_result, stdout)).unwrap();
// Dispatch on the TestFn variant (match header fell in dropped lines).
1113 DynBenchFn(bencher) => {
1114 let bs = ::bench::benchmark(|harness| bencher.run(harness));
1115 monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
1118 StaticBenchFn(benchfn) => {
1119 let bs = ::bench::benchmark(|harness| (benchfn.clone())(harness));
1120 monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
1124 let mut mm = MetricMap::new();
1125 f.call_box((&mut mm,));
1126 monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
1129 StaticMetricFn(f) => {
1130 let mut mm = MetricMap::new();
1132 monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
1135 DynTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture, f),
1136 StaticTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture, Box::new(f)),
// Map the joined thread outcome against should_panic; a YesWithMessage
// expectation checks the panic payload (String or &'static str) contains msg.
1140 fn calc_result(desc: &TestDesc, task_result: Result<(), Box<Any + Send>>) -> TestResult {
1141 match (&desc.should_panic, task_result) {
1142 (&ShouldPanic::No, Ok(())) |
1143 (&ShouldPanic::Yes, Err(_)) => TrOk,
1144 (&ShouldPanic::YesWithMessage(msg), Err(ref err))
1145 if err.downcast_ref::<String>()
1147 .or_else(|| err.downcast_ref::<&'static str>().map(|e| *e))
1148 .map(|e| e.contains(msg))
1149 .unwrap_or(false) => TrOk,
// NOTE(review): sampled fragment of `impl MetricMap` (embedded original line
// numbers; gaps = dropped lines). Code left byte-identical.
1155 pub fn new() -> MetricMap {
1156 MetricMap(BTreeMap::new())
1159 /// Insert a named `value` (+/- `noise`) metric into the map. The value
1160 /// must be non-negative. The `noise` indicates the uncertainty of the
1161 /// metric, which doubles as the "noise range" of acceptable
1162 /// pairwise-regressions on this named value, when comparing from one
1163 /// metric to the next using `compare_to_old`.
1165 /// If `noise` is positive, then it means this metric is of a value
1166 /// you want to see grow smaller, so a change larger than `noise` in the
1167 /// positive direction represents a regression.
1169 /// If `noise` is negative, then it means this metric is of a value
1170 /// you want to see grow larger, so a change larger than `noise` in the
1171 /// negative direction represents a regression.
1172 pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
// Construction of `m` (a Metric from value/noise) fell in dropped lines.
1177 let MetricMap(ref mut map) = *self;
1178 map.insert(name.to_owned(), m);
// Render "name: value (+/- noise)" entries; BTreeMap iterates name-sorted.
1181 pub fn fmt_metrics(&self) -> String {
1182 let MetricMap(ref mm) = *self;
1183 let v: Vec<String> = mm.iter()
1184 .map(|(k, v)| format!("{}: {} (+/- {})", *k, v.value, v.noise))
// NOTE(review): sampled fragment (embedded original line numbers; gaps =
// dropped lines). Code left byte-identical.
1193 /// A function that is opaque to the optimizer, to allow benchmarks to
1194 /// pretend to use outputs to assist in avoiding dead-code
1197 /// This function is a no-op, and does not even read from `dummy`.
1198 #[cfg(not(any(all(target_os = "nacl", target_arch = "le32"),
1199 target_arch = "asmjs")))]
1200 pub fn black_box<T>(dummy: T) -> T {
1201 // we need to "use" the argument in some way LLVM can't
// Empty inline asm taking &dummy as an input defeats constant folding.
1203 unsafe { asm!("" : : "r"(&dummy)) }
// Fallback for targets without inline asm: plain identity (weaker guarantee).
1206 #[cfg(any(all(target_os = "nacl", target_arch = "le32"),
1207 target_arch = "asmjs"))]
1209 pub fn black_box<T>(dummy: T) -> T {
// NOTE(review): sampled fragment of `impl Bencher` (embedded original line
// numbers; gaps = dropped lines). Code left byte-identical.
1215 /// Callback for benchmark functions to run in their body.
// Times `self.iterations` calls of `inner`; the loop body fell in dropped
// lines — presumably calls black_box(inner()) — TODO confirm.
1216 pub fn iter<T, F>(&mut self, mut inner: F)
1217 where F: FnMut() -> T
1219 let start = Instant::now();
1220 let k = self.iterations;
1224 self.dur = start.elapsed();
// Total elapsed nanoseconds of the last measured run.
1227 pub fn ns_elapsed(&mut self) -> u64 {
1228 self.dur.as_secs() * 1_000_000_000 + (self.dur.subsec_nanos() as u64)
1231 pub fn ns_per_iter(&mut self) -> u64 {
1232 if self.iterations == 0 {
// max(..., 1) guards the division (the iterations==0 early branch above
// returns in a dropped line).
1235 self.ns_elapsed() / cmp::max(self.iterations, 1)
1239 pub fn bench_n<F>(&mut self, n: u64, f: F)
1240 where F: FnOnce(&mut Bencher)
1242 self.iterations = n;
1246 // This is a more statistics-driven benchmark algorithm
// Adaptive loop: grow n until runs converge (low median abs. deviation) or
// time budgets (100ms converged / 3s hard cap) are hit; returns the 5n summary.
1247 pub fn auto_bench<F>(&mut self, mut f: F) -> stats::Summary
1248 where F: FnMut(&mut Bencher)
1250 // Initial bench run to get ballpark figure.
1252 self.bench_n(n, |x| f(x));
1254 // Try to estimate iter count for 1ms falling back to 1m
1255 // iterations if first run took < 1ns.
1256 if self.ns_per_iter() == 0 {
1259 n = 1_000_000 / cmp::max(self.ns_per_iter(), 1);
1261 // if the first run took more than 1ms we don't want to just
1262 // be left doing 0 iterations on every loop. The unfortunate
1263 // side effect of not being able to do as many runs is
1264 // automatically handled by the statistical analysis below
1265 // (i.e. larger error bars).
1270 let mut total_run = Duration::new(0, 0);
// Fixed 50-sample windows, winsorized at 5% to clip outliers.
1271 let samples: &mut [f64] = &mut [0.0_f64; 50];
1273 let loop_start = Instant::now();
1275 for p in &mut *samples {
1276 self.bench_n(n, |x| f(x));
1277 *p = self.ns_per_iter() as f64;
1280 stats::winsorize(samples, 5.0);
1281 let summ = stats::Summary::new(samples);
// Second pass at 5n iterations cross-checks the first summary.
1283 for p in &mut *samples {
1284 self.bench_n(5 * n, |x| f(x));
1285 *p = self.ns_per_iter() as f64;
1288 stats::winsorize(samples, 5.0);
1289 let summ5 = stats::Summary::new(samples);
1290 let loop_run = loop_start.elapsed();
1292 // If we've run for 100ms and seem to have converged to a
1294 if loop_run > Duration::from_millis(100) && summ.median_abs_dev_pct < 1.0 &&
1295 summ.median - summ5.median < summ5.median_abs_dev {
1299 total_run = total_run + loop_run;
1300 // Longest we ever run for is 3s.
1301 if total_run > Duration::from_secs(3) {
1305 // If we overflow here just return the results so far. We check a
1306 // multiplier of 10 because we're about to multiply by 2 and the
1307 // next iteration of the loop will also multiply by 5 (to calculate
1308 // the summ5 result)
1309 n = match n.checked_mul(10) {
1311 None => return summ5,
// NOTE(review): sampled fragment of `mod bench` (embedded original line
// numbers; gaps = dropped lines). Code left byte-identical.
1319 use std::time::Duration;
1320 use super::{Bencher, BenchSamples};
// Full benchmark: auto_bench for the ns/iter summary, plus MB/s when the
// bench set `bytes`.
1322 pub fn benchmark<F>(f: F) -> BenchSamples
1323 where F: FnMut(&mut Bencher)
1325 let mut bs = Bencher {
1327 dur: Duration::new(0, 0),
1331 let ns_iter_summ = bs.auto_bench(f);
// max(..., 1) prevents divide-by-zero for sub-ns medians.
1333 let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
1334 let mb_s = bs.bytes * 1000 / ns_iter;
1337 ns_iter_summ: ns_iter_summ,
1338 mb_s: mb_s as usize,
// Single-shot execution used when benches run as plain tests (no --bench).
1342 pub fn run_once<F>(f: F)
1343 where F: FnOnce(&mut Bencher)
1345 let mut bs = Bencher {
1347 dur: Duration::new(0, 0),
1356 use test::{TrFailed, TrIgnored, TrOk, filter_tests, parse_opts, TestDesc, TestDescAndFn,
1357 TestOpts, run_test, MetricMap, StaticTestName, DynTestName, DynTestFn, ShouldPanic};
1358 use std::sync::mpsc::channel;
1361 pub fn do_not_run_ignored_tests() {
1365 let desc = TestDescAndFn {
1367 name: StaticTestName("whatever"),
1369 should_panic: ShouldPanic::No,
1371 testfn: DynTestFn(Box::new(move || f())),
1373 let (tx, rx) = channel();
1374 run_test(&TestOpts::new(), false, desc, tx);
1375 let (_, res, _) = rx.recv().unwrap();
1376 assert!(res != TrOk);
1380 pub fn ignored_tests_result_in_ignored() {
1382 let desc = TestDescAndFn {
1384 name: StaticTestName("whatever"),
1386 should_panic: ShouldPanic::No,
1388 testfn: DynTestFn(Box::new(move || f())),
1390 let (tx, rx) = channel();
1391 run_test(&TestOpts::new(), false, desc, tx);
1392 let (_, res, _) = rx.recv().unwrap();
1393 assert!(res == TrIgnored);
1397 fn test_should_panic() {
1401 let desc = TestDescAndFn {
1403 name: StaticTestName("whatever"),
1405 should_panic: ShouldPanic::Yes,
1407 testfn: DynTestFn(Box::new(move || f())),
1409 let (tx, rx) = channel();
1410 run_test(&TestOpts::new(), false, desc, tx);
1411 let (_, res, _) = rx.recv().unwrap();
1412 assert!(res == TrOk);
// `ShouldPanic::YesWithMessage("error message")` against an actual panic of
// "an error message" yields `TrOk`. Together with the `bad_message` test
// below, this demonstrates the expected-message check is a containment
// match on the panic payload, not full equality — NOTE(review): confirm the
// exact matching rule in `run_test`'s implementation.
1416 fn test_should_panic_good_message() {
// Body of the inner test fn: the panic whose message is being matched.
1418 panic!("an error message");
1420 let desc = TestDescAndFn {
1422 name: StaticTestName("whatever"),
1424 should_panic: ShouldPanic::YesWithMessage("error message"),
1426 testfn: DynTestFn(Box::new(move || f())),
1428 let (tx, rx) = channel();
1429 run_test(&TestOpts::new(), false, desc, tx);
1430 let (_, res, _) = rx.recv().unwrap();
1431 assert!(res == TrOk);
// Inverse of `test_should_panic_good_message`: the test panics with
// "an error message" but the expected message is "foobar", which does not
// occur in the payload, so the run is reported as `TrFailed`.
1435 fn test_should_panic_bad_message() {
// Body of the inner test fn: panics with a message that will NOT match.
1437 panic!("an error message");
1439 let desc = TestDescAndFn {
1441 name: StaticTestName("whatever"),
1443 should_panic: ShouldPanic::YesWithMessage("foobar"),
1445 testfn: DynTestFn(Box::new(move || f())),
1447 let (tx, rx) = channel();
1448 run_test(&TestOpts::new(), false, desc, tx);
1449 let (_, res, _) = rx.recv().unwrap();
1450 assert!(res == TrFailed);
// A test declared `ShouldPanic::Yes` whose body completes WITHOUT panicking
// must be reported as `TrFailed` — not panicking is the failure here.
// NOTE(review): the inner non-panicking fn (presumably `fn f() {}`) and the
// `desc: TestDesc {` / `ignore:` lines are missing from this truncated
// listing; verify against the full source.
1454 fn test_should_panic_but_succeeds() {
1456 let desc = TestDescAndFn {
1458 name: StaticTestName("whatever"),
1460 should_panic: ShouldPanic::Yes,
1462 testfn: DynTestFn(Box::new(move || f())),
1464 let (tx, rx) = channel();
1465 run_test(&TestOpts::new(), false, desc, tx);
1466 let (_, res, _) = rx.recv().unwrap();
1467 assert!(res == TrFailed);
// Command-line parsing: given args containing "--ignored", `parse_opts`
// must produce a `TestOpts` with `run_ignored` set. The catch-all match arm
// panics on any parse failure.
// NOTE(review): the success arm of the `match` (presumably
// `Some(Ok(o)) => o,`) and the closing lines are missing from this
// truncated listing; verify against the full source.
1471 fn parse_ignored_flag() {
1472 let args = vec!["progname".to_string(), "filter".to_string(), "--ignored".to_string()];
1473 let opts = match parse_opts(&args) {
1475 _ => panic!("Malformed arg in parse_ignored_flag"),
1477 assert!((opts.run_ignored));
// With `run_ignored = true`, `filter_tests` must keep only the tests that
// were marked ignored and clear their `ignore` flag: of the two tests "1"
// and "2" built below, exactly one survives ("1"), with `ignore == false`.
// NOTE(review): the `ignore:` field lines of both TestDescAndFn literals
// are missing from this truncated listing — presumably "1" is
// `ignore: true` and "2" is `ignore: false`, given the assertions; verify
// against the full source.
1481 pub fn filter_for_ignored_option() {
1482 // When we run ignored tests the test filter should filter out all the
1483 // unignored tests and flip the ignore flag on the rest to false
1485 let mut opts = TestOpts::new();
1486 opts.run_tests = true;
1487 opts.run_ignored = true;
// Two synthetic tests with no-op bodies; only their descriptors matter.
1489 let tests = vec![TestDescAndFn {
1491 name: StaticTestName("1"),
1493 should_panic: ShouldPanic::No,
1495 testfn: DynTestFn(Box::new(move || {})),
1499 name: StaticTestName("2"),
1501 should_panic: ShouldPanic::No,
1503 testfn: DynTestFn(Box::new(move || {})),
1505 let filtered = filter_tests(&opts, tests);
1507 assert_eq!(filtered.len(), 1);
1508 assert_eq!(filtered[0].desc.name.to_string(), "1");
1509 assert!(!filtered[0].desc.ignore);
// Verifies that `filter_tests` returns tests sorted by name: builds one
// dynamic no-op test per entry in `names` (deliberately unsorted), filters
// with default-plus-`run_tests` options, and checks the output order equals
// `expected` (the same names in lexicographic order).
// NOTE(review): several lines are missing from this truncated listing —
// the no-op `testfn` definition, the `desc: TestDesc {` / `ignore:` /
// `should_panic` completion of the literal, and the `tests.push(test);`
// that accumulates into the vec; verify against the full source.
1513 pub fn sort_tests() {
1514 let mut opts = TestOpts::new();
1515 opts.run_tests = true;
// Input order is intentionally NOT sorted.
1517 let names = vec!["sha1::test".to_string(),
1518 "isize::test_to_str".to_string(),
1519 "isize::test_pow".to_string(),
1520 "test::do_not_run_ignored_tests".to_string(),
1521 "test::ignored_tests_result_in_ignored".to_string(),
1522 "test::first_free_arg_should_be_a_filter".to_string(),
1523 "test::parse_ignored_flag".to_string(),
1524 "test::filter_for_ignored_option".to_string(),
1525 "test::sort_tests".to_string()];
1528 let mut tests = Vec::new();
1529 for name in &names {
1530 let test = TestDescAndFn {
1532 name: DynTestName((*name).clone()),
1534 should_panic: ShouldPanic::No,
1536 testfn: DynTestFn(Box::new(testfn)),
1542 let filtered = filter_tests(&opts, tests);
// Same names as above, but in sorted (lexicographic) order.
1544 let expected = vec!["isize::test_pow".to_string(),
1545 "isize::test_to_str".to_string(),
1546 "sha1::test".to_string(),
1547 "test::do_not_run_ignored_tests".to_string(),
1548 "test::filter_for_ignored_option".to_string(),
1549 "test::first_free_arg_should_be_a_filter".to_string(),
1550 "test::ignored_tests_result_in_ignored".to_string(),
1551 "test::parse_ignored_flag".to_string(),
1552 "test::sort_tests".to_string()];
// Pairwise comparison; zip truncates to the shorter sequence, so equal
// lengths are implicitly assumed here rather than asserted.
1554 for (a, b) in expected.iter().zip(filtered) {
1555 assert!(*a == b.desc.name.to_string());
1560 pub fn test_metricmap_compare() {
1561 let mut m1 = MetricMap::new();
1562 let mut m2 = MetricMap::new();
1563 m1.insert_metric("in-both-noise", 1000.0, 200.0);
1564 m2.insert_metric("in-both-noise", 1100.0, 200.0);
1566 m1.insert_metric("in-first-noise", 1000.0, 2.0);
1567 m2.insert_metric("in-second-noise", 1000.0, 2.0);
1569 m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
1570 m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);
1572 m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
1573 m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);
1575 m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
1576 m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);
1578 m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
1579 m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);