1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 //! Support code for rustc's built in unit-test and micro-benchmarking
14 //! Almost all user code will only be interested in `Bencher` and
15 //! `black_box`. All other interactions (such as writing tests and
16 //! benchmarks themselves) should be done via the `#[test]` and
17 //! `#[bench]` attributes.
19 //! See the [Testing Chapter](../book/testing.html) of the book for more details.
21 // Currently, not much of this is meant for users. It is intended to
22 // support the simplest interface possible for representing and
23 // running tests while providing a base that other test frameworks may
26 #![crate_name = "test"]
27 #![unstable(feature = "test", issue = "27812")]
28 #![crate_type = "rlib"]
29 #![crate_type = "dylib"]
30 #![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
31 html_favicon_url = "https://doc.rust-lang.org/favicon.ico",
32 html_root_url = "https://doc.rust-lang.org/nightly/",
33 test(attr(deny(warnings))))]
34 #![cfg_attr(not(stage0), deny(warnings))]
37 #![feature(box_syntax)]
40 #![feature(rustc_private)]
41 #![feature(set_stdio)]
42 #![feature(staged_api)]
43 #![feature(question_mark)]
44 #![feature(panic_unwind)]
49 extern crate panic_unwind;
51 pub use self::TestFn::*;
52 pub use self::ColorConfig::*;
53 pub use self::TestResult::*;
54 pub use self::TestName::*;
55 use self::TestEvent::*;
56 use self::NamePadding::*;
57 use self::OutputLocation::*;
59 use std::boxed::FnBox;
63 use std::collections::BTreeMap;
67 use std::io::prelude::*;
69 use std::iter::repeat;
70 use std::path::PathBuf;
71 use std::sync::mpsc::{channel, Sender};
72 use std::sync::{Arc, Mutex};
74 use std::time::{Instant, Duration};
76 // to be used by rustc to compile tests in libtest
78 pub use {Bencher, TestName, TestResult, TestDesc, TestDescAndFn, TestOpts, TrFailed,
79 TrIgnored, TrOk, Metric, MetricMap, StaticTestFn, StaticTestName, DynTestName,
80 DynTestFn, run_test, test_main, test_main_static, filter_tests, parse_opts,
81 StaticBenchFn, ShouldPanic};
// NOTE(review): interior lines of this chunk appear elided (non-contiguous
// original numbering); comments only — no code changes.
// `TestName` distinguishes compile-time (`StaticTestName`) from runtime
// (`DynTestName`, elided here — TODO confirm) test names.
86 // The name of a test. By convention this follows the rules for rust
87 // paths; i.e. it should be a series of identifiers separated by double
88 // colons. This way if some test runner wants to arrange the tests
89 // hierarchically it may.
91 #[derive(Clone, PartialEq, Eq, Hash, Debug)]
93 StaticTestName(&'static str),
// Borrow the name as a `&str` regardless of variant.
97 fn as_slice(&self) -> &str {
99 StaticTestName(s) => s,
100 DynTestName(ref s) => s,
// Display delegates to the underlying string slice.
104 impl fmt::Display for TestName {
105 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
106 fmt::Display::fmt(self.as_slice(), f)
// NOTE(review): the enclosing impl header and `NamePadding` variants are
// elided from this view; presumably this is a method on `TestDesc` — confirm.
110 #[derive(Clone, Copy, PartialEq, Eq)]
// Pad `self.name` with spaces up to `column_count` so test names line up
// in console output. `saturating_sub` guards against names longer than
// the column width (fill becomes 0 instead of underflowing).
117 fn padded_name(&self, column_count: usize, align: NamePadding) -> String {
118 let mut name = String::from(self.name.as_slice());
119 let fill = column_count.saturating_sub(name.len());
120 let pad = repeat(" ").take(fill).collect::<String>();
131 /// Represents a benchmark function.
132 pub trait TDynBenchFn: Send {
133 fn run(&self, harness: &mut Bencher);
136 // A function that runs a test. If the function returns successfully,
137 // the test succeeds; if the function panics then the test fails. We
138 // may need to come up with a more clever definition of test in order
139 // to support isolation of tests into threads.
// NOTE(review): the `StaticTestFn` variant line appears elided here.
142 StaticBenchFn(fn(&mut Bencher)),
143 StaticMetricFn(fn(&mut MetricMap)),
144 DynTestFn(Box<FnBox() + Send>),
145 DynMetricFn(Box<FnBox(&mut MetricMap) + Send>),
146 DynBenchFn(Box<TDynBenchFn + 'static>),
// Plain tests get no padding; benches/metrics are right-padded so the
// trailing timing/metric output aligns.
150 fn padding(&self) -> NamePadding {
152 StaticTestFn(..) => PadNone,
153 StaticBenchFn(..) => PadOnRight,
154 StaticMetricFn(..) => PadOnRight,
155 DynTestFn(..) => PadNone,
156 DynMetricFn(..) => PadOnRight,
157 DynBenchFn(..) => PadOnRight,
// Manual Debug impl: boxed closures are not Debug, so print only the
// variant name.
162 impl fmt::Debug for TestFn {
163 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
164 f.write_str(match *self {
165 StaticTestFn(..) => "StaticTestFn(..)",
166 StaticBenchFn(..) => "StaticBenchFn(..)",
167 StaticMetricFn(..) => "StaticMetricFn(..)",
168 DynTestFn(..) => "DynTestFn(..)",
169 DynMetricFn(..) => "DynMetricFn(..)",
170 DynBenchFn(..) => "DynBenchFn(..)",
175 /// Manager of the benchmarking runs.
177 /// This is fed into functions marked with `#[bench]` to allow for
178 /// set-up & tear-down before running a piece of code repeatedly via a
// NOTE(review): the `Bencher` struct fields are elided from this view.
180 #[derive(Copy, Clone)]
// Whether a test is expected to panic; `YesWithMessage` additionally
// requires the panic payload to contain the given substring
// (checked in `calc_result`).
187 #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
188 pub enum ShouldPanic {
191 YesWithMessage(&'static str),
194 // The definition of a single test. A test runner will run a list of
// NOTE(review): several fields of these structs (e.g. `name`, `ignore`)
// are elided from this view.
196 #[derive(Clone, Debug, PartialEq, Eq, Hash)]
197 pub struct TestDesc {
200 pub should_panic: ShouldPanic,
// Paths relevant to a compiletest-style test invocation.
204 pub struct TestPaths {
205 pub file: PathBuf, // e.g., compile-test/foo/bar/baz.rs
206 pub base: PathBuf, // e.g., compile-test, auxiliary
207 pub relative_dir: PathBuf, // e.g., foo/bar
// A test description paired with the function that runs it.
211 pub struct TestDescAndFn {
// A single named measurement: a value plus its noise/uncertainty band.
216 #[derive(Clone, PartialEq, Debug, Copy)]
223 pub fn new(value: f64, noise: f64) -> Metric {
// Newtype over a sorted map from metric name to Metric.
232 pub struct MetricMap(BTreeMap<String, Metric>);
// Manual Clone that clones the inner BTreeMap.
234 impl Clone for MetricMap {
235 fn clone(&self) -> MetricMap {
236 let MetricMap(ref map) = *self;
237 MetricMap(map.clone())
241 // The default console test runner. It accepts the command line
242 // arguments and a vector of test_descs.
// Exits the process with code 101 when any test fails; panics on option
// parse errors or I/O errors from the console runner.
243 pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>) {
244 let opts = match parse_opts(args) {
246 Some(Err(msg)) => panic!("{:?}", msg),
249 match run_tests_console(&opts, tests) {
251 Ok(false) => std::process::exit(101),
252 Err(e) => panic!("io error when running tests: {:?}", e),
256 // A variant optimized for invocation with a static test vector.
257 // This will panic (intentionally) when fed any dynamic tests, because
258 // it is copying the static values out into a dynamic vector and cannot
259 // copy dynamic values. It is doing this because from this point on
260 // a Vec<TestDescAndFn> is used in order to effect ownership-transfer
261 // semantics into parallel test runners, which in turn requires a Vec<>
262 // rather than a &[].
263 pub fn test_main_static(tests: &[TestDescAndFn]) {
264 let args = env::args().collect::<Vec<_>>();
// Copy each static test/bench fn pointer into an owned TestDescAndFn;
// the match arms for the StaticTestFn case appear partially elided here.
265 let owned_tests = tests.iter()
270 testfn: StaticTestFn(f),
271 desc: t.desc.clone(),
274 StaticBenchFn(f) => {
276 testfn: StaticBenchFn(f),
277 desc: t.desc.clone(),
280 _ => panic!("non-static tests passed to test::test_main_static"),
284 test_main(&args, owned_tests)
// Console coloring policy; variants beyond those visible (Auto/Always/
// Never per usage in parse_opts) are elided here.
287 #[derive(Copy, Clone)]
288 pub enum ColorConfig {
// Parsed command-line options controlling a test run. Several fields
// (run_tests, nocapture, quiet) are elided from this view.
294 pub struct TestOpts {
295 pub filter: Option<String>,
296 pub run_ignored: bool,
298 pub bench_benchmarks: bool,
299 pub logfile: Option<PathBuf>,
301 pub color: ColorConfig,
// Default options (used by unit tests below); most initializers elided.
307 fn new() -> TestOpts {
312 bench_benchmarks: false,
321 /// Result of parsing the options.
322 pub type OptRes = Result<TestOpts, String>;
// Builds the getopts option table consumed by parse_opts/usage. Several
// option entries (e.g. --quiet's neighbors) may be elided from this view.
324 #[cfg_attr(rustfmt, rustfmt_skip)]
325 fn optgroups() -> Vec<getopts::OptGroup> {
326 vec!(getopts::optflag("", "ignored", "Run ignored tests"),
327 getopts::optflag("", "test", "Run tests and not benchmarks"),
328 getopts::optflag("", "bench", "Run benchmarks instead of tests"),
329 getopts::optflag("h", "help", "Display this message (longer with --help)"),
330 getopts::optopt("", "logfile", "Write logs to the specified file instead \
332 getopts::optflag("", "nocapture", "don't capture stdout/stderr of each \
333 task, allow printing directly"),
334 getopts::optflag("q", "quiet", "Display one character per test instead of one line"),
335 getopts::optopt("", "color", "Configure coloring of output:
336 auto = colorize if stdout is a tty and tests are run on serially (default);
337 always = always colorize output;
338 never = never colorize output;", "auto|always|never"))
// Prints the long --help text. NOTE(review): most of this body is a
// multi-line string literal (its delimiters are elided from this view),
// so no comments are inserted below to avoid corrupting the string.
341 fn usage(binary: &str) {
342 let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
345 The FILTER string is tested against the name of all tests, and only those
346 tests whose names contain the filter are run.
348 By default, all tests are run in parallel. This can be altered with the
349 RUST_TEST_THREADS environment variable when running tests (set it to 1).
351 All tests have their standard output and standard error captured by default.
352 This can be overridden with the --nocapture flag or setting RUST_TEST_NOCAPTURE
353 environment variable to a value other than "0". Logging is not captured by default.
357 #[test] - Indicates a function is a test to be run. This function
359 #[bench] - Indicates a function is a benchmark to be run. This
360 function takes one argument (test::Bencher).
361 #[should_panic] - This function (also labeled with #[test]) will only pass if
362 the code causes a panic (an assertion failure or panic!)
363 A message may be provided, which the failure string must
364 contain: #[should_panic(expected = "foo")].
365 #[ignore] - When applied to a function which is already attributed as a
366 test, then the test runner will ignore these tests during
367 normal test runs. Running with --ignored will run these
369 usage = getopts::usage(&message, &optgroups()));
372 // Parses command line arguments into test options
// Returns None when --help was requested (usage already printed — TODO
// confirm, that branch is elided), Some(Err(..)) on a parse error, and
// Some(Ok(opts)) otherwise.
373 pub fn parse_opts(args: &[String]) -> Option<OptRes> {
// Skip argv[0] (program name) before handing the rest to getopts.
374 let args_ = &args[1..];
375 let matches = match getopts::getopts(args_, &optgroups()) {
377 Err(f) => return Some(Err(f.to_string())),
380 if matches.opt_present("h") {
// First free (non-flag) argument, if any, is the test-name filter.
385 let filter = if !matches.free.is_empty() {
386 Some(matches.free[0].clone())
391 let run_ignored = matches.opt_present("ignored");
392 let quiet = matches.opt_present("quiet");
394 let logfile = matches.opt_str("logfile");
395 let logfile = logfile.map(|s| PathBuf::from(&s));
// --bench alone disables plain tests unless --test is also given.
397 let bench_benchmarks = matches.opt_present("bench");
398 let run_tests = !bench_benchmarks || matches.opt_present("test");
// RUST_TEST_NOCAPTURE overrides the flag; any value other than "0"
// counts as enabled.
400 let mut nocapture = matches.opt_present("nocapture");
402 nocapture = match env::var("RUST_TEST_NOCAPTURE") {
403 Ok(val) => &val != "0",
408 let color = match matches.opt_str("color").as_ref().map(|s| &**s) {
409 Some("auto") | None => AutoColor,
410 Some("always") => AlwaysColor,
411 Some("never") => NeverColor,
414 return Some(Err(format!("argument for --color must be auto, always, or never (was \
420 let test_opts = TestOpts {
422 run_ignored: run_ignored,
423 run_tests: run_tests,
424 bench_benchmarks: bench_benchmarks,
426 nocapture: nocapture,
// Summary statistics for one benchmark; other fields (e.g. mb_s, used in
// fmt_bench_samples) are elided from this view.
434 #[derive(Clone, PartialEq)]
435 pub struct BenchSamples {
436 ns_iter_summ: stats::Summary,
// Outcome of a single test; TrOk/TrFailed/TrIgnored variants are elided.
440 #[derive(Clone, PartialEq)]
441 pub enum TestResult {
445 TrMetrics(MetricMap),
446 TrBench(BenchSamples),
// NOTE(review): unsafe Send — presumably sound because results only move
// from the worker thread to the monitor channel; verify upstream rationale.
449 unsafe impl Send for TestResult {}
// Where console output goes: a color-capable terminal or a raw writer.
451 enum OutputLocation<T> {
452 Pretty(Box<term::StdoutTerminal>),
// Mutable state accumulated while running tests on the console
// (counters such as passed/failed/ignored are elided from this view).
456 struct ConsoleTestState<T> {
457 log_out: Option<File>,
458 out: OutputLocation<T>,
467 failures: Vec<(TestDesc, Vec<u8>)>,
468 max_name_len: usize, // number of columns to fill when aligning names
471 impl<T: Write> ConsoleTestState<T> {
// Construct console state: open the logfile if requested, and pick a
// color-capable terminal when available, else raw stdout.
472 pub fn new(opts: &TestOpts, _: Option<T>) -> io::Result<ConsoleTestState<io::Stdout>> {
473 let log_out = match opts.logfile {
474 Some(ref path) => Some(File::create(path)?),
477 let out = match term::stdout() {
478 None => Raw(io::stdout()),
479 Some(t) => Pretty(t),
482 Ok(ConsoleTestState {
485 use_color: use_color(opts),
492 metrics: MetricMap::new(),
493 failures: Vec::new(),
// Per-result output helpers: long word in verbose mode, single char in
// quiet mode, colorized when enabled.
498 pub fn write_ok(&mut self) -> io::Result<()> {
499 self.write_short_result("ok", ".", term::color::GREEN)
502 pub fn write_failed(&mut self) -> io::Result<()> {
503 self.write_short_result("FAILED", "F", term::color::RED)
506 pub fn write_ignored(&mut self) -> io::Result<()> {
507 self.write_short_result("ignored", "i", term::color::YELLOW)
510 pub fn write_metric(&mut self) -> io::Result<()> {
511 self.write_pretty("metric", term::color::CYAN)
514 pub fn write_bench(&mut self) -> io::Result<()> {
515 self.write_pretty("bench", term::color::CYAN)
// Chooses quiet vs verbose form; the quiet-mode branch condition is
// elided from this view.
518 pub fn write_short_result(&mut self, verbose: &str, quiet: &str, color: term::color::Color)
521 self.write_pretty(quiet, color)
523 self.write_pretty(verbose, color)?;
524 self.write_plain("\n")
// Write `word` with color when supported/enabled, falling back to plain.
528 pub fn write_pretty(&mut self, word: &str, color: term::color::Color) -> io::Result<()> {
530 Pretty(ref mut term) => {
534 term.write_all(word.as_bytes())?;
540 Raw(ref mut stdout) => {
541 stdout.write_all(word.as_bytes())?;
// Write `s` uncolored to whichever output is active, then flush (flush
// lines elided).
547 pub fn write_plain(&mut self, s: &str) -> io::Result<()> {
549 Pretty(ref mut term) => {
550 term.write_all(s.as_bytes())?;
553 Raw(ref mut stdout) => {
554 stdout.write_all(s.as_bytes())?;
// Header line: "running N test(s)" with correct pluralization.
560 pub fn write_run_start(&mut self, len: usize) -> io::Result<()> {
562 let noun = if len != 1 {
567 self.write_plain(&format!("\nrunning {} {}\n", len, noun))
570 pub fn write_test_start(&mut self, test: &TestDesc, align: NamePadding) -> io::Result<()> {
// In quiet mode only right-padded entries (benches/metrics) print a
// start line.
571 if self.quiet && align != PadOnRight {
574 let name = test.padded_name(self.max_name_len, align);
575 self.write_plain(&format!("test {} ... ", name))
// Dispatch one result to the appropriate writer.
579 pub fn write_result(&mut self, result: &TestResult) -> io::Result<()> {
581 TrOk => self.write_ok(),
582 TrFailed => self.write_failed(),
583 TrIgnored => self.write_ignored(),
584 TrMetrics(ref mm) => {
585 self.write_metric()?;
586 self.write_plain(&format!(": {}\n", mm.fmt_metrics()))
590 self.write_plain(&format!(": {}\n", fmt_bench_samples(bs)))
// Append "<status> <name>" to the logfile, if one was opened.
595 pub fn write_log(&mut self, test: &TestDesc, result: &TestResult) -> io::Result<()> {
599 let s = format!("{} {}\n",
601 TrOk => "ok".to_owned(),
602 TrFailed => "failed".to_owned(),
603 TrIgnored => "ignored".to_owned(),
604 TrMetrics(ref mm) => mm.fmt_metrics(),
605 TrBench(ref bs) => fmt_bench_samples(bs),
608 o.write_all(s.as_bytes())
// Print captured stdout of each failure, then a sorted list of failing
// test names (the sort call is elided from this view — see the
// should_sort_failures test below).
613 pub fn write_failures(&mut self) -> io::Result<()> {
614 self.write_plain("\nfailures:\n")?;
615 let mut failures = Vec::new();
616 let mut fail_out = String::new();
617 for &(ref f, ref stdout) in &self.failures {
618 failures.push(f.name.to_string());
619 if !stdout.is_empty() {
620 fail_out.push_str(&format!("---- {} stdout ----\n\t", f.name));
621 let output = String::from_utf8_lossy(stdout);
622 fail_out.push_str(&output);
623 fail_out.push_str("\n");
626 if !fail_out.is_empty() {
627 self.write_plain("\n")?;
628 self.write_plain(&fail_out)?;
631 self.write_plain("\nfailures:\n")?;
633 for name in &failures {
634 self.write_plain(&format!(" {}\n", name))?;
// Final summary; returns Ok(true) iff no test failed.
639 pub fn write_run_finish(&mut self) -> io::Result<bool> {
640 assert!(self.passed + self.failed + self.ignored + self.measured == self.total);
642 let success = self.failed == 0;
644 self.write_failures()?;
647 self.write_plain("\ntest result: ")?;
649 // There's no parallelism at this point so it's safe to use color
650 self.write_pretty("ok", term::color::GREEN)?;
652 self.write_pretty("FAILED", term::color::RED)?;
654 let s = format!(". {} passed; {} failed; {} ignored; {} measured\n\n",
659 self.write_plain(&s)?;
664 // Format a number with thousands separators
// Walks powers of ten (billions..units); once a non-zero group has been
// emitted (`trailing`), later groups are zero-padded to 3 digits. The
// group-separator push and the `n %= base` update appear elided here.
665 fn fmt_thousands_sep(mut n: usize, sep: char) -> String {
667 let mut output = String::new();
668 let mut trailing = false;
669 for &pow in &[9, 6, 3, 0] {
670 let base = 10_usize.pow(pow);
671 if pow == 0 || trailing || n / base != 0 {
673 output.write_fmt(format_args!("{}", n / base)).unwrap();
675 output.write_fmt(format_args!("{:03}", n / base)).unwrap();
// Render one benchmark result: median ns/iter, +/- the min..max spread,
// and throughput in MB/s when byte counts were recorded (condition elided).
688 pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
690 let mut output = String::new();
692 let median = bs.ns_iter_summ.median as usize;
693 let deviation = (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize;
695 output.write_fmt(format_args!("{:>11} ns/iter (+/- {})",
696 fmt_thousands_sep(median, ','),
697 fmt_thousands_sep(deviation, ',')))
700 output.write_fmt(format_args!(" = {} MB/s", bs.mb_s)).unwrap();
705 // A simple console test runner
// Drives run_tests with a callback that updates ConsoleTestState counters,
// logs results, records metrics/bench summaries, and collects failures.
// Returns Ok(true) iff all tests passed.
706 pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> io::Result<bool> {
708 fn callback<T: Write>(event: &TestEvent, st: &mut ConsoleTestState<T>) -> io::Result<()> {
709 match (*event).clone() {
710 TeFiltered(ref filtered_tests) => st.write_run_start(filtered_tests.len()),
711 TeWait(ref test, padding) => st.write_test_start(test, padding),
712 TeResult(test, result, stdout) => {
713 st.write_log(&test, &result)?;
714 st.write_result(&result)?;
716 TrOk => st.passed += 1,
717 TrIgnored => st.ignored += 1,
// Metric results: copy each metric into the global map, namespaced by
// the test name.
719 let tname = test.name;
720 let MetricMap(mm) = mm;
723 .insert_metric(&format!("{}.{}", tname, k), v.value, v.noise);
// Bench results: record median and min..max spread as value/noise.
728 st.metrics.insert_metric(test.name.as_slice(),
729 bs.ns_iter_summ.median,
730 bs.ns_iter_summ.max - bs.ns_iter_summ.min);
// Failed tests keep their captured stdout for the failure report.
735 st.failures.push((test, stdout));
743 let mut st = ConsoleTestState::new(opts, None::<io::Stdout>)?;
// Column width = longest name among tests whose padding is PadOnRight.
744 fn len_if_padded(t: &TestDescAndFn) -> usize {
745 match t.testfn.padding() {
747 PadOnRight => t.desc.name.as_slice().len(),
750 if let Some(t) = tests.iter().max_by_key(|t| len_if_padded(*t)) {
751 let n = t.desc.name.as_slice();
752 st.max_name_len = n.len();
754 run_tests(opts, tests, |x| callback(&x, &mut st))?;
755 return st.write_run_finish();
// Unit test: failures are pushed in order (b, a) but the report must list
// them alphabetically (a before b). Struct-literal fields beyond those
// shown are elided from this view.
759 fn should_sort_failures_before_printing_them() {
760 let test_a = TestDesc {
761 name: StaticTestName("a"),
763 should_panic: ShouldPanic::No,
766 let test_b = TestDesc {
767 name: StaticTestName("b"),
769 should_panic: ShouldPanic::No,
772 let mut st = ConsoleTestState {
// Raw(Vec) lets the test inspect everything written to "stdout".
774 out: Raw(Vec::new()),
783 metrics: MetricMap::new(),
784 failures: vec![(test_b, Vec::new()), (test_a, Vec::new())],
787 st.write_failures().unwrap();
788 let s = match st.out {
789 Raw(ref m) => String::from_utf8_lossy(&m[..]),
790 Pretty(_) => unreachable!(),
793 let apos = s.find("a").unwrap();
794 let bpos = s.find("b").unwrap();
795 assert!(apos < bpos);
// Decide whether to colorize: auto mode requires a tty and no --nocapture;
// Always/Never arms are elided from this view.
798 fn use_color(opts: &TestOpts) -> bool {
800 AutoColor => !opts.nocapture && stdout_isatty(),
// Unix tty detection via libc (cfg attribute elided).
807 fn stdout_isatty() -> bool {
808 unsafe { libc::isatty(libc::STDOUT_FILENO) != 0 }
// Windows tty detection: GetConsoleMode succeeds only on a real console
// handle (cfg attribute and DWORD/BOOL type aliases partially elided).
811 fn stdout_isatty() -> bool {
814 type HANDLE = *mut u8;
815 type LPDWORD = *mut u32;
816 const STD_OUTPUT_HANDLE: DWORD = -11i32 as DWORD;
818 fn GetStdHandle(which: DWORD) -> HANDLE;
819 fn GetConsoleMode(hConsoleHandle: HANDLE, lpMode: LPDWORD) -> BOOL;
822 let handle = GetStdHandle(STD_OUTPUT_HANDLE);
824 GetConsoleMode(handle, &mut out) != 0
// Events emitted by run_tests to its callback, and the worker->monitor
// channel message type.
830 TeFiltered(Vec<TestDesc>),
831 TeWait(TestDesc, NamePadding),
832 TeResult(TestDesc, TestResult, Vec<u8>),
835 pub type MonitorMsg = (TestDesc, TestResult, Vec<u8>);
// Core scheduler: filters tests, runs plain tests with up to
// `get_concurrency()` in flight, then runs benches/metrics serially,
// reporting every event through `callback`.
838 fn run_tests<F>(opts: &TestOpts, tests: Vec<TestDescAndFn>, mut callback: F) -> io::Result<()>
839 where F: FnMut(TestEvent) -> io::Result<()>
841 let mut filtered_tests = filter_tests(opts, tests);
// Without --bench, benches are downgraded to single-iteration tests.
842 if !opts.bench_benchmarks {
843 filtered_tests = convert_benchmarks_to_tests(filtered_tests);
846 let filtered_descs = filtered_tests.iter()
847 .map(|t| t.desc.clone())
850 callback(TeFiltered(filtered_descs))?;
// Split plain tests (run concurrently) from benches/metrics (run last).
852 let (filtered_tests, filtered_benchs_and_metrics): (Vec<_>, _) =
853 filtered_tests.into_iter().partition(|e| {
855 StaticTestFn(_) | DynTestFn(_) => true,
860 // It's tempting to just spawn all the tests at once, but since we have
861 // many tests that run in other processes we would be making a big mess.
862 let concurrency = get_concurrency();
864 let mut remaining = filtered_tests;
868 let (tx, rx) = channel::<MonitorMsg>();
// Keep up to `concurrency` tests in flight; `pending` tracks in-flight
// count (its updates are elided from this view).
870 while pending > 0 || !remaining.is_empty() {
871 while pending < concurrency && !remaining.is_empty() {
872 let test = remaining.pop().unwrap();
873 if concurrency == 1 {
874 // We are doing one test at a time so we can print the name
875 // of the test before we run it. Useful for debugging tests
876 // that hang forever.
877 callback(TeWait(test.desc.clone(), test.testfn.padding()))?;
879 run_test(opts, !opts.run_tests, test, tx.clone());
// Concurrent mode: TeWait is emitted on completion, just before the
// result, since start order is not observable.
883 let (desc, result, stdout) = rx.recv().unwrap();
884 if concurrency != 1 {
885 callback(TeWait(desc.clone(), PadNone))?;
887 callback(TeResult(desc, result, stdout))?;
891 if opts.bench_benchmarks {
892 // All benchmarks run at the end, in serial.
893 // (this includes metric fns)
894 for b in filtered_benchs_and_metrics {
895 callback(TeWait(b.desc.clone(), b.testfn.padding()))?;
896 run_test(opts, false, b, tx.clone());
897 let (test, result, stdout) = rx.recv().unwrap();
898 callback(TeResult(test, result, stdout))?;
// Number of tests to run in parallel: RUST_TEST_THREADS if set (must be a
// positive integer, else panic), otherwise the CPU count.
905 fn get_concurrency() -> usize {
906 return match env::var("RUST_TEST_THREADS") {
908 let opt_n: Option<usize> = s.parse().ok();
910 Some(n) if n > 0 => n,
912 panic!("RUST_TEST_THREADS is `{}`, should be a positive integer.",
917 Err(..) => num_cpus(),
// Windows: read dwNumberOfProcessors from GetSystemInfo (the SYSTEM_INFO
// struct declaration header and cfg attribute are elided from this view).
922 fn num_cpus() -> usize {
925 wProcessorArchitecture: u16,
928 lpMinimumApplicationAddress: *mut u8,
929 lpMaximumApplicationAddress: *mut u8,
930 dwActiveProcessorMask: *mut u8,
931 dwNumberOfProcessors: u32,
932 dwProcessorType: u32,
933 dwAllocationGranularity: u32,
934 wProcessorLevel: u16,
935 wProcessorRevision: u16,
938 fn GetSystemInfo(info: *mut SYSTEM_INFO) -> i32;
941 let mut sysinfo = std::mem::zeroed();
942 GetSystemInfo(&mut sysinfo);
943 sysinfo.dwNumberOfProcessors as usize
// Linux-ish platforms: sysconf(_SC_NPROCESSORS_ONLN).
947 #[cfg(any(target_os = "linux",
950 target_os = "android",
951 target_os = "solaris",
952 target_os = "emscripten"))]
953 fn num_cpus() -> usize {
954 unsafe { libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as usize }
// BSDs: prefer sysctl(CTL_HW, HW_NCPU), with a sysconf fallback branch
// (condition elided).
957 #[cfg(any(target_os = "freebsd",
958 target_os = "dragonfly",
959 target_os = "bitrig",
960 target_os = "netbsd"))]
961 fn num_cpus() -> usize {
964 let mut cpus: libc::c_uint = 0;
965 let mut cpus_size = std::mem::size_of_val(&cpus);
968 cpus = libc::sysconf(libc::_SC_NPROCESSORS_ONLN) as libc::c_uint;
971 let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
973 libc::sysctl(mib.as_mut_ptr(),
975 &mut cpus as *mut _ as *mut _,
976 &mut cpus_size as *mut _ as *mut _,
// OpenBSD: sysctl only (no sysconf fallback in this variant).
987 #[cfg(target_os = "openbsd")]
988 fn num_cpus() -> usize {
991 let mut cpus: libc::c_uint = 0;
992 let mut cpus_size = std::mem::size_of_val(&cpus);
993 let mut mib = [libc::CTL_HW, libc::HW_NCPU, 0, 0];
996 libc::sysctl(mib.as_mut_ptr(),
998 &mut cpus as *mut _ as *mut _,
999 &mut cpus_size as *mut _ as *mut _,
// Apply --filter (substring match on name), then either drop ignored
// tests or (--ignored) keep ONLY ignored tests with their flag cleared,
// and finally sort alphabetically by name.
1010 pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
1011 let mut filtered = tests;
1013 // Remove tests that don't match the test filter
1014 filtered = match opts.filter {
1016 Some(ref filter) => {
1017 filtered.into_iter()
1018 .filter(|test| test.desc.name.as_slice().contains(&filter[..]))
1023 // Maybe pull out the ignored test and unignore them
1024 filtered = if !opts.run_ignored {
1027 fn filter(test: TestDescAndFn) -> Option<TestDescAndFn> {
1028 if test.desc.ignore {
1029 let TestDescAndFn {desc, testfn} = test;
1030 Some(TestDescAndFn {
// Unignore so the runner actually executes it.
1031 desc: TestDesc { ignore: false, ..desc },
1038 filtered.into_iter().filter_map(filter).collect()
1041 // Sort the tests alphabetically
1042 filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));
// Wrap each bench fn in a DynTestFn that runs it exactly once via
// bench::run_once, so `--test` mode still sanity-checks benchmarks.
1047 pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
1048 // convert benchmarks to tests, if we're not benchmarking them
1051 let testfn = match x.testfn {
1052 DynBenchFn(bench) => {
1053 DynTestFn(Box::new(move || bench::run_once(|b| bench.run(b))))
1055 StaticBenchFn(benchfn) => {
1056 DynTestFn(Box::new(move || bench::run_once(|b| benchfn(b))))
// Run one test/bench/metric fn and send (desc, result, captured-stdout)
// on `monitor_ch`. Ignored tests short-circuit with TrIgnored.
1068 pub fn run_test(opts: &TestOpts,
1070 test: TestDescAndFn,
1071 monitor_ch: Sender<MonitorMsg>) {
1073 let TestDescAndFn {desc, testfn} = test;
1075 if force_ignore || desc.ignore {
1076 monitor_ch.send((desc, TrIgnored, Vec::new())).unwrap();
// Spawns the test in its own named thread so a panic is contained;
// stdout/stderr are redirected into a shared buffer (unless nocapture —
// that branch's condition is elided here).
1080 fn run_test_inner(desc: TestDesc,
1081 monitor_ch: Sender<MonitorMsg>,
1083 testfn: Box<FnBox() + Send>) {
// Writer that appends into an Arc<Mutex<Vec<u8>>> shared with the
// monitoring side.
1084 struct Sink(Arc<Mutex<Vec<u8>>>);
1085 impl Write for Sink {
1086 fn write(&mut self, data: &[u8]) -> io::Result<usize> {
1087 Write::write(&mut *self.0.lock().unwrap(), data)
1089 fn flush(&mut self) -> io::Result<()> {
1094 thread::spawn(move || {
1095 let data = Arc::new(Mutex::new(Vec::new()));
1096 let data2 = data.clone();
// Name the worker thread after the test for better panic messages.
1097 let cfg = thread::Builder::new().name(match desc.name {
1098 DynTestName(ref name) => name.clone(),
1099 StaticTestName(name) => name.to_owned(),
1102 let result_guard = cfg.spawn(move || {
1104 io::set_print(box Sink(data2.clone()));
1105 io::set_panic(box Sink(data2));
// join() yields Err on panic; calc_result maps that against should_panic.
1110 let test_result = calc_result(&desc, result_guard.join());
1111 let stdout = data.lock().unwrap().to_vec();
1112 monitor_ch.send((desc.clone(), test_result, stdout)).unwrap();
// Benches and metric fns run inline (no capture thread needed).
1117 DynBenchFn(bencher) => {
1118 let bs = ::bench::benchmark(|harness| bencher.run(harness));
1119 monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
1122 StaticBenchFn(benchfn) => {
1123 let bs = ::bench::benchmark(|harness| (benchfn.clone())(harness));
1124 monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
1128 let mut mm = MetricMap::new();
1129 f.call_box((&mut mm,));
1130 monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
1133 StaticMetricFn(f) => {
1134 let mut mm = MetricMap::new();
1136 monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
1139 DynTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture, f),
1140 StaticTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture, Box::new(f)),
// Map (should_panic, thread join result) to a TestResult: ok on clean
// return when no panic expected, ok on panic when expected, and for
// YesWithMessage only when the panic payload (String or &'static str)
// contains the expected substring. The failure fall-through arm is
// elided from this view.
1144 fn calc_result(desc: &TestDesc, task_result: Result<(), Box<Any + Send>>) -> TestResult {
1145 match (&desc.should_panic, task_result) {
1146 (&ShouldPanic::No, Ok(())) |
1147 (&ShouldPanic::Yes, Err(_)) => TrOk,
1148 (&ShouldPanic::YesWithMessage(msg), Err(ref err))
1149 if err.downcast_ref::<String>()
1151 .or_else(|| err.downcast_ref::<&'static str>().map(|e| *e))
1152 .map(|e| e.contains(msg))
1153 .unwrap_or(false) => TrOk,
// NOTE(review): the `impl MetricMap` header is elided from this view.
1159 pub fn new() -> MetricMap {
1160 MetricMap(BTreeMap::new())
1163 /// Insert a named `value` (+/- `noise`) metric into the map. The value
1164 /// must be non-negative. The `noise` indicates the uncertainty of the
1165 /// metric, which doubles as the "noise range" of acceptable
1166 /// pairwise-regressions on this named value, when comparing from one
1167 /// metric to the next using `compare_to_old`.
1169 /// If `noise` is positive, then it means this metric is of a value
1170 /// you want to see grow smaller, so a change larger than `noise` in the
1171 /// positive direction represents a regression.
1173 /// If `noise` is negative, then it means this metric is of a value
1174 /// you want to see grow larger, so a change larger than `noise` in the
1175 /// negative direction represents a regression.
1176 pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
1181 let MetricMap(ref mut map) = *self;
// BTreeMap keeps metrics sorted by name for stable output.
1182 map.insert(name.to_owned(), m);
// Render all metrics as "name: value (+/- noise)" (join elided).
1185 pub fn fmt_metrics(&self) -> String {
1186 let MetricMap(ref mm) = *self;
1187 let v: Vec<String> = mm.iter()
1188 .map(|(k, v)| format!("{}: {} (+/- {})", *k, v.value, v.noise))
1197 /// A function that is opaque to the optimizer, to allow benchmarks to
1198 /// pretend to use outputs to assist in avoiding dead-code
1201 /// This function is a no-op, and does not even read from `dummy`.
1202 #[cfg(not(any(all(target_os = "nacl", target_arch = "le32"),
1203 target_arch = "asmjs")))]
1204 pub fn black_box<T>(dummy: T) -> T {
1205 // we need to "use" the argument in some way LLVM can't
// Empty inline asm that takes &dummy as an input defeats dead-code
// elimination without emitting any instructions.
1207 unsafe { asm!("" : : "r"(&dummy)) }
// Fallback for targets without inline asm: passes the value through
// unhindered (optimizer barrier not available — body elided here).
1210 #[cfg(any(all(target_os = "nacl", target_arch = "le32"),
1211 target_arch = "asmjs"))]
1213 pub fn black_box<T>(dummy: T) -> T {
// NOTE(review): the `impl Bencher` header is elided from this view.
1219 /// Callback for benchmark functions to run in their body.
// Runs `inner` self.iterations times and records the wall-clock duration
// (the loop itself is elided here).
1220 pub fn iter<T, F>(&mut self, mut inner: F)
1221 where F: FnMut() -> T
1223 let start = Instant::now();
1224 let k = self.iterations;
1228 self.dur = start.elapsed();
// Total elapsed nanoseconds of the last `iter` run.
1231 pub fn ns_elapsed(&mut self) -> u64 {
1232 self.dur.as_secs() * 1_000_000_000 + (self.dur.subsec_nanos() as u64)
// Average ns per iteration; max(.., 1) avoids divide-by-zero.
1235 pub fn ns_per_iter(&mut self) -> u64 {
1236 if self.iterations == 0 {
1239 self.ns_elapsed() / cmp::max(self.iterations, 1)
// Run the benchmark body with a fixed iteration count.
1243 pub fn bench_n<F>(&mut self, n: u64, f: F)
1244 where F: FnOnce(&mut Bencher)
1246 self.iterations = n;
1250 // This is a more statistics-driven benchmark algorithm
// Adaptively grows `n` until timing stabilizes: 50 samples at n and at
// 5n per round, winsorized at 5%, stopping on convergence, a 3s budget,
// or iteration-count overflow.
1251 pub fn auto_bench<F>(&mut self, mut f: F) -> stats::Summary
1252 where F: FnMut(&mut Bencher)
1254 // Initial bench run to get ballpark figure.
1256 self.bench_n(n, |x| f(x));
1258 // Try to estimate iter count for 1ms falling back to 1m
1259 // iterations if first run took < 1ns.
1260 if self.ns_per_iter() == 0 {
1263 n = 1_000_000 / cmp::max(self.ns_per_iter(), 1);
1265 // if the first run took more than 1ms we don't want to just
1266 // be left doing 0 iterations on every loop. The unfortunate
1267 // side effect of not being able to do as many runs is
1268 // automatically handled by the statistical analysis below
1269 // (i.e. larger error bars).
1274 let mut total_run = Duration::new(0, 0);
1275 let samples: &mut [f64] = &mut [0.0_f64; 50];
1277 let loop_start = Instant::now();
1279 for p in &mut *samples {
1280 self.bench_n(n, |x| f(x));
1281 *p = self.ns_per_iter() as f64;
// Clip the extreme 5% of samples to reduce outlier influence.
1284 stats::winsorize(samples, 5.0);
1285 let summ = stats::Summary::new(samples);
1287 for p in &mut *samples {
1288 self.bench_n(5 * n, |x| f(x));
1289 *p = self.ns_per_iter() as f64;
1292 stats::winsorize(samples, 5.0);
1293 let summ5 = stats::Summary::new(samples);
1294 let loop_run = loop_start.elapsed();
1296 // If we've run for 100ms and seem to have converged to a
1298 if loop_run > Duration::from_millis(100) && summ.median_abs_dev_pct < 1.0 &&
1299 summ.median - summ5.median < summ5.median_abs_dev {
1303 total_run = total_run + loop_run;
1304 // Longest we ever run for is 3s.
1305 if total_run > Duration::from_secs(3) {
1309 // If we overflow here just return the results so far. We check a
1310 // multiplier of 10 because we're about to multiply by 2 and the
1311 // next iteration of the loop will also multiply by 5 (to calculate
1312 // the summ5 result)
1313 n = match n.checked_mul(10) {
1315 None => return summ5,
// NOTE(review): the `pub mod bench` header is elided from this view.
1323 use std::time::Duration;
1324 use super::{Bencher, BenchSamples};
// Full benchmark: auto_bench the closure and derive MB/s from the byte
// count recorded on the Bencher (remaining fields elided).
1326 pub fn benchmark<F>(f: F) -> BenchSamples
1327 where F: FnMut(&mut Bencher)
1329 let mut bs = Bencher {
1331 dur: Duration::new(0, 0),
1335 let ns_iter_summ = bs.auto_bench(f);
// Guard against a zero median before dividing.
1337 let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
1338 let mb_s = bs.bytes * 1000 / ns_iter;
1341 ns_iter_summ: ns_iter_summ,
1342 mb_s: mb_s as usize,
// Single-shot run used when benches are converted to plain tests.
1346 pub fn run_once<F>(f: F)
1347 where F: FnOnce(&mut Bencher)
1349 let mut bs = Bencher {
1351 dur: Duration::new(0, 0),
1360 use test::{TrFailed, TrIgnored, TrOk, filter_tests, parse_opts, TestDesc, TestDescAndFn,
1361 TestOpts, run_test, MetricMap, StaticTestName, DynTestName, DynTestFn, ShouldPanic};
1362 use std::sync::mpsc::channel;
1365 pub fn do_not_run_ignored_tests() {
1369 let desc = TestDescAndFn {
1371 name: StaticTestName("whatever"),
1373 should_panic: ShouldPanic::No,
1375 testfn: DynTestFn(Box::new(move || f())),
1377 let (tx, rx) = channel();
1378 run_test(&TestOpts::new(), false, desc, tx);
1379 let (_, res, _) = rx.recv().unwrap();
1380 assert!(res != TrOk);
1384 pub fn ignored_tests_result_in_ignored() {
1386 let desc = TestDescAndFn {
1388 name: StaticTestName("whatever"),
1390 should_panic: ShouldPanic::No,
1392 testfn: DynTestFn(Box::new(move || f())),
1394 let (tx, rx) = channel();
1395 run_test(&TestOpts::new(), false, desc, tx);
1396 let (_, res, _) = rx.recv().unwrap();
1397 assert!(res == TrIgnored);
1401 fn test_should_panic() {
1405 let desc = TestDescAndFn {
1407 name: StaticTestName("whatever"),
1409 should_panic: ShouldPanic::Yes,
1411 testfn: DynTestFn(Box::new(move || f())),
1413 let (tx, rx) = channel();
1414 run_test(&TestOpts::new(), false, desc, tx);
1415 let (_, res, _) = rx.recv().unwrap();
1416 assert!(res == TrOk);
// ShouldPanic::YesWithMessage matches on a SUBSTRING of the panic payload:
// the body panics with "an error message", the expectation is "error message",
// and the substring match makes the test pass (TrOk).
1420 fn test_should_panic_good_message() {
1422 panic!("an error message");
1424 let desc = TestDescAndFn {
1426 name: StaticTestName("whatever"),
// "error message" is contained in "an error message" above.
1428 should_panic: ShouldPanic::YesWithMessage("error message"),
1430 testfn: DynTestFn(Box::new(move || f())),
1432 let (tx, rx) = channel();
1433 run_test(&TestOpts::new(), false, desc, tx);
1434 let (_, res, _) = rx.recv().unwrap();
1435 assert!(res == TrOk);
// Counterpart of the "good message" case: the body panics with
// "an error message" but the expectation is "foobar", which is not a
// substring of the payload, so the test run is reported TrFailed.
1439 fn test_should_panic_bad_message() {
1441 panic!("an error message");
1443 let desc = TestDescAndFn {
1445 name: StaticTestName("whatever"),
// "foobar" does not occur in the panic message above — mismatch is the point.
1447 should_panic: ShouldPanic::YesWithMessage("foobar"),
1449 testfn: DynTestFn(Box::new(move || f())),
1451 let (tx, rx) = channel();
1452 run_test(&TestOpts::new(), false, desc, tx);
1453 let (_, res, _) = rx.recv().unwrap();
1454 assert!(res == TrFailed);
// A test declared ShouldPanic::Yes whose body does NOT panic must be
// reported TrFailed. The non-panicking closure `f` is elided from this
// listing — TODO confirm it is an empty body in the full file.
1458 fn test_should_panic_but_succeeds() {
1460 let desc = TestDescAndFn {
1462 name: StaticTestName("whatever"),
1464 should_panic: ShouldPanic::Yes,
1466 testfn: DynTestFn(Box::new(move || f())),
1468 let (tx, rx) = channel();
1469 run_test(&TestOpts::new(), false, desc, tx);
1470 let (_, res, _) = rx.recv().unwrap();
// Returning normally when a panic was demanded is a failure.
1471 assert!(res == TrFailed);
// parse_opts must set run_ignored when "--ignored" appears on the command
// line. The successful match arm (binding `opts`) is elided from this listing.
1475 fn parse_ignored_flag() {
1476 let args = vec!["progname".to_string(), "filter".to_string(), "--ignored".to_string()];
1477 let opts = match parse_opts(&args) {
// Any non-success parse is a bug in this test's arguments, not in parse_opts.
1479 _ => panic!("Malformed arg in parse_ignored_flag"),
1481 assert!((opts.run_ignored));
// With run_ignored set, filter_tests must drop every non-ignored test and
// clear the ignore flag on the survivors. The two descriptors' `ignore:`
// fields and closing braces are elided from this listing — presumably test
// "1" is ignored and test "2" is not, given the assertions below; confirm
// against the full file.
1485 pub fn filter_for_ignored_option() {
1486 // When we run ignored tests the test filter should filter out all the
1487 // unignored tests and flip the ignore flag on the rest to false
1489 let mut opts = TestOpts::new();
1490 opts.run_tests = true;
1491 opts.run_ignored = true;
// Two dynamic no-op tests named "1" and "2".
1493 let tests = vec![TestDescAndFn {
1495 name: StaticTestName("1"),
1497 should_panic: ShouldPanic::No,
1499 testfn: DynTestFn(Box::new(move || {})),
1503 name: StaticTestName("2"),
1505 should_panic: ShouldPanic::No,
1507 testfn: DynTestFn(Box::new(move || {})),
1509 let filtered = filter_tests(&opts, tests);
// Exactly one survivor: the (originally ignored) test "1", with its
// ignore flag flipped off so it will actually run.
1511 assert_eq!(filtered.len(), 1);
1512 assert_eq!(filtered[0].desc.name.to_string(), "1");
1513 assert!(!filtered[0].desc.ignore);
// filter_tests must return tests sorted by name: build descriptors from an
// unsorted name list and compare the filtered order against the same names
// sorted lexicographically. The loop body that pushes each descriptor (and
// the `ignore:`/`testfn` context around line 1540) is partially elided from
// this listing.
1517 pub fn sort_tests() {
1518 let mut opts = TestOpts::new();
1519 opts.run_tests = true;
// Deliberately out-of-order input names.
1521 let names = vec!["sha1::test".to_string(),
1522 "isize::test_to_str".to_string(),
1523 "isize::test_pow".to_string(),
1524 "test::do_not_run_ignored_tests".to_string(),
1525 "test::ignored_tests_result_in_ignored".to_string(),
1526 "test::first_free_arg_should_be_a_filter".to_string(),
1527 "test::parse_ignored_flag".to_string(),
1528 "test::filter_for_ignored_option".to_string(),
1529 "test::sort_tests".to_string()];
// One DynTestName descriptor per input name.
1532 let mut tests = Vec::new();
1533 for name in &names {
1534 let test = TestDescAndFn {
1536 name: DynTestName((*name).clone()),
1538 should_panic: ShouldPanic::No,
1540 testfn: DynTestFn(Box::new(testfn)),
1546 let filtered = filter_tests(&opts, tests);
// The same names in expected (lexicographic) order.
1548 let expected = vec!["isize::test_pow".to_string(),
1549 "isize::test_to_str".to_string(),
1550 "sha1::test".to_string(),
1551 "test::do_not_run_ignored_tests".to_string(),
1552 "test::filter_for_ignored_option".to_string(),
1553 "test::first_free_arg_should_be_a_filter".to_string(),
1554 "test::ignored_tests_result_in_ignored".to_string(),
1555 "test::parse_ignored_flag".to_string(),
1556 "test::sort_tests".to_string()];
// Pairwise comparison; zip also implicitly checks nothing was dropped
// only up to the shorter sequence — lengths are equal by construction here.
1558 for (a, b) in expected.iter().zip(filtered) {
1559 assert!(*a == b.desc.name.to_string());
// Builds two MetricMaps covering the compare cases: shared-key-within-noise,
// keys present in only one map, and regressed/improved values for both
// downward-better (positive noise) and upward-better (negative noise)
// metrics. NOTE(review): this function continues past the end of this
// chunk — the assertions that consume m1/m2 are not visible here.
1564 pub fn test_metricmap_compare() {
1565 let mut m1 = MetricMap::new();
1566 let mut m2 = MetricMap::new();
// Same key in both maps, 1000 vs 1100 with noise 200: within noise band.
1567 m1.insert_metric("in-both-noise", 1000.0, 200.0);
1568 m2.insert_metric("in-both-noise", 1100.0, 200.0);
// Keys that exist in only one of the two maps.
1570 m1.insert_metric("in-first-noise", 1000.0, 2.0);
1571 m2.insert_metric("in-second-noise", 1000.0, 2.0);
// Positive noise: lower is better, so 1000 -> 2000 is a regression...
1573 m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
1574 m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);
// ...and 2000 -> 1000 is an improvement.
1576 m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
1577 m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);
// Negative noise: higher is better, so 2000 -> 1000 is a regression...
1579 m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
1580 m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);
// ...and 1000 -> 2000 is an improvement.
1582 m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
1583 m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);