1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 //! Support code for rustc's built-in unit-test and micro-benchmarking framework.
14 //! Almost all user code will only be interested in `Bencher` and
15 //! `black_box`. All other interactions (such as writing tests and
16 //! benchmarks themselves) should be done via the `#[test]` and
17 //! `#[bench]` attributes.
19 //! See the [Testing Chapter](../book/testing.html) of the book for more details.
21 // Currently, not much of this is meant for users. It is intended to
22 // support the simplest interface possible for representing and
23 // running tests while providing a base that other test frameworks may build on.
26 // Do not remove on snapshot creation. Needed for bootstrap. (Issue #22364)
27 #![cfg_attr(stage0, feature(custom_attribute))]
28 #![crate_name = "test"]
29 #![unstable(feature = "test", issue = "27812")]
30 #![cfg_attr(stage0, staged_api)]
31 #![crate_type = "rlib"]
32 #![crate_type = "dylib"]
33 #![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
34 html_favicon_url = "https://doc.rust-lang.org/favicon.ico",
35 html_root_url = "https://doc.rust-lang.org/nightly/",
36 test(attr(deny(warnings))))]
39 #![feature(box_syntax)]
42 #![feature(rustc_private)]
43 #![feature(set_stdio)]
44 #![feature(staged_api)]
48 extern crate serialize;
49 extern crate serialize as rustc_serialize;
53 pub use self::TestFn::*;
54 pub use self::ColorConfig::*;
55 pub use self::TestResult::*;
56 pub use self::TestName::*;
57 use self::TestEvent::*;
58 use self::NamePadding::*;
59 use self::OutputLocation::*;
62 use serialize::Encodable;
63 use std::boxed::FnBox;
68 use std::collections::BTreeMap;
72 use std::io::prelude::*;
74 use std::iter::repeat;
75 use std::path::PathBuf;
76 use std::sync::mpsc::{channel, Sender};
77 use std::sync::{Arc, Mutex};
79 use std::time::{Instant, Duration};
81 // to be used by rustc to compile tests in libtest
83 pub use {Bencher, TestName, TestResult, TestDesc,
84 TestDescAndFn, TestOpts, TrFailed, TrIgnored, TrOk,
86 StaticTestFn, StaticTestName, DynTestName, DynTestFn,
87 run_test, test_main, test_main_static, filter_tests,
88 parse_opts, StaticBenchFn, ShouldPanic};
93 // The name of a test. By convention this follows the rules for Rust
94 // paths; i.e. it should be a series of identifiers separated by double
95 // colons. This way if some test runner wants to arrange the tests
96 // hierarchically it may.
98 #[derive(Clone, PartialEq, Eq, Hash, Debug)]
100 StaticTestName(&'static str),
104 fn as_slice(&self) -> &str {
106 StaticTestName(s) => s,
107 DynTestName(ref s) => s
111 impl fmt::Display for TestName {
112 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
113 fmt::Display::fmt(self.as_slice(), f)
117 #[derive(Clone, Copy)]
124 fn padded_name(&self, column_count: usize, align: NamePadding) -> String {
125 let mut name = String::from(self.name.as_slice());
126 let fill = column_count.saturating_sub(name.len());
127 let pad = repeat(" ").take(fill).collect::<String>();
138 /// Represents a benchmark function.
139 pub trait TDynBenchFn: Send {
140 fn run(&self, harness: &mut Bencher);
143 // A function that runs a test. If the function returns successfully,
144 // the test succeeds; if the function panics then the test fails. We
145 // may need to come up with a more clever definition of test in order
146 // to support isolation of tests into threads.
149 StaticBenchFn(fn(&mut Bencher)),
150 StaticMetricFn(fn(&mut MetricMap)),
151 DynTestFn(Box<FnBox() + Send>),
152 DynMetricFn(Box<FnBox(&mut MetricMap)+Send>),
153 DynBenchFn(Box<TDynBenchFn+'static>)
157 fn padding(&self) -> NamePadding {
159 StaticTestFn(..) => PadNone,
160 StaticBenchFn(..) => PadOnRight,
161 StaticMetricFn(..) => PadOnRight,
162 DynTestFn(..) => PadNone,
163 DynMetricFn(..) => PadOnRight,
164 DynBenchFn(..) => PadOnRight,
169 impl fmt::Debug for TestFn {
170 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
171 f.write_str(match *self {
172 StaticTestFn(..) => "StaticTestFn(..)",
173 StaticBenchFn(..) => "StaticBenchFn(..)",
174 StaticMetricFn(..) => "StaticMetricFn(..)",
175 DynTestFn(..) => "DynTestFn(..)",
176 DynMetricFn(..) => "DynMetricFn(..)",
177 DynBenchFn(..) => "DynBenchFn(..)"
182 /// Manager of the benchmarking runs.
184 /// This is fed into functions marked with `#[bench]` to allow for
185 /// set-up & tear-down before running a piece of code repeatedly via a
187 #[derive(Copy, Clone)]
194 #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
195 pub enum ShouldPanic {
198 YesWithMessage(&'static str)
201 // The definition of a single test. A test runner will run a list of
203 #[derive(Clone, Debug, PartialEq, Eq, Hash)]
204 pub struct TestDesc {
207 pub should_panic: ShouldPanic,
// Assert to the compiler that `TestDesc` may be moved across threads.
// NOTE(review): of `TestDesc`'s fields, only `should_panic: ShouldPanic`
// is visible in this listing; confirm the elided fields (e.g. the
// `TestName`) are all `Send` before relying on this impl.
210 unsafe impl Send for TestDesc {}
213 pub struct TestDescAndFn {
218 #[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Debug, Copy)]
225 pub fn new(value: f64, noise: f64) -> Metric {
226 Metric {value: value, noise: noise}
231 pub struct MetricMap(BTreeMap<String,Metric>);
233 impl Clone for MetricMap {
234 fn clone(&self) -> MetricMap {
235 let MetricMap(ref map) = *self;
236 MetricMap(map.clone())
240 // The default console test runner. It accepts the command line
241 // arguments and a vector of test_descs.
242 pub fn test_main(args: &[String], tests: Vec<TestDescAndFn> ) {
244 match parse_opts(args) {
246 Some(Err(msg)) => panic!("{:?}", msg),
249 match run_tests_console(&opts, tests) {
251 Ok(false) => std::process::exit(101),
252 Err(e) => panic!("io error when running tests: {:?}", e),
256 // A variant optimized for invocation with a static test vector.
257 // This will panic (intentionally) when fed any dynamic tests, because
258 // it is copying the static values out into a dynamic vector and cannot
259 // copy dynamic values. It is doing this because from this point on
260 // a Vec<TestDescAndFn> is used in order to effect ownership-transfer
261 // semantics into parallel test runners, which in turn requires a Vec<>
262 // rather than a &[].
263 pub fn test_main_static(tests: &[TestDescAndFn]) {
264 let args = env::args().collect::<Vec<_>>();
265 let owned_tests = tests.iter().map(|t| {
267 StaticTestFn(f) => TestDescAndFn { testfn: StaticTestFn(f), desc: t.desc.clone() },
268 StaticBenchFn(f) => TestDescAndFn { testfn: StaticBenchFn(f), desc: t.desc.clone() },
269 _ => panic!("non-static tests passed to test::test_main_static")
272 test_main(&args, owned_tests)
275 #[derive(Copy, Clone)]
276 pub enum ColorConfig {
282 pub struct TestOpts {
283 pub filter: Option<String>,
284 pub run_ignored: bool,
286 pub bench_benchmarks: bool,
287 pub logfile: Option<PathBuf>,
289 pub color: ColorConfig,
294 fn new() -> TestOpts {
299 bench_benchmarks: false,
307 /// Result of parsing the options.
/// `Ok` carries the fully parsed `TestOpts`; `Err` carries a
/// human-readable message explaining why the command line was rejected
/// (see `parse_opts`, which also returns `None` when `--help` is given).
308 pub type OptRes = Result<TestOpts, String>;
310 fn optgroups() -> Vec<getopts::OptGroup> {
311 vec!(getopts::optflag("", "ignored", "Run ignored tests"),
312 getopts::optflag("", "test", "Run tests and not benchmarks"),
313 getopts::optflag("", "bench", "Run benchmarks instead of tests"),
314 getopts::optflag("h", "help", "Display this message (longer with --help)"),
315 getopts::optopt("", "logfile", "Write logs to the specified file instead \
317 getopts::optflag("", "nocapture", "don't capture stdout/stderr of each \
318 task, allow printing directly"),
319 getopts::optopt("", "color", "Configure coloring of output:
320 auto = colorize if stdout is a tty and tests are run on serially (default);
321 always = always colorize output;
322 never = never colorize output;", "auto|always|never"))
325 fn usage(binary: &str) {
326 let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
329 The FILTER regex is tested against the name of all tests to run, and
330 only those tests that match are run.
332 By default, all tests are run in parallel. This can be altered with the
333 RUST_TEST_THREADS environment variable when running tests (set it to 1).
335 All tests have their standard output and standard error captured by default.
336 This can be overridden with the --nocapture flag or the RUST_TEST_NOCAPTURE=1
337 environment variable. Logging is not captured by default.
341 #[test] - Indicates a function is a test to be run. This function
343 #[bench] - Indicates a function is a benchmark to be run. This
344 function takes one argument (test::Bencher).
345 #[should_panic] - This function (also labeled with #[test]) will only pass if
346 the code causes a panic (an assertion failure or panic!)
347 A message may be provided, which the failure string must
348 contain: #[should_panic(expected = "foo")].
349 #[ignore] - When applied to a function which is already attributed as a
350 test, then the test runner will ignore these tests during
351 normal test runs. Running with --ignored will run these
353 usage = getopts::usage(&message, &optgroups()));
356 // Parses command line arguments into test options
357 pub fn parse_opts(args: &[String]) -> Option<OptRes> {
358 let args_ = &args[1..];
360 match getopts::getopts(args_, &optgroups()) {
362 Err(f) => return Some(Err(f.to_string()))
365 if matches.opt_present("h") { usage(&args[0]); return None; }
367 let filter = if !matches.free.is_empty() {
368 Some(matches.free[0].clone())
373 let run_ignored = matches.opt_present("ignored");
375 let logfile = matches.opt_str("logfile");
376 let logfile = logfile.map(|s| PathBuf::from(&s));
378 let bench_benchmarks = matches.opt_present("bench");
379 let run_tests = ! bench_benchmarks ||
380 matches.opt_present("test");
382 let mut nocapture = matches.opt_present("nocapture");
384 nocapture = env::var("RUST_TEST_NOCAPTURE").is_ok();
387 let color = match matches.opt_str("color").as_ref().map(|s| &**s) {
388 Some("auto") | None => AutoColor,
389 Some("always") => AlwaysColor,
390 Some("never") => NeverColor,
392 Some(v) => return Some(Err(format!("argument for --color must be \
393 auto, always, or never (was {})",
397 let test_opts = TestOpts {
399 run_ignored: run_ignored,
400 run_tests: run_tests,
401 bench_benchmarks: bench_benchmarks,
403 nocapture: nocapture,
410 #[derive(Clone, PartialEq)]
411 pub struct BenchSamples {
412 ns_iter_summ: stats::Summary,
416 #[derive(Clone, PartialEq)]
417 pub enum TestResult {
421 TrMetrics(MetricMap),
422 TrBench(BenchSamples),
// Assert to the compiler that `TestResult` may be sent over the
// `MonitorMsg` channel from runner threads back to the coordinator.
// NOTE(review): `TrMetrics(MetricMap)` and `TrBench(BenchSamples)` carry
// owned data; confirm all variants are `Send` — the manual `unsafe impl`
// suggests auto-derivation was blocked by something not visible here.
425 unsafe impl Send for TestResult {}
427 enum OutputLocation<T> {
428 Pretty(Box<term::StdoutTerminal>),
432 struct ConsoleTestState<T> {
433 log_out: Option<File>,
434 out: OutputLocation<T>,
442 failures: Vec<(TestDesc, Vec<u8> )> ,
443 max_name_len: usize, // number of columns to fill when aligning names
446 impl<T: Write> ConsoleTestState<T> {
447 pub fn new(opts: &TestOpts,
448 _: Option<T>) -> io::Result<ConsoleTestState<io::Stdout>> {
449 let log_out = match opts.logfile {
450 Some(ref path) => Some(try!(File::create(path))),
453 let out = match term::stdout() {
454 None => Raw(io::stdout()),
458 Ok(ConsoleTestState {
461 use_color: use_color(opts),
467 metrics: MetricMap::new(),
468 failures: Vec::new(),
473 pub fn write_ok(&mut self) -> io::Result<()> {
474 self.write_pretty("ok", term::color::GREEN)
477 pub fn write_failed(&mut self) -> io::Result<()> {
478 self.write_pretty("FAILED", term::color::RED)
481 pub fn write_ignored(&mut self) -> io::Result<()> {
482 self.write_pretty("ignored", term::color::YELLOW)
485 pub fn write_metric(&mut self) -> io::Result<()> {
486 self.write_pretty("metric", term::color::CYAN)
489 pub fn write_bench(&mut self) -> io::Result<()> {
490 self.write_pretty("bench", term::color::CYAN)
493 pub fn write_pretty(&mut self,
495 color: term::color::Color) -> io::Result<()> {
497 Pretty(ref mut term) => {
499 try!(term.fg(color));
501 try!(term.write_all(word.as_bytes()));
507 Raw(ref mut stdout) => {
508 try!(stdout.write_all(word.as_bytes()));
514 pub fn write_plain(&mut self, s: &str) -> io::Result<()> {
516 Pretty(ref mut term) => {
517 try!(term.write_all(s.as_bytes()));
520 Raw(ref mut stdout) => {
521 try!(stdout.write_all(s.as_bytes()));
527 pub fn write_run_start(&mut self, len: usize) -> io::Result<()> {
529 let noun = if len != 1 { "tests" } else { "test" };
530 self.write_plain(&format!("\nrunning {} {}\n", len, noun))
533 pub fn write_test_start(&mut self, test: &TestDesc,
534 align: NamePadding) -> io::Result<()> {
535 let name = test.padded_name(self.max_name_len, align);
536 self.write_plain(&format!("test {} ... ", name))
539 pub fn write_result(&mut self, result: &TestResult) -> io::Result<()> {
541 TrOk => self.write_ok(),
542 TrFailed => self.write_failed(),
543 TrIgnored => self.write_ignored(),
544 TrMetrics(ref mm) => {
545 try!(self.write_metric());
546 self.write_plain(&format!(": {}", mm.fmt_metrics()))
549 try!(self.write_bench());
551 try!(self.write_plain(&format!(": {}", fmt_bench_samples(bs))));
556 self.write_plain("\n")
559 pub fn write_log(&mut self, test: &TestDesc,
560 result: &TestResult) -> io::Result<()> {
564 let s = format!("{} {}\n", match *result {
565 TrOk => "ok".to_owned(),
566 TrFailed => "failed".to_owned(),
567 TrIgnored => "ignored".to_owned(),
568 TrMetrics(ref mm) => mm.fmt_metrics(),
569 TrBench(ref bs) => fmt_bench_samples(bs)
571 o.write_all(s.as_bytes())
576 pub fn write_failures(&mut self) -> io::Result<()> {
577 try!(self.write_plain("\nfailures:\n"));
578 let mut failures = Vec::new();
579 let mut fail_out = String::new();
580 for &(ref f, ref stdout) in &self.failures {
581 failures.push(f.name.to_string());
582 if !stdout.is_empty() {
583 fail_out.push_str(&format!("---- {} stdout ----\n\t", f.name));
584 let output = String::from_utf8_lossy(stdout);
585 fail_out.push_str(&output);
586 fail_out.push_str("\n");
589 if !fail_out.is_empty() {
590 try!(self.write_plain("\n"));
591 try!(self.write_plain(&fail_out));
594 try!(self.write_plain("\nfailures:\n"));
596 for name in &failures {
597 try!(self.write_plain(&format!(" {}\n", name)));
602 pub fn write_run_finish(&mut self) -> io::Result<bool> {
603 assert!(self.passed + self.failed + self.ignored + self.measured == self.total);
605 let success = self.failed == 0;
607 try!(self.write_failures());
610 try!(self.write_plain("\ntest result: "));
612 // There's no parallelism at this point so it's safe to use color
613 try!(self.write_ok());
615 try!(self.write_failed());
617 let s = format!(". {} passed; {} failed; {} ignored; {} measured\n\n",
618 self.passed, self.failed, self.ignored, self.measured);
619 try!(self.write_plain(&s));
624 // Format a number with thousands separators
625 fn fmt_thousands_sep(mut n: usize, sep: char) -> String {
627 let mut output = String::new();
628 let mut trailing = false;
629 for &pow in &[9, 6, 3, 0] {
630 let base = 10_usize.pow(pow);
631 if pow == 0 || trailing || n / base != 0 {
633 output.write_fmt(format_args!("{}", n / base)).unwrap();
635 output.write_fmt(format_args!("{:03}", n / base)).unwrap();
648 pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
650 let mut output = String::new();
652 let median = bs.ns_iter_summ.median as usize;
653 let deviation = (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize;
655 output.write_fmt(format_args!("{:>11} ns/iter (+/- {})",
656 fmt_thousands_sep(median, ','),
657 fmt_thousands_sep(deviation, ','))).unwrap();
659 output.write_fmt(format_args!(" = {} MB/s", bs.mb_s)).unwrap();
664 // A simple console test runner
665 pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn> ) -> io::Result<bool> {
667 fn callback<T: Write>(event: &TestEvent,
668 st: &mut ConsoleTestState<T>) -> io::Result<()> {
669 match (*event).clone() {
670 TeFiltered(ref filtered_tests) => st.write_run_start(filtered_tests.len()),
671 TeWait(ref test, padding) => st.write_test_start(test, padding),
672 TeResult(test, result, stdout) => {
673 try!(st.write_log(&test, &result));
674 try!(st.write_result(&result));
676 TrOk => st.passed += 1,
677 TrIgnored => st.ignored += 1,
679 let tname = test.name;
680 let MetricMap(mm) = mm;
683 .insert_metric(&format!("{}.{}",
692 st.metrics.insert_metric(test.name.as_slice(),
693 bs.ns_iter_summ.median,
694 bs.ns_iter_summ.max - bs.ns_iter_summ.min);
699 st.failures.push((test, stdout));
707 let mut st = try!(ConsoleTestState::new(opts, None::<io::Stdout>));
708 fn len_if_padded(t: &TestDescAndFn) -> usize {
709 match t.testfn.padding() {
711 PadOnRight => t.desc.name.as_slice().len(),
714 match tests.iter().max_by_key(|t|len_if_padded(*t)) {
716 let n = t.desc.name.as_slice();
717 st.max_name_len = n.len();
721 try!(run_tests(opts, tests, |x| callback(&x, &mut st)));
722 return st.write_run_finish();
726 fn should_sort_failures_before_printing_them() {
727 let test_a = TestDesc {
728 name: StaticTestName("a"),
730 should_panic: ShouldPanic::No
733 let test_b = TestDesc {
734 name: StaticTestName("b"),
736 should_panic: ShouldPanic::No
739 let mut st = ConsoleTestState {
741 out: Raw(Vec::new()),
749 metrics: MetricMap::new(),
750 failures: vec!((test_b, Vec::new()), (test_a, Vec::new()))
753 st.write_failures().unwrap();
754 let s = match st.out {
755 Raw(ref m) => String::from_utf8_lossy(&m[..]),
756 Pretty(_) => unreachable!()
759 let apos = s.find("a").unwrap();
760 let bpos = s.find("b").unwrap();
761 assert!(apos < bpos);
764 fn use_color(opts: &TestOpts) -> bool {
766 AutoColor => !opts.nocapture && stdout_isatty(),
773 fn stdout_isatty() -> bool {
774 unsafe { libc::isatty(libc::STDOUT_FILENO) != 0 }
777 fn stdout_isatty() -> bool {
780 type HANDLE = *mut u8;
781 type LPDWORD = *mut u32;
782 const STD_OUTPUT_HANDLE: DWORD = -11i32 as DWORD;
784 fn GetStdHandle(which: DWORD) -> HANDLE;
785 fn GetConsoleMode(hConsoleHandle: HANDLE, lpMode: LPDWORD) -> BOOL;
788 let handle = GetStdHandle(STD_OUTPUT_HANDLE);
790 GetConsoleMode(handle, &mut out) != 0
796 TeFiltered(Vec<TestDesc> ),
797 TeWait(TestDesc, NamePadding),
798 TeResult(TestDesc, TestResult, Vec<u8> ),
/// Message sent from a test-runner thread back to the coordinating
/// thread: the test's descriptor, its outcome, and the bytes captured
/// from the test's stdout/stderr (destructured by the `rx.recv()` calls
/// in `run_tests`).
801 pub type MonitorMsg = (TestDesc, TestResult, Vec<u8> );
804 fn run_tests<F>(opts: &TestOpts,
805 tests: Vec<TestDescAndFn> ,
806 mut callback: F) -> io::Result<()> where
807 F: FnMut(TestEvent) -> io::Result<()>,
809 let mut filtered_tests = filter_tests(opts, tests);
810 if !opts.bench_benchmarks {
811 filtered_tests = convert_benchmarks_to_tests(filtered_tests);
814 let filtered_descs = filtered_tests.iter()
815 .map(|t| t.desc.clone())
818 try!(callback(TeFiltered(filtered_descs)));
820 let (filtered_tests, filtered_benchs_and_metrics): (Vec<_>, _) =
821 filtered_tests.into_iter().partition(|e| {
823 StaticTestFn(_) | DynTestFn(_) => true,
828 // It's tempting to just spawn all the tests at once, but since we have
829 // many tests that run in other processes we would be making a big mess.
830 let concurrency = get_concurrency();
832 let mut remaining = filtered_tests;
836 let (tx, rx) = channel::<MonitorMsg>();
838 while pending > 0 || !remaining.is_empty() {
839 while pending < concurrency && !remaining.is_empty() {
840 let test = remaining.pop().unwrap();
841 if concurrency == 1 {
842 // We are doing one test at a time so we can print the name
843 // of the test before we run it. Useful for debugging tests
844 // that hang forever.
845 try!(callback(TeWait(test.desc.clone(), test.testfn.padding())));
847 run_test(opts, !opts.run_tests, test, tx.clone());
851 let (desc, result, stdout) = rx.recv().unwrap();
852 if concurrency != 1 {
853 try!(callback(TeWait(desc.clone(), PadNone)));
855 try!(callback(TeResult(desc, result, stdout)));
859 if opts.bench_benchmarks {
860 // All benchmarks run at the end, in serial.
861 // (this includes metric fns)
862 for b in filtered_benchs_and_metrics {
863 try!(callback(TeWait(b.desc.clone(), b.testfn.padding())));
864 run_test(opts, false, b, tx.clone());
865 let (test, result, stdout) = rx.recv().unwrap();
866 try!(callback(TeResult(test, result, stdout)));
873 fn get_concurrency() -> usize {
874 return match env::var("RUST_TEST_THREADS") {
876 let opt_n: Option<usize> = s.parse().ok();
878 Some(n) if n > 0 => n,
879 _ => panic!("RUST_TEST_THREADS is `{}`, should be a positive integer.", s)
882 Err(..) => num_cpus(),
887 fn num_cpus() -> usize {
890 wProcessorArchitecture: u16,
893 lpMinimumApplicationAddress: *mut u8,
894 lpMaximumApplicationAddress: *mut u8,
895 dwActiveProcessorMask: *mut u8,
896 dwNumberOfProcessors: u32,
897 dwProcessorType: u32,
898 dwAllocationGranularity: u32,
899 wProcessorLevel: u16,
900 wProcessorRevision: u16,
903 fn GetSystemInfo(info: *mut SYSTEM_INFO) -> i32;
906 let mut sysinfo = std::mem::zeroed();
907 GetSystemInfo(&mut sysinfo);
908 sysinfo.dwNumberOfProcessors as usize
913 fn num_cpus() -> usize {
914 extern { fn rust_get_num_cpus() -> libc::uintptr_t; }
915 unsafe { rust_get_num_cpus() as usize }
919 pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
920 let mut filtered = tests;
922 // Remove tests that don't match the test filter
923 filtered = match opts.filter {
925 Some(ref filter) => {
926 filtered.into_iter().filter(|test| {
927 test.desc.name.as_slice().contains(&filter[..])
932 // Maybe pull out the ignored test and unignore them
933 filtered = if !opts.run_ignored {
936 fn filter(test: TestDescAndFn) -> Option<TestDescAndFn> {
937 if test.desc.ignore {
938 let TestDescAndFn {desc, testfn} = test;
940 desc: TestDesc {ignore: false, ..desc},
947 filtered.into_iter().filter_map(filter).collect()
950 // Sort the tests alphabetically
951 filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));
956 pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
957 // convert benchmarks to tests, if we're not benchmarking them
958 tests.into_iter().map(|x| {
959 let testfn = match x.testfn {
960 DynBenchFn(bench) => {
961 DynTestFn(Box::new(move || bench::run_once(|b| bench.run(b))))
963 StaticBenchFn(benchfn) => {
964 DynTestFn(Box::new(move || bench::run_once(|b| benchfn(b))))
968 TestDescAndFn { desc: x.desc, testfn: testfn }
972 pub fn run_test(opts: &TestOpts,
975 monitor_ch: Sender<MonitorMsg>) {
977 let TestDescAndFn {desc, testfn} = test;
979 if force_ignore || desc.ignore {
980 monitor_ch.send((desc, TrIgnored, Vec::new())).unwrap();
984 fn run_test_inner(desc: TestDesc,
985 monitor_ch: Sender<MonitorMsg>,
987 testfn: Box<FnBox() + Send>) {
988 struct Sink(Arc<Mutex<Vec<u8>>>);
989 impl Write for Sink {
990 fn write(&mut self, data: &[u8]) -> io::Result<usize> {
991 Write::write(&mut *self.0.lock().unwrap(), data)
993 fn flush(&mut self) -> io::Result<()> { Ok(()) }
996 thread::spawn(move || {
997 let data = Arc::new(Mutex::new(Vec::new()));
998 let data2 = data.clone();
999 let cfg = thread::Builder::new().name(match desc.name {
1000 DynTestName(ref name) => name.clone(),
1001 StaticTestName(name) => name.to_owned(),
1004 let result_guard = cfg.spawn(move || {
1006 io::set_print(box Sink(data2.clone()));
1007 io::set_panic(box Sink(data2));
1011 let test_result = calc_result(&desc, result_guard.join());
1012 let stdout = data.lock().unwrap().to_vec();
1013 monitor_ch.send((desc.clone(), test_result, stdout)).unwrap();
1018 DynBenchFn(bencher) => {
1019 let bs = ::bench::benchmark(|harness| bencher.run(harness));
1020 monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
1023 StaticBenchFn(benchfn) => {
1024 let bs = ::bench::benchmark(|harness| (benchfn.clone())(harness));
1025 monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
1029 let mut mm = MetricMap::new();
1030 f.call_box((&mut mm,));
1031 monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
1034 StaticMetricFn(f) => {
1035 let mut mm = MetricMap::new();
1037 monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
1040 DynTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture, f),
1041 StaticTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture,
1046 fn calc_result(desc: &TestDesc, task_result: Result<(), Box<Any+Send>>) -> TestResult {
1047 match (&desc.should_panic, task_result) {
1048 (&ShouldPanic::No, Ok(())) |
1049 (&ShouldPanic::Yes, Err(_)) => TrOk,
1050 (&ShouldPanic::YesWithMessage(msg), Err(ref err))
1051 if err.downcast_ref::<String>()
1053 .or_else(|| err.downcast_ref::<&'static str>().map(|e| *e))
1054 .map(|e| e.contains(msg))
1055 .unwrap_or(false) => TrOk,
1062 pub fn new() -> MetricMap {
1063 MetricMap(BTreeMap::new())
1066 /// Insert a named `value` (+/- `noise`) metric into the map. The value
1067 /// must be non-negative. The `noise` indicates the uncertainty of the
1068 /// metric, which doubles as the "noise range" of acceptable
1069 /// pairwise-regressions on this named value, when comparing from one
1070 /// metric to the next using `compare_to_old`.
1072 /// If `noise` is positive, then it means this metric is of a value
1073 /// you want to see grow smaller, so a change larger than `noise` in the
1074 /// positive direction represents a regression.
1076 /// If `noise` is negative, then it means this metric is of a value
1077 /// you want to see grow larger, so a change larger than `noise` in the
1078 /// negative direction represents a regression.
1079 pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
1084 let MetricMap(ref mut map) = *self;
1085 map.insert(name.to_owned(), m);
1088 pub fn fmt_metrics(&self) -> String {
1089 let MetricMap(ref mm) = *self;
1090 let v : Vec<String> = mm.iter()
1091 .map(|(k,v)| format!("{}: {} (+/- {})", *k,
1101 /// A function that is opaque to the optimizer, to allow benchmarks to
1102 /// pretend to use outputs to assist in avoiding dead-code
1105 /// This function is a no-op, and does not even read from `dummy`.
1106 #[cfg(not(all(target_os = "nacl", target_arch = "le32")))]
1107 pub fn black_box<T>(dummy: T) -> T {
1108 // we need to "use" the argument in some way LLVM can't
1110 unsafe {asm!("" : : "r"(&dummy))}
1113 #[cfg(all(target_os = "nacl", target_arch = "le32"))]
// Fallback for NaCl/le32, where the inline-asm trick used by the other
// `black_box` cfg is presumably unavailable — TODO confirm. This version
// cannot hide `dummy` from the optimizer; it is a plain pass-through.
1115 pub fn black_box<T>(dummy: T) -> T { dummy }
1119 /// Callback for benchmark functions to run in their body.
1120 pub fn iter<T, F>(&mut self, mut inner: F) where F: FnMut() -> T {
1121 let start = Instant::now();
1122 let k = self.iterations;
1126 self.dur = start.elapsed();
1129 pub fn ns_elapsed(&mut self) -> u64 {
1130 self.dur.as_secs() * 1_000_000_000 + (self.dur.subsec_nanos() as u64)
1133 pub fn ns_per_iter(&mut self) -> u64 {
1134 if self.iterations == 0 {
1137 self.ns_elapsed() / cmp::max(self.iterations, 1)
1141 pub fn bench_n<F>(&mut self, n: u64, f: F) where F: FnOnce(&mut Bencher) {
1142 self.iterations = n;
1146 // This is a more statistics-driven benchmark algorithm
1147 pub fn auto_bench<F>(&mut self, mut f: F) -> stats::Summary where F: FnMut(&mut Bencher) {
1148 // Initial bench run to get ballpark figure.
1150 self.bench_n(n, |x| f(x));
1152 // Try to estimate iter count for 1ms falling back to 1m
1153 // iterations if first run took < 1ns.
1154 if self.ns_per_iter() == 0 {
1157 n = 1_000_000 / cmp::max(self.ns_per_iter(), 1);
1159 // if the first run took more than 1ms we don't want to just
1160 // be left doing 0 iterations on every loop. The unfortunate
1161 // side effect of not being able to do as many runs is
1162 // automatically handled by the statistical analysis below
1163 // (i.e. larger error bars).
1164 if n == 0 { n = 1; }
1166 let mut total_run = Duration::new(0, 0);
1167 let samples : &mut [f64] = &mut [0.0_f64; 50];
1169 let loop_start = Instant::now();
1171 for p in &mut *samples {
1172 self.bench_n(n, |x| f(x));
1173 *p = self.ns_per_iter() as f64;
1176 stats::winsorize(samples, 5.0);
1177 let summ = stats::Summary::new(samples);
1179 for p in &mut *samples {
1180 self.bench_n(5 * n, |x| f(x));
1181 *p = self.ns_per_iter() as f64;
1184 stats::winsorize(samples, 5.0);
1185 let summ5 = stats::Summary::new(samples);
1186 let loop_run = loop_start.elapsed();
1188 // If we've run for 100ms and seem to have converged to a
1190 if loop_run > Duration::from_millis(100) &&
1191 summ.median_abs_dev_pct < 1.0 &&
1192 summ.median - summ5.median < summ5.median_abs_dev {
1196 total_run = total_run + loop_run;
1197 // Longest we ever run for is 3s.
1198 if total_run > Duration::from_secs(3) {
1202 // If we overflow here just return the results so far. We check a
1203 // multiplier of 10 because we're about to multiply by 2 and the
1204 // next iteration of the loop will also multiply by 5 (to calculate
1205 // the summ5 result)
1206 n = match n.checked_mul(10) {
1208 None => return summ5,
1216 use std::time::Duration;
1217 use super::{Bencher, BenchSamples};
1219 pub fn benchmark<F>(f: F) -> BenchSamples where F: FnMut(&mut Bencher) {
1220 let mut bs = Bencher {
1222 dur: Duration::new(0, 0),
1226 let ns_iter_summ = bs.auto_bench(f);
1228 let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
1229 let iter_s = 1_000_000_000 / ns_iter;
1230 let mb_s = (bs.bytes * iter_s) / 1_000_000;
1233 ns_iter_summ: ns_iter_summ,
1238 pub fn run_once<F>(f: F) where F: FnOnce(&mut Bencher) {
1239 let mut bs = Bencher {
1241 dur: Duration::new(0, 0),
1250 use test::{TrFailed, TrIgnored, TrOk, filter_tests, parse_opts,
1251 TestDesc, TestDescAndFn, TestOpts, run_test,
1253 StaticTestName, DynTestName, DynTestFn, ShouldPanic};
1254 use std::sync::mpsc::channel;
1257 pub fn do_not_run_ignored_tests() {
1258 fn f() { panic!(); }
1259 let desc = TestDescAndFn {
1261 name: StaticTestName("whatever"),
1263 should_panic: ShouldPanic::No,
1265 testfn: DynTestFn(Box::new(move|| f())),
1267 let (tx, rx) = channel();
1268 run_test(&TestOpts::new(), false, desc, tx);
1269 let (_, res, _) = rx.recv().unwrap();
1270 assert!(res != TrOk);
1274 pub fn ignored_tests_result_in_ignored() {
1276 let desc = TestDescAndFn {
1278 name: StaticTestName("whatever"),
1280 should_panic: ShouldPanic::No,
1282 testfn: DynTestFn(Box::new(move|| f())),
1284 let (tx, rx) = channel();
1285 run_test(&TestOpts::new(), false, desc, tx);
1286 let (_, res, _) = rx.recv().unwrap();
1287 assert!(res == TrIgnored);
1291 fn test_should_panic() {
1292 fn f() { panic!(); }
1293 let desc = TestDescAndFn {
1295 name: StaticTestName("whatever"),
1297 should_panic: ShouldPanic::Yes,
1299 testfn: DynTestFn(Box::new(move|| f())),
1301 let (tx, rx) = channel();
1302 run_test(&TestOpts::new(), false, desc, tx);
1303 let (_, res, _) = rx.recv().unwrap();
1304 assert!(res == TrOk);
1308 fn test_should_panic_good_message() {
1309 fn f() { panic!("an error message"); }
1310 let desc = TestDescAndFn {
1312 name: StaticTestName("whatever"),
1314 should_panic: ShouldPanic::YesWithMessage("error message"),
1316 testfn: DynTestFn(Box::new(move|| f())),
1318 let (tx, rx) = channel();
1319 run_test(&TestOpts::new(), false, desc, tx);
1320 let (_, res, _) = rx.recv().unwrap();
1321 assert!(res == TrOk);
1325 fn test_should_panic_bad_message() {
1326 fn f() { panic!("an error message"); }
1327 let desc = TestDescAndFn {
1329 name: StaticTestName("whatever"),
1331 should_panic: ShouldPanic::YesWithMessage("foobar"),
1333 testfn: DynTestFn(Box::new(move|| f())),
1335 let (tx, rx) = channel();
1336 run_test(&TestOpts::new(), false, desc, tx);
1337 let (_, res, _) = rx.recv().unwrap();
1338 assert!(res == TrFailed);
1342 fn test_should_panic_but_succeeds() {
1344 let desc = TestDescAndFn {
1346 name: StaticTestName("whatever"),
1348 should_panic: ShouldPanic::Yes,
1350 testfn: DynTestFn(Box::new(move|| f())),
1352 let (tx, rx) = channel();
1353 run_test(&TestOpts::new(), false, desc, tx);
1354 let (_, res, _) = rx.recv().unwrap();
1355 assert!(res == TrFailed);
1359 fn parse_ignored_flag() {
1360 let args = vec!("progname".to_string(),
1361 "filter".to_string(),
1362 "--ignored".to_string());
1363 let opts = match parse_opts(&args) {
1365 _ => panic!("Malformed arg in parse_ignored_flag")
1367 assert!((opts.run_ignored));
1371 pub fn filter_for_ignored_option() {
1372 // When we run ignored tests the test filter should filter out all the
1373 // unignored tests and flip the ignore flag on the rest to false
1375 let mut opts = TestOpts::new();
1376 opts.run_tests = true;
1377 opts.run_ignored = true;
1382 name: StaticTestName("1"),
1384 should_panic: ShouldPanic::No,
1386 testfn: DynTestFn(Box::new(move|| {})),
1390 name: StaticTestName("2"),
1392 should_panic: ShouldPanic::No,
1394 testfn: DynTestFn(Box::new(move|| {})),
1396 let filtered = filter_tests(&opts, tests);
1398 assert_eq!(filtered.len(), 1);
1399 assert_eq!(filtered[0].desc.name.to_string(),
1401 assert!(filtered[0].desc.ignore == false);
1405 pub fn sort_tests() {
1406 let mut opts = TestOpts::new();
1407 opts.run_tests = true;
1410 vec!("sha1::test".to_string(),
1411 "isize::test_to_str".to_string(),
1412 "isize::test_pow".to_string(),
1413 "test::do_not_run_ignored_tests".to_string(),
1414 "test::ignored_tests_result_in_ignored".to_string(),
1415 "test::first_free_arg_should_be_a_filter".to_string(),
1416 "test::parse_ignored_flag".to_string(),
1417 "test::filter_for_ignored_option".to_string(),
1418 "test::sort_tests".to_string());
1422 let mut tests = Vec::new();
1423 for name in &names {
1424 let test = TestDescAndFn {
1426 name: DynTestName((*name).clone()),
1428 should_panic: ShouldPanic::No,
1430 testfn: DynTestFn(Box::new(testfn)),
1436 let filtered = filter_tests(&opts, tests);
1439 vec!("isize::test_pow".to_string(),
1440 "isize::test_to_str".to_string(),
1441 "sha1::test".to_string(),
1442 "test::do_not_run_ignored_tests".to_string(),
1443 "test::filter_for_ignored_option".to_string(),
1444 "test::first_free_arg_should_be_a_filter".to_string(),
1445 "test::ignored_tests_result_in_ignored".to_string(),
1446 "test::parse_ignored_flag".to_string(),
1447 "test::sort_tests".to_string());
1449 for (a, b) in expected.iter().zip(filtered) {
1450 assert!(*a == b.desc.name.to_string());
1455 pub fn test_metricmap_compare() {
1456 let mut m1 = MetricMap::new();
1457 let mut m2 = MetricMap::new();
1458 m1.insert_metric("in-both-noise", 1000.0, 200.0);
1459 m2.insert_metric("in-both-noise", 1100.0, 200.0);
1461 m1.insert_metric("in-first-noise", 1000.0, 2.0);
1462 m2.insert_metric("in-second-noise", 1000.0, 2.0);
1464 m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
1465 m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);
1467 m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
1468 m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);
1470 m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
1471 m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);
1473 m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
1474 m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);