1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 //! Support code for rustc's built in unit-test and micro-benchmarking framework.
14 //! Almost all user code will only be interested in `Bencher` and
15 //! `black_box`. All other interactions (such as writing tests and
16 //! benchmarks themselves) should be done via the `#[test]` and
17 //! `#[bench]` attributes.
19 //! See the [Testing Chapter](../book/testing.html) of the book for more details.
21 // Currently, not much of this is meant for users. It is intended to
22 // support the simplest interface possible for representing and
23 // running tests while providing a base that other test frameworks may build on.
26 #![crate_name = "test"]
27 #![unstable(feature = "test", issue = "27812")]
28 #![crate_type = "rlib"]
29 #![crate_type = "dylib"]
30 #![doc(html_logo_url = "https://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
31 html_favicon_url = "https://doc.rust-lang.org/favicon.ico",
32 html_root_url = "https://doc.rust-lang.org/nightly/",
33 test(attr(deny(warnings))))]
36 #![feature(box_syntax)]
39 #![feature(rustc_private)]
40 #![feature(set_stdio)]
41 #![feature(staged_api)]
45 extern crate serialize;
46 extern crate serialize as rustc_serialize;
50 pub use self::TestFn::*;
51 pub use self::ColorConfig::*;
52 pub use self::TestResult::*;
53 pub use self::TestName::*;
54 use self::TestEvent::*;
55 use self::NamePadding::*;
56 use self::OutputLocation::*;
59 use serialize::Encodable;
60 use std::boxed::FnBox;
65 use std::collections::BTreeMap;
69 use std::io::prelude::*;
71 use std::iter::repeat;
72 use std::path::PathBuf;
73 use std::sync::mpsc::{channel, Sender};
74 use std::sync::{Arc, Mutex};
76 use std::time::{Instant, Duration};
78 // to be used by rustc to compile tests in libtest
80 pub use {Bencher, TestName, TestResult, TestDesc,
81 TestDescAndFn, TestOpts, TrFailed, TrIgnored, TrOk,
83 StaticTestFn, StaticTestName, DynTestName, DynTestFn,
84 run_test, test_main, test_main_static, filter_tests,
85 parse_opts, StaticBenchFn, ShouldPanic};
90 // The name of a test. By convention this follows the rules for rust
91 // paths; i.e. it should be a series of identifiers separated by double
92 // colons. This way if some test runner wants to arrange the tests
93 // hierarchically it may.
95 #[derive(Clone, PartialEq, Eq, Hash, Debug)]
97 StaticTestName(&'static str),
101 fn as_slice(&self) -> &str {
103 StaticTestName(s) => s,
104 DynTestName(ref s) => s
108 impl fmt::Display for TestName {
109 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
110 fmt::Display::fmt(self.as_slice(), f)
114 #[derive(Clone, Copy)]
121 fn padded_name(&self, column_count: usize, align: NamePadding) -> String {
122 let mut name = String::from(self.name.as_slice());
123 let fill = column_count.saturating_sub(name.len());
124 let pad = repeat(" ").take(fill).collect::<String>();
135 /// Represents a benchmark function.
136 pub trait TDynBenchFn: Send {
137 fn run(&self, harness: &mut Bencher);
140 // A function that runs a test. If the function returns successfully,
141 // the test succeeds; if the function panics then the test fails. We
142 // may need to come up with a more clever definition of test in order
143 // to support isolation of tests into threads.
146 StaticBenchFn(fn(&mut Bencher)),
147 StaticMetricFn(fn(&mut MetricMap)),
148 DynTestFn(Box<FnBox() + Send>),
149 DynMetricFn(Box<FnBox(&mut MetricMap)+Send>),
150 DynBenchFn(Box<TDynBenchFn+'static>)
154 fn padding(&self) -> NamePadding {
156 StaticTestFn(..) => PadNone,
157 StaticBenchFn(..) => PadOnRight,
158 StaticMetricFn(..) => PadOnRight,
159 DynTestFn(..) => PadNone,
160 DynMetricFn(..) => PadOnRight,
161 DynBenchFn(..) => PadOnRight,
166 impl fmt::Debug for TestFn {
167 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
168 f.write_str(match *self {
169 StaticTestFn(..) => "StaticTestFn(..)",
170 StaticBenchFn(..) => "StaticBenchFn(..)",
171 StaticMetricFn(..) => "StaticMetricFn(..)",
172 DynTestFn(..) => "DynTestFn(..)",
173 DynMetricFn(..) => "DynMetricFn(..)",
174 DynBenchFn(..) => "DynBenchFn(..)"
179 /// Manager of the benchmarking runs.
181 /// This is fed into functions marked with `#[bench]` to allow for
182 /// set-up & tear-down before running a piece of code repeatedly via a
184 #[derive(Copy, Clone)]
191 #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
192 pub enum ShouldPanic {
195 YesWithMessage(&'static str)
198 // The definition of a single test. A test runner will run a list of these.
200 #[derive(Clone, Debug, PartialEq, Eq, Hash)]
201 pub struct TestDesc {
204 pub should_panic: ShouldPanic,
207 unsafe impl Send for TestDesc {}
210 pub struct TestDescAndFn {
215 #[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Debug, Copy)]
222 pub fn new(value: f64, noise: f64) -> Metric {
223 Metric {value: value, noise: noise}
228 pub struct MetricMap(BTreeMap<String,Metric>);
230 impl Clone for MetricMap {
231 fn clone(&self) -> MetricMap {
232 let MetricMap(ref map) = *self;
233 MetricMap(map.clone())
237 // The default console test runner. It accepts the command line
238 // arguments and a vector of test_descs.
239 pub fn test_main(args: &[String], tests: Vec<TestDescAndFn> ) {
241 match parse_opts(args) {
243 Some(Err(msg)) => panic!("{:?}", msg),
246 match run_tests_console(&opts, tests) {
248 Ok(false) => std::process::exit(101),
249 Err(e) => panic!("io error when running tests: {:?}", e),
253 // A variant optimized for invocation with a static test vector.
254 // This will panic (intentionally) when fed any dynamic tests, because
255 // it is copying the static values out into a dynamic vector and cannot
256 // copy dynamic values. It is doing this because from this point on
257 // a Vec<TestDescAndFn> is used in order to effect ownership-transfer
258 // semantics into parallel test runners, which in turn requires a Vec<>
259 // rather than a &[].
260 pub fn test_main_static(tests: &[TestDescAndFn]) {
261 let args = env::args().collect::<Vec<_>>();
262 let owned_tests = tests.iter().map(|t| {
264 StaticTestFn(f) => TestDescAndFn { testfn: StaticTestFn(f), desc: t.desc.clone() },
265 StaticBenchFn(f) => TestDescAndFn { testfn: StaticBenchFn(f), desc: t.desc.clone() },
266 _ => panic!("non-static tests passed to test::test_main_static")
269 test_main(&args, owned_tests)
272 #[derive(Copy, Clone)]
273 pub enum ColorConfig {
279 pub struct TestOpts {
280 pub filter: Option<String>,
281 pub run_ignored: bool,
283 pub bench_benchmarks: bool,
284 pub logfile: Option<PathBuf>,
286 pub color: ColorConfig,
291 fn new() -> TestOpts {
296 bench_benchmarks: false,
304 /// Result of parsing the options.
305 pub type OptRes = Result<TestOpts, String>;
307 fn optgroups() -> Vec<getopts::OptGroup> {
308 vec!(getopts::optflag("", "ignored", "Run ignored tests"),
309 getopts::optflag("", "test", "Run tests and not benchmarks"),
310 getopts::optflag("", "bench", "Run benchmarks instead of tests"),
311 getopts::optflag("h", "help", "Display this message (longer with --help)"),
312 getopts::optopt("", "logfile", "Write logs to the specified file instead \
314 getopts::optflag("", "nocapture", "don't capture stdout/stderr of each \
315 task, allow printing directly"),
316 getopts::optopt("", "color", "Configure coloring of output:
317 auto = colorize if stdout is a tty and tests are run on serially (default);
318 always = always colorize output;
319 never = never colorize output;", "auto|always|never"))
322 fn usage(binary: &str) {
323 let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
326 The FILTER string is tested against the name of all tests, and only those
327 tests whose names contain the filter are run.
329 By default, all tests are run in parallel. This can be altered with the
330 RUST_TEST_THREADS environment variable when running tests (set it to 1).
332 All tests have their standard output and standard error captured by default.
333 This can be overridden with the --nocapture flag or the RUST_TEST_NOCAPTURE=1
334 environment variable. Logging is not captured by default.
338 #[test] - Indicates a function is a test to be run. This function
340 #[bench] - Indicates a function is a benchmark to be run. This
341 function takes one argument (test::Bencher).
342 #[should_panic] - This function (also labeled with #[test]) will only pass if
343 the code causes a panic (an assertion failure or panic!)
344 A message may be provided, which the failure string must
345 contain: #[should_panic(expected = "foo")].
346 #[ignore] - When applied to a function which is already attributed as a
347 test, then the test runner will ignore these tests during
348 normal test runs. Running with --ignored will run these tests.
350 usage = getopts::usage(&message, &optgroups()));
353 // Parses command line arguments into test options
354 pub fn parse_opts(args: &[String]) -> Option<OptRes> {
355 let args_ = &args[1..];
357 match getopts::getopts(args_, &optgroups()) {
359 Err(f) => return Some(Err(f.to_string()))
362 if matches.opt_present("h") { usage(&args[0]); return None; }
364 let filter = if !matches.free.is_empty() {
365 Some(matches.free[0].clone())
370 let run_ignored = matches.opt_present("ignored");
372 let logfile = matches.opt_str("logfile");
373 let logfile = logfile.map(|s| PathBuf::from(&s));
375 let bench_benchmarks = matches.opt_present("bench");
376 let run_tests = ! bench_benchmarks ||
377 matches.opt_present("test");
379 let mut nocapture = matches.opt_present("nocapture");
381 nocapture = env::var("RUST_TEST_NOCAPTURE").is_ok();
384 let color = match matches.opt_str("color").as_ref().map(|s| &**s) {
385 Some("auto") | None => AutoColor,
386 Some("always") => AlwaysColor,
387 Some("never") => NeverColor,
389 Some(v) => return Some(Err(format!("argument for --color must be \
390 auto, always, or never (was {})",
394 let test_opts = TestOpts {
396 run_ignored: run_ignored,
397 run_tests: run_tests,
398 bench_benchmarks: bench_benchmarks,
400 nocapture: nocapture,
407 #[derive(Clone, PartialEq)]
408 pub struct BenchSamples {
409 ns_iter_summ: stats::Summary,
413 #[derive(Clone, PartialEq)]
414 pub enum TestResult {
418 TrMetrics(MetricMap),
419 TrBench(BenchSamples),
422 unsafe impl Send for TestResult {}
424 enum OutputLocation<T> {
425 Pretty(Box<term::StdoutTerminal>),
429 struct ConsoleTestState<T> {
430 log_out: Option<File>,
431 out: OutputLocation<T>,
439 failures: Vec<(TestDesc, Vec<u8> )> ,
440 max_name_len: usize, // number of columns to fill when aligning names
443 impl<T: Write> ConsoleTestState<T> {
444 pub fn new(opts: &TestOpts,
445 _: Option<T>) -> io::Result<ConsoleTestState<io::Stdout>> {
446 let log_out = match opts.logfile {
447 Some(ref path) => Some(try!(File::create(path))),
450 let out = match term::stdout() {
451 None => Raw(io::stdout()),
455 Ok(ConsoleTestState {
458 use_color: use_color(opts),
464 metrics: MetricMap::new(),
465 failures: Vec::new(),
470 pub fn write_ok(&mut self) -> io::Result<()> {
471 self.write_pretty("ok", term::color::GREEN)
474 pub fn write_failed(&mut self) -> io::Result<()> {
475 self.write_pretty("FAILED", term::color::RED)
478 pub fn write_ignored(&mut self) -> io::Result<()> {
479 self.write_pretty("ignored", term::color::YELLOW)
482 pub fn write_metric(&mut self) -> io::Result<()> {
483 self.write_pretty("metric", term::color::CYAN)
486 pub fn write_bench(&mut self) -> io::Result<()> {
487 self.write_pretty("bench", term::color::CYAN)
490 pub fn write_pretty(&mut self,
492 color: term::color::Color) -> io::Result<()> {
494 Pretty(ref mut term) => {
496 try!(term.fg(color));
498 try!(term.write_all(word.as_bytes()));
504 Raw(ref mut stdout) => {
505 try!(stdout.write_all(word.as_bytes()));
511 pub fn write_plain(&mut self, s: &str) -> io::Result<()> {
513 Pretty(ref mut term) => {
514 try!(term.write_all(s.as_bytes()));
517 Raw(ref mut stdout) => {
518 try!(stdout.write_all(s.as_bytes()));
524 pub fn write_run_start(&mut self, len: usize) -> io::Result<()> {
526 let noun = if len != 1 { "tests" } else { "test" };
527 self.write_plain(&format!("\nrunning {} {}\n", len, noun))
530 pub fn write_test_start(&mut self, test: &TestDesc,
531 align: NamePadding) -> io::Result<()> {
532 let name = test.padded_name(self.max_name_len, align);
533 self.write_plain(&format!("test {} ... ", name))
536 pub fn write_result(&mut self, result: &TestResult) -> io::Result<()> {
538 TrOk => self.write_ok(),
539 TrFailed => self.write_failed(),
540 TrIgnored => self.write_ignored(),
541 TrMetrics(ref mm) => {
542 try!(self.write_metric());
543 self.write_plain(&format!(": {}", mm.fmt_metrics()))
546 try!(self.write_bench());
548 try!(self.write_plain(&format!(": {}", fmt_bench_samples(bs))));
553 self.write_plain("\n")
556 pub fn write_log(&mut self, test: &TestDesc,
557 result: &TestResult) -> io::Result<()> {
561 let s = format!("{} {}\n", match *result {
562 TrOk => "ok".to_owned(),
563 TrFailed => "failed".to_owned(),
564 TrIgnored => "ignored".to_owned(),
565 TrMetrics(ref mm) => mm.fmt_metrics(),
566 TrBench(ref bs) => fmt_bench_samples(bs)
568 o.write_all(s.as_bytes())
573 pub fn write_failures(&mut self) -> io::Result<()> {
574 try!(self.write_plain("\nfailures:\n"));
575 let mut failures = Vec::new();
576 let mut fail_out = String::new();
577 for &(ref f, ref stdout) in &self.failures {
578 failures.push(f.name.to_string());
579 if !stdout.is_empty() {
580 fail_out.push_str(&format!("---- {} stdout ----\n\t", f.name));
581 let output = String::from_utf8_lossy(stdout);
582 fail_out.push_str(&output);
583 fail_out.push_str("\n");
586 if !fail_out.is_empty() {
587 try!(self.write_plain("\n"));
588 try!(self.write_plain(&fail_out));
591 try!(self.write_plain("\nfailures:\n"));
593 for name in &failures {
594 try!(self.write_plain(&format!(" {}\n", name)));
599 pub fn write_run_finish(&mut self) -> io::Result<bool> {
600 assert!(self.passed + self.failed + self.ignored + self.measured == self.total);
602 let success = self.failed == 0;
604 try!(self.write_failures());
607 try!(self.write_plain("\ntest result: "));
609 // There's no parallelism at this point so it's safe to use color
610 try!(self.write_ok());
612 try!(self.write_failed());
614 let s = format!(". {} passed; {} failed; {} ignored; {} measured\n\n",
615 self.passed, self.failed, self.ignored, self.measured);
616 try!(self.write_plain(&s));
621 // Format a number with thousands separators
622 fn fmt_thousands_sep(mut n: usize, sep: char) -> String {
624 let mut output = String::new();
625 let mut trailing = false;
626 for &pow in &[9, 6, 3, 0] {
627 let base = 10_usize.pow(pow);
628 if pow == 0 || trailing || n / base != 0 {
630 output.write_fmt(format_args!("{}", n / base)).unwrap();
632 output.write_fmt(format_args!("{:03}", n / base)).unwrap();
645 pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
647 let mut output = String::new();
649 let median = bs.ns_iter_summ.median as usize;
650 let deviation = (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as usize;
652 output.write_fmt(format_args!("{:>11} ns/iter (+/- {})",
653 fmt_thousands_sep(median, ','),
654 fmt_thousands_sep(deviation, ','))).unwrap();
656 output.write_fmt(format_args!(" = {} MB/s", bs.mb_s)).unwrap();
661 // A simple console test runner
662 pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn> ) -> io::Result<bool> {
664 fn callback<T: Write>(event: &TestEvent,
665 st: &mut ConsoleTestState<T>) -> io::Result<()> {
666 match (*event).clone() {
667 TeFiltered(ref filtered_tests) => st.write_run_start(filtered_tests.len()),
668 TeWait(ref test, padding) => st.write_test_start(test, padding),
669 TeResult(test, result, stdout) => {
670 try!(st.write_log(&test, &result));
671 try!(st.write_result(&result));
673 TrOk => st.passed += 1,
674 TrIgnored => st.ignored += 1,
676 let tname = test.name;
677 let MetricMap(mm) = mm;
680 .insert_metric(&format!("{}.{}",
689 st.metrics.insert_metric(test.name.as_slice(),
690 bs.ns_iter_summ.median,
691 bs.ns_iter_summ.max - bs.ns_iter_summ.min);
696 st.failures.push((test, stdout));
704 let mut st = try!(ConsoleTestState::new(opts, None::<io::Stdout>));
705 fn len_if_padded(t: &TestDescAndFn) -> usize {
706 match t.testfn.padding() {
708 PadOnRight => t.desc.name.as_slice().len(),
711 match tests.iter().max_by_key(|t|len_if_padded(*t)) {
713 let n = t.desc.name.as_slice();
714 st.max_name_len = n.len();
718 try!(run_tests(opts, tests, |x| callback(&x, &mut st)));
719 return st.write_run_finish();
723 fn should_sort_failures_before_printing_them() {
724 let test_a = TestDesc {
725 name: StaticTestName("a"),
727 should_panic: ShouldPanic::No
730 let test_b = TestDesc {
731 name: StaticTestName("b"),
733 should_panic: ShouldPanic::No
736 let mut st = ConsoleTestState {
738 out: Raw(Vec::new()),
746 metrics: MetricMap::new(),
747 failures: vec!((test_b, Vec::new()), (test_a, Vec::new()))
750 st.write_failures().unwrap();
751 let s = match st.out {
752 Raw(ref m) => String::from_utf8_lossy(&m[..]),
753 Pretty(_) => unreachable!()
756 let apos = s.find("a").unwrap();
757 let bpos = s.find("b").unwrap();
758 assert!(apos < bpos);
761 fn use_color(opts: &TestOpts) -> bool {
763 AutoColor => !opts.nocapture && stdout_isatty(),
770 fn stdout_isatty() -> bool {
771 unsafe { libc::isatty(libc::STDOUT_FILENO) != 0 }
774 fn stdout_isatty() -> bool {
777 type HANDLE = *mut u8;
778 type LPDWORD = *mut u32;
779 const STD_OUTPUT_HANDLE: DWORD = -11i32 as DWORD;
781 fn GetStdHandle(which: DWORD) -> HANDLE;
782 fn GetConsoleMode(hConsoleHandle: HANDLE, lpMode: LPDWORD) -> BOOL;
785 let handle = GetStdHandle(STD_OUTPUT_HANDLE);
787 GetConsoleMode(handle, &mut out) != 0
793 TeFiltered(Vec<TestDesc> ),
794 TeWait(TestDesc, NamePadding),
795 TeResult(TestDesc, TestResult, Vec<u8> ),
798 pub type MonitorMsg = (TestDesc, TestResult, Vec<u8> );
801 fn run_tests<F>(opts: &TestOpts,
802 tests: Vec<TestDescAndFn> ,
803 mut callback: F) -> io::Result<()> where
804 F: FnMut(TestEvent) -> io::Result<()>,
806 let mut filtered_tests = filter_tests(opts, tests);
807 if !opts.bench_benchmarks {
808 filtered_tests = convert_benchmarks_to_tests(filtered_tests);
811 let filtered_descs = filtered_tests.iter()
812 .map(|t| t.desc.clone())
815 try!(callback(TeFiltered(filtered_descs)));
817 let (filtered_tests, filtered_benchs_and_metrics): (Vec<_>, _) =
818 filtered_tests.into_iter().partition(|e| {
820 StaticTestFn(_) | DynTestFn(_) => true,
825 // It's tempting to just spawn all the tests at once, but since we have
826 // many tests that run in other processes we would be making a big mess.
827 let concurrency = get_concurrency();
829 let mut remaining = filtered_tests;
833 let (tx, rx) = channel::<MonitorMsg>();
835 while pending > 0 || !remaining.is_empty() {
836 while pending < concurrency && !remaining.is_empty() {
837 let test = remaining.pop().unwrap();
838 if concurrency == 1 {
839 // We are doing one test at a time so we can print the name
840 // of the test before we run it. Useful for debugging tests
841 // that hang forever.
842 try!(callback(TeWait(test.desc.clone(), test.testfn.padding())));
844 run_test(opts, !opts.run_tests, test, tx.clone());
848 let (desc, result, stdout) = rx.recv().unwrap();
849 if concurrency != 1 {
850 try!(callback(TeWait(desc.clone(), PadNone)));
852 try!(callback(TeResult(desc, result, stdout)));
856 if opts.bench_benchmarks {
857 // All benchmarks run at the end, in serial.
858 // (this includes metric fns)
859 for b in filtered_benchs_and_metrics {
860 try!(callback(TeWait(b.desc.clone(), b.testfn.padding())));
861 run_test(opts, false, b, tx.clone());
862 let (test, result, stdout) = rx.recv().unwrap();
863 try!(callback(TeResult(test, result, stdout)));
870 fn get_concurrency() -> usize {
871 return match env::var("RUST_TEST_THREADS") {
873 let opt_n: Option<usize> = s.parse().ok();
875 Some(n) if n > 0 => n,
876 _ => panic!("RUST_TEST_THREADS is `{}`, should be a positive integer.", s)
879 Err(..) => num_cpus(),
884 fn num_cpus() -> usize {
887 wProcessorArchitecture: u16,
890 lpMinimumApplicationAddress: *mut u8,
891 lpMaximumApplicationAddress: *mut u8,
892 dwActiveProcessorMask: *mut u8,
893 dwNumberOfProcessors: u32,
894 dwProcessorType: u32,
895 dwAllocationGranularity: u32,
896 wProcessorLevel: u16,
897 wProcessorRevision: u16,
900 fn GetSystemInfo(info: *mut SYSTEM_INFO) -> i32;
903 let mut sysinfo = std::mem::zeroed();
904 GetSystemInfo(&mut sysinfo);
905 sysinfo.dwNumberOfProcessors as usize
910 fn num_cpus() -> usize {
911 extern { fn rust_get_num_cpus() -> libc::uintptr_t; }
912 unsafe { rust_get_num_cpus() as usize }
916 pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
917 let mut filtered = tests;
919 // Remove tests that don't match the test filter
920 filtered = match opts.filter {
922 Some(ref filter) => {
923 filtered.into_iter().filter(|test| {
924 test.desc.name.as_slice().contains(&filter[..])
929 // Maybe pull out the ignored tests and unignore them
930 filtered = if !opts.run_ignored {
933 fn filter(test: TestDescAndFn) -> Option<TestDescAndFn> {
934 if test.desc.ignore {
935 let TestDescAndFn {desc, testfn} = test;
937 desc: TestDesc {ignore: false, ..desc},
944 filtered.into_iter().filter_map(filter).collect()
947 // Sort the tests alphabetically
948 filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));
953 pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
954 // convert benchmarks to tests, if we're not benchmarking them
955 tests.into_iter().map(|x| {
956 let testfn = match x.testfn {
957 DynBenchFn(bench) => {
958 DynTestFn(Box::new(move || bench::run_once(|b| bench.run(b))))
960 StaticBenchFn(benchfn) => {
961 DynTestFn(Box::new(move || bench::run_once(|b| benchfn(b))))
965 TestDescAndFn { desc: x.desc, testfn: testfn }
969 pub fn run_test(opts: &TestOpts,
972 monitor_ch: Sender<MonitorMsg>) {
974 let TestDescAndFn {desc, testfn} = test;
976 if force_ignore || desc.ignore {
977 monitor_ch.send((desc, TrIgnored, Vec::new())).unwrap();
981 fn run_test_inner(desc: TestDesc,
982 monitor_ch: Sender<MonitorMsg>,
984 testfn: Box<FnBox() + Send>) {
985 struct Sink(Arc<Mutex<Vec<u8>>>);
986 impl Write for Sink {
987 fn write(&mut self, data: &[u8]) -> io::Result<usize> {
988 Write::write(&mut *self.0.lock().unwrap(), data)
990 fn flush(&mut self) -> io::Result<()> { Ok(()) }
993 thread::spawn(move || {
994 let data = Arc::new(Mutex::new(Vec::new()));
995 let data2 = data.clone();
996 let cfg = thread::Builder::new().name(match desc.name {
997 DynTestName(ref name) => name.clone(),
998 StaticTestName(name) => name.to_owned(),
1001 let result_guard = cfg.spawn(move || {
1003 io::set_print(box Sink(data2.clone()));
1004 io::set_panic(box Sink(data2));
1008 let test_result = calc_result(&desc, result_guard.join());
1009 let stdout = data.lock().unwrap().to_vec();
1010 monitor_ch.send((desc.clone(), test_result, stdout)).unwrap();
1015 DynBenchFn(bencher) => {
1016 let bs = ::bench::benchmark(|harness| bencher.run(harness));
1017 monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
1020 StaticBenchFn(benchfn) => {
1021 let bs = ::bench::benchmark(|harness| (benchfn.clone())(harness));
1022 monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
1026 let mut mm = MetricMap::new();
1027 f.call_box((&mut mm,));
1028 monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
1031 StaticMetricFn(f) => {
1032 let mut mm = MetricMap::new();
1034 monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
1037 DynTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture, f),
1038 StaticTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture,
1043 fn calc_result(desc: &TestDesc, task_result: Result<(), Box<Any+Send>>) -> TestResult {
1044 match (&desc.should_panic, task_result) {
1045 (&ShouldPanic::No, Ok(())) |
1046 (&ShouldPanic::Yes, Err(_)) => TrOk,
1047 (&ShouldPanic::YesWithMessage(msg), Err(ref err))
1048 if err.downcast_ref::<String>()
1050 .or_else(|| err.downcast_ref::<&'static str>().map(|e| *e))
1051 .map(|e| e.contains(msg))
1052 .unwrap_or(false) => TrOk,
1059 pub fn new() -> MetricMap {
1060 MetricMap(BTreeMap::new())
1063 /// Insert a named `value` (+/- `noise`) metric into the map. The value
1064 /// must be non-negative. The `noise` indicates the uncertainty of the
1065 /// metric, which doubles as the "noise range" of acceptable
1066 /// pairwise-regressions on this named value, when comparing from one
1067 /// metric to the next using `compare_to_old`.
1069 /// If `noise` is positive, then it means this metric is of a value
1070 /// you want to see grow smaller, so a change larger than `noise` in the
1071 /// positive direction represents a regression.
1073 /// If `noise` is negative, then it means this metric is of a value
1074 /// you want to see grow larger, so a change larger than `noise` in the
1075 /// negative direction represents a regression.
1076 pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
1081 let MetricMap(ref mut map) = *self;
1082 map.insert(name.to_owned(), m);
1085 pub fn fmt_metrics(&self) -> String {
1086 let MetricMap(ref mm) = *self;
1087 let v : Vec<String> = mm.iter()
1088 .map(|(k,v)| format!("{}: {} (+/- {})", *k,
1098 /// A function that is opaque to the optimizer, to allow benchmarks to
1099 /// pretend to use outputs to assist in avoiding dead-code
1102 /// This function is a no-op, and does not even read from `dummy`.
1103 #[cfg(not(all(target_os = "nacl", target_arch = "le32")))]
1104 pub fn black_box<T>(dummy: T) -> T {
1105 // we need to "use" the argument in some way LLVM can't
1107 unsafe {asm!("" : : "r"(&dummy))}
1110 #[cfg(all(target_os = "nacl", target_arch = "le32"))]
1112 pub fn black_box<T>(dummy: T) -> T { dummy }
1116 /// Callback for benchmark functions to run in their body.
1117 pub fn iter<T, F>(&mut self, mut inner: F) where F: FnMut() -> T {
1118 let start = Instant::now();
1119 let k = self.iterations;
1123 self.dur = start.elapsed();
1126 pub fn ns_elapsed(&mut self) -> u64 {
1127 self.dur.as_secs() * 1_000_000_000 + (self.dur.subsec_nanos() as u64)
1130 pub fn ns_per_iter(&mut self) -> u64 {
1131 if self.iterations == 0 {
1134 self.ns_elapsed() / cmp::max(self.iterations, 1)
1138 pub fn bench_n<F>(&mut self, n: u64, f: F) where F: FnOnce(&mut Bencher) {
1139 self.iterations = n;
1143 // This is a more statistics-driven benchmark algorithm
1144 pub fn auto_bench<F>(&mut self, mut f: F) -> stats::Summary where F: FnMut(&mut Bencher) {
1145 // Initial bench run to get ballpark figure.
1147 self.bench_n(n, |x| f(x));
1149 // Try to estimate iter count for 1ms falling back to 1m
1150 // iterations if first run took < 1ns.
1151 if self.ns_per_iter() == 0 {
1154 n = 1_000_000 / cmp::max(self.ns_per_iter(), 1);
1156 // if the first run took more than 1ms we don't want to just
1157 // be left doing 0 iterations on every loop. The unfortunate
1158 // side effect of not being able to do as many runs is
1159 // automatically handled by the statistical analysis below
1160 // (i.e. larger error bars).
1161 if n == 0 { n = 1; }
1163 let mut total_run = Duration::new(0, 0);
1164 let samples : &mut [f64] = &mut [0.0_f64; 50];
1166 let loop_start = Instant::now();
1168 for p in &mut *samples {
1169 self.bench_n(n, |x| f(x));
1170 *p = self.ns_per_iter() as f64;
1173 stats::winsorize(samples, 5.0);
1174 let summ = stats::Summary::new(samples);
1176 for p in &mut *samples {
1177 self.bench_n(5 * n, |x| f(x));
1178 *p = self.ns_per_iter() as f64;
1181 stats::winsorize(samples, 5.0);
1182 let summ5 = stats::Summary::new(samples);
1183 let loop_run = loop_start.elapsed();
1185 // If we've run for 100ms and seem to have converged to a
1187 if loop_run > Duration::from_millis(100) &&
1188 summ.median_abs_dev_pct < 1.0 &&
1189 summ.median - summ5.median < summ5.median_abs_dev {
1193 total_run = total_run + loop_run;
1194 // Longest we ever run for is 3s.
1195 if total_run > Duration::from_secs(3) {
1199 // If we overflow here just return the results so far. We check a
1200 // multiplier of 10 because we're about to multiply by 2 and the
1201 // next iteration of the loop will also multiply by 5 (to calculate
1202 // the summ5 result)
1203 n = match n.checked_mul(10) {
1205 None => return summ5,
1213 use std::time::Duration;
1214 use super::{Bencher, BenchSamples};
1216 pub fn benchmark<F>(f: F) -> BenchSamples where F: FnMut(&mut Bencher) {
1217 let mut bs = Bencher {
1219 dur: Duration::new(0, 0),
1223 let ns_iter_summ = bs.auto_bench(f);
1225 let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
1226 let iter_s = 1_000_000_000 / ns_iter;
1227 let mb_s = (bs.bytes * iter_s) / 1_000_000;
1230 ns_iter_summ: ns_iter_summ,
1235 pub fn run_once<F>(f: F) where F: FnOnce(&mut Bencher) {
1236 let mut bs = Bencher {
1238 dur: Duration::new(0, 0),
1247 use test::{TrFailed, TrIgnored, TrOk, filter_tests, parse_opts,
1248 TestDesc, TestDescAndFn, TestOpts, run_test,
1250 StaticTestName, DynTestName, DynTestFn, ShouldPanic};
1251 use std::sync::mpsc::channel;
1254 pub fn do_not_run_ignored_tests() {
1255 fn f() { panic!(); }
1256 let desc = TestDescAndFn {
1258 name: StaticTestName("whatever"),
1260 should_panic: ShouldPanic::No,
1262 testfn: DynTestFn(Box::new(move|| f())),
1264 let (tx, rx) = channel();
1265 run_test(&TestOpts::new(), false, desc, tx);
1266 let (_, res, _) = rx.recv().unwrap();
1267 assert!(res != TrOk);
1271 pub fn ignored_tests_result_in_ignored() {
1273 let desc = TestDescAndFn {
1275 name: StaticTestName("whatever"),
1277 should_panic: ShouldPanic::No,
1279 testfn: DynTestFn(Box::new(move|| f())),
1281 let (tx, rx) = channel();
1282 run_test(&TestOpts::new(), false, desc, tx);
1283 let (_, res, _) = rx.recv().unwrap();
1284 assert!(res == TrIgnored);
1288 fn test_should_panic() {
1289 fn f() { panic!(); }
1290 let desc = TestDescAndFn {
1292 name: StaticTestName("whatever"),
1294 should_panic: ShouldPanic::Yes,
1296 testfn: DynTestFn(Box::new(move|| f())),
1298 let (tx, rx) = channel();
1299 run_test(&TestOpts::new(), false, desc, tx);
1300 let (_, res, _) = rx.recv().unwrap();
1301 assert!(res == TrOk);
1305 fn test_should_panic_good_message() {
1306 fn f() { panic!("an error message"); }
1307 let desc = TestDescAndFn {
1309 name: StaticTestName("whatever"),
1311 should_panic: ShouldPanic::YesWithMessage("error message"),
1313 testfn: DynTestFn(Box::new(move|| f())),
1315 let (tx, rx) = channel();
1316 run_test(&TestOpts::new(), false, desc, tx);
1317 let (_, res, _) = rx.recv().unwrap();
1318 assert!(res == TrOk);
1322 fn test_should_panic_bad_message() {
1323 fn f() { panic!("an error message"); }
1324 let desc = TestDescAndFn {
1326 name: StaticTestName("whatever"),
1328 should_panic: ShouldPanic::YesWithMessage("foobar"),
1330 testfn: DynTestFn(Box::new(move|| f())),
1332 let (tx, rx) = channel();
1333 run_test(&TestOpts::new(), false, desc, tx);
1334 let (_, res, _) = rx.recv().unwrap();
1335 assert!(res == TrFailed);
1339 fn test_should_panic_but_succeeds() {
1341 let desc = TestDescAndFn {
1343 name: StaticTestName("whatever"),
1345 should_panic: ShouldPanic::Yes,
1347 testfn: DynTestFn(Box::new(move|| f())),
1349 let (tx, rx) = channel();
1350 run_test(&TestOpts::new(), false, desc, tx);
1351 let (_, res, _) = rx.recv().unwrap();
1352 assert!(res == TrFailed);
1356 fn parse_ignored_flag() {
1357 let args = vec!("progname".to_string(),
1358 "filter".to_string(),
1359 "--ignored".to_string());
1360 let opts = match parse_opts(&args) {
1362 _ => panic!("Malformed arg in parse_ignored_flag")
1364 assert!((opts.run_ignored));
1368 pub fn filter_for_ignored_option() {
1369 // When we run ignored tests the test filter should filter out all the
1370 // unignored tests and flip the ignore flag on the rest to false
1372 let mut opts = TestOpts::new();
1373 opts.run_tests = true;
1374 opts.run_ignored = true;
1379 name: StaticTestName("1"),
1381 should_panic: ShouldPanic::No,
1383 testfn: DynTestFn(Box::new(move|| {})),
1387 name: StaticTestName("2"),
1389 should_panic: ShouldPanic::No,
1391 testfn: DynTestFn(Box::new(move|| {})),
1393 let filtered = filter_tests(&opts, tests);
1395 assert_eq!(filtered.len(), 1);
1396 assert_eq!(filtered[0].desc.name.to_string(),
1398 assert!(filtered[0].desc.ignore == false);
1402 pub fn sort_tests() {
1403 let mut opts = TestOpts::new();
1404 opts.run_tests = true;
1407 vec!("sha1::test".to_string(),
1408 "isize::test_to_str".to_string(),
1409 "isize::test_pow".to_string(),
1410 "test::do_not_run_ignored_tests".to_string(),
1411 "test::ignored_tests_result_in_ignored".to_string(),
1412 "test::first_free_arg_should_be_a_filter".to_string(),
1413 "test::parse_ignored_flag".to_string(),
1414 "test::filter_for_ignored_option".to_string(),
1415 "test::sort_tests".to_string());
1419 let mut tests = Vec::new();
1420 for name in &names {
1421 let test = TestDescAndFn {
1423 name: DynTestName((*name).clone()),
1425 should_panic: ShouldPanic::No,
1427 testfn: DynTestFn(Box::new(testfn)),
1433 let filtered = filter_tests(&opts, tests);
1436 vec!("isize::test_pow".to_string(),
1437 "isize::test_to_str".to_string(),
1438 "sha1::test".to_string(),
1439 "test::do_not_run_ignored_tests".to_string(),
1440 "test::filter_for_ignored_option".to_string(),
1441 "test::first_free_arg_should_be_a_filter".to_string(),
1442 "test::ignored_tests_result_in_ignored".to_string(),
1443 "test::parse_ignored_flag".to_string(),
1444 "test::sort_tests".to_string());
1446 for (a, b) in expected.iter().zip(filtered) {
1447 assert!(*a == b.desc.name.to_string());
1452 pub fn test_metricmap_compare() {
1453 let mut m1 = MetricMap::new();
1454 let mut m2 = MetricMap::new();
1455 m1.insert_metric("in-both-noise", 1000.0, 200.0);
1456 m2.insert_metric("in-both-noise", 1100.0, 200.0);
1458 m1.insert_metric("in-first-noise", 1000.0, 2.0);
1459 m2.insert_metric("in-second-noise", 1000.0, 2.0);
1461 m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
1462 m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);
1464 m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
1465 m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);
1467 m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
1468 m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);
1470 m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
1471 m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);