1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 //! Support code for rustc's built in unit-test and micro-benchmarking
14 //! Almost all user code will only be interested in `Bencher` and
15 //! `black_box`. All other interactions (such as writing tests and
16 //! benchmarks themselves) should be done via the `#[test]` and
17 //! `#[bench]` attributes.
19 //! See the [Testing Chapter](../book/testing.html) of the book for more details.
21 // Currently, not much of this is meant for users. It is intended to
22 // support the simplest interface possible for representing and
23 // running tests while providing a base that other test frameworks may
26 // Do not remove on snapshot creation. Needed for bootstrap. (Issue #22364)
27 #![cfg_attr(stage0, feature(custom_attribute))]
28 #![crate_name = "test"]
29 #![unstable(feature = "test")]
31 #![crate_type = "rlib"]
32 #![crate_type = "dylib"]
33 #![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
34 html_favicon_url = "http://www.rust-lang.org/favicon.ico",
35 html_root_url = "http://doc.rust-lang.org/nightly/")]
38 #![feature(box_syntax)]
39 #![feature(collections)]
42 #![feature(rustc_private)]
43 #![feature(staged_api)]
46 #![feature(set_stdio)]
49 #![cfg_attr(test, feature(old_io))]
52 extern crate serialize;
53 extern crate "serialize" as rustc_serialize;
57 pub use self::TestFn::*;
58 pub use self::ColorConfig::*;
59 pub use self::TestResult::*;
60 pub use self::TestName::*;
61 use self::TestEvent::*;
62 use self::NamePadding::*;
63 use self::OutputLocation::*;
66 use getopts::{OptGroup, optflag, optopt};
67 use serialize::Encodable;
69 use term::color::{Color, RED, YELLOW, GREEN, CYAN};
73 use std::collections::BTreeMap;
77 use std::io::prelude::*;
79 use std::iter::repeat;
80 use std::num::{Float, Int};
81 use std::path::{PathBuf};
82 use std::sync::mpsc::{channel, Sender};
83 use std::sync::{Arc, Mutex};
85 use std::thunk::{Thunk, Invoke};
86 use std::time::Duration;
88 // to be used by rustc to compile tests in libtest
90 pub use {Bencher, TestName, TestResult, TestDesc,
91 TestDescAndFn, TestOpts, TrFailed, TrIgnored, TrOk,
93 StaticTestFn, StaticTestName, DynTestName, DynTestFn,
94 run_test, test_main, test_main_static, filter_tests,
95 parse_opts, StaticBenchFn, ShouldPanic};
100 // The name of a test. By convention this follows the rules for rust
101 // paths; i.e. it should be a series of identifiers separated by double
102 // colons. This way if some test runner wants to arrange the tests
103 // hierarchically it may.
105 #[derive(Clone, PartialEq, Eq, Hash, Debug)]
107 StaticTestName(&'static str),
111 fn as_slice<'a>(&'a self) -> &'a str {
113 StaticTestName(s) => s,
114 DynTestName(ref s) => s
118 impl fmt::Display for TestName {
119 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
120 fmt::Display::fmt(self.as_slice(), f)
124 #[derive(Clone, Copy)]
132 fn padded_name(&self, column_count: uint, align: NamePadding) -> String {
133 let mut name = String::from_str(self.name.as_slice());
134 let fill = column_count.saturating_sub(name.len());
135 let mut pad = repeat(" ").take(fill).collect::<String>();
150 /// Represents a benchmark function.
151 pub trait TDynBenchFn {
152 fn run(&self, harness: &mut Bencher);
155 // A function that runs a test. If the function returns successfully,
156 // the test succeeds; if the function panics then the test fails. We
157 // may need to come up with a more clever definition of test in order
158 // to support isolation of tests into tasks.
161 StaticBenchFn(fn(&mut Bencher)),
162 StaticMetricFn(fn(&mut MetricMap)),
163 DynTestFn(Thunk<'static>),
164 DynMetricFn(Box<for<'a> Invoke<&'a mut MetricMap>+'static>),
165 DynBenchFn(Box<TDynBenchFn+'static>)
169 fn padding(&self) -> NamePadding {
171 &StaticTestFn(..) => PadNone,
172 &StaticBenchFn(..) => PadOnRight,
173 &StaticMetricFn(..) => PadOnRight,
174 &DynTestFn(..) => PadNone,
175 &DynMetricFn(..) => PadOnRight,
176 &DynBenchFn(..) => PadOnRight,
181 impl fmt::Debug for TestFn {
182 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
183 f.write_str(match *self {
184 StaticTestFn(..) => "StaticTestFn(..)",
185 StaticBenchFn(..) => "StaticBenchFn(..)",
186 StaticMetricFn(..) => "StaticMetricFn(..)",
187 DynTestFn(..) => "DynTestFn(..)",
188 DynMetricFn(..) => "DynMetricFn(..)",
189 DynBenchFn(..) => "DynBenchFn(..)"
194 /// Manager of the benchmarking runs.
196 /// This is fed into functions marked with `#[bench]` to allow for
197 /// set-up & tear-down before running a piece of code repeatedly via a
206 #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
207 pub enum ShouldPanic {
209 Yes(Option<&'static str>)
212 // The definition of a single test. A test runner will run a list of
214 #[derive(Clone, Debug, PartialEq, Eq, Hash)]
215 pub struct TestDesc {
218 pub should_panic: ShouldPanic,
// SAFETY(review): marker impl asserting TestDesc may be moved across threads
// so results can be sent over the monitor channel. Presumably required because
// some field is not auto-Send in this snapshot — TODO confirm against the
// (not fully visible here) field types of TestDesc.
221 unsafe impl Send for TestDesc {}
224 pub struct TestDescAndFn {
229 #[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Debug, Copy)]
236 pub fn new(value: f64, noise: f64) -> Metric {
237 Metric {value: value, noise: noise}
// Named performance metrics recorded by metric functions and benchmarks,
// keyed by metric name. A BTreeMap (rather than HashMap) keeps iteration —
// and therefore fmt_metrics output — deterministically sorted by name.
242 pub struct MetricMap(BTreeMap<String,Metric>);
244 impl Clone for MetricMap {
245 fn clone(&self) -> MetricMap {
246 let MetricMap(ref map) = *self;
247 MetricMap(map.clone())
251 // The default console test runner. It accepts the command line
252 // arguments and a vector of test_descs.
253 pub fn test_main(args: &[String], tests: Vec<TestDescAndFn> ) {
255 match parse_opts(args) {
257 Some(Err(msg)) => panic!("{:?}", msg),
260 match run_tests_console(&opts, tests) {
262 Ok(false) => panic!("Some tests failed"),
263 Err(e) => panic!("io error when running tests: {:?}", e),
267 // A variant optimized for invocation with a static test vector.
268 // This will panic (intentionally) when fed any dynamic tests, because
269 // it is copying the static values out into a dynamic vector and cannot
270 // copy dynamic values. It is doing this because from this point on
271 // a Vec<TestDescAndFn> is used in order to effect ownership-transfer
272 // semantics into parallel test runners, which in turn requires a Vec<_>
273 // rather than a &[].
274 pub fn test_main_static(args: env::Args, tests: &[TestDescAndFn]) {
275 let args = args.collect::<Vec<_>>();
276 let owned_tests = tests.iter().map(|t| {
278 StaticTestFn(f) => TestDescAndFn { testfn: StaticTestFn(f), desc: t.desc.clone() },
279 StaticBenchFn(f) => TestDescAndFn { testfn: StaticBenchFn(f), desc: t.desc.clone() },
280 _ => panic!("non-static tests passed to test::test_main_static")
283 test_main(&args, owned_tests)
287 pub enum ColorConfig {
293 pub struct TestOpts {
294 pub filter: Option<String>,
295 pub run_ignored: bool,
297 pub run_benchmarks: bool,
298 pub logfile: Option<PathBuf>,
300 pub color: ColorConfig,
305 fn new() -> TestOpts {
310 run_benchmarks: false,
318 /// Result of parsing the options.
// Ok carries the fully-resolved TestOpts; Err carries a human-readable
// message (e.g. a getopts failure). parse_opts additionally wraps this in
// Option, using None to signal that --help was printed and no run is wanted.
319 pub type OptRes = Result<TestOpts, String>;
321 fn optgroups() -> Vec<getopts::OptGroup> {
322 vec!(getopts::optflag("", "ignored", "Run ignored tests"),
323 getopts::optflag("", "test", "Run tests and not benchmarks"),
324 getopts::optflag("", "bench", "Run benchmarks instead of tests"),
325 getopts::optflag("h", "help", "Display this message (longer with --help)"),
326 getopts::optopt("", "logfile", "Write logs to the specified file instead \
328 getopts::optflag("", "nocapture", "don't capture stdout/stderr of each \
329 task, allow printing directly"),
330 getopts::optopt("", "color", "Configure coloring of output:
331 auto = colorize if stdout is a tty and tests are run on serially (default);
332 always = always colorize output;
333 never = never colorize output;", "auto|always|never"))
336 fn usage(binary: &str) {
337 let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
340 The FILTER regex is tested against the name of all tests to run, and
341 only those tests that match are run.
343 By default, all tests are run in parallel. This can be altered with the
344 RUST_TEST_THREADS environment variable when running tests (set it to 1).
346 All tests have their standard output and standard error captured by default.
347 This can be overridden with the --nocapture flag or the RUST_TEST_NOCAPTURE=1
348 environment variable. Logging is not captured by default.
352 #[test] - Indicates a function is a test to be run. This function
354 #[bench] - Indicates a function is a benchmark to be run. This
355 function takes one argument (test::Bencher).
356 #[should_panic] - This function (also labeled with #[test]) will only pass if
357 the code causes a panic (an assertion failure or panic!)
358 A message may be provided, which the failure string must
359 contain: #[should_panic(expected = "foo")].
360 #[ignore] - When applied to a function which is already attributed as a
361 test, then the test runner will ignore these tests during
362 normal test runs. Running with --ignored will run these
364 usage = getopts::usage(&message, &optgroups()));
367 // Parses command line arguments into test options
368 pub fn parse_opts(args: &[String]) -> Option<OptRes> {
369 let args_ = args.tail();
371 match getopts::getopts(args_, &optgroups()) {
373 Err(f) => return Some(Err(f.to_string()))
376 if matches.opt_present("h") { usage(&args[0]); return None; }
378 let filter = if matches.free.len() > 0 {
379 Some(matches.free[0].clone())
384 let run_ignored = matches.opt_present("ignored");
386 let logfile = matches.opt_str("logfile");
387 let logfile = logfile.map(|s| PathBuf::from(&s));
389 let run_benchmarks = matches.opt_present("bench");
390 let run_tests = ! run_benchmarks ||
391 matches.opt_present("test");
393 let mut nocapture = matches.opt_present("nocapture");
395 nocapture = env::var("RUST_TEST_NOCAPTURE").is_ok();
398 let color = match matches.opt_str("color").as_ref().map(|s| &**s) {
399 Some("auto") | None => AutoColor,
400 Some("always") => AlwaysColor,
401 Some("never") => NeverColor,
403 Some(v) => return Some(Err(format!("argument for --color must be \
404 auto, always, or never (was {})",
408 let test_opts = TestOpts {
410 run_ignored: run_ignored,
411 run_tests: run_tests,
412 run_benchmarks: run_benchmarks,
414 nocapture: nocapture,
421 #[derive(Clone, PartialEq)]
422 pub struct BenchSamples {
423 ns_iter_summ: stats::Summary<f64>,
427 #[derive(Clone, PartialEq)]
428 pub enum TestResult {
432 TrMetrics(MetricMap),
433 TrBench(BenchSamples),
// SAFETY(review): marker impl so a TestResult can travel in a MonitorMsg
// over the mpsc channel from runner threads to the console coordinator.
// Presumably needed because a contained type (TrBench/TrMetrics payload?)
// is not auto-Send in this snapshot — TODO confirm.
436 unsafe impl Send for TestResult {}
438 enum OutputLocation<T> {
439 Pretty(Box<term::Terminal<term::WriterWrapper> + Send>),
443 struct ConsoleTestState<T> {
444 log_out: Option<File>,
445 out: OutputLocation<T>,
453 failures: Vec<(TestDesc, Vec<u8> )> ,
454 max_name_len: uint, // number of columns to fill when aligning names
457 impl<T: Write> ConsoleTestState<T> {
458 pub fn new(opts: &TestOpts,
459 _: Option<T>) -> io::Result<ConsoleTestState<io::Stdout>> {
460 let log_out = match opts.logfile {
461 Some(ref path) => Some(try!(File::create(path))),
464 let out = match term::stdout() {
465 None => Raw(io::stdout()),
469 Ok(ConsoleTestState {
472 use_color: use_color(opts),
478 metrics: MetricMap::new(),
479 failures: Vec::new(),
484 pub fn write_ok(&mut self) -> io::Result<()> {
485 self.write_pretty("ok", term::color::GREEN)
488 pub fn write_failed(&mut self) -> io::Result<()> {
489 self.write_pretty("FAILED", term::color::RED)
492 pub fn write_ignored(&mut self) -> io::Result<()> {
493 self.write_pretty("ignored", term::color::YELLOW)
496 pub fn write_metric(&mut self) -> io::Result<()> {
497 self.write_pretty("metric", term::color::CYAN)
500 pub fn write_bench(&mut self) -> io::Result<()> {
501 self.write_pretty("bench", term::color::CYAN)
504 pub fn write_pretty(&mut self,
506 color: term::color::Color) -> io::Result<()> {
508 Pretty(ref mut term) => {
510 try!(term.fg(color));
512 try!(term.write_all(word.as_bytes()));
518 Raw(ref mut stdout) => {
519 try!(stdout.write_all(word.as_bytes()));
525 pub fn write_plain(&mut self, s: &str) -> io::Result<()> {
527 Pretty(ref mut term) => {
528 try!(term.write_all(s.as_bytes()));
531 Raw(ref mut stdout) => {
532 try!(stdout.write_all(s.as_bytes()));
538 pub fn write_run_start(&mut self, len: uint) -> io::Result<()> {
540 let noun = if len != 1 { "tests" } else { "test" };
541 self.write_plain(&format!("\nrunning {} {}\n", len, noun))
544 pub fn write_test_start(&mut self, test: &TestDesc,
545 align: NamePadding) -> io::Result<()> {
546 let name = test.padded_name(self.max_name_len, align);
547 self.write_plain(&format!("test {} ... ", name))
550 pub fn write_result(&mut self, result: &TestResult) -> io::Result<()> {
552 TrOk => self.write_ok(),
553 TrFailed => self.write_failed(),
554 TrIgnored => self.write_ignored(),
555 TrMetrics(ref mm) => {
556 try!(self.write_metric());
557 self.write_plain(&format!(": {}", mm.fmt_metrics()))
560 try!(self.write_bench());
562 try!(self.write_plain(&format!(": {}", fmt_bench_samples(bs))));
567 self.write_plain("\n")
570 pub fn write_log(&mut self, test: &TestDesc,
571 result: &TestResult) -> io::Result<()> {
575 let s = format!("{} {}\n", match *result {
576 TrOk => "ok".to_string(),
577 TrFailed => "failed".to_string(),
578 TrIgnored => "ignored".to_string(),
579 TrMetrics(ref mm) => mm.fmt_metrics(),
580 TrBench(ref bs) => fmt_bench_samples(bs)
582 o.write_all(s.as_bytes())
587 pub fn write_failures(&mut self) -> io::Result<()> {
588 try!(self.write_plain("\nfailures:\n"));
589 let mut failures = Vec::new();
590 let mut fail_out = String::new();
591 for &(ref f, ref stdout) in &self.failures {
592 failures.push(f.name.to_string());
593 if stdout.len() > 0 {
594 fail_out.push_str(&format!("---- {} stdout ----\n\t", f.name));
595 let output = String::from_utf8_lossy(stdout);
596 fail_out.push_str(&output);
597 fail_out.push_str("\n");
600 if fail_out.len() > 0 {
601 try!(self.write_plain("\n"));
602 try!(self.write_plain(&fail_out));
605 try!(self.write_plain("\nfailures:\n"));
607 for name in &failures {
608 try!(self.write_plain(&format!(" {}\n", name)));
613 pub fn write_run_finish(&mut self) -> io::Result<bool> {
614 assert!(self.passed + self.failed + self.ignored + self.measured == self.total);
616 let success = self.failed == 0;
618 try!(self.write_failures());
621 try!(self.write_plain("\ntest result: "));
623 // There's no parallelism at this point so it's safe to use color
624 try!(self.write_ok());
626 try!(self.write_failed());
628 let s = format!(". {} passed; {} failed; {} ignored; {} measured\n\n",
629 self.passed, self.failed, self.ignored, self.measured);
630 try!(self.write_plain(&s));
635 pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
637 format!("{:>9} ns/iter (+/- {}) = {} MB/s",
638 bs.ns_iter_summ.median as uint,
639 (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint,
642 format!("{:>9} ns/iter (+/- {})",
643 bs.ns_iter_summ.median as uint,
644 (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint)
648 // A simple console test runner
649 pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn> ) -> io::Result<bool> {
651 fn callback<T: Write>(event: &TestEvent,
652 st: &mut ConsoleTestState<T>) -> io::Result<()> {
653 match (*event).clone() {
654 TeFiltered(ref filtered_tests) => st.write_run_start(filtered_tests.len()),
655 TeWait(ref test, padding) => st.write_test_start(test, padding),
656 TeResult(test, result, stdout) => {
657 try!(st.write_log(&test, &result));
658 try!(st.write_result(&result));
660 TrOk => st.passed += 1,
661 TrIgnored => st.ignored += 1,
663 let tname = test.name;
664 let MetricMap(mm) = mm;
667 .insert_metric(&format!("{}.{}",
676 st.metrics.insert_metric(test.name.as_slice(),
677 bs.ns_iter_summ.median,
678 bs.ns_iter_summ.max - bs.ns_iter_summ.min);
683 st.failures.push((test, stdout));
691 let mut st = try!(ConsoleTestState::new(opts, None::<io::Stdout>));
692 fn len_if_padded(t: &TestDescAndFn) -> uint {
693 match t.testfn.padding() {
695 PadOnLeft | PadOnRight => t.desc.name.as_slice().len(),
698 match tests.iter().max_by(|t|len_if_padded(*t)) {
700 let n = t.desc.name.as_slice();
701 st.max_name_len = n.len();
705 try!(run_tests(opts, tests, |x| callback(&x, &mut st)));
706 return st.write_run_finish();
710 fn should_sort_failures_before_printing_them() {
711 let test_a = TestDesc {
712 name: StaticTestName("a"),
714 should_panic: ShouldPanic::No
717 let test_b = TestDesc {
718 name: StaticTestName("b"),
720 should_panic: ShouldPanic::No
723 let mut st = ConsoleTestState {
725 out: Raw(Vec::new()),
733 metrics: MetricMap::new(),
734 failures: vec!((test_b, Vec::new()), (test_a, Vec::new()))
737 st.write_failures().unwrap();
738 let s = match st.out {
739 Raw(ref m) => String::from_utf8_lossy(&m[..]),
740 Pretty(_) => unreachable!()
743 let apos = s.find("a").unwrap();
744 let bpos = s.find("b").unwrap();
745 assert!(apos < bpos);
748 fn use_color(opts: &TestOpts) -> bool {
750 AutoColor => get_concurrency() == 1 && stdout_isatty(),
757 fn stdout_isatty() -> bool {
758 unsafe { libc::isatty(libc::STDOUT_FILENO) != 0 }
761 fn stdout_isatty() -> bool {
762 const STD_OUTPUT_HANDLE: libc::DWORD = -11;
764 fn GetStdHandle(which: libc::DWORD) -> libc::HANDLE;
765 fn GetConsoleMode(hConsoleHandle: libc::HANDLE,
766 lpMode: libc::LPDWORD) -> libc::BOOL;
769 let handle = GetStdHandle(STD_OUTPUT_HANDLE);
771 GetConsoleMode(handle, &mut out) != 0
777 TeFiltered(Vec<TestDesc> ),
778 TeWait(TestDesc, NamePadding),
779 TeResult(TestDesc, TestResult, Vec<u8> ),
// Message sent from a test-runner thread back to the coordinating thread:
// (test descriptor, outcome, captured stdout/stderr bytes for that test).
782 pub type MonitorMsg = (TestDesc, TestResult, Vec<u8> );
785 fn run_tests<F>(opts: &TestOpts,
786 tests: Vec<TestDescAndFn> ,
787 mut callback: F) -> io::Result<()> where
788 F: FnMut(TestEvent) -> io::Result<()>,
790 let filtered_tests = filter_tests(opts, tests);
791 let filtered_descs = filtered_tests.iter()
792 .map(|t| t.desc.clone())
795 try!(callback(TeFiltered(filtered_descs)));
797 let (filtered_tests, filtered_benchs_and_metrics): (Vec<_>, _) =
798 filtered_tests.into_iter().partition(|e| {
800 StaticTestFn(_) | DynTestFn(_) => true,
805 // It's tempting to just spawn all the tests at once, but since we have
806 // many tests that run in other processes we would be making a big mess.
807 let concurrency = get_concurrency();
809 let mut remaining = filtered_tests;
813 let (tx, rx) = channel::<MonitorMsg>();
815 while pending > 0 || !remaining.is_empty() {
816 while pending < concurrency && !remaining.is_empty() {
817 let test = remaining.pop().unwrap();
818 if concurrency == 1 {
819 // We are doing one test at a time so we can print the name
820 // of the test before we run it. Useful for debugging tests
821 // that hang forever.
822 try!(callback(TeWait(test.desc.clone(), test.testfn.padding())));
824 run_test(opts, !opts.run_tests, test, tx.clone());
828 let (desc, result, stdout) = rx.recv().unwrap();
829 if concurrency != 1 {
830 try!(callback(TeWait(desc.clone(), PadNone)));
832 try!(callback(TeResult(desc, result, stdout)));
836 // All benchmarks run at the end, in serial.
837 // (this includes metric fns)
838 for b in filtered_benchs_and_metrics {
839 try!(callback(TeWait(b.desc.clone(), b.testfn.padding())));
840 run_test(opts, !opts.run_benchmarks, b, tx.clone());
841 let (test, result, stdout) = rx.recv().unwrap();
842 try!(callback(TeResult(test, result, stdout)));
848 fn get_concurrency() -> uint {
849 match env::var("RUST_TEST_THREADS") {
851 let opt_n: Option<uint> = s.parse().ok();
853 Some(n) if n > 0 => n,
854 _ => panic!("RUST_TEST_THREADS is `{}`, should be a positive integer.", s)
858 if std::rt::util::limit_thread_creation_due_to_osx_and_valgrind() {
867 pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
868 let mut filtered = tests;
870 // Remove tests that don't match the test filter
871 filtered = match opts.filter {
873 Some(ref filter) => {
874 filtered.into_iter().filter(|test| {
875 test.desc.name.as_slice().contains(&filter[..])
880 // Maybe pull out the ignored test and unignore them
881 filtered = if !opts.run_ignored {
884 fn filter(test: TestDescAndFn) -> Option<TestDescAndFn> {
885 if test.desc.ignore {
886 let TestDescAndFn {desc, testfn} = test;
888 desc: TestDesc {ignore: false, ..desc},
895 filtered.into_iter().filter_map(|x| filter(x)).collect()
898 // Sort the tests alphabetically
899 filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));
904 pub fn run_test(opts: &TestOpts,
907 monitor_ch: Sender<MonitorMsg>) {
909 let TestDescAndFn {desc, testfn} = test;
911 if force_ignore || desc.ignore {
912 monitor_ch.send((desc, TrIgnored, Vec::new())).unwrap();
916 fn run_test_inner(desc: TestDesc,
917 monitor_ch: Sender<MonitorMsg>,
919 testfn: Thunk<'static>) {
920 struct Sink(Arc<Mutex<Vec<u8>>>);
921 impl Write for Sink {
922 fn write(&mut self, data: &[u8]) -> io::Result<usize> {
923 Write::write(&mut *self.0.lock().unwrap(), data)
925 fn flush(&mut self) -> io::Result<()> { Ok(()) }
928 thread::spawn(move || {
929 let data = Arc::new(Mutex::new(Vec::new()));
930 let data2 = data.clone();
931 let cfg = thread::Builder::new().name(match desc.name {
932 DynTestName(ref name) => name.clone().to_string(),
933 StaticTestName(name) => name.to_string(),
936 let result_guard = cfg.spawn(move || {
938 io::set_print(box Sink(data2.clone()));
939 io::set_panic(box Sink(data2));
943 let test_result = calc_result(&desc, result_guard.join());
944 let stdout = data.lock().unwrap().to_vec();
945 monitor_ch.send((desc.clone(), test_result, stdout)).unwrap();
950 DynBenchFn(bencher) => {
951 let bs = ::bench::benchmark(|harness| bencher.run(harness));
952 monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
955 StaticBenchFn(benchfn) => {
956 let bs = ::bench::benchmark(|harness| (benchfn.clone())(harness));
957 monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
961 let mut mm = MetricMap::new();
963 monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
966 StaticMetricFn(f) => {
967 let mut mm = MetricMap::new();
969 monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
972 DynTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture, f),
973 StaticTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture,
974 Thunk::new(move|| f()))
978 fn calc_result(desc: &TestDesc, task_result: Result<(), Box<Any+Send>>) -> TestResult {
979 match (&desc.should_panic, task_result) {
980 (&ShouldPanic::No, Ok(())) |
981 (&ShouldPanic::Yes(None), Err(_)) => TrOk,
982 (&ShouldPanic::Yes(Some(msg)), Err(ref err))
983 if err.downcast_ref::<String>()
985 .or_else(|| err.downcast_ref::<&'static str>().map(|e| *e))
986 .map(|e| e.contains(msg))
987 .unwrap_or(false) => TrOk,
994 pub fn new() -> MetricMap {
995 MetricMap(BTreeMap::new())
998 /// Insert a named `value` (+/- `noise`) metric into the map. The value
999 /// must be non-negative. The `noise` indicates the uncertainty of the
1000 /// metric, which doubles as the "noise range" of acceptable
1001 /// pairwise-regressions on this named value, when comparing from one
1002 /// metric to the next using `compare_to_old`.
1004 /// If `noise` is positive, then it means this metric is of a value
1005 /// you want to see grow smaller, so a change larger than `noise` in the
1006 /// positive direction represents a regression.
1008 /// If `noise` is negative, then it means this metric is of a value
1009 /// you want to see grow larger, so a change larger than `noise` in the
1010 /// negative direction represents a regression.
1011 pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
1016 let MetricMap(ref mut map) = *self;
1017 map.insert(name.to_string(), m);
1020 pub fn fmt_metrics(&self) -> String {
1021 let MetricMap(ref mm) = *self;
1022 let v : Vec<String> = mm.iter()
1023 .map(|(k,v)| format!("{}: {} (+/- {})", *k,
1033 /// A function that is opaque to the optimizer, to allow benchmarks to
1034 /// pretend to use outputs to assist in avoiding dead-code
1037 /// This function is a no-op, and does not even read from `dummy`.
1038 pub fn black_box<T>(dummy: T) -> T {
1039 // we need to "use" the argument in some way LLVM can't
1041 unsafe {asm!("" : : "r"(&dummy))}
1047 /// Callback for benchmark functions to run in their body.
1048 pub fn iter<T, F>(&mut self, mut inner: F) where F: FnMut() -> T {
1049 self.dur = Duration::span(|| {
1050 let k = self.iterations;
1057 pub fn ns_elapsed(&mut self) -> u64 {
1058 self.dur.num_nanoseconds().unwrap() as u64
1061 pub fn ns_per_iter(&mut self) -> u64 {
1062 if self.iterations == 0 {
1065 self.ns_elapsed() / cmp::max(self.iterations, 1)
1069 pub fn bench_n<F>(&mut self, n: u64, f: F) where F: FnOnce(&mut Bencher) {
1070 self.iterations = n;
1074 // This is a more statistics-driven benchmark algorithm
1075 pub fn auto_bench<F>(&mut self, mut f: F) -> stats::Summary<f64> where F: FnMut(&mut Bencher) {
1076 // Initial bench run to get ballpark figure.
1078 self.bench_n(n, |x| f(x));
1080 // Try to estimate iter count for 1ms falling back to 1m
1081 // iterations if first run took < 1ns.
1082 if self.ns_per_iter() == 0 {
1085 n = 1_000_000 / cmp::max(self.ns_per_iter(), 1);
1087 // if the first run took more than 1ms we don't want to just
1088 // be left doing 0 iterations on every loop. The unfortunate
1089 // side effect of not being able to do as many runs is
1090 // automatically handled by the statistical analysis below
1091 // (i.e. larger error bars).
1092 if n == 0 { n = 1; }
1094 let mut total_run = Duration::nanoseconds(0);
1095 let samples : &mut [f64] = &mut [0.0_f64; 50];
1097 let mut summ = None;
1098 let mut summ5 = None;
1100 let loop_run = Duration::span(|| {
1102 for p in &mut *samples {
1103 self.bench_n(n, |x| f(x));
1104 *p = self.ns_per_iter() as f64;
1107 stats::winsorize(samples, 5.0);
1108 summ = Some(stats::Summary::new(samples));
1110 for p in &mut *samples {
1111 self.bench_n(5 * n, |x| f(x));
1112 *p = self.ns_per_iter() as f64;
1115 stats::winsorize(samples, 5.0);
1116 summ5 = Some(stats::Summary::new(samples));
1118 let summ = summ.unwrap();
1119 let summ5 = summ5.unwrap();
1121 // If we've run for 100ms and seem to have converged to a
1123 if loop_run.num_milliseconds() > 100 &&
1124 summ.median_abs_dev_pct < 1.0 &&
1125 summ.median - summ5.median < summ5.median_abs_dev {
1129 total_run = total_run + loop_run;
1130 // Longest we ever run for is 3s.
1131 if total_run.num_seconds() > 3 {
1135 // If we overflow here just return the results so far. We check a
1136 // multiplier of 10 because we're about to multiply by 2 and the
1137 // next iteration of the loop will also multiply by 5 (to calculate
1138 // the summ5 result)
1139 n = match n.checked_mul(10) {
1141 None => return summ5,
1149 use std::time::Duration;
1150 use super::{Bencher, BenchSamples};
1152 pub fn benchmark<F>(f: F) -> BenchSamples where F: FnMut(&mut Bencher) {
1153 let mut bs = Bencher {
1155 dur: Duration::nanoseconds(0),
1159 let ns_iter_summ = bs.auto_bench(f);
1161 let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
1162 let iter_s = 1_000_000_000 / ns_iter;
1163 let mb_s = (bs.bytes * iter_s) / 1_000_000;
1166 ns_iter_summ: ns_iter_summ,
1174 use test::{TrFailed, TrIgnored, TrOk, filter_tests, parse_opts,
1175 TestDesc, TestDescAndFn, TestOpts, run_test,
1177 StaticTestName, DynTestName, DynTestFn, ShouldPanic};
1178 use std::thunk::Thunk;
1179 use std::sync::mpsc::channel;
1182 pub fn do_not_run_ignored_tests() {
1183 fn f() { panic!(); }
1184 let desc = TestDescAndFn {
1186 name: StaticTestName("whatever"),
1188 should_panic: ShouldPanic::No,
1190 testfn: DynTestFn(Thunk::new(move|| f())),
1192 let (tx, rx) = channel();
1193 run_test(&TestOpts::new(), false, desc, tx);
1194 let (_, res, _) = rx.recv().unwrap();
1195 assert!(res != TrOk);
1199 pub fn ignored_tests_result_in_ignored() {
1201 let desc = TestDescAndFn {
1203 name: StaticTestName("whatever"),
1205 should_panic: ShouldPanic::No,
1207 testfn: DynTestFn(Thunk::new(move|| f())),
1209 let (tx, rx) = channel();
1210 run_test(&TestOpts::new(), false, desc, tx);
1211 let (_, res, _) = rx.recv().unwrap();
1212 assert!(res == TrIgnored);
1216 fn test_should_panic() {
1217 fn f() { panic!(); }
1218 let desc = TestDescAndFn {
1220 name: StaticTestName("whatever"),
1222 should_panic: ShouldPanic::Yes(None)
1224 testfn: DynTestFn(Thunk::new(move|| f())),
1226 let (tx, rx) = channel();
1227 run_test(&TestOpts::new(), false, desc, tx);
1228 let (_, res, _) = rx.recv().unwrap();
1229 assert!(res == TrOk);
1233 fn test_should_panic_good_message() {
1234 fn f() { panic!("an error message"); }
1235 let desc = TestDescAndFn {
1237 name: StaticTestName("whatever"),
1239 should_panic: ShouldPanic::Yes(Some("error message"))
1241 testfn: DynTestFn(Thunk::new(move|| f())),
1243 let (tx, rx) = channel();
1244 run_test(&TestOpts::new(), false, desc, tx);
1245 let (_, res, _) = rx.recv().unwrap();
1246 assert!(res == TrOk);
1250 fn test_should_panic_bad_message() {
1251 fn f() { panic!("an error message"); }
1252 let desc = TestDescAndFn {
1254 name: StaticTestName("whatever"),
1256 should_panic: ShouldPanic::Yes(Some("foobar"))
1258 testfn: DynTestFn(Thunk::new(move|| f())),
1260 let (tx, rx) = channel();
1261 run_test(&TestOpts::new(), false, desc, tx);
1262 let (_, res, _) = rx.recv().unwrap();
1263 assert!(res == TrFailed);
1267 fn test_should_panic_but_succeeds() {
1269 let desc = TestDescAndFn {
1271 name: StaticTestName("whatever"),
1273 should_panic: ShouldPanic::Yes(None)
1275 testfn: DynTestFn(Thunk::new(move|| f())),
1277 let (tx, rx) = channel();
1278 run_test(&TestOpts::new(), false, desc, tx);
1279 let (_, res, _) = rx.recv().unwrap();
1280 assert!(res == TrFailed);
1284 fn parse_ignored_flag() {
1285 let args = vec!("progname".to_string(),
1286 "filter".to_string(),
1287 "--ignored".to_string());
1288 let opts = match parse_opts(&args) {
1290 _ => panic!("Malformed arg in parse_ignored_flag")
1292 assert!((opts.run_ignored));
1296 pub fn filter_for_ignored_option() {
1297 // When we run ignored tests the test filter should filter out all the
1298 // unignored tests and flip the ignore flag on the rest to false
1300 let mut opts = TestOpts::new();
1301 opts.run_tests = true;
1302 opts.run_ignored = true;
1307 name: StaticTestName("1"),
1309 should_panic: ShouldPanic::No,
1311 testfn: DynTestFn(Thunk::new(move|| {})),
1315 name: StaticTestName("2"),
1317 should_panic: ShouldPanic::No,
1319 testfn: DynTestFn(Thunk::new(move|| {})),
1321 let filtered = filter_tests(&opts, tests);
1323 assert_eq!(filtered.len(), 1);
1324 assert_eq!(filtered[0].desc.name.to_string(),
1326 assert!(filtered[0].desc.ignore == false);
1330 pub fn sort_tests() {
1331 let mut opts = TestOpts::new();
1332 opts.run_tests = true;
1335 vec!("sha1::test".to_string(),
1336 "int::test_to_str".to_string(),
1337 "int::test_pow".to_string(),
1338 "test::do_not_run_ignored_tests".to_string(),
1339 "test::ignored_tests_result_in_ignored".to_string(),
1340 "test::first_free_arg_should_be_a_filter".to_string(),
1341 "test::parse_ignored_flag".to_string(),
1342 "test::filter_for_ignored_option".to_string(),
1343 "test::sort_tests".to_string());
1347 let mut tests = Vec::new();
1348 for name in &names {
1349 let test = TestDescAndFn {
1351 name: DynTestName((*name).clone()),
1353 should_panic: ShouldPanic::No,
1355 testfn: DynTestFn(Thunk::new(testfn)),
1361 let filtered = filter_tests(&opts, tests);
1364 vec!("int::test_pow".to_string(),
1365 "int::test_to_str".to_string(),
1366 "sha1::test".to_string(),
1367 "test::do_not_run_ignored_tests".to_string(),
1368 "test::filter_for_ignored_option".to_string(),
1369 "test::first_free_arg_should_be_a_filter".to_string(),
1370 "test::ignored_tests_result_in_ignored".to_string(),
1371 "test::parse_ignored_flag".to_string(),
1372 "test::sort_tests".to_string());
1374 for (a, b) in expected.iter().zip(filtered.iter()) {
1375 assert!(*a == b.desc.name.to_string());
1380 pub fn test_metricmap_compare() {
1381 let mut m1 = MetricMap::new();
1382 let mut m2 = MetricMap::new();
1383 m1.insert_metric("in-both-noise", 1000.0, 200.0);
1384 m2.insert_metric("in-both-noise", 1100.0, 200.0);
1386 m1.insert_metric("in-first-noise", 1000.0, 2.0);
1387 m2.insert_metric("in-second-noise", 1000.0, 2.0);
1389 m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
1390 m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);
1392 m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
1393 m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);
1395 m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
1396 m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);
1398 m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
1399 m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);