// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! Support code for rustc's built in unit-test and micro-benchmarking
//! framework.
//!
//! Almost all user code will only be interested in `Bencher` and
//! `black_box`. All other interactions (such as writing tests and
//! benchmarks themselves) should be done via the `#[test]` and
//! `#[bench]` attributes.
//!
//! See the [Testing Chapter](../book/testing.html) of the book for more details.
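//!
//! # Example
//!
//! A minimal sketch of the intended usage; the function names here are
//! illustrative, not part of this crate:
//!
//! ```rust
//! #[test]
//! fn arithmetic_works() {
//!     assert_eq!(2 + 2, 4);
//! }
//!
//! #[bench]
//! fn bench_xor_1000_ints(b: &mut test::Bencher) {
//!     b.iter(|| (0..1000).fold(0, |old, new| old ^ new));
//! }
//! ```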
// Currently, not much of this is meant for users. It is intended to
// support the simplest interface possible for representing and
// running tests while providing a base that other test frameworks may
// build off of.
// Do not remove on snapshot creation. Needed for bootstrap. (Issue #22364)
#![cfg_attr(stage0, feature(custom_attribute))]
#![crate_name = "test"]
#![unstable(feature = "test")]
#![crate_type = "rlib"]
#![crate_type = "dylib"]
#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
       html_favicon_url = "http://www.rust-lang.org/favicon.ico",
       html_root_url = "http://doc.rust-lang.org/nightly/")]

#![feature(box_syntax)]
#![feature(collections)]
#![feature(rustc_private)]
#![feature(staged_api)]
extern crate getopts;
extern crate serialize;
extern crate "serialize" as rustc_serialize;
extern crate term;
pub use self::TestFn::*;
pub use self::ColorConfig::*;
pub use self::TestResult::*;
pub use self::TestName::*;
use self::TestEvent::*;
use self::NamePadding::*;
use self::OutputLocation::*;
use getopts::{OptGroup, optflag, optopt};
use serialize::Encodable;
use term::color::{Color, RED, YELLOW, GREEN, CYAN};

use std::any::Any;
use std::cmp;
use std::collections::BTreeMap;
use std::env;
use std::fmt;
use std::fs::File;
use std::io::{self, Write};
use std::iter::repeat;
use std::num::{Float, Int};
use std::old_io::stdio::StdWriter;
use std::old_io::{ChanReader, ChanWriter};
use std::old_io;
use std::path::{PathBuf};
use std::rt;
use std::sync::mpsc::{channel, Sender};
use std::thread;
use std::thunk::{Thunk, Invoke};
use std::time::Duration;
// to be used by rustc to compile tests in libtest
pub mod test {
    pub use {Bencher, TestName, TestResult, TestDesc,
             TestDescAndFn, TestOpts, TrFailed, TrIgnored, TrOk,
             Metric, MetricMap,
             StaticTestFn, StaticTestName, DynTestName, DynTestFn,
             run_test, test_main, test_main_static, filter_tests,
             parse_opts, StaticBenchFn, ShouldFail};
}

pub mod stats;
// The name of a test. By convention this follows the rules for rust
// paths; i.e. it should be a series of identifiers separated by double
// colons. This way if some test runner wants to arrange the tests
// hierarchically it may.
#[derive(Clone, PartialEq, Eq, Hash, Debug)]
pub enum TestName {
    StaticTestName(&'static str),
    DynTestName(String)
}
impl TestName {
    fn as_slice<'a>(&'a self) -> &'a str {
        match *self {
            StaticTestName(s) => s,
            DynTestName(ref s) => s
        }
    }
}
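// For instance, a test declared inside `mod int { mod pow { ... } }` would
// conventionally be named "int::pow::overflow", so a runner that wants a
// tree view can split the name on "::" to recover the module hierarchy.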
impl fmt::Display for TestName {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        fmt::Display::fmt(self.as_slice(), f)
    }
}
#[derive(Clone, Copy)]
enum NamePadding {
    PadNone,
    PadOnLeft,
    PadOnRight,
}

impl TestDesc {
    fn padded_name(&self, column_count: uint, align: NamePadding) -> String {
        let mut name = String::from_str(self.name.as_slice());
        let fill = column_count.saturating_sub(name.len());
        let mut pad = repeat(" ").take(fill).collect::<String>();
        match align {
            PadNone => name,
            PadOnLeft => {
                pad.push_str(&name);
                pad
            }
            PadOnRight => {
                name.push_str(&pad);
                name
            }
        }
    }
}
/// Represents a benchmark function.
pub trait TDynBenchFn {
    fn run(&self, harness: &mut Bencher);
}
// A function that runs a test. If the function returns successfully,
// the test succeeds; if the function panics then the test fails. We
// may need to come up with a more clever definition of test in order
// to support isolation of tests into tasks.
pub enum TestFn {
    StaticTestFn(fn()),
    StaticBenchFn(fn(&mut Bencher)),
    StaticMetricFn(fn(&mut MetricMap)),
    DynTestFn(Thunk<'static>),
    DynMetricFn(Box<for<'a> Invoke<&'a mut MetricMap>+'static>),
    DynBenchFn(Box<TDynBenchFn+'static>)
}
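// A sketch of how the static variants are typically constructed; the
// `#[test]` attribute expands to roughly this (names illustrative):
//
//     fn my_test() { assert!(1 + 1 == 2); }
//
//     static TEST: TestDescAndFn = TestDescAndFn {
//         desc: TestDesc {
//             name: StaticTestName("my_test"),
//             ignore: false,
//             should_fail: ShouldFail::No,
//         },
//         testfn: StaticTestFn(my_test),
//     };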
impl TestFn {
    fn padding(&self) -> NamePadding {
        match self {
            &StaticTestFn(..) => PadNone,
            &StaticBenchFn(..) => PadOnRight,
            &StaticMetricFn(..) => PadOnRight,
            &DynTestFn(..) => PadNone,
            &DynMetricFn(..) => PadOnRight,
            &DynBenchFn(..) => PadOnRight,
        }
    }
}
impl fmt::Debug for TestFn {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        f.write_str(match *self {
            StaticTestFn(..) => "StaticTestFn(..)",
            StaticBenchFn(..) => "StaticBenchFn(..)",
            StaticMetricFn(..) => "StaticMetricFn(..)",
            DynTestFn(..) => "DynTestFn(..)",
            DynMetricFn(..) => "DynMetricFn(..)",
            DynBenchFn(..) => "DynBenchFn(..)"
        })
    }
}
/// Manager of the benchmarking runs.
///
/// This is fed into functions marked with `#[bench]` to allow for
/// set-up & tear-down before running a piece of code repeatedly via a
/// call to `iter`.
#[derive(Copy)]
pub struct Bencher {
    iterations: u64,
    dur: Duration,
    pub bytes: u64,
}
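// Sketch of the set-up/tear-down pattern the doc comment above
// describes (the benchmark name and set-up code are illustrative):
//
//     #[bench]
//     fn bench_xor(b: &mut Bencher) {
//         let data: Vec<u64> = (0..1000).collect(); // set-up, run once
//         b.iter(|| {
//             // only this closure is timed
//             data.iter().fold(0, |a, &x| a ^ x)
//         });
//     }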
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum ShouldFail {
    No,
    Yes(Option<&'static str>)
}
// The definition of a single test. A test runner will run a list of
// these.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct TestDesc {
    pub name: TestName,
    pub ignore: bool,
    pub should_fail: ShouldFail,
}

unsafe impl Send for TestDesc {}
pub struct TestDescAndFn {
    pub desc: TestDesc,
    pub testfn: TestFn,
}
#[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Debug, Copy)]
pub struct Metric {
    value: f64,
    noise: f64
}

impl Metric {
    pub fn new(value: f64, noise: f64) -> Metric {
        Metric {value: value, noise: noise}
    }
}
#[derive(PartialEq)]
pub struct MetricMap(BTreeMap<String,Metric>);

impl Clone for MetricMap {
    fn clone(&self) -> MetricMap {
        let MetricMap(ref map) = *self;
        MetricMap(map.clone())
    }
}
// The default console test runner. It accepts the command line
// arguments and a vector of test_descs.
pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>) {
    let opts =
        match parse_opts(args) {
            Some(Ok(o)) => o,
            Some(Err(msg)) => panic!("{:?}", msg),
            None => return
        };
    match run_tests_console(&opts, tests) {
        Ok(true) => {}
        Ok(false) => panic!("Some tests failed"),
        Err(e) => panic!("io error when running tests: {:?}", e),
    }
}
// A variant optimized for invocation with a static test vector.
// This will panic (intentionally) when fed any dynamic tests, because
// it is copying the static values out into a dynamic vector and cannot
// copy dynamic values. It is doing this because from this point on
// a Vec<TestDescAndFn> is used in order to effect ownership-transfer
// semantics into parallel test runners, which in turn requires a Vec<>
// rather than a &[].
pub fn test_main_static(args: env::Args, tests: &[TestDescAndFn]) {
    let args = args.collect::<Vec<_>>();
    let owned_tests = tests.iter().map(|t| {
        match t.testfn {
            StaticTestFn(f) => TestDescAndFn { testfn: StaticTestFn(f), desc: t.desc.clone() },
            StaticBenchFn(f) => TestDescAndFn { testfn: StaticBenchFn(f), desc: t.desc.clone() },
            _ => panic!("non-static tests passed to test::test_main_static")
        }
    }).collect();
    test_main(&args, owned_tests)
}
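// Sketch of the caller: the harness that rustc generates for a --test
// build boils down to roughly this, with TESTS being the compiler-
// collected static slice of TestDescAndFn values (illustrative):
//
//     fn main() {
//         test::test_main_static(std::env::args(), TESTS);
//     }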
#[derive(Copy)]
pub enum ColorConfig {
    AutoColor,
    AlwaysColor,
    NeverColor,
}
pub struct TestOpts {
    pub filter: Option<String>,
    pub run_ignored: bool,
    pub run_tests: bool,
    pub run_benchmarks: bool,
    pub logfile: Option<PathBuf>,
    pub nocapture: bool,
    pub color: ColorConfig,
}
impl TestOpts {
    #[cfg(test)]
    fn new() -> TestOpts {
        TestOpts {
            filter: None,
            run_ignored: false,
            run_tests: false,
            run_benchmarks: false,
            logfile: None,
            nocapture: false,
            color: AutoColor,
        }
    }
}
/// Result of parsing the options.
pub type OptRes = Result<TestOpts, String>;
fn optgroups() -> Vec<getopts::OptGroup> {
    vec!(getopts::optflag("", "ignored", "Run ignored tests"),
         getopts::optflag("", "test", "Run tests and not benchmarks"),
         getopts::optflag("", "bench", "Run benchmarks instead of tests"),
         getopts::optflag("h", "help", "Display this message (longer with --help)"),
         getopts::optopt("", "logfile", "Write logs to the specified file instead \
                                         of stdout", "PATH"),
         getopts::optflag("", "nocapture", "don't capture stdout/stderr of each \
                                            task, allow printing directly"),
         getopts::optopt("", "color", "Configure coloring of output:
            auto   = colorize if stdout is a tty and tests are run serially (default);
            always = always colorize output;
            never  = never colorize output;", "auto|always|never"))
}
fn usage(binary: &str) {
    let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
    println!(r#"{usage}

The FILTER regex is tested against the name of all tests to run, and
only those tests that match are run.

By default, all tests are run in parallel. This can be altered with the
RUST_TEST_TASKS environment variable when running tests (set it to 1).

All tests have their standard output and standard error captured by default.
This can be overridden with the --nocapture flag or the RUST_TEST_NOCAPTURE=1
environment variable. Logging is not captured by default.

Test Attributes:

    #[test]        - Indicates a function is a test to be run. This function
                     takes no arguments.
    #[bench]       - Indicates a function is a benchmark to be run. This
                     function takes one argument (test::Bencher).
    #[should_fail] - This function (also labeled with #[test]) will only pass if
                     the code causes a failure (an assertion failure or panic!)
                     A message may be provided, which the failure string must
                     contain: #[should_fail(expected = "foo")].
    #[ignore]      - When applied to a function which is already attributed as a
                     test, then the test runner will ignore these tests during
                     normal test runs. Running with --ignored will run these
                     tests."#,
             usage = getopts::usage(&message, &optgroups()));
}
// Parses command line arguments into test options
pub fn parse_opts(args: &[String]) -> Option<OptRes> {
    let args_ = args.tail();
    let matches =
        match getopts::getopts(args_, &optgroups()) {
            Ok(m) => m,
            Err(f) => return Some(Err(f.to_string()))
        };

    if matches.opt_present("h") { usage(&args[0]); return None; }

    let filter = if matches.free.len() > 0 {
        Some(matches.free[0].clone())
    } else {
        None
    };

    let run_ignored = matches.opt_present("ignored");

    let logfile = matches.opt_str("logfile");
    let logfile = logfile.map(|s| PathBuf::new(&s));

    let run_benchmarks = matches.opt_present("bench");
    let run_tests = ! run_benchmarks ||
        matches.opt_present("test");

    let mut nocapture = matches.opt_present("nocapture");
    if !nocapture {
        nocapture = env::var("RUST_TEST_NOCAPTURE").is_ok();
    }

    let color = match matches.opt_str("color").as_ref().map(|s| &**s) {
        Some("auto") | None => AutoColor,
        Some("always") => AlwaysColor,
        Some("never") => NeverColor,

        Some(v) => return Some(Err(format!("argument for --color must be \
                                            auto, always, or never (was {})",
                                           v))),
    };

    let test_opts = TestOpts {
        filter: filter,
        run_ignored: run_ignored,
        run_tests: run_tests,
        run_benchmarks: run_benchmarks,
        logfile: logfile,
        nocapture: nocapture,
        color: color,
    };

    Some(Ok(test_opts))
}
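// Worked example of the precedence rules above (hypothetical command
// line): `mytests --bench --color never somefilter` parses to
// run_benchmarks == true, run_tests == false (no --test flag given),
// color == NeverColor, and filter == Some("somefilter"), since the
// first free argument is taken as the filter.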
#[derive(Clone, PartialEq)]
pub struct BenchSamples {
    ns_iter_summ: stats::Summary<f64>,
    mb_s: uint,
}
#[derive(Clone, PartialEq)]
pub enum TestResult {
    TrOk,
    TrFailed,
    TrIgnored,
    TrMetrics(MetricMap),
    TrBench(BenchSamples),
}

unsafe impl Send for TestResult {}
enum OutputLocation<T> {
    Pretty(Box<term::Terminal<term::WriterWrapper> + Send>),
    Raw(T),
}
struct ConsoleTestState<T> {
    log_out: Option<File>,
    out: OutputLocation<T>,
    use_color: bool,
    total: uint,
    passed: uint,
    failed: uint,
    ignored: uint,
    measured: uint,
    metrics: MetricMap,
    failures: Vec<(TestDesc, Vec<u8>)>,
    max_name_len: uint, // number of columns to fill when aligning names
}
fn new2old(new: io::Error) -> old_io::IoError {
    old_io::IoError {
        kind: old_io::OtherIoError,
        desc: "other error",
        detail: Some(new.to_string()),
    }
}
impl<T: Writer> ConsoleTestState<T> {
    pub fn new(opts: &TestOpts,
               _: Option<T>) -> old_io::IoResult<ConsoleTestState<StdWriter>> {
        let log_out = match opts.logfile {
            Some(ref path) => Some(try!(File::create(path).map_err(new2old))),
            None => None
        };
        let out = match term::stdout() {
            None => Raw(old_io::stdio::stdout_raw()),
            Some(t) => Pretty(t)
        };

        Ok(ConsoleTestState {
            out: out,
            log_out: log_out,
            use_color: use_color(opts),
            total: 0,
            passed: 0,
            failed: 0,
            ignored: 0,
            measured: 0,
            metrics: MetricMap::new(),
            failures: Vec::new(),
            max_name_len: 0,
        })
    }
    pub fn write_ok(&mut self) -> old_io::IoResult<()> {
        self.write_pretty("ok", term::color::GREEN)
    }

    pub fn write_failed(&mut self) -> old_io::IoResult<()> {
        self.write_pretty("FAILED", term::color::RED)
    }

    pub fn write_ignored(&mut self) -> old_io::IoResult<()> {
        self.write_pretty("ignored", term::color::YELLOW)
    }

    pub fn write_metric(&mut self) -> old_io::IoResult<()> {
        self.write_pretty("metric", term::color::CYAN)
    }

    pub fn write_bench(&mut self) -> old_io::IoResult<()> {
        self.write_pretty("bench", term::color::CYAN)
    }
    pub fn write_pretty(&mut self,
                        word: &str,
                        color: term::color::Color) -> old_io::IoResult<()> {
        match self.out {
            Pretty(ref mut term) => {
                if self.use_color {
                    try!(term.fg(color));
                }
                try!(term.write_all(word.as_bytes()));
                if self.use_color {
                    try!(term.reset());
                }
                Ok(())
            }
            Raw(ref mut stdout) => {
                try!(stdout.write_all(word.as_bytes()));
                Ok(())
            }
        }
    }
    pub fn write_plain(&mut self, s: &str) -> old_io::IoResult<()> {
        match self.out {
            Pretty(ref mut term) => {
                try!(term.write_all(s.as_bytes()));
                term.flush()
            }
            Raw(ref mut stdout) => {
                try!(stdout.write_all(s.as_bytes()));
                stdout.flush()
            }
        }
    }
    pub fn write_run_start(&mut self, len: uint) -> old_io::IoResult<()> {
        self.total = len;
        let noun = if len != 1 { "tests" } else { "test" };
        self.write_plain(&format!("\nrunning {} {}\n", len, noun))
    }
    pub fn write_test_start(&mut self, test: &TestDesc,
                            align: NamePadding) -> old_io::IoResult<()> {
        let name = test.padded_name(self.max_name_len, align);
        self.write_plain(&format!("test {} ... ", name))
    }
    pub fn write_result(&mut self, result: &TestResult) -> old_io::IoResult<()> {
        try!(match *result {
            TrOk => self.write_ok(),
            TrFailed => self.write_failed(),
            TrIgnored => self.write_ignored(),
            TrMetrics(ref mm) => {
                try!(self.write_metric());
                self.write_plain(&format!(": {}", mm.fmt_metrics()))
            }
            TrBench(ref bs) => {
                try!(self.write_bench());
                try!(self.write_plain(&format!(": {}", fmt_bench_samples(bs))));
                Ok(())
            }
        });
        self.write_plain("\n")
    }
    pub fn write_log(&mut self, test: &TestDesc,
                     result: &TestResult) -> io::Result<()> {
        match self.log_out {
            None => Ok(()),
            Some(ref mut o) => {
                let s = format!("{} {}\n", match *result {
                    TrOk => "ok".to_string(),
                    TrFailed => "failed".to_string(),
                    TrIgnored => "ignored".to_string(),
                    TrMetrics(ref mm) => mm.fmt_metrics(),
                    TrBench(ref bs) => fmt_bench_samples(bs)
                }, test.name);
                o.write_all(s.as_bytes())
            }
        }
    }
    pub fn write_failures(&mut self) -> old_io::IoResult<()> {
        try!(self.write_plain("\nfailures:\n"));
        let mut failures = Vec::new();
        let mut fail_out = String::new();
        for &(ref f, ref stdout) in &self.failures {
            failures.push(f.name.to_string());
            if stdout.len() > 0 {
                fail_out.push_str(&format!("---- {} stdout ----\n\t", f.name));
                let output = String::from_utf8_lossy(stdout);
                fail_out.push_str(&output);
                fail_out.push_str("\n");
            }
        }
        if fail_out.len() > 0 {
            try!(self.write_plain("\n"));
            try!(self.write_plain(&fail_out));
        }

        try!(self.write_plain("\nfailures:\n"));
        failures.sort();
        for name in &failures {
            try!(self.write_plain(&format!("    {}\n", name)));
        }
        Ok(())
    }
    pub fn write_run_finish(&mut self) -> old_io::IoResult<bool> {
        assert!(self.passed + self.failed + self.ignored + self.measured == self.total);

        let success = self.failed == 0;
        if !success {
            try!(self.write_failures());
        }

        try!(self.write_plain("\ntest result: "));
        if success {
            // There's no parallelism at this point so it's safe to use color
            try!(self.write_ok());
        } else {
            try!(self.write_failed());
        }
        let s = format!(". {} passed; {} failed; {} ignored; {} measured\n\n",
                        self.passed, self.failed, self.ignored, self.measured);
        try!(self.write_plain(&s));
        return Ok(success);
    }
}
pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
    if bs.mb_s != 0 {
        format!("{:>9} ns/iter (+/- {}) = {} MB/s",
                bs.ns_iter_summ.median as uint,
                (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint,
                bs.mb_s)
    } else {
        format!("{:>9} ns/iter (+/- {})",
                bs.ns_iter_summ.median as uint,
                (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint)
    }
}
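// Worked example: a benchmark that declares `bytes = 1024` per
// iteration, with a median of 5000 ns/iter and a (max - min) spread of
// 200 ns, has iter_s = 1_000_000_000 / 5000 = 200_000 and
// mb_s = (1024 * 200_000) / 1_000_000 = 204 (see `bench::benchmark`
// below), so it renders as:
//
//          5000 ns/iter (+/- 200) = 204 MB/s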
// A simple console test runner
pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> old_io::IoResult<bool> {

    fn callback<T: Writer>(event: &TestEvent,
                           st: &mut ConsoleTestState<T>) -> old_io::IoResult<()> {
        match (*event).clone() {
            TeFiltered(ref filtered_tests) => st.write_run_start(filtered_tests.len()),
            TeWait(ref test, padding) => st.write_test_start(test, padding),
            TeResult(test, result, stdout) => {
                try!(st.write_log(&test, &result).map_err(new2old));
                try!(st.write_result(&result));
                match result {
                    TrOk => st.passed += 1,
                    TrIgnored => st.ignored += 1,
                    TrMetrics(mm) => {
                        let tname = test.name;
                        let MetricMap(mm) = mm;
                        for (k, v) in &mm {
                            st.metrics
                              .insert_metric(&format!("{}.{}",
                                                      tname,
                                                      k),
                                             v.value,
                                             v.noise);
                        }
                        st.measured += 1
                    }
                    TrBench(bs) => {
                        st.metrics.insert_metric(test.name.as_slice(),
                                                 bs.ns_iter_summ.median,
                                                 bs.ns_iter_summ.max - bs.ns_iter_summ.min);
                        st.measured += 1
                    }
                    TrFailed => {
                        st.failed += 1;
                        st.failures.push((test, stdout));
                    }
                }
                Ok(())
            }
        }
    }
    let mut st = try!(ConsoleTestState::new(opts, None::<StdWriter>));
    fn len_if_padded(t: &TestDescAndFn) -> uint {
        match t.testfn.padding() {
            PadNone => 0,
            PadOnLeft | PadOnRight => t.desc.name.as_slice().len(),
        }
    }
    match tests.iter().max_by(|t| len_if_padded(*t)) {
        Some(t) => {
            let n = t.desc.name.as_slice();
            st.max_name_len = n.len();
        }
        None => {}
    }
    try!(run_tests(opts, tests, |x| callback(&x, &mut st)));
    return st.write_run_finish();
}
#[test]
fn should_sort_failures_before_printing_them() {
    let test_a = TestDesc {
        name: StaticTestName("a"),
        ignore: false,
        should_fail: ShouldFail::No
    };

    let test_b = TestDesc {
        name: StaticTestName("b"),
        ignore: false,
        should_fail: ShouldFail::No
    };

    let mut st = ConsoleTestState {
        log_out: None,
        out: Raw(Vec::new()),
        use_color: false,
        total: 0,
        passed: 0,
        failed: 0,
        ignored: 0,
        measured: 0,
        max_name_len: 10,
        metrics: MetricMap::new(),
        failures: vec!((test_b, Vec::new()), (test_a, Vec::new()))
    };

    st.write_failures().unwrap();
    let s = match st.out {
        Raw(ref m) => String::from_utf8_lossy(&m[..]),
        Pretty(_) => unreachable!()
    };

    let apos = s.find("a").unwrap();
    let bpos = s.find("b").unwrap();
    assert!(apos < bpos);
}
fn use_color(opts: &TestOpts) -> bool {
    match opts.color {
        AutoColor => get_concurrency() == 1 && old_io::stdout().get_ref().isatty(),
        AlwaysColor => true,
        NeverColor => false,
    }
}
#[derive(Clone)]
enum TestEvent {
    TeFiltered(Vec<TestDesc>),
    TeWait(TestDesc, NamePadding),
    TeResult(TestDesc, TestResult, Vec<u8>),
}

pub type MonitorMsg = (TestDesc, TestResult, Vec<u8>);
fn run_tests<F>(opts: &TestOpts,
                tests: Vec<TestDescAndFn>,
                mut callback: F) -> old_io::IoResult<()> where
    F: FnMut(TestEvent) -> old_io::IoResult<()>,
{
    let filtered_tests = filter_tests(opts, tests);
    let filtered_descs = filtered_tests.iter()
                                       .map(|t| t.desc.clone())
                                       .collect();

    try!(callback(TeFiltered(filtered_descs)));

    let (filtered_tests, filtered_benchs_and_metrics): (Vec<_>, _) =
        filtered_tests.into_iter().partition(|e| {
            match e.testfn {
                StaticTestFn(_) | DynTestFn(_) => true,
                _ => false
            }
        });

    // It's tempting to just spawn all the tests at once, but since we have
    // many tests that run in other processes we would be making a big mess.
    let concurrency = get_concurrency();

    let mut remaining = filtered_tests;
    remaining.reverse();
    let mut pending = 0;

    let (tx, rx) = channel::<MonitorMsg>();

    while pending > 0 || !remaining.is_empty() {
        while pending < concurrency && !remaining.is_empty() {
            let test = remaining.pop().unwrap();
            if concurrency == 1 {
                // We are doing one test at a time so we can print the name
                // of the test before we run it. Useful for debugging tests
                // that hang forever.
                try!(callback(TeWait(test.desc.clone(), test.testfn.padding())));
            }
            run_test(opts, !opts.run_tests, test, tx.clone());
            pending += 1;
        }

        let (desc, result, stdout) = rx.recv().unwrap();
        if concurrency != 1 {
            try!(callback(TeWait(desc.clone(), PadNone)));
        }
        try!(callback(TeResult(desc, result, stdout)));
        pending -= 1;
    }

    // All benchmarks run at the end, in serial.
    // (this includes metric fns)
    for b in filtered_benchs_and_metrics {
        try!(callback(TeWait(b.desc.clone(), b.testfn.padding())));
        run_test(opts, !opts.run_benchmarks, b, tx.clone());
        let (test, result, stdout) = rx.recv().unwrap();
        try!(callback(TeResult(test, result, stdout)));
    }
    Ok(())
}
fn get_concurrency() -> uint {
    match env::var("RUST_TEST_TASKS") {
        Ok(s) => {
            let opt_n: Option<uint> = s.parse().ok();
            match opt_n {
                Some(n) if n > 0 => n,
                _ => panic!("RUST_TEST_TASKS is `{}`, should be a positive integer.", s)
            }
        }
        Err(..) => {
            rt::default_sched_threads()
        }
    }
}
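// For example, forcing the runner to execute tests one at a time (and
// thereby enabling "auto" colored output and in-order test names):
//
//     $ RUST_TEST_TASKS=1 ./mytests
//
// (illustrative shell invocation; `mytests` is a hypothetical test binary)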
pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
    let mut filtered = tests;

    // Remove tests that don't match the test filter
    filtered = match opts.filter {
        None => filtered,
        Some(ref filter) => {
            filtered.into_iter().filter(|test| {
                test.desc.name.as_slice().contains(&filter[..])
            }).collect()
        }
    };

    // Maybe pull out the ignored tests and unignore them
    filtered = if !opts.run_ignored {
        filtered
    } else {
        fn filter(test: TestDescAndFn) -> Option<TestDescAndFn> {
            if test.desc.ignore {
                let TestDescAndFn {desc, testfn} = test;
                Some(TestDescAndFn {
                    desc: TestDesc {ignore: false, ..desc},
                    testfn: testfn
                })
            } else {
                None
            }
        }
        filtered.into_iter().filter_map(|x| filter(x)).collect()
    };

    // Sort the tests alphabetically
    filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));

    filtered
}
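// Sketch of the filter in action (names hypothetical): with
// `opts.filter == Some("int::".to_string())`, a list containing
// "int::test_pow", "int::test_to_str" and "sha1::test" is narrowed to
// the two "int::" tests, since matching is a plain substring test on
// the full test name; the survivors are then sorted alphabetically.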
pub fn run_test(opts: &TestOpts,
                force_ignore: bool,
                test: TestDescAndFn,
                monitor_ch: Sender<MonitorMsg>) {

    let TestDescAndFn {desc, testfn} = test;

    if force_ignore || desc.ignore {
        monitor_ch.send((desc, TrIgnored, Vec::new())).unwrap();
        return;
    }

    fn run_test_inner(desc: TestDesc,
                      monitor_ch: Sender<MonitorMsg>,
                      nocapture: bool,
                      testfn: Thunk<'static>) {
        thread::spawn(move || {
            let (tx, rx) = channel();
            let mut reader = ChanReader::new(rx);
            let stdout = ChanWriter::new(tx.clone());
            let stderr = ChanWriter::new(tx);
            let mut cfg = thread::Builder::new().name(match desc.name {
                DynTestName(ref name) => name.clone().to_string(),
                StaticTestName(name) => name.to_string(),
            });
            if nocapture {
                drop((stdout, stderr));
            } else {
                cfg = cfg.stdout(box stdout as Box<Writer + Send>);
                cfg = cfg.stderr(box stderr as Box<Writer + Send>);
            }

            let result_guard = cfg.spawn(move || { testfn.invoke(()) }).unwrap();
            let stdout = reader.read_to_end().unwrap().into_iter().collect();
            let test_result = calc_result(&desc, result_guard.join());
            monitor_ch.send((desc.clone(), test_result, stdout)).unwrap();
        });
    }

    match testfn {
        DynBenchFn(bencher) => {
            let bs = ::bench::benchmark(|harness| bencher.run(harness));
            monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
            return;
        }
        StaticBenchFn(benchfn) => {
            let bs = ::bench::benchmark(|harness| (benchfn.clone())(harness));
            monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
            return;
        }
        DynMetricFn(f) => {
            let mut mm = MetricMap::new();
            f.invoke(&mut mm);
            monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
            return;
        }
        StaticMetricFn(f) => {
            let mut mm = MetricMap::new();
            f(&mut mm);
            monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
            return;
        }
        DynTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture, f),
        StaticTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture,
                                          Thunk::new(move|| f()))
    }
}
fn calc_result(desc: &TestDesc, task_result: Result<(), Box<Any+Send>>) -> TestResult {
    match (&desc.should_fail, task_result) {
        (&ShouldFail::No, Ok(())) |
        (&ShouldFail::Yes(None), Err(_)) => TrOk,
        (&ShouldFail::Yes(Some(msg)), Err(ref err))
            if err.downcast_ref::<String>()
                  .map(|e| &**e)
                  .or_else(|| err.downcast_ref::<&'static str>().map(|e| *e))
                  .map(|e| e.contains(msg))
                  .unwrap_or(false) => TrOk,
        _ => TrFailed,
    }
}
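// Worked example: a test marked #[should_fail(expected = "foo")] that
// calls panic!("foobar") produces an Err whose payload downcasts to a
// &'static str containing "foo", so the guarded arm matches and the
// result is TrOk; panicking with "bar baz" instead would fall through
// to TrFailed.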
impl MetricMap {
    pub fn new() -> MetricMap {
        MetricMap(BTreeMap::new())
    }

    /// Insert a named `value` (+/- `noise`) metric into the map. The value
    /// must be non-negative. The `noise` indicates the uncertainty of the
    /// metric, which doubles as the "noise range" of acceptable
    /// pairwise-regressions on this named value, when comparing from one
    /// metric to the next using `compare_to_old`.
    ///
    /// If `noise` is positive, then it means this metric is of a value
    /// you want to see grow smaller, so a change larger than `noise` in the
    /// positive direction represents a regression.
    ///
    /// If `noise` is negative, then it means this metric is of a value
    /// you want to see grow larger, so a change larger than `noise` in the
    /// negative direction represents a regression.
    pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
        let m = Metric {
            value: value,
            noise: noise
        };
        let MetricMap(ref mut map) = *self;
        map.insert(name.to_string(), m);
    }
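    // Sketch of the sign convention described above (metric names are
    // illustrative):
    //
    //     let mut mm = MetricMap::new();
    //     // running time: smaller is better, so noise is positive
    //     mm.insert_metric("run-time-ns", 1500.0, 10.0);
    //     // throughput: larger is better, so noise is negative
    //     mm.insert_metric("throughput-mb-s", 250.0, -5.0);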
    pub fn fmt_metrics(&self) -> String {
        let MetricMap(ref mm) = *self;
        let v: Vec<String> = mm.iter()
            .map(|(k, v)| format!("{}: {} (+/- {})", *k,
                                  v.value as f64, v.noise as f64))
            .collect();
        v.connect(", ")
    }
}
/// A function that is opaque to the optimizer, to allow benchmarks to
/// pretend to use outputs to assist in avoiding dead-code
/// elimination.
///
/// This function is a no-op, and does not even read from `dummy`.
pub fn black_box<T>(dummy: T) -> T {
    // we need to "use" the argument in some way LLVM can't
    // introspect.
    unsafe { asm!("" : : "r"(&dummy)) }
    dummy
}
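// Sketch of why this matters: without `black_box`, LLVM may notice
// that the benchmarked expression is never observed and delete it,
// leaving the timing loop measuring nothing (benchmark name
// illustrative):
//
//     #[bench]
//     fn bench_sum(b: &mut Bencher) {
//         b.iter(|| black_box((0..1000).fold(0u64, |a, x| a + x)));
//     }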
impl Bencher {
    /// Callback for benchmark functions to run in their body.
    pub fn iter<T, F>(&mut self, mut inner: F) where F: FnMut() -> T {
        self.dur = Duration::span(|| {
            let k = self.iterations;
            for _ in 0..k {
                black_box(inner());
            }
        });
    }
    pub fn ns_elapsed(&mut self) -> u64 {
        self.dur.num_nanoseconds().unwrap() as u64
    }

    pub fn ns_per_iter(&mut self) -> u64 {
        if self.iterations == 0 {
            0
        } else {
            self.ns_elapsed() / cmp::max(self.iterations, 1)
        }
    }
    pub fn bench_n<F>(&mut self, n: u64, f: F) where F: FnOnce(&mut Bencher) {
        self.iterations = n;
        f(self);
    }
    // This is a more statistics-driven benchmark algorithm
    pub fn auto_bench<F>(&mut self, mut f: F) -> stats::Summary<f64> where F: FnMut(&mut Bencher) {
        // Initial bench run to get ballpark figure.
        let mut n = 1_u64;
        self.bench_n(n, |x| f(x));

        // Try to estimate iter count for 1ms falling back to 1m
        // iterations if first run took < 1ns.
        if self.ns_per_iter() == 0 {
            n = 1_000_000;
        } else {
            n = 1_000_000 / cmp::max(self.ns_per_iter(), 1);
        }
        // if the first run took more than 1ms we don't want to just
        // be left doing 0 iterations on every loop. The unfortunate
        // side effect of not being able to do as many runs is
        // automatically handled by the statistical analysis below
        // (i.e. larger error bars).
        if n == 0 { n = 1; }

        let mut total_run = Duration::nanoseconds(0);
        let samples: &mut [f64] = &mut [0.0_f64; 50];
        loop {
            let mut summ = None;
            let mut summ5 = None;

            let loop_run = Duration::span(|| {
                for p in &mut *samples {
                    self.bench_n(n, |x| f(x));
                    *p = self.ns_per_iter() as f64;
                }

                stats::winsorize(samples, 5.0);
                summ = Some(stats::Summary::new(samples));

                for p in &mut *samples {
                    self.bench_n(5 * n, |x| f(x));
                    *p = self.ns_per_iter() as f64;
                }

                stats::winsorize(samples, 5.0);
                summ5 = Some(stats::Summary::new(samples));
            });
            let summ = summ.unwrap();
            let summ5 = summ5.unwrap();

            // If we've run for 100ms and seem to have converged to a
            // stable median.
            if loop_run.num_milliseconds() > 100 &&
               summ.median_abs_dev_pct < 1.0 &&
               summ.median - summ5.median < summ5.median_abs_dev {
                return summ5;
            }

            total_run = total_run + loop_run;
            // Longest we ever run for is 3s.
            if total_run.num_seconds() > 3 {
                return summ5;
            }

            n *= 2;
        }
    }
}
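// In brief, the sampling schedule above: take 50 samples of n
// iterations and 50 samples of 5*n iterations, winsorize both sets at
// 5% to clip outliers, and accept the larger run once a pass took over
// 100ms, the median absolute deviation is under 1%, and the two
// medians agree to within that deviation; otherwise double n and try
// again, giving up (and reporting anyway) after ~3s of total run time.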
pub mod bench {
    //! Benchmarking module.
    use std::cmp;
    use std::time::Duration;
    use super::{Bencher, BenchSamples};

    pub fn benchmark<F>(f: F) -> BenchSamples where F: FnMut(&mut Bencher) {
        let mut bs = Bencher {
            iterations: 0,
            dur: Duration::nanoseconds(0),
            bytes: 0
        };

        let ns_iter_summ = bs.auto_bench(f);

        let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
        let iter_s = 1_000_000_000 / ns_iter;
        let mb_s = (bs.bytes * iter_s) / 1_000_000;

        BenchSamples {
            ns_iter_summ: ns_iter_summ,
            mb_s: mb_s as uint
        }
    }
}
#[cfg(test)]
mod tests {
    use test::{TrFailed, TrIgnored, TrOk, filter_tests, parse_opts,
               TestDesc, TestDescAndFn, TestOpts, run_test,
               MetricMap,
               StaticTestName, DynTestName, DynTestFn, ShouldFail};
    use std::thunk::Thunk;
    use std::sync::mpsc::channel;
    #[test]
    pub fn do_not_run_ignored_tests() {
        fn f() { panic!(); }
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: true,
                should_fail: ShouldFail::No,
            },
            testfn: DynTestFn(Thunk::new(move|| f())),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res != TrOk);
    }
    #[test]
    pub fn ignored_tests_result_in_ignored() {
        fn f() { }
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: true,
                should_fail: ShouldFail::No,
            },
            testfn: DynTestFn(Thunk::new(move|| f())),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res == TrIgnored);
    }
    #[test]
    fn test_should_fail() {
        fn f() { panic!(); }
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: false,
                should_fail: ShouldFail::Yes(None)
            },
            testfn: DynTestFn(Thunk::new(move|| f())),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res == TrOk);
    }
    #[test]
    fn test_should_fail_good_message() {
        fn f() { panic!("an error message"); }
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: false,
                should_fail: ShouldFail::Yes(Some("error message"))
            },
            testfn: DynTestFn(Thunk::new(move|| f())),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res == TrOk);
    }
    #[test]
    fn test_should_fail_bad_message() {
        fn f() { panic!("an error message"); }
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: false,
                should_fail: ShouldFail::Yes(Some("foobar"))
            },
            testfn: DynTestFn(Thunk::new(move|| f())),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res == TrFailed);
    }
    #[test]
    fn test_should_fail_but_succeeds() {
        fn f() { }
        let desc = TestDescAndFn {
            desc: TestDesc {
                name: StaticTestName("whatever"),
                ignore: false,
                should_fail: ShouldFail::Yes(None)
            },
            testfn: DynTestFn(Thunk::new(move|| f())),
        };
        let (tx, rx) = channel();
        run_test(&TestOpts::new(), false, desc, tx);
        let (_, res, _) = rx.recv().unwrap();
        assert!(res == TrFailed);
    }
    #[test]
    fn parse_ignored_flag() {
        let args = vec!("progname".to_string(),
                        "filter".to_string(),
                        "--ignored".to_string());
        let opts = match parse_opts(&args) {
            Some(Ok(o)) => o,
            _ => panic!("Malformed arg in parse_ignored_flag")
        };
        assert!(opts.run_ignored);
    }
    #[test]
    pub fn filter_for_ignored_option() {
        // When we run ignored tests the test filter should filter out all the
        // unignored tests and flip the ignore flag on the rest to false

        let mut opts = TestOpts::new();
        opts.run_tests = true;
        opts.run_ignored = true;

        let tests = vec!(
            TestDescAndFn {
                desc: TestDesc {
                    name: StaticTestName("1"),
                    ignore: true,
                    should_fail: ShouldFail::No,
                },
                testfn: DynTestFn(Thunk::new(move|| {})),
            },
            TestDescAndFn {
                desc: TestDesc {
                    name: StaticTestName("2"),
                    ignore: false,
                    should_fail: ShouldFail::No,
                },
                testfn: DynTestFn(Thunk::new(move|| {})),
            });
        let filtered = filter_tests(&opts, tests);

        assert_eq!(filtered.len(), 1);
        assert_eq!(filtered[0].desc.name.to_string(),
                   "1");
        assert!(filtered[0].desc.ignore == false);
    }
    #[test]
    pub fn sort_tests() {
        let mut opts = TestOpts::new();
        opts.run_tests = true;

        let names =
            vec!("sha1::test".to_string(),
                 "int::test_to_str".to_string(),
                 "int::test_pow".to_string(),
                 "test::do_not_run_ignored_tests".to_string(),
                 "test::ignored_tests_result_in_ignored".to_string(),
                 "test::first_free_arg_should_be_a_filter".to_string(),
                 "test::parse_ignored_flag".to_string(),
                 "test::filter_for_ignored_option".to_string(),
                 "test::sort_tests".to_string());
        let tests =
        {
            fn testfn() { }
            let mut tests = Vec::new();
            for name in &names {
                let test = TestDescAndFn {
                    desc: TestDesc {
                        name: DynTestName((*name).clone()),
                        ignore: false,
                        should_fail: ShouldFail::No,
                    },
                    testfn: DynTestFn(Thunk::new(testfn)),
                };
                tests.push(test);
            }
            tests
        };
        let filtered = filter_tests(&opts, tests);

        let expected =
            vec!("int::test_pow".to_string(),
                 "int::test_to_str".to_string(),
                 "sha1::test".to_string(),
                 "test::do_not_run_ignored_tests".to_string(),
                 "test::filter_for_ignored_option".to_string(),
                 "test::first_free_arg_should_be_a_filter".to_string(),
                 "test::ignored_tests_result_in_ignored".to_string(),
                 "test::parse_ignored_flag".to_string(),
                 "test::sort_tests".to_string());

        for (a, b) in expected.iter().zip(filtered.iter()) {
            assert!(*a == b.desc.name.to_string());
        }
    }
    #[test]
    pub fn test_metricmap_compare() {
        let mut m1 = MetricMap::new();
        let mut m2 = MetricMap::new();
        m1.insert_metric("in-both-noise", 1000.0, 200.0);
        m2.insert_metric("in-both-noise", 1100.0, 200.0);

        m1.insert_metric("in-first-noise", 1000.0, 2.0);
        m2.insert_metric("in-second-noise", 1000.0, 2.0);

        m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
        m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);

        m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
        m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);

        m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
        m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);

        m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
        m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);