1 // Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
2 // file at the top-level directory of this distribution and at
3 // http://rust-lang.org/COPYRIGHT.
5 // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
6 // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
7 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
8 // option. This file may not be copied, modified, or distributed
9 // except according to those terms.
11 //! Support code for rustc's built in unit-test and micro-benchmarking
14 //! Almost all user code will only be interested in `Bencher` and
15 //! `black_box`. All other interactions (such as writing tests and
16 //! benchmarks themselves) should be done via the `#[test]` and
17 //! `#[bench]` attributes.
19 //! See the [Testing Guide](../guide-testing.html) for more details.
21 // Currently, not much of this is meant for users. It is intended to
22 // support the simplest interface possible for representing and
23 // running tests while providing a base that other test frameworks may
26 #![crate_name = "test"]
27 #![unstable(feature = "test")]
29 #![crate_type = "rlib"]
30 #![crate_type = "dylib"]
31 #![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
32 html_favicon_url = "http://www.rust-lang.org/favicon.ico",
33 html_root_url = "http://doc.rust-lang.org/nightly/")]
35 #![cfg_attr(not(stage0), allow(unused_mut))] // NOTE: remove after stage0 snap
37 #![feature(asm, slicing_syntax)]
38 #![feature(box_syntax)]
39 #![feature(collections)]
46 #![feature(rustc_private)]
47 #![feature(staged_api)]
51 extern crate serialize;
52 extern crate "serialize" as rustc_serialize;
55 pub use self::TestFn::*;
56 pub use self::ColorConfig::*;
57 pub use self::TestResult::*;
58 pub use self::TestName::*;
59 use self::TestEvent::*;
60 use self::NamePadding::*;
61 use self::OutputLocation::*;
64 use getopts::{OptGroup, optflag, optopt};
65 use serialize::Encodable;
67 use term::color::{Color, RED, YELLOW, GREEN, CYAN};
71 use std::collections::BTreeMap;
73 use std::old_io::stdio::StdWriter;
74 use std::old_io::{File, ChanReader, ChanWriter};
76 use std::iter::repeat;
77 use std::num::{Float, Int};
79 use std::sync::mpsc::{channel, Sender};
80 use std::thread::{self, Thread};
81 use std::thunk::{Thunk, Invoke};
82 use std::time::Duration;
84 // to be used by rustc to compile tests in libtest
86 pub use {Bencher, TestName, TestResult, TestDesc,
87 TestDescAndFn, TestOpts, TrFailed, TrIgnored, TrOk,
89 StaticTestFn, StaticTestName, DynTestName, DynTestFn,
90 run_test, test_main, test_main_static, filter_tests,
91 parse_opts, StaticBenchFn, ShouldFail};
96 // The name of a test. By convention this follows the rules for rust
97 // paths; i.e. it should be a series of identifiers separated by double
98 // colons. This way if some test runner wants to arrange the tests
99 // hierarchically it may.
101 #[derive(Clone, PartialEq, Eq, Hash, Debug)]
// The name of a test: a &'static str for tests declared with attributes,
// or an owned String for dynamically-constructed tests.
// NOTE(review): the `pub enum TestName {` header and the DynTestName(String)
// variant appear elided in this listing — confirm against the full source.
103 StaticTestName(&'static str),
// Borrow the name as a &str regardless of variant.
107 fn as_slice<'a>(&'a self) -> &'a str {
109 StaticTestName(s) => s,
110 DynTestName(ref s) => s.as_slice()
// Display simply forwards to the &str form of the name.
114 impl fmt::Display for TestName {
115 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
116 fmt::Display::fmt(self.as_slice(), f)
120 #[derive(Clone, Copy)]
// Pads this test's name out to `column_count` characters so results align
// in console output. `saturating_sub` guarantees an over-long name yields
// zero fill rather than underflowing the unsigned subtraction.
// NOTE(review): the match on `align` (PadOnLeft prepends pad, PadOnRight
// appends) appears elided in this listing — lines 132-138 are missing.
128 fn padded_name(&self, column_count: uint, align: NamePadding) -> String {
129 let mut name = String::from_str(self.name.as_slice());
130 let fill = column_count.saturating_sub(name.len());
131 let mut pad = repeat(" ").take(fill).collect::<String>();
135 pad.push_str(name.as_slice());
139 name.push_str(pad.as_slice());
146 /// Represents a benchmark function.
147 pub trait TDynBenchFn {
148 fn run(&self, harness: &mut Bencher);
151 // A function that runs a test. If the function returns successfully,
152 // the test succeeds; if the function panics then the test fails. We
153 // may need to come up with a more clever definition of test in order
154 // to support isolation of tests into tasks.
// NOTE(review): the `pub enum TestFn {` header and the StaticTestFn /
// DynTestFn variants appear elided here; the padding() and Debug impls
// below clearly match against six variants.
157 StaticBenchFn(fn(&mut Bencher)),
158 StaticMetricFn(fn(&mut MetricMap)),
160 DynMetricFn(Box<for<'a> Invoke<&'a mut MetricMap>+'static>),
161 DynBenchFn(Box<TDynBenchFn+'static>)
// How a test of this kind should be padded for display: plain test fns get
// no padding; bench/metric fns are right-padded so their timing columns line
// up. (The `match *self {` opener appears elided in this listing.)
165 fn padding(&self) -> NamePadding {
167 &StaticTestFn(..) => PadNone,
168 &StaticBenchFn(..) => PadOnRight,
169 &StaticMetricFn(..) => PadOnRight,
170 &DynTestFn(..) => PadNone,
171 &DynMetricFn(..) => PadOnRight,
172 &DynBenchFn(..) => PadOnRight,
// Hand-written Debug: the function-pointer / boxed-closure payloads are not
// Debug themselves, so each variant is rendered as a fixed placeholder string.
177 impl fmt::Debug for TestFn {
178 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
179 f.write_str(match *self {
180 StaticTestFn(..) => "StaticTestFn(..)",
181 StaticBenchFn(..) => "StaticBenchFn(..)",
182 StaticMetricFn(..) => "StaticMetricFn(..)",
183 DynTestFn(..) => "DynTestFn(..)",
184 DynMetricFn(..) => "DynMetricFn(..)",
185 DynBenchFn(..) => "DynBenchFn(..)"
190 /// Manager of the benchmarking runs.
192 /// This is fed into functions marked with `#[bench]` to allow for
193 /// set-up & tear-down before running a piece of code repeatedly via a
202 #[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
// Whether a test is expected to panic; Yes may carry a substring the panic
// message must contain (see calc_result below).
// NOTE(review): the `No,` variant line appears elided in this listing.
203 pub enum ShouldFail {
205 Yes(Option<&'static str>)
208 // The definition of a single test. A test runner will run a list of
210 #[derive(Clone, Debug, PartialEq, Eq, Hash)]
// NOTE(review): `name` and `ignore` fields appear elided from TestDesc here.
211 pub struct TestDesc {
214 pub should_fail: ShouldFail,
217 unsafe impl Send for TestDesc {}
// Pairs a test's metadata with the function that runs it.
// NOTE(review): the desc/testfn fields appear elided in this listing.
220 pub struct TestDescAndFn {
225 #[derive(Clone, RustcEncodable, RustcDecodable, PartialEq, Debug, Copy)]
// Construct a Metric from a measured value and its noise/uncertainty band.
232 pub fn new(value: f64, noise: f64) -> Metric {
233 Metric {value: value, noise: noise}
// Newtype over an ordered map from metric name to Metric.
238 pub struct MetricMap(BTreeMap<String,Metric>);
// Manual Clone that destructures the newtype and clones the inner BTreeMap.
240 impl Clone for MetricMap {
241 fn clone(&self) -> MetricMap {
242 let MetricMap(ref map) = *self;
243 MetricMap(map.clone())
247 // The default console test runner. It accepts the command line
248 // arguments and a vector of test_descs.
// Parses options, runs the console runner, and panics on option errors,
// on any test failure, or on an I/O error — the panic is what makes the
// test binary exit non-zero. (Match arms for Some(Ok(..))/None appear
// elided in this listing.)
249 pub fn test_main(args: &[String], tests: Vec<TestDescAndFn> ) {
251 match parse_opts(args) {
253 Some(Err(msg)) => panic!("{:?}", msg),
256 match run_tests_console(&opts, tests) {
258 Ok(false) => panic!("Some tests failed"),
259 Err(e) => panic!("io error when running tests: {:?}", e),
263 // A variant optimized for invocation with a static test vector.
264 // This will panic (intentionally) when fed any dynamic tests, because
265 // it is copying the static values out into a dynamic vector and cannot
266 // copy dynamic values. It is doing this because from this point on
267 // a ~[TestDescAndFn] is used in order to effect ownership-transfer
268 // semantics into parallel test runners, which in turn requires a ~[]
269 // rather than a &[].
270 pub fn test_main_static(args: &[String], tests: &[TestDescAndFn]) {
// Re-wrap each static test/bench fn pointer (Copy) into an owned
// TestDescAndFn; dynamic variants cannot be copied out of a &[] and panic.
// (The `match t.testfn {` opener and `.collect()` tail appear elided.)
271 let owned_tests = tests.iter().map(|t| {
273 StaticTestFn(f) => TestDescAndFn { testfn: StaticTestFn(f), desc: t.desc.clone() },
274 StaticBenchFn(f) => TestDescAndFn { testfn: StaticBenchFn(f), desc: t.desc.clone() },
275 _ => panic!("non-static tests passed to test::test_main_static")
278 test_main(args, owned_tests)
// Output-coloring policy selected by --color (variants AutoColor /
// AlwaysColor / NeverColor appear elided in this listing).
282 pub enum ColorConfig {
// Parsed command-line options controlling a test run.
// NOTE(review): several fields (run_tests, nocapture) and most of the
// Default-style `new()` initializers appear elided here.
288 pub struct TestOpts {
289 pub filter: Option<String>,
290 pub run_ignored: bool,
292 pub run_benchmarks: bool,
293 pub logfile: Option<Path>,
295 pub color: ColorConfig,
300 fn new() -> TestOpts {
305 run_benchmarks: false,
313 /// Result of parsing the options.
314 pub type OptRes = Result<TestOpts, String>;
// Declares the getopts option table shared by parse_opts() and usage().
// NOTE(review): several option strings below span elided continuation
// lines (e.g. the logfile description ends with a dangling `\`); do not
// reflow — compare against the full source.
316 fn optgroups() -> Vec<getopts::OptGroup> {
317 vec!(getopts::optflag("", "ignored", "Run ignored tests"),
318 getopts::optflag("", "test", "Run tests and not benchmarks"),
319 getopts::optflag("", "bench", "Run benchmarks instead of tests"),
320 getopts::optflag("h", "help", "Display this message (longer with --help)"),
321 getopts::optopt("", "logfile", "Write logs to the specified file instead \
323 getopts::optflag("", "nocapture", "don't capture stdout/stderr of each \
324 task, allow printing directly"),
325 getopts::optopt("", "color", "Configure coloring of output:
326 auto = colorize if stdout is a tty and tests are run on serially (default);
327 always = always colorize output;
328 never = never colorize output;", "auto|always|never"))
// Prints the --help text: usage line, filter semantics, the test
// attributes (#[test], #[bench], #[should_fail], #[ignore]), and the
// generated getopts option summary. Most of the body is one large string
// literal — no comments may be interleaved below without corrupting it.
331 fn usage(binary: &str) {
332 let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
335 The FILTER regex is tested against the name of all tests to run, and
336 only those tests that match are run.
338 By default, all tests are run in parallel. This can be altered with the
339 RUST_TEST_TASKS environment variable when running tests (set it to 1).
341 All tests have their standard output and standard error captured by default.
342 This can be overridden with the --nocapture flag or the RUST_TEST_NOCAPTURE=1
343 environment variable. Logging is not captured by default.
347 #[test] - Indicates a function is a test to be run. This function
349 #[bench] - Indicates a function is a benchmark to be run. This
350 function takes one argument (test::Bencher).
351 #[should_fail] - This function (also labeled with #[test]) will only pass if
352 the code causes a failure (an assertion failure or panic!)
353 A message may be provided, which the failure string must
354 contain: #[should_fail(expected = "foo")].
355 #[ignore] - When applied to a function which is already attributed as a
356 test, then the test runner will ignore these tests during
357 normal test runs. Running with --ignored will run these
359 usage = getopts::usage(message.as_slice(),
360 optgroups().as_slice()));
363 // Parses command line arguments into test options
// Returns None when --help was requested (after printing usage),
// Some(Err(..)) on malformed arguments, otherwise Some(Ok(TestOpts)).
364 pub fn parse_opts(args: &[String]) -> Option<OptRes> {
// Skip argv[0] (the binary name) before handing to getopts.
365 let args_ = args.tail();
367 match getopts::getopts(args_.as_slice(), optgroups().as_slice()) {
369 Err(f) => return Some(Err(f.to_string()))
372 if matches.opt_present("h") { usage(args[0].as_slice()); return None; }
// First free (non-option) argument, if any, is the test-name filter.
374 let filter = if matches.free.len() > 0 {
375 Some(matches.free[0].clone())
380 let run_ignored = matches.opt_present("ignored");
382 let logfile = matches.opt_str("logfile");
383 let logfile = logfile.map(|s| Path::new(s));
// --bench alone suppresses ordinary tests unless --test is also given.
385 let run_benchmarks = matches.opt_present("bench");
386 let run_tests = ! run_benchmarks ||
387 matches.opt_present("test");
// The env var RUST_TEST_NOCAPTURE can force nocapture on as well.
389 let mut nocapture = matches.opt_present("nocapture");
391 nocapture = os::getenv("RUST_TEST_NOCAPTURE").is_some();
394 let color = match matches.opt_str("color").as_ref().map(|s| s.as_slice()) {
395 Some("auto") | None => AutoColor,
396 Some("always") => AlwaysColor,
397 Some("never") => NeverColor,
399 Some(v) => return Some(Err(format!("argument for --color must be \
400 auto, always, or never (was {})",
// Assemble the final TestOpts (some field initializers and the final
// Some(Ok(test_opts)) appear elided in this listing).
404 let test_opts = TestOpts {
406 run_ignored: run_ignored,
407 run_tests: run_tests,
408 run_benchmarks: run_benchmarks,
410 nocapture: nocapture,
417 #[derive(Clone, PartialEq)]
// Statistical summary of one benchmark's timing samples (plus, in the full
// source, an mb_s throughput field — elided in this listing).
418 pub struct BenchSamples {
419 ns_iter_summ: stats::Summary<f64>,
423 #[derive(Clone, PartialEq)]
// Outcome of a single test (TrOk/TrFailed/TrIgnored variants appear
// elided here).
424 pub enum TestResult {
428 TrMetrics(MetricMap),
429 TrBench(BenchSamples),
432 unsafe impl Send for TestResult {}
// Where console output goes: a color-capable terminal, or (elided Raw
// variant) a plain writer.
434 enum OutputLocation<T> {
435 Pretty(Box<term::Terminal<term::WriterWrapper> + Send>),
// Mutable state threaded through the console runner: output sinks,
// pass/fail counters (elided), collected failures, and layout width.
439 struct ConsoleTestState<T> {
440 log_out: Option<File>,
441 out: OutputLocation<T>,
449 failures: Vec<(TestDesc, Vec<u8> )> ,
450 max_name_len: uint, // number of columns to fill when aligning names
454 pub fn new(opts: &TestOpts,
455 _: Option<T>) -> old_io::IoResult<ConsoleTestState<StdWriter>> {
456 let log_out = match opts.logfile {
457 Some(ref path) => Some(try!(File::create(path))),
460 let out = match term::stdout() {
461 None => Raw(old_io::stdio::stdout_raw()),
465 Ok(ConsoleTestState {
468 use_color: use_color(opts),
474 metrics: MetricMap::new(),
475 failures: Vec::new(),
// Small wrappers that print each result word in its conventional color:
// ok=green, FAILED=red, ignored=yellow, metric/bench=cyan.
480 pub fn write_ok(&mut self) -> old_io::IoResult<()> {
481 self.write_pretty("ok", term::color::GREEN)
484 pub fn write_failed(&mut self) -> old_io::IoResult<()> {
485 self.write_pretty("FAILED", term::color::RED)
488 pub fn write_ignored(&mut self) -> old_io::IoResult<()> {
489 self.write_pretty("ignored", term::color::YELLOW)
492 pub fn write_metric(&mut self) -> old_io::IoResult<()> {
493 self.write_pretty("metric", term::color::CYAN)
496 pub fn write_bench(&mut self) -> old_io::IoResult<()> {
497 self.write_pretty("bench", term::color::CYAN)
// Writes `word` in `color` when the sink is a terminal and color is
// enabled (the use_color check and fg-reset appear elided in this
// listing); a Raw sink gets the plain bytes.
500 pub fn write_pretty(&mut self,
502 color: term::color::Color) -> old_io::IoResult<()> {
504 Pretty(ref mut term) => {
506 try!(term.fg(color));
508 try!(term.write_all(word.as_bytes()));
514 Raw(ref mut stdout) => stdout.write_all(word.as_bytes())
// Writes `s` uncolored to whichever sink is active.
518 pub fn write_plain(&mut self, s: &str) -> old_io::IoResult<()> {
520 Pretty(ref mut term) => term.write_all(s.as_bytes()),
521 Raw(ref mut stdout) => stdout.write_all(s.as_bytes())
// "running N test(s)" banner, with singular/plural chosen from len.
525 pub fn write_run_start(&mut self, len: uint) -> old_io::IoResult<()> {
527 let noun = if len != 1 { "tests" } else { "test" };
528 self.write_plain(format!("\nrunning {} {}\n", len, noun).as_slice())
// "test <name> ... " prefix, name padded to the precomputed column width.
531 pub fn write_test_start(&mut self, test: &TestDesc,
532 align: NamePadding) -> old_io::IoResult<()> {
533 let name = test.padded_name(self.max_name_len, align);
534 self.write_plain(format!("test {} ... ", name).as_slice())
// Prints the colored result word, plus formatted metrics / bench stats
// for those variants, then a newline. (The TrBench arm's opening pattern
// appears elided in this listing.)
537 pub fn write_result(&mut self, result: &TestResult) -> old_io::IoResult<()> {
539 TrOk => self.write_ok(),
540 TrFailed => self.write_failed(),
541 TrIgnored => self.write_ignored(),
542 TrMetrics(ref mm) => {
543 try!(self.write_metric());
544 self.write_plain(format!(": {}", mm.fmt_metrics()).as_slice())
547 try!(self.write_bench());
549 try!(self.write_plain(format!(": {}",
550 fmt_bench_samples(bs)).as_slice()));
555 self.write_plain("\n")
// Appends "<result> <test name>\n" to the --logfile, if one is open
// (the `match self.log_out { None => .., Some(ref mut o) => ..` wrapper
// appears elided in this listing).
558 pub fn write_log(&mut self, test: &TestDesc,
559 result: &TestResult) -> old_io::IoResult<()> {
563 let s = format!("{} {}\n", match *result {
564 TrOk => "ok".to_string(),
565 TrFailed => "failed".to_string(),
566 TrIgnored => "ignored".to_string(),
567 TrMetrics(ref mm) => mm.fmt_metrics(),
568 TrBench(ref bs) => fmt_bench_samples(bs)
569 }, test.name.as_slice()),
570 o.write_all(s.as_bytes())
// Prints the failure report: first each failing test's captured stdout
// (when non-empty), then a sorted list of failing test names.
// NOTE(review): the `failures.sort();` call implied by the sibling unit
// test below appears elided in this listing.
575 pub fn write_failures(&mut self) -> old_io::IoResult<()> {
576 try!(self.write_plain("\nfailures:\n"));
577 let mut failures = Vec::new();
578 let mut fail_out = String::new();
// Accumulate names and captured output in one pass over the failures.
579 for &(ref f, ref stdout) in &self.failures {
580 failures.push(f.name.to_string());
581 if stdout.len() > 0 {
582 fail_out.push_str(format!("---- {} stdout ----\n\t",
583 f.name.as_slice()).as_slice());
// Captured output may not be valid UTF-8; render it lossily.
584 let output = String::from_utf8_lossy(stdout.as_slice());
585 fail_out.push_str(output.as_slice());
586 fail_out.push_str("\n");
589 if fail_out.len() > 0 {
590 try!(self.write_plain("\n"));
591 try!(self.write_plain(fail_out.as_slice()));
594 try!(self.write_plain("\nfailures:\n"));
596 for name in &failures {
597 try!(self.write_plain(format!("    {}\n",
598 name.as_slice()).as_slice()));
// Final summary. Asserts the counters are consistent, prints failures if
// any, then "test result: ok/FAILED. N passed; ..." and returns whether
// the whole run succeeded. (The if/else selecting ok vs FAILED appears
// elided between lines 614 and 616 of the original.)
603 pub fn write_run_finish(&mut self) -> old_io::IoResult<bool> {
604 assert!(self.passed + self.failed + self.ignored + self.measured == self.total);
606 let success = self.failed == 0u;
608 try!(self.write_failures());
611 try!(self.write_plain("\ntest result: "));
613 // There's no parallelism at this point so it's safe to use color
614 try!(self.write_ok());
616 try!(self.write_failed());
618 let s = format!(". {} passed; {} failed; {} ignored; {} measured\n\n",
619 self.passed, self.failed, self.ignored, self.measured);
620 try!(self.write_plain(s.as_slice()));
// Renders bench results as "<median> ns/iter (+/- <max-min>)", appending
// "= N MB/s" when throughput was measured (the `if bs.mb_s != 0 {` branch
// selector and the mb_s argument appear elided in this listing).
625 pub fn fmt_bench_samples(bs: &BenchSamples) -> String {
627 format!("{:>9} ns/iter (+/- {}) = {} MB/s",
628 bs.ns_iter_summ.median as uint,
629 (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint,
632 format!("{:>9} ns/iter (+/- {})",
633 bs.ns_iter_summ.median as uint,
634 (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint)
638 // A simple console test runner
// Drives run_tests() and renders every TestEvent to the console state:
// counts results, records metrics/bench stats, collects failures, and
// finally returns Ok(success). The inner callback owns all rendering.
639 pub fn run_tests_console(opts: &TestOpts, tests: Vec<TestDescAndFn> ) -> old_io::IoResult<bool> {
641 fn callback<T: Writer>(event: &TestEvent,
642 st: &mut ConsoleTestState<T>) -> old_io::IoResult<()> {
643 match (*event).clone() {
644 TeFiltered(ref filtered_tests) => st.write_run_start(filtered_tests.len()),
645 TeWait(ref test, padding) => st.write_test_start(test, padding),
646 TeResult(test, result, stdout) => {
647 try!(st.write_log(&test, &result));
648 try!(st.write_result(&result));
// Result-tallying match; several arms (TrFailed, TrBench pattern heads,
// closing braces) appear elided in this listing.
650 TrOk => st.passed += 1,
651 TrIgnored => st.ignored += 1,
653 let tname = test.name.as_slice();
654 let MetricMap(mm) = mm;
// Each metric is namespaced as "<test name>.<metric key>".
657 .insert_metric(format!("{}.{}",
666 st.metrics.insert_metric(test.name.as_slice(),
667 bs.ns_iter_summ.median,
668 bs.ns_iter_summ.max - bs.ns_iter_summ.min);
673 st.failures.push((test, stdout));
681 let mut st = try!(ConsoleTestState::new(opts, None::<StdWriter>));
// Compute the widest padded test name so columns align; PadNone tests
// (the elided arm) contribute 0.
682 fn len_if_padded(t: &TestDescAndFn) -> uint {
683 match t.testfn.padding() {
685 PadOnLeft | PadOnRight => t.desc.name.as_slice().len(),
688 match tests.iter().max_by(|t|len_if_padded(*t)) {
690 let n = t.desc.name.as_slice();
691 st.max_name_len = n.len();
695 try!(run_tests(opts, tests, |x| callback(&x, &mut st)));
696 return st.write_run_finish();
// Unit test: write_failures() must list failing test names in sorted
// order even when they were recorded out of order ("b" before "a").
// (Struct-literal field initializers and closers appear elided below.)
700 fn should_sort_failures_before_printing_them() {
701 let test_a = TestDesc {
702 name: StaticTestName("a"),
704 should_fail: ShouldFail::No
707 let test_b = TestDesc {
708 name: StaticTestName("b"),
710 should_fail: ShouldFail::No
713 let mut st = ConsoleTestState {
715 out: Raw(Vec::new()),
723 metrics: MetricMap::new(),
724 failures: vec!((test_b, Vec::new()), (test_a, Vec::new()))
727 st.write_failures().unwrap();
728 let s = match st.out {
729 Raw(ref m) => String::from_utf8_lossy(&m[]),
730 Pretty(_) => unreachable!()
// "a" must appear before "b" in the rendered report.
733 let apos = s.find_str("a").unwrap();
734 let bpos = s.find_str("b").unwrap();
735 assert!(apos < bpos);
// Color is used when --color=always, never when --color=never (both arms
// elided here), and under auto only for serial runs on a tty.
738 fn use_color(opts: &TestOpts) -> bool {
740 AutoColor => get_concurrency() == 1 && old_io::stdout().get_ref().isatty(),
// Events streamed from run_tests() to the console callback:
// filtered-list announcement, per-test start, per-test result.
// (The `enum TestEvent {` header appears elided in this listing.)
748 TeFiltered(Vec<TestDesc> ),
749 TeWait(TestDesc, NamePadding),
750 TeResult(TestDesc, TestResult, Vec<u8> ),
// Message a test worker sends back: (metadata, outcome, captured output).
753 pub type MonitorMsg = (TestDesc, TestResult, Vec<u8> );
// Core scheduler: announces the filtered set, runs ordinary tests with up
// to `concurrency` in flight (benches/metrics are partitioned out and run
// serially afterwards), invoking `callback` for every event.
756 fn run_tests<F>(opts: &TestOpts,
757 tests: Vec<TestDescAndFn> ,
758 mut callback: F) -> old_io::IoResult<()> where
759 F: FnMut(TestEvent) -> old_io::IoResult<()>,
761 let filtered_tests = filter_tests(opts, tests);
762 let filtered_descs = filtered_tests.iter()
763 .map(|t| t.desc.clone())
766 try!(callback(TeFiltered(filtered_descs)));
// Plain test fns run concurrently; everything else (bench/metric) is
// deferred to the serial phase below.
768 let (filtered_tests, filtered_benchs_and_metrics): (Vec<_>, _) =
769 filtered_tests.into_iter().partition(|e| {
771 StaticTestFn(_) | DynTestFn(_) => true,
776 // It's tempting to just spawn all the tests at once, but since we have
777 // many tests that run in other processes we would be making a big mess.
778 let concurrency = get_concurrency();
780 let mut remaining = filtered_tests;
784 let (tx, rx) = channel::<MonitorMsg>();
// Keep up to `concurrency` tests in flight; `pending` tracks spawned but
// unreaped tests (its initialization and updates appear elided here).
786 while pending > 0 || !remaining.is_empty() {
787 while pending < concurrency && !remaining.is_empty() {
788 let test = remaining.pop().unwrap();
789 if concurrency == 1 {
790 // We are doing one test at a time so we can print the name
791 // of the test before we run it. Useful for debugging tests
792 // that hang forever.
793 try!(callback(TeWait(test.desc.clone(), test.testfn.padding())));
795 run_test(opts, !opts.run_tests, test, tx.clone());
799 let (desc, result, stdout) = rx.recv().unwrap();
// In parallel mode the name wasn't printed before the run, so emit the
// TeWait just before its result.
800 if concurrency != 1 {
801 try!(callback(TeWait(desc.clone(), PadNone)));
803 try!(callback(TeResult(desc, result, stdout)));
807 // All benchmarks run at the end, in serial.
808 // (this includes metric fns)
809 for b in filtered_benchs_and_metrics {
810 try!(callback(TeWait(b.desc.clone(), b.testfn.padding())));
811 run_test(opts, !opts.run_benchmarks, b, tx.clone());
812 let (test, result, stdout) = rx.recv().unwrap();
813 try!(callback(TeResult(test, result, stdout)));
// Number of tests to run in parallel: RUST_TEST_TASKS (must parse to a
// positive integer, otherwise panic) or the runtime's default scheduler
// thread count.
818 fn get_concurrency() -> uint {
820 match os::getenv("RUST_TEST_TASKS") {
822 let opt_n: Option<uint> = s.parse().ok();
824 Some(n) if n > 0 => n,
825 _ => panic!("RUST_TEST_TASKS is `{}`, should be a positive integer.", s)
829 rt::default_sched_threads()
// Narrows the test list per options: (1) keep only names containing the
// filter substring, (2) without --ignored drop ignored tests (the
// non-run_ignored arm appears elided), with --ignored keep ONLY ignored
// tests and flip their ignore flag off, (3) sort by name.
834 pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
835 let mut filtered = tests;
837 // Remove tests that don't match the test filter
838 filtered = match opts.filter {
840 Some(ref filter) => {
841 filtered.into_iter().filter(|test| {
842 test.desc.name.as_slice().contains(&filter[])
847 // Maybe pull out the ignored test and unignore them
848 filtered = if !opts.run_ignored {
// Rebuild each kept test with ignore forced to false; non-ignored tests
// map to None (that arm appears elided) and are dropped.
851 fn filter(test: TestDescAndFn) -> Option<TestDescAndFn> {
852 if test.desc.ignore {
853 let TestDescAndFn {desc, testfn} = test;
855 desc: TestDesc {ignore: false, ..desc},
862 filtered.into_iter().filter_map(|x| filter(x)).collect()
865 // Sort the tests alphabetically
866 filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));
// Executes one test/bench/metric fn and reports (desc, result, stdout)
// on `monitor_ch`. Ignored (or force-ignored) tests short-circuit with
// TrIgnored. Plain test fns run on a fresh thread with stdout/stderr
// captured through a channel unless `nocapture`; benches/metrics run
// inline on the calling thread.
871 pub fn run_test(opts: &TestOpts,
874 monitor_ch: Sender<MonitorMsg>) {
876 let TestDescAndFn {desc, testfn} = test;
878 if force_ignore || desc.ignore {
879 monitor_ch.send((desc, TrIgnored, Vec::new())).unwrap();
// Helper that spawns the test body with captured output and converts the
// join result through calc_result().
883 fn run_test_inner(desc: TestDesc,
884 monitor_ch: Sender<MonitorMsg>,
887 Thread::spawn(move || {
// stdout and stderr both feed the same channel; `reader` drains it after
// the test thread finishes.
888 let (tx, rx) = channel();
889 let mut reader = ChanReader::new(rx);
890 let stdout = ChanWriter::new(tx.clone());
891 let stderr = ChanWriter::new(tx);
892 let mut cfg = thread::Builder::new().name(match desc.name {
893 DynTestName(ref name) => name.clone().to_string(),
894 StaticTestName(name) => name.to_string(),
// With --nocapture the capture writers are simply dropped (the branch
// selector on `nocapture` appears elided in this listing).
897 drop((stdout, stderr));
899 cfg = cfg.stdout(box stdout as Box<Writer + Send>);
900 cfg = cfg.stderr(box stderr as Box<Writer + Send>);
903 let result_guard = cfg.scoped(move || { testfn.invoke(()) });
904 let stdout = reader.read_to_end().unwrap().into_iter().collect();
905 let test_result = calc_result(&desc, result_guard.join());
906 monitor_ch.send((desc.clone(), test_result, stdout)).unwrap();
// Dispatch on the test kind (the `match testfn {` opener appears elided).
911 DynBenchFn(bencher) => {
912 let bs = ::bench::benchmark(|harness| bencher.run(harness));
913 monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
916 StaticBenchFn(benchfn) => {
917 let bs = ::bench::benchmark(|harness| (benchfn.clone())(harness));
918 monitor_ch.send((desc, TrBench(bs), Vec::new())).unwrap();
// Metric fns fill a fresh MetricMap (the f.invoke/f(&mut mm) call lines
// appear elided for both arms).
922 let mut mm = MetricMap::new();
924 monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
927 StaticMetricFn(f) => {
928 let mut mm = MetricMap::new();
930 monitor_ch.send((desc, TrMetrics(mm), Vec::new())).unwrap();
933 DynTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture, f),
934 StaticTestFn(f) => run_test_inner(desc, monitor_ch, opts.nocapture,
935 Thunk::new(move|| f()))
// Maps the joined thread outcome to a TestResult honoring should_fail:
// a clean return with ShouldFail::No, or any panic with ShouldFail::Yes(None),
// is TrOk; Yes(Some(msg)) additionally requires the panic payload (String or
// &'static str) to contain `msg`. The fall-through `_ => TrFailed` arm
// appears elided in this listing.
939 fn calc_result(desc: &TestDesc, task_result: Result<(), Box<Any+Send>>) -> TestResult {
940 match (&desc.should_fail, task_result) {
941 (&ShouldFail::No, Ok(())) |
942 (&ShouldFail::Yes(None), Err(_)) => TrOk,
943 (&ShouldFail::Yes(Some(msg)), Err(ref err))
944 if err.downcast_ref::<String>()
946 .or_else(|| err.downcast_ref::<&'static str>().map(|e| *e))
947 .map(|e| e.contains(msg))
948 .unwrap_or(false) => TrOk,
// Empty metric map.
955 pub fn new() -> MetricMap {
956 MetricMap(BTreeMap::new())
959 /// Insert a named `value` (+/- `noise`) metric into the map. The value
960 /// must be non-negative. The `noise` indicates the uncertainty of the
961 /// metric, which doubles as the "noise range" of acceptable
962 /// pairwise-regressions on this named value, when comparing from one
963 /// metric to the next using `compare_to_old`.
965 /// If `noise` is positive, then it means this metric is of a value
966 /// you want to see grow smaller, so a change larger than `noise` in the
967 /// positive direction represents a regression.
969 /// If `noise` is negative, then it means this metric is of a value
970 /// you want to see grow larger, so a change larger than `noise` in the
971 /// negative direction represents a regression.
// (The `let m = Metric { ... }` construction appears elided here.)
972 pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
977 let MetricMap(ref mut map) = *self;
978 map.insert(name.to_string(), m);
// Renders all metrics as "name: value (+/- noise)" entries (the final
// join of `v` into one String appears elided in this listing).
981 pub fn fmt_metrics(&self) -> String {
982 let MetricMap(ref mm) = *self;
983 let v : Vec<String> = mm.iter()
984 .map(|(k,v)| format!("{}: {} (+/- {})", *k,
985 v.value as f64, v.noise as f64))
994 /// A function that is opaque to the optimizer, to allow benchmarks to
995 /// pretend to use outputs to assist in avoiding dead-code
998 /// This function is a no-op, and does not even read from `dummy`.
999 pub fn black_box<T>(dummy: T) -> T {
1000 // we need to "use" the argument in some way LLVM can't
// Empty asm taking &dummy as an input makes LLVM assume the value
// escapes, defeating dead-code elimination and const-folding.
// (The trailing `dummy` return expression appears elided here.)
1002 unsafe {asm!("" : : "r"(&dummy))}
1008 /// Callback for benchmark functions to run in their body.
// Times `self.iterations` invocations of `inner` with Duration::span,
// storing the elapsed time in self.dur (the inner 0..k loop and the
// black_box of each result appear elided in this listing).
1009 pub fn iter<T, F>(&mut self, mut inner: F) where F: FnMut() -> T {
1010 self.dur = Duration::span(|| {
1011 let k = self.iterations;
// Total elapsed nanoseconds of the last timed run.
1018 pub fn ns_elapsed(&mut self) -> u64 {
1019 self.dur.num_nanoseconds().unwrap() as u64
// Nanoseconds per iteration; guards both the explicit zero case and the
// division via max(iterations, 1).
1022 pub fn ns_per_iter(&mut self) -> u64 {
1023 if self.iterations == 0 {
1026 self.ns_elapsed() / cmp::max(self.iterations, 1)
// Runs the closure once with the iteration count fixed at n.
1030 pub fn bench_n<F>(&mut self, n: u64, f: F) where F: FnOnce(&mut Bencher) {
1031 self.iterations = n;
1035 // This is a more statistics-driven benchmark algorithm
// Adaptive benchmark loop: calibrates an iteration count targeting ~1ms
// per sample, then repeatedly collects two winsorized 50-sample summaries
// (at n and 5n iterations) until results converge or time runs out.
1036 pub fn auto_bench<F>(&mut self, mut f: F) -> stats::Summary<f64> where F: FnMut(&mut Bencher) {
1037 // Initial bench run to get ballpark figure.
1039 self.bench_n(n, |x| f(x));
1041 // Try to estimate iter count for 1ms falling back to 1m
1042 // iterations if first run took < 1ns.
1043 if self.ns_per_iter() == 0 {
1046 n = 1_000_000 / cmp::max(self.ns_per_iter(), 1);
1048 // if the first run took more than 1ms we don't want to just
1049 // be left doing 0 iterations on every loop. The unfortunate
1050 // side effect of not being able to do as many runs is
1051 // automatically handled by the statistical analysis below
1052 // (i.e. larger error bars).
1053 if n == 0 { n = 1; }
1055 let mut total_run = Duration::nanoseconds(0);
1056 let samples : &mut [f64] = &mut [0.0_f64; 50];
1058 let mut summ = None;
1059 let mut summ5 = None;
// One convergence round: 50 samples at n iterations, then 50 at 5n;
// winsorizing at 5% trims outlier samples before summarizing.
1061 let loop_run = Duration::span(|| {
1063 for p in &mut *samples {
1064 self.bench_n(n, |x| f(x));
1065 *p = self.ns_per_iter() as f64;
1068 stats::winsorize(samples, 5.0);
1069 summ = Some(stats::Summary::new(samples));
1071 for p in &mut *samples {
1072 self.bench_n(5 * n, |x| f(x));
1073 *p = self.ns_per_iter() as f64;
1076 stats::winsorize(samples, 5.0);
1077 summ5 = Some(stats::Summary::new(samples));
1079 let summ = summ.unwrap();
1080 let summ5 = summ5.unwrap();
1082 // If we've run for 100ms and seem to have converged to a
// Converged: low relative deviation and the n / 5n medians agree within
// the 5n run's median absolute deviation (the `return summ5;` and the
// loop-exit bookkeeping appear elided in this listing).
1084 if loop_run.num_milliseconds() > 100 &&
1085 summ.median_abs_dev_pct < 1.0 &&
1086 summ.median - summ5.median < summ5.median_abs_dev {
1090 total_run = total_run + loop_run;
1091 // Longest we ever run for is 3s.
1092 if total_run.num_seconds() > 3 {
1103 use std::time::Duration;
1104 use super::{Bencher, BenchSamples};
// Entry point used by run_test for #[bench] fns: builds a fresh Bencher,
// runs auto_bench, and derives MB/s from the median ns/iter and the
// bytes-per-iteration count (the BenchSamples construction tail appears
// elided in this listing).
1106 pub fn benchmark<F>(f: F) -> BenchSamples where F: FnMut(&mut Bencher) {
1107 let mut bs = Bencher {
1109 dur: Duration::nanoseconds(0),
1113 let ns_iter_summ = bs.auto_bench(f);
// Clamp to >= 1ns to avoid dividing by zero below.
1115 let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
1116 let iter_s = 1_000_000_000 / ns_iter;
1117 let mb_s = (bs.bytes * iter_s) / 1_000_000;
1120 ns_iter_summ: ns_iter_summ,
1128 use test::{TrFailed, TrIgnored, TrOk, filter_tests, parse_opts,
1129 TestDesc, TestDescAndFn, TestOpts, run_test,
1131 StaticTestName, DynTestName, DynTestFn, ShouldFail};
1132 use std::thunk::Thunk;
1133 use std::sync::mpsc::channel;
// Runs a failing, non-ignored test with run_ignored off and asserts the
// result is not TrOk. (Struct-literal closers and #[test]/ignore-field
// lines appear elided throughout this test region.)
1136 pub fn do_not_run_ignored_tests() {
1137 fn f() { panic!(); }
1138 let desc = TestDescAndFn {
1140 name: StaticTestName("whatever"),
1142 should_fail: ShouldFail::No,
1144 testfn: DynTestFn(Thunk::new(move|| f())),
1146 let (tx, rx) = channel();
1147 run_test(&TestOpts::new(), false, desc, tx);
1148 let (_, res, _) = rx.recv().unwrap();
1149 assert!(res != TrOk);
// An ignored test must come back as TrIgnored without running its body.
1153 pub fn ignored_tests_result_in_ignored() {
1155 let desc = TestDescAndFn {
1157 name: StaticTestName("whatever"),
1159 should_fail: ShouldFail::No,
1161 testfn: DynTestFn(Thunk::new(move|| f())),
1163 let (tx, rx) = channel();
1164 run_test(&TestOpts::new(), false, desc, tx);
1165 let (_, res, _) = rx.recv().unwrap();
1166 assert!(res == TrIgnored);
// A panicking fn with should_fail: Yes(None) must count as TrOk.
1170 fn test_should_fail() {
1171 fn f() { panic!(); }
1172 let desc = TestDescAndFn {
1174 name: StaticTestName("whatever"),
1176 should_fail: ShouldFail::Yes(None)
1178 testfn: DynTestFn(Thunk::new(move|| f())),
1180 let (tx, rx) = channel();
1181 run_test(&TestOpts::new(), false, desc, tx);
1182 let (_, res, _) = rx.recv().unwrap();
1183 assert!(res == TrOk);
// should_fail with an expected substring that the panic message contains
// must yield TrOk. (Struct closers/#[test] attributes elided in listing.)
1187 fn test_should_fail_good_message() {
1188 fn f() { panic!("an error message"); }
1189 let desc = TestDescAndFn {
1191 name: StaticTestName("whatever"),
1193 should_fail: ShouldFail::Yes(Some("error message"))
1195 testfn: DynTestFn(Thunk::new(move|| f())),
1197 let (tx, rx) = channel();
1198 run_test(&TestOpts::new(), false, desc, tx);
1199 let (_, res, _) = rx.recv().unwrap();
1200 assert!(res == TrOk);
// ...whereas a non-matching expected substring must yield TrFailed.
1204 fn test_should_fail_bad_message() {
1205 fn f() { panic!("an error message"); }
1206 let desc = TestDescAndFn {
1208 name: StaticTestName("whatever"),
1210 should_fail: ShouldFail::Yes(Some("foobar"))
1212 testfn: DynTestFn(Thunk::new(move|| f())),
1214 let (tx, rx) = channel();
1215 run_test(&TestOpts::new(), false, desc, tx);
1216 let (_, res, _) = rx.recv().unwrap();
1217 assert!(res == TrFailed);
// A should_fail test that returns normally must be TrFailed.
1221 fn test_should_fail_but_succeeds() {
1223 let desc = TestDescAndFn {
1225 name: StaticTestName("whatever"),
1227 should_fail: ShouldFail::Yes(None)
1229 testfn: DynTestFn(Thunk::new(move|| f())),
1231 let (tx, rx) = channel();
1232 run_test(&TestOpts::new(), false, desc, tx);
1233 let (_, res, _) = rx.recv().unwrap();
1234 assert!(res == TrFailed);
// parse_opts must set run_ignored when --ignored is passed after a
// free-argument filter. (Ok-arm of the match appears elided.)
1238 fn parse_ignored_flag() {
1239 let args = vec!("progname".to_string(),
1240 "filter".to_string(),
1241 "--ignored".to_string());
1242 let opts = match parse_opts(args.as_slice()) {
1244 _ => panic!("Malformed arg in parse_ignored_flag")
1246 assert!((opts.run_ignored));
1250 pub fn filter_for_ignored_option() {
1251 // When we run ignored tests the test filter should filter out all the
1252 // unignored tests and flip the ignore flag on the rest to false
1254 let mut opts = TestOpts::new();
1255 opts.run_tests = true;
1256 opts.run_ignored = true;
// Two tests: "1" (presumably ignored — its ignore field is elided here)
// and "2"; only the ignored one should survive, unignored.
1261 name: StaticTestName("1"),
1263 should_fail: ShouldFail::No,
1265 testfn: DynTestFn(Thunk::new(move|| {})),
1269 name: StaticTestName("2"),
1271 should_fail: ShouldFail::No,
1273 testfn: DynTestFn(Thunk::new(move|| {})),
1275 let filtered = filter_tests(&opts, tests);
1277 assert_eq!(filtered.len(), 1);
1278 assert_eq!(filtered[0].desc.name.to_string(),
1280 assert!(filtered[0].desc.ignore == false);
// filter_tests must return tests sorted lexicographically by name:
// builds tests from an unsorted name list and checks the filtered order
// against the expected sorted list pairwise.
1284 pub fn sort_tests() {
1285 let mut opts = TestOpts::new();
1286 opts.run_tests = true;
1289 vec!("sha1::test".to_string(),
1290 "int::test_to_str".to_string(),
1291 "int::test_pow".to_string(),
1292 "test::do_not_run_ignored_tests".to_string(),
1293 "test::ignored_tests_result_in_ignored".to_string(),
1294 "test::first_free_arg_should_be_a_filter".to_string(),
1295 "test::parse_ignored_flag".to_string(),
1296 "test::filter_for_ignored_option".to_string(),
1297 "test::sort_tests".to_string());
1301 let mut tests = Vec::new();
1302 for name in &names {
1303 let test = TestDescAndFn {
1305 name: DynTestName((*name).clone()),
1307 should_fail: ShouldFail::No,
1309 testfn: DynTestFn(Thunk::new(testfn)),
1315 let filtered = filter_tests(&opts, tests);
// Expected order is the same set, alphabetically sorted.
1318 vec!("int::test_pow".to_string(),
1319 "int::test_to_str".to_string(),
1320 "sha1::test".to_string(),
1321 "test::do_not_run_ignored_tests".to_string(),
1322 "test::filter_for_ignored_option".to_string(),
1323 "test::first_free_arg_should_be_a_filter".to_string(),
1324 "test::ignored_tests_result_in_ignored".to_string(),
1325 "test::parse_ignored_flag".to_string(),
1326 "test::sort_tests".to_string());
1328 for (a, b) in expected.iter().zip(filtered.iter()) {
1329 assert!(*a == b.desc.name.to_string());
1334 pub fn test_metricmap_compare() {
1335 let mut m1 = MetricMap::new();
1336 let mut m2 = MetricMap::new();
1337 m1.insert_metric("in-both-noise", 1000.0, 200.0);
1338 m2.insert_metric("in-both-noise", 1100.0, 200.0);
1340 m1.insert_metric("in-first-noise", 1000.0, 2.0);
1341 m2.insert_metric("in-second-noise", 1000.0, 2.0);
1343 m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
1344 m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);
1346 m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
1347 m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);
1349 m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
1350 m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);
1352 m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
1353 m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);