git.lizzy.rs Git - rust.git/commitdiff
move extra::test to libtest
author Liigo Zhuang <com.liigo@gmail.com>
Fri, 14 Feb 2014 01:49:11 +0000 (09:49 +0800)
committer Liigo Zhuang <com.liigo@gmail.com>
Thu, 20 Feb 2014 08:03:58 +0000 (16:03 +0800)
51 files changed:
mk/crates.mk
src/compiletest/compiletest.rs
src/compiletest/runtest.rs
src/doc/guide-testing.md
src/doc/rustdoc.md
src/libarena/lib.rs
src/libcollections/bitv.rs
src/libcollections/deque.rs
src/libcollections/dlist.rs
src/libcollections/lib.rs
src/libcollections/ringbuf.rs
src/libcollections/smallintmap.rs
src/libcollections/treemap.rs
src/libextra/lib.rs
src/libextra/stats.rs
src/libextra/test.rs [deleted file]
src/libnum/bigint.rs
src/libnum/rational.rs
src/librustc/front/test.rs
src/librustc/util/sha2.rs
src/librustdoc/lib.rs
src/librustdoc/plugins.rs
src/librustdoc/test.rs
src/libserialize/base64.rs
src/libserialize/ebml.rs
src/libserialize/hex.rs
src/libserialize/lib.rs
src/libstd/c_str.rs
src/libstd/io/buffered.rs
src/libstd/io/extensions.rs
src/libstd/mem.rs
src/libstd/num/f32.rs
src/libstd/num/f64.rs
src/libstd/num/mod.rs
src/libstd/num/strconv.rs
src/libstd/ops.rs
src/libstd/path/posix.rs
src/libstd/rand/distributions/exponential.rs
src/libstd/rand/distributions/gamma.rs
src/libstd/rand/distributions/normal.rs
src/libstd/rand/mod.rs
src/libstd/rt/global_heap.rs
src/libstd/rt/local_heap.rs
src/libstd/str.rs
src/libstd/trie.rs
src/libstd/vec.rs
src/libsyntax/ast.rs
src/libsyntax/lib.rs
src/libsyntax/parse/mod.rs
src/libtest/lib.rs [new file with mode: 0644]
src/libuuid/lib.rs

index 8af8f966e30f414e80deb008a4a26ab19bd90876..f8bb9fbb4085b92c96de28f9b4c15a30ba55d335 100644 (file)
@@ -50,7 +50,7 @@
 ################################################################################
 
 TARGET_CRATES := std extra green rustuv native flate arena glob term semver \
-                 uuid serialize sync getopts collections num
+                 uuid serialize sync getopts collections num test
 HOST_CRATES := syntax rustc rustdoc fourcc
 CRATES := $(TARGET_CRATES) $(HOST_CRATES)
 TOOLS := compiletest rustdoc rustc
@@ -63,7 +63,8 @@ DEPS_native := std
 DEPS_syntax := std term serialize collections
 DEPS_rustc := syntax native:rustllvm flate arena serialize sync getopts \
               collections extra
-DEPS_rustdoc := rustc native:sundown serialize sync getopts collections
+DEPS_rustdoc := rustc native:sundown serialize sync getopts collections \
+                test
 DEPS_flate := std native:miniz
 DEPS_arena := std collections
 DEPS_glob := std
@@ -76,8 +77,9 @@ DEPS_getopts := std
 DEPS_collections := std serialize
 DEPS_fourcc := syntax std
 DEPS_num := std extra
+DEPS_test := std extra collections getopts serialize term
 
-TOOL_DEPS_compiletest := extra green rustuv getopts
+TOOL_DEPS_compiletest := test green rustuv getopts
 TOOL_DEPS_rustdoc := rustdoc green rustuv
 TOOL_DEPS_rustc := rustc green rustuv
 TOOL_SOURCE_compiletest := $(S)src/compiletest/compiletest.rs
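The new `DEPS_test` line records libtest's upstream crates in the build graph; at the source level, library crates that only need the harness for their `#[cfg(test)]` code now link it conditionally, as the hunks below show. A minimal sketch of that consumer-side pattern (the module and benchmark names here are illustrative, not taken from the patch):

~~~
#[cfg(test)]
mod tests {
    // The test/benchmark harness now lives in its own crate instead of libextra.
    extern crate test;
    use self::test::BenchHarness;

    #[bench]
    fn bench_noop(bh: &mut BenchHarness) {
        // Time an (almost) empty closure.
        bh.iter(|| 1u);
    }
}
~~~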
index 15aa68aaebbdf77b345a25df3e61b12673db1be1..c5ec19813065c9d1e2b5a615bae7cbd27817f5b2 100644 (file)
 #[allow(non_camel_case_types)];
 #[deny(warnings)];
 
-extern crate extra;
+extern crate test;
 extern crate getopts;
 
 use std::os;
 use std::io;
 use std::io::fs;
-
 use getopts::{optopt, optflag, reqopt};
-use extra::test;
-
 use common::config;
 use common::mode_run_pass;
 use common::mode_run_fail;
index 8b45d98786445da3c6b552f48bfad5e7ff47587d..83d6bf742c44f2301c032d43b2f33b0c0be1cf8e 100644 (file)
@@ -34,7 +34,7 @@
 use std::task;
 use std::vec;
 
-use extra::test::MetricMap;
+use test::MetricMap;
 
 pub fn run(config: config, testfile: ~str) {
 
index e51988565b0312c090fd2f48e3005012983b6d67..f129f7db729943e44949e41acfc8c0c3ed3beebc 100644 (file)
@@ -170,7 +170,7 @@ runner.
 
 The type signature of a benchmark function differs from a unit test:
 it takes a mutable reference to type
-`extra::test::BenchHarness`. Inside the benchmark function, any
+`test::BenchHarness`. Inside the benchmark function, any
 time-variable or "setup" code should execute first, followed by a call
 to `iter` on the benchmark harness, passing a closure that contains
 the portion of the benchmark you wish to actually measure the
@@ -185,9 +185,10 @@ amount.
 For example:
 
 ~~~
-extern crate extra;
+extern crate test;
+
 use std::vec;
-use extra::test::BenchHarness;
+use test::BenchHarness;
 
 #[bench]
 fn bench_sum_1024_ints(b: &mut BenchHarness) {
@@ -243,8 +244,8 @@ recognize that some calculation has no external effects and remove
 it entirely.
 
 ~~~
-extern crate extra;
-use extra::test::BenchHarness;
+extern crate test;
+use test::BenchHarness;
 
 #[bench]
 fn bench_xor_1000_ints(bh: &mut BenchHarness) {
@@ -273,15 +274,15 @@ example above by adjusting the `bh.iter` call to
 bh.iter(|| range(0, 1000).fold(0, |old, new| old ^ new))
 ~~~
 
-Or, the other option is to call the generic `extra::test::black_box`
+Or, the other option is to call the generic `test::black_box`
 function, which is an opaque "black box" to the optimizer and so
 forces it to consider any argument as used.
 
 ~~~
-use extra::test::black_box
+extern crate test;
 
 bh.iter(|| {
-        black_box(range(0, 1000).fold(0, |old, new| old ^ new));
+        test::black_box(range(0, 1000).fold(0, |old, new| old ^ new));
     });
 ~~~
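Read in isolation, that last snippet omits its enclosing items; assembled from the pieces the guide already shows, a self-contained benchmark under the new crate layout would look roughly like this (nothing below goes beyond what the surrounding hunks contain):

~~~
extern crate test;

use test::BenchHarness;

#[bench]
fn bench_xor_1000_ints(bh: &mut BenchHarness) {
    bh.iter(|| {
        // black_box is opaque to the optimizer, so the fold cannot be
        // optimized away as dead code.
        test::black_box(range(0, 1000).fold(0, |old, new| old ^ new));
    });
}
~~~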
 
index 3809dcd3f48ade0325e9e1f80a88591a5b577b45..77aa9273734c3a2f0de43bb30be65af2f787f899 100644 (file)
@@ -154,7 +154,7 @@ testing this code, the `fib` function will be included (so it can compile).
 
 Running tests often requires some special configuration to filter tests, find
 libraries, or try running ignored examples. The testing framework that rustdoc
-uses is build on `extra::test`, which is also used when you compile crates with
+uses is build on crate `test`, which is also used when you compile crates with
 rustc's `--test` flag. Extra arguments can be passed to rustdoc's test harness
 with the `--test-args` flag.
 
index d827e45eddb8777d79424d2b97b17170ae109ebb..0f291a56a7099102764fbc2c5dd506b3643de4d3 100644 (file)
@@ -503,10 +503,10 @@ fn drop(&mut self) {
 }
 
 #[cfg(test)]
-mod test {
-    extern crate extra;
+mod tests {
+    extern crate test;
+    use self::test::BenchHarness;
     use super::{Arena, TypedArena};
-    use self::extra::test::BenchHarness;
 
     struct Point {
         x: int,
index bee9ec6240b1665b53c155c5c31c6a7bda43cb3e..0e14b28eda3266a1f2bacc2a0bda3941af15d65e 100644 (file)
@@ -938,7 +938,8 @@ fn size_hint(&self) -> (uint, Option<uint>) {
 
 #[cfg(test)]
 mod tests {
-    use extra::test::BenchHarness;
+    extern crate test;
+    use self::test::BenchHarness;
 
     use bitv::{Bitv, SmallBitv, BigBitv, BitvSet, from_bools, from_fn,
                from_bytes};
index 14c6bc5ce281abd87ca3440e4a8d4ab4ace7cdb5..ac3861f4e3668ac8b7c44916efc0d764b7d334aa 100644 (file)
@@ -41,10 +41,11 @@ pub trait Deque<T> : Mutable {
 
 #[cfg(test)]
 pub mod bench {
+    extern crate test;
+    use self::test::BenchHarness;
     use std::container::MutableMap;
     use std::{vec, rand};
     use std::rand::Rng;
-    use extra::test::BenchHarness;
 
     pub fn insert_rand_n<M:MutableMap<uint,uint>>(n: uint,
                                                   map: &mut M,
index 28e7b9460dc7aee9571b64829d2d491d81ee2fd7..591561d775eb234544d923a4f2fc737edbd225ae 100644 (file)
@@ -657,8 +657,9 @@ fn decode(d: &mut D) -> DList<T> {
 
 #[cfg(test)]
 mod tests {
+    extern crate test;
+    use self::test::BenchHarness;
     use deque::Deque;
-    use extra::test;
     use std::rand;
     use super::{DList, Node, ListInsertion};
 
index e97eeac4f6628b4ce84f7c04ac3b58f5db42c384..5b9ded3d1c7db2e29aef817abdabb5f63ce065eb 100644 (file)
@@ -20,7 +20,7 @@
 #[feature(macro_rules, managed_boxes)];
 
 extern crate serialize;
-#[cfg(test)] extern crate extra; // benchmark tests need this
+#[cfg(test)] extern crate test;
 
 pub use bitv::Bitv;
 pub use btree::BTree;
index 325f55b463451dc81dc4e6f0a3860e4bdb0b0e4a..8abc5a3ca115bab3ac59d7f9672381cc94793c8d 100644 (file)
@@ -431,8 +431,9 @@ fn decode(d: &mut D) -> RingBuf<T> {
 
 #[cfg(test)]
 mod tests {
+    extern crate test;
+    use self::test::BenchHarness;
     use deque::Deque;
-    use extra::test;
     use std::clone::Clone;
     use std::cmp::Eq;
     use super::RingBuf;
index 714bce9d03221107f8d2a4c4a0c852771366c8b0..d7b0e66aad7e3e43112cd35bbed345ecb9fc289a 100644 (file)
@@ -470,9 +470,9 @@ fn test_move_iter() {
 
 #[cfg(test)]
 mod bench {
-
+    extern crate test;
+    use self::test::BenchHarness;
     use super::SmallIntMap;
-    use extra::test::BenchHarness;
     use deque::bench::{insert_rand_n, insert_seq_n, find_rand_n, find_seq_n};
 
     // Find seq
index b4ecd02a8fb52428c44cf903b0a71d7fcfea2bb0..a4b2357960656ef012c407051e9f0715b591d665 100644 (file)
@@ -1494,9 +1494,9 @@ fn test_from_iter() {
 
 #[cfg(test)]
 mod bench {
-
+    extern crate test;
+    use self::test::BenchHarness;
     use super::TreeMap;
-    use extra::test::BenchHarness;
     use deque::bench::{insert_rand_n, insert_seq_n, find_rand_n, find_seq_n};
 
     // Find seq
index be7aa216e30243d845ecdf17adc337b4dcf16203..dc5624f9de9470432ec8b50c250279a07d7aa125 100644 (file)
 
 extern crate sync;
 extern crate serialize;
-
 extern crate collections;
 
 // Utility modules
-
 pub mod c_vec;
-
-// And ... other stuff
-
 pub mod url;
 pub mod json;
 pub mod tempfile;
 #[cfg(unicode)]
 mod unicode;
 
-// Compiler support modules
-
-pub mod test;
-
 // A curious inner-module that's not exported that contains the binding
 // 'extra' so that macro-expanded references to extra::serialize and such
 // can be resolved within libextra.
 #[doc(hidden)]
 pub mod extra {
     pub use serialize;
-    pub use test;
 }
+
index 799157f9a1a7842054f5bc4b94f344707d5c870e..1687f5550b45d59c1cd857e4ad4dcc3adce2daa6 100644 (file)
@@ -1025,7 +1025,8 @@ fn test_sum_f64_between_ints_that_sum_to_0() {
 
 #[cfg(test)]
 mod bench {
-    use extra::test::BenchHarness;
+    extern crate test;
+    use self::test::BenchHarness;
     use std::vec;
     use stats::Stats;
 
diff --git a/src/libextra/test.rs b/src/libextra/test.rs
deleted file mode 100644 (file)
index 4b883d1..0000000
+++ /dev/null
@@ -1,1553 +0,0 @@
-// Copyright 2012-2013 The Rust Project Developers. See the COPYRIGHT
-// file at the top-level directory of this distribution and at
-// http://rust-lang.org/COPYRIGHT.
-//
-// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
-// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
-// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
-// option. This file may not be copied, modified, or distributed
-// except according to those terms.
-
-#[doc(hidden)];
-
-// Support code for rustc's built in test runner generator. Currently,
-// none of this is meant for users. It is intended to support the
-// simplest interface possible for representing and running tests
-// while providing a base that other test frameworks may build off of.
-
-extern crate getopts;
-extern crate term;
-
-use json::ToJson;
-use json;
-use serialize::Decodable;
-use stats::Stats;
-use stats;
-use time::precise_time_ns;
-use collections::TreeMap;
-
-use std::cmp;
-use std::io;
-use std::io::{File, PortReader, ChanWriter};
-use std::io::stdio::StdWriter;
-use std::str;
-use std::task;
-use std::to_str::ToStr;
-use std::f64;
-use std::os;
-
-// The name of a test. By convention this follows the rules for rust
-// paths; i.e. it should be a series of identifiers separated by double
-// colons. This way if some test runner wants to arrange the tests
-// hierarchically it may.
-
-#[deriving(Clone)]
-pub enum TestName {
-    StaticTestName(&'static str),
-    DynTestName(~str)
-}
-impl ToStr for TestName {
-    fn to_str(&self) -> ~str {
-        match (*self).clone() {
-            StaticTestName(s) => s.to_str(),
-            DynTestName(s) => s.to_str()
-        }
-    }
-}
-
-#[deriving(Clone)]
-enum NamePadding { PadNone, PadOnLeft, PadOnRight }
-
-impl TestDesc {
-    fn padded_name(&self, column_count: uint, align: NamePadding) -> ~str {
-        use std::num::Saturating;
-        let name = self.name.to_str();
-        let fill = column_count.saturating_sub(name.len());
-        let pad = " ".repeat(fill);
-        match align {
-            PadNone => name,
-            PadOnLeft => pad.append(name),
-            PadOnRight => name.append(pad),
-        }
-    }
-}
-
-/// Represents a benchmark function.
-pub trait TDynBenchFn {
-    fn run(&self, harness: &mut BenchHarness);
-}
-
-// A function that runs a test. If the function returns successfully,
-// the test succeeds; if the function fails then the test fails. We
-// may need to come up with a more clever definition of test in order
-// to support isolation of tests into tasks.
-pub enum TestFn {
-    StaticTestFn(extern fn()),
-    StaticBenchFn(extern fn(&mut BenchHarness)),
-    StaticMetricFn(proc(&mut MetricMap)),
-    DynTestFn(proc()),
-    DynMetricFn(proc(&mut MetricMap)),
-    DynBenchFn(~TDynBenchFn)
-}
-
-impl TestFn {
-    fn padding(&self) -> NamePadding {
-        match self {
-            &StaticTestFn(..)   => PadNone,
-            &StaticBenchFn(..)  => PadOnRight,
-            &StaticMetricFn(..) => PadOnRight,
-            &DynTestFn(..)      => PadNone,
-            &DynMetricFn(..)    => PadOnRight,
-            &DynBenchFn(..)     => PadOnRight,
-        }
-    }
-}
-
-// Structure passed to BenchFns
-pub struct BenchHarness {
-    priv iterations: u64,
-    priv ns_start: u64,
-    priv ns_end: u64,
-    bytes: u64
-}
-
-// The definition of a single test. A test runner will run a list of
-// these.
-#[deriving(Clone)]
-pub struct TestDesc {
-    name: TestName,
-    ignore: bool,
-    should_fail: bool
-}
-
-pub struct TestDescAndFn {
-    desc: TestDesc,
-    testfn: TestFn,
-}
-
-#[deriving(Clone, Encodable, Decodable, Eq)]
-pub struct Metric {
-    priv value: f64,
-    priv noise: f64
-}
-
-#[deriving(Eq)]
-pub struct MetricMap(TreeMap<~str,Metric>);
-
-impl Clone for MetricMap {
-    fn clone(&self) -> MetricMap {
-        let MetricMap(ref map) = *self;
-        MetricMap(map.clone())
-    }
-}
-
-/// Analysis of a single change in metric
-#[deriving(Eq)]
-pub enum MetricChange {
-    LikelyNoise,
-    MetricAdded,
-    MetricRemoved,
-    Improvement(f64),
-    Regression(f64)
-}
-
-pub type MetricDiff = TreeMap<~str,MetricChange>;
-
-// The default console test runner. It accepts the command line
-// arguments and a vector of test_descs.
-pub fn test_main(args: &[~str], tests: ~[TestDescAndFn]) {
-    let opts =
-        match parse_opts(args) {
-            Some(Ok(o)) => o,
-            Some(Err(msg)) => fail!("{}", msg),
-            None => return
-        };
-    match run_tests_console(&opts, tests) {
-        Ok(true) => {}
-        Ok(false) => fail!("Some tests failed"),
-        Err(e) => fail!("io error when running tests: {}", e),
-    }
-}
-
-// A variant optimized for invocation with a static test vector.
-// This will fail (intentionally) when fed any dynamic tests, because
-// it is copying the static values out into a dynamic vector and cannot
-// copy dynamic values. It is doing this because from this point on
-// a ~[TestDescAndFn] is used in order to effect ownership-transfer
-// semantics into parallel test runners, which in turn requires a ~[]
-// rather than a &[].
-pub fn test_main_static(args: &[~str], tests: &[TestDescAndFn]) {
-    let owned_tests = tests.map(|t| {
-        match t.testfn {
-            StaticTestFn(f) =>
-            TestDescAndFn { testfn: StaticTestFn(f), desc: t.desc.clone() },
-
-            StaticBenchFn(f) =>
-            TestDescAndFn { testfn: StaticBenchFn(f), desc: t.desc.clone() },
-
-            _ => {
-                fail!("non-static tests passed to test::test_main_static");
-            }
-        }
-    });
-    test_main(args, owned_tests)
-}
-
-pub struct TestOpts {
-    filter: Option<~str>,
-    run_ignored: bool,
-    run_tests: bool,
-    run_benchmarks: bool,
-    ratchet_metrics: Option<Path>,
-    ratchet_noise_percent: Option<f64>,
-    save_metrics: Option<Path>,
-    test_shard: Option<(uint,uint)>,
-    logfile: Option<Path>
-}
-
-/// Result of parsing the options.
-pub type OptRes = Result<TestOpts, ~str>;
-
-fn optgroups() -> ~[getopts::OptGroup] {
-    ~[getopts::optflag("", "ignored", "Run ignored tests"),
-      getopts::optflag("", "test", "Run tests and not benchmarks"),
-      getopts::optflag("", "bench", "Run benchmarks instead of tests"),
-      getopts::optflag("h", "help", "Display this message (longer with --help)"),
-      getopts::optopt("", "save-metrics", "Location to save bench metrics",
-                     "PATH"),
-      getopts::optopt("", "ratchet-metrics",
-                     "Location to load and save metrics from. The metrics \
-                      loaded are cause benchmarks to fail if they run too \
-                      slowly", "PATH"),
-      getopts::optopt("", "ratchet-noise-percent",
-                     "Tests within N% of the recorded metrics will be \
-                      considered as passing", "PERCENTAGE"),
-      getopts::optopt("", "logfile", "Write logs to the specified file instead \
-                          of stdout", "PATH"),
-      getopts::optopt("", "test-shard", "run shard A, of B shards, worth of the testsuite",
-                     "A.B")]
-}
-
-fn usage(binary: &str, helpstr: &str) {
-    let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
-    println!("{}", getopts::usage(message, optgroups()));
-    println!("");
-    if helpstr == "help" {
-        println!("{}", "\
-The FILTER is matched against the name of all tests to run, and if any tests
-have a substring match, only those tests are run.
-
-By default, all tests are run in parallel. This can be altered with the
-RUST_TEST_TASKS environment variable when running tests (set it to 1).
-
-Test Attributes:
-
-    #[test]        - Indicates a function is a test to be run. This function
-                     takes no arguments.
-    #[bench]       - Indicates a function is a benchmark to be run. This
-                     function takes one argument (extra::test::BenchHarness).
-    #[should_fail] - This function (also labeled with #[test]) will only pass if
-                     the code causes a failure (an assertion failure or fail!)
-    #[ignore]      - When applied to a function which is already attributed as a
-                     test, then the test runner will ignore these tests during
-                     normal test runs. Running with --ignored will run these
-                     tests. This may also be written as #[ignore(cfg(...))] to
-                     ignore the test on certain configurations.");
-    }
-}
-
-// Parses command line arguments into test options
-pub fn parse_opts(args: &[~str]) -> Option<OptRes> {
-    let args_ = args.tail();
-    let matches =
-        match getopts::getopts(args_, optgroups()) {
-          Ok(m) => m,
-          Err(f) => return Some(Err(f.to_err_msg()))
-        };
-
-    if matches.opt_present("h") { usage(args[0], "h"); return None; }
-    if matches.opt_present("help") { usage(args[0], "help"); return None; }
-
-    let filter =
-        if matches.free.len() > 0 {
-            Some((matches).free[0].clone())
-        } else {
-            None
-        };
-
-    let run_ignored = matches.opt_present("ignored");
-
-    let logfile = matches.opt_str("logfile");
-    let logfile = logfile.map(|s| Path::new(s));
-
-    let run_benchmarks = matches.opt_present("bench");
-    let run_tests = ! run_benchmarks ||
-        matches.opt_present("test");
-
-    let ratchet_metrics = matches.opt_str("ratchet-metrics");
-    let ratchet_metrics = ratchet_metrics.map(|s| Path::new(s));
-
-    let ratchet_noise_percent = matches.opt_str("ratchet-noise-percent");
-    let ratchet_noise_percent = ratchet_noise_percent.map(|s| from_str::<f64>(s).unwrap());
-
-    let save_metrics = matches.opt_str("save-metrics");
-    let save_metrics = save_metrics.map(|s| Path::new(s));
-
-    let test_shard = matches.opt_str("test-shard");
-    let test_shard = opt_shard(test_shard);
-
-    let test_opts = TestOpts {
-        filter: filter,
-        run_ignored: run_ignored,
-        run_tests: run_tests,
-        run_benchmarks: run_benchmarks,
-        ratchet_metrics: ratchet_metrics,
-        ratchet_noise_percent: ratchet_noise_percent,
-        save_metrics: save_metrics,
-        test_shard: test_shard,
-        logfile: logfile
-    };
-
-    Some(Ok(test_opts))
-}
-
-pub fn opt_shard(maybestr: Option<~str>) -> Option<(uint,uint)> {
-    match maybestr {
-        None => None,
-        Some(s) => {
-            let vector = s.split('.').to_owned_vec();
-            if vector.len() == 2 {
-                match (from_str::<uint>(vector[0]),
-                       from_str::<uint>(vector[1])) {
-                    (Some(a), Some(b)) => Some((a, b)),
-                    _ => None
-                }
-            } else {
-                None
-            }
-        }
-    }
-}
-
-
-#[deriving(Clone, Eq)]
-pub struct BenchSamples {
-    priv ns_iter_summ: stats::Summary,
-    priv mb_s: uint
-}
-
-#[deriving(Clone, Eq)]
-pub enum TestResult {
-    TrOk,
-    TrFailed,
-    TrIgnored,
-    TrMetrics(MetricMap),
-    TrBench(BenchSamples),
-}
-
-enum OutputLocation<T> {
-    Pretty(term::Terminal<T>),
-    Raw(T),
-}
-
-struct ConsoleTestState<T> {
-    log_out: Option<File>,
-    out: OutputLocation<T>,
-    use_color: bool,
-    total: uint,
-    passed: uint,
-    failed: uint,
-    ignored: uint,
-    measured: uint,
-    metrics: MetricMap,
-    failures: ~[(TestDesc, ~[u8])],
-    max_name_len: uint, // number of columns to fill when aligning names
-}
-
-impl<T: Writer> ConsoleTestState<T> {
-    pub fn new(opts: &TestOpts,
-               _: Option<T>) -> io::IoResult<ConsoleTestState<StdWriter>> {
-        let log_out = match opts.logfile {
-            Some(ref path) => Some(if_ok!(File::create(path))),
-            None => None
-        };
-        let out = match term::Terminal::new(io::stdout()) {
-            Err(_) => Raw(io::stdout()),
-            Ok(t) => Pretty(t)
-        };
-        Ok(ConsoleTestState {
-            out: out,
-            log_out: log_out,
-            use_color: use_color(),
-            total: 0u,
-            passed: 0u,
-            failed: 0u,
-            ignored: 0u,
-            measured: 0u,
-            metrics: MetricMap::new(),
-            failures: ~[],
-            max_name_len: 0u,
-        })
-    }
-
-    pub fn write_ok(&mut self) -> io::IoResult<()> {
-        self.write_pretty("ok", term::color::GREEN)
-    }
-
-    pub fn write_failed(&mut self) -> io::IoResult<()> {
-        self.write_pretty("FAILED", term::color::RED)
-    }
-
-    pub fn write_ignored(&mut self) -> io::IoResult<()> {
-        self.write_pretty("ignored", term::color::YELLOW)
-    }
-
-    pub fn write_metric(&mut self) -> io::IoResult<()> {
-        self.write_pretty("metric", term::color::CYAN)
-    }
-
-    pub fn write_bench(&mut self) -> io::IoResult<()> {
-        self.write_pretty("bench", term::color::CYAN)
-    }
-
-    pub fn write_added(&mut self) -> io::IoResult<()> {
-        self.write_pretty("added", term::color::GREEN)
-    }
-
-    pub fn write_improved(&mut self) -> io::IoResult<()> {
-        self.write_pretty("improved", term::color::GREEN)
-    }
-
-    pub fn write_removed(&mut self) -> io::IoResult<()> {
-        self.write_pretty("removed", term::color::YELLOW)
-    }
-
-    pub fn write_regressed(&mut self) -> io::IoResult<()> {
-        self.write_pretty("regressed", term::color::RED)
-    }
-
-    pub fn write_pretty(&mut self,
-                        word: &str,
-                        color: term::color::Color) -> io::IoResult<()> {
-        match self.out {
-            Pretty(ref mut term) => {
-                if self.use_color {
-                    if_ok!(term.fg(color));
-                }
-                if_ok!(term.write(word.as_bytes()));
-                if self.use_color {
-                    if_ok!(term.reset());
-                }
-                Ok(())
-            }
-            Raw(ref mut stdout) => stdout.write(word.as_bytes())
-        }
-    }
-
-    pub fn write_plain(&mut self, s: &str) -> io::IoResult<()> {
-        match self.out {
-            Pretty(ref mut term) => term.write(s.as_bytes()),
-            Raw(ref mut stdout) => stdout.write(s.as_bytes())
-        }
-    }
-
-    pub fn write_run_start(&mut self, len: uint) -> io::IoResult<()> {
-        self.total = len;
-        let noun = if len != 1 { &"tests" } else { &"test" };
-        self.write_plain(format!("\nrunning {} {}\n", len, noun))
-    }
-
-    pub fn write_test_start(&mut self, test: &TestDesc,
-                            align: NamePadding) -> io::IoResult<()> {
-        let name = test.padded_name(self.max_name_len, align);
-        self.write_plain(format!("test {} ... ", name))
-    }
-
-    pub fn write_result(&mut self, result: &TestResult) -> io::IoResult<()> {
-        if_ok!(match *result {
-            TrOk => self.write_ok(),
-            TrFailed => self.write_failed(),
-            TrIgnored => self.write_ignored(),
-            TrMetrics(ref mm) => {
-                if_ok!(self.write_metric());
-                self.write_plain(format!(": {}", fmt_metrics(mm)))
-            }
-            TrBench(ref bs) => {
-                if_ok!(self.write_bench());
-                self.write_plain(format!(": {}", fmt_bench_samples(bs)))
-            }
-        });
-        self.write_plain("\n")
-    }
-
-    pub fn write_log(&mut self, test: &TestDesc,
-                     result: &TestResult) -> io::IoResult<()> {
-        match self.log_out {
-            None => Ok(()),
-            Some(ref mut o) => {
-                let s = format!("{} {}\n", match *result {
-                        TrOk => ~"ok",
-                        TrFailed => ~"failed",
-                        TrIgnored => ~"ignored",
-                        TrMetrics(ref mm) => fmt_metrics(mm),
-                        TrBench(ref bs) => fmt_bench_samples(bs)
-                    }, test.name.to_str());
-                o.write(s.as_bytes())
-            }
-        }
-    }
-
-    pub fn write_failures(&mut self) -> io::IoResult<()> {
-        if_ok!(self.write_plain("\nfailures:\n"));
-        let mut failures = ~[];
-        let mut fail_out  = ~"";
-        for &(ref f, ref stdout) in self.failures.iter() {
-            failures.push(f.name.to_str());
-            if stdout.len() > 0 {
-                fail_out.push_str(format!("---- {} stdout ----\n\t",
-                                  f.name.to_str()));
-                let output = str::from_utf8_lossy(*stdout);
-                fail_out.push_str(output.as_slice().replace("\n", "\n\t"));
-                fail_out.push_str("\n");
-            }
-        }
-        if fail_out.len() > 0 {
-            if_ok!(self.write_plain("\n"));
-            if_ok!(self.write_plain(fail_out));
-        }
-
-        if_ok!(self.write_plain("\nfailures:\n"));
-        failures.sort();
-        for name in failures.iter() {
-            if_ok!(self.write_plain(format!("    {}\n", name.to_str())));
-        }
-        Ok(())
-    }
-
-    pub fn write_metric_diff(&mut self, diff: &MetricDiff) -> io::IoResult<()> {
-        let mut noise = 0;
-        let mut improved = 0;
-        let mut regressed = 0;
-        let mut added = 0;
-        let mut removed = 0;
-
-        for (k, v) in diff.iter() {
-            match *v {
-                LikelyNoise => noise += 1,
-                MetricAdded => {
-                    added += 1;
-                    if_ok!(self.write_added());
-                    if_ok!(self.write_plain(format!(": {}\n", *k)));
-                }
-                MetricRemoved => {
-                    removed += 1;
-                    if_ok!(self.write_removed());
-                    if_ok!(self.write_plain(format!(": {}\n", *k)));
-                }
-                Improvement(pct) => {
-                    improved += 1;
-                    if_ok!(self.write_plain(format!(": {}", *k)));
-                    if_ok!(self.write_improved());
-                    if_ok!(self.write_plain(format!(" by {:.2f}%\n", pct as f64)));
-                }
-                Regression(pct) => {
-                    regressed += 1;
-                    if_ok!(self.write_plain(format!(": {}", *k)));
-                    if_ok!(self.write_regressed());
-                    if_ok!(self.write_plain(format!(" by {:.2f}%\n", pct as f64)));
-                }
-            }
-        }
-        if_ok!(self.write_plain(format!("result of ratchet: {} metrics added, \
-                                        {} removed, {} improved, {} regressed, \
-                                        {} noise\n",
-                                       added, removed, improved, regressed,
-                                       noise)));
-        if regressed == 0 {
-            if_ok!(self.write_plain("updated ratchet file\n"));
-        } else {
-            if_ok!(self.write_plain("left ratchet file untouched\n"));
-        }
-        Ok(())
-    }
-
-    pub fn write_run_finish(&mut self,
-                            ratchet_metrics: &Option<Path>,
-                            ratchet_pct: Option<f64>) -> io::IoResult<bool> {
-        assert!(self.passed + self.failed + self.ignored + self.measured == self.total);
-
-        let ratchet_success = match *ratchet_metrics {
-            None => true,
-            Some(ref pth) => {
-                if_ok!(self.write_plain(format!("\nusing metrics ratcher: {}\n",
-                                        pth.display())));
-                match ratchet_pct {
-                    None => (),
-                    Some(pct) =>
-                        if_ok!(self.write_plain(format!("with noise-tolerance \
-                                                         forced to: {}%\n",
-                                                        pct)))
-                }
-                let (diff, ok) = self.metrics.ratchet(pth, ratchet_pct);
-                if_ok!(self.write_metric_diff(&diff));
-                ok
-            }
-        };
-
-        let test_success = self.failed == 0u;
-        if !test_success {
-            if_ok!(self.write_failures());
-        }
-
-        let success = ratchet_success && test_success;
-
-        if_ok!(self.write_plain("\ntest result: "));
-        if success {
-            // There's no parallelism at this point so it's safe to use color
-            if_ok!(self.write_ok());
-        } else {
-            if_ok!(self.write_failed());
-        }
-        let s = format!(". {} passed; {} failed; {} ignored; {} measured\n\n",
-                        self.passed, self.failed, self.ignored, self.measured);
-        if_ok!(self.write_plain(s));
-        return Ok(success);
-    }
-}
-
-pub fn fmt_metrics(mm: &MetricMap) -> ~str {
-    let MetricMap(ref mm) = *mm;
-    let v : ~[~str] = mm.iter()
-        .map(|(k,v)| format!("{}: {} (+/- {})",
-                          *k,
-                          v.value as f64,
-                          v.noise as f64))
-        .collect();
-    v.connect(", ")
-}
-
-pub fn fmt_bench_samples(bs: &BenchSamples) -> ~str {
-    if bs.mb_s != 0 {
-        format!("{:>9} ns/iter (+/- {}) = {} MB/s",
-             bs.ns_iter_summ.median as uint,
-             (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint,
-             bs.mb_s)
-    } else {
-        format!("{:>9} ns/iter (+/- {})",
-             bs.ns_iter_summ.median as uint,
-             (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint)
-    }
-}
-
-// A simple console test runner
-pub fn run_tests_console(opts: &TestOpts,
-                         tests: ~[TestDescAndFn]) -> io::IoResult<bool> {
-    fn callback<T: Writer>(event: &TestEvent,
-                           st: &mut ConsoleTestState<T>) -> io::IoResult<()> {
-        debug!("callback(event={:?})", event);
-        match (*event).clone() {
-            TeFiltered(ref filtered_tests) => st.write_run_start(filtered_tests.len()),
-            TeWait(ref test, padding) => st.write_test_start(test, padding),
-            TeResult(test, result, stdout) => {
-                if_ok!(st.write_log(&test, &result));
-                if_ok!(st.write_result(&result));
-                match result {
-                    TrOk => st.passed += 1,
-                    TrIgnored => st.ignored += 1,
-                    TrMetrics(mm) => {
-                        let tname = test.name.to_str();
-                        let MetricMap(mm) = mm;
-                        for (k,v) in mm.iter() {
-                            st.metrics.insert_metric(tname + "." + *k,
-                                                     v.value, v.noise);
-                        }
-                        st.measured += 1
-                    }
-                    TrBench(bs) => {
-                        st.metrics.insert_metric(test.name.to_str(),
-                                                 bs.ns_iter_summ.median,
-                                                 bs.ns_iter_summ.max - bs.ns_iter_summ.min);
-                        st.measured += 1
-                    }
-                    TrFailed => {
-                        st.failed += 1;
-                        st.failures.push((test, stdout));
-                    }
-                }
-                Ok(())
-            }
-        }
-    }
-    let mut st = if_ok!(ConsoleTestState::new(opts, None::<StdWriter>));
-    fn len_if_padded(t: &TestDescAndFn) -> uint {
-        match t.testfn.padding() {
-            PadNone => 0u,
-            PadOnLeft | PadOnRight => t.desc.name.to_str().len(),
-        }
-    }
-    match tests.iter().max_by(|t|len_if_padded(*t)) {
-        Some(t) => {
-            let n = t.desc.name.to_str();
-            debug!("Setting max_name_len from: {}", n);
-            st.max_name_len = n.len();
-        },
-        None => {}
-    }
-    if_ok!(run_tests(opts, tests, |x| callback(&x, &mut st)));
-    match opts.save_metrics {
-        None => (),
-        Some(ref pth) => {
-            if_ok!(st.metrics.save(pth));
-            if_ok!(st.write_plain(format!("\nmetrics saved to: {}",
-                                          pth.display())));
-        }
-    }
-    return st.write_run_finish(&opts.ratchet_metrics, opts.ratchet_noise_percent);
-}
-
-#[test]
-fn should_sort_failures_before_printing_them() {
-    use std::io::MemWriter;
-    use std::str;
-
-    let test_a = TestDesc {
-        name: StaticTestName("a"),
-        ignore: false,
-        should_fail: false
-    };
-
-    let test_b = TestDesc {
-        name: StaticTestName("b"),
-        ignore: false,
-        should_fail: false
-    };
-
-    let mut st = ConsoleTestState {
-        log_out: None,
-        out: Raw(MemWriter::new()),
-        use_color: false,
-        total: 0u,
-        passed: 0u,
-        failed: 0u,
-        ignored: 0u,
-        measured: 0u,
-        max_name_len: 10u,
-        metrics: MetricMap::new(),
-        failures: ~[(test_b, ~[]), (test_a, ~[])]
-    };
-
-    st.write_failures().unwrap();
-    let s = match st.out {
-        Raw(ref m) => str::from_utf8_lossy(m.get_ref()),
-        Pretty(_) => unreachable!()
-    };
-
-    let apos = s.as_slice().find_str("a").unwrap();
-    let bpos = s.as_slice().find_str("b").unwrap();
-    assert!(apos < bpos);
-}
-
-fn use_color() -> bool { return get_concurrency() == 1; }
-
-#[deriving(Clone)]
-enum TestEvent {
-    TeFiltered(~[TestDesc]),
-    TeWait(TestDesc, NamePadding),
-    TeResult(TestDesc, TestResult, ~[u8] /* stdout */),
-}
-
-pub type MonitorMsg = (TestDesc, TestResult, ~[u8] /* stdout */);
-
-fn run_tests(opts: &TestOpts,
-             tests: ~[TestDescAndFn],
-             callback: |e: TestEvent| -> io::IoResult<()>) -> io::IoResult<()> {
-    let filtered_tests = filter_tests(opts, tests);
-    let filtered_descs = filtered_tests.map(|t| t.desc.clone());
-
-    if_ok!(callback(TeFiltered(filtered_descs)));
-
-    let (filtered_tests, filtered_benchs_and_metrics) =
-        filtered_tests.partition(|e| {
-            match e.testfn {
-                StaticTestFn(_) | DynTestFn(_) => true,
-                _ => false
-            }
-        });
-
-    // It's tempting to just spawn all the tests at once, but since we have
-    // many tests that run in other processes we would be making a big mess.
-    let concurrency = get_concurrency();
-    debug!("using {} test tasks", concurrency);
-
-    let mut remaining = filtered_tests;
-    remaining.reverse();
-    let mut pending = 0;
-
-    let (p, ch) = Chan::new();
-
-    while pending > 0 || !remaining.is_empty() {
-        while pending < concurrency && !remaining.is_empty() {
-            let test = remaining.pop().unwrap();
-            if concurrency == 1 {
-                // We are doing one test at a time so we can print the name
-                // of the test before we run it. Useful for debugging tests
-                // that hang forever.
-                if_ok!(callback(TeWait(test.desc.clone(), test.testfn.padding())));
-            }
-            run_test(!opts.run_tests, test, ch.clone());
-            pending += 1;
-        }
-
-        let (desc, result, stdout) = p.recv();
-        if concurrency != 1 {
-            if_ok!(callback(TeWait(desc.clone(), PadNone)));
-        }
-        if_ok!(callback(TeResult(desc, result, stdout)));
-        pending -= 1;
-    }
-
-    // All benchmarks run at the end, in serial.
-    // (this includes metric fns)
-    for b in filtered_benchs_and_metrics.move_iter() {
-        if_ok!(callback(TeWait(b.desc.clone(), b.testfn.padding())));
-        run_test(!opts.run_benchmarks, b, ch.clone());
-        let (test, result, stdout) = p.recv();
-        if_ok!(callback(TeResult(test, result, stdout)));
-    }
-    Ok(())
-}
-
-fn get_concurrency() -> uint {
-    use std::rt;
-    match os::getenv("RUST_TEST_TASKS") {
-        Some(s) => {
-            let opt_n: Option<uint> = FromStr::from_str(s);
-            match opt_n {
-                Some(n) if n > 0 => n,
-                _ => fail!("RUST_TEST_TASKS is `{}`, should be a positive integer.", s)
-            }
-        }
-        None => {
-            rt::default_sched_threads()
-        }
-    }
-}
-
-pub fn filter_tests(
-    opts: &TestOpts,
-    tests: ~[TestDescAndFn]) -> ~[TestDescAndFn]
-{
-    let mut filtered = tests;
-
-    // Remove tests that don't match the test filter
-    filtered = if opts.filter.is_none() {
-        filtered
-    } else {
-        let filter_str = match opts.filter {
-          Some(ref f) => (*f).clone(),
-          None => ~""
-        };
-
-        fn filter_fn(test: TestDescAndFn, filter_str: &str) ->
-            Option<TestDescAndFn> {
-            if test.desc.name.to_str().contains(filter_str) {
-                return Some(test);
-            } else {
-                return None;
-            }
-        }
-
-        filtered.move_iter().filter_map(|x| filter_fn(x, filter_str)).collect()
-    };
-
-    // Maybe pull out the ignored test and unignore them
-    filtered = if !opts.run_ignored {
-        filtered
-    } else {
-        fn filter(test: TestDescAndFn) -> Option<TestDescAndFn> {
-            if test.desc.ignore {
-                let TestDescAndFn {desc, testfn} = test;
-                Some(TestDescAndFn {
-                    desc: TestDesc {ignore: false, ..desc},
-                    testfn: testfn
-                })
-            } else {
-                None
-            }
-        };
-        filtered.move_iter().filter_map(|x| filter(x)).collect()
-    };
-
-    // Sort the tests alphabetically
-    filtered.sort_by(|t1, t2| t1.desc.name.to_str().cmp(&t2.desc.name.to_str()));
-
-    // Shard the remaining tests, if sharding requested.
-    match opts.test_shard {
-        None => filtered,
-        Some((a,b)) =>
-            filtered.move_iter().enumerate()
-            .filter(|&(i,_)| i % b == a)
-            .map(|(_,t)| t)
-            .to_owned_vec()
-    }
-}
-
-pub fn run_test(force_ignore: bool,
-                test: TestDescAndFn,
-                monitor_ch: Chan<MonitorMsg>) {
-
-    let TestDescAndFn {desc, testfn} = test;
-
-    if force_ignore || desc.ignore {
-        monitor_ch.send((desc, TrIgnored, ~[]));
-        return;
-    }
-
-    fn run_test_inner(desc: TestDesc,
-                      monitor_ch: Chan<MonitorMsg>,
-                      testfn: proc()) {
-        spawn(proc() {
-            let (p, c) = Chan::new();
-            let mut reader = PortReader::new(p);
-            let stdout = ChanWriter::new(c.clone());
-            let stderr = ChanWriter::new(c);
-            let mut task = task::task().named(match desc.name {
-                DynTestName(ref name) => name.clone().into_maybe_owned(),
-                StaticTestName(name) => name.into_maybe_owned(),
-            });
-            task.opts.stdout = Some(~stdout as ~Writer);
-            task.opts.stderr = Some(~stderr as ~Writer);
-            let result_future = task.future_result();
-            task.spawn(testfn);
-
-            let stdout = reader.read_to_end().unwrap();
-            let task_result = result_future.recv();
-            let test_result = calc_result(&desc, task_result.is_ok());
-            monitor_ch.send((desc.clone(), test_result, stdout));
-        })
-    }
-
-    match testfn {
-        DynBenchFn(bencher) => {
-            let bs = ::test::bench::benchmark(|harness| bencher.run(harness));
-            monitor_ch.send((desc, TrBench(bs), ~[]));
-            return;
-        }
-        StaticBenchFn(benchfn) => {
-            let bs = ::test::bench::benchmark(|harness| benchfn(harness));
-            monitor_ch.send((desc, TrBench(bs), ~[]));
-            return;
-        }
-        DynMetricFn(f) => {
-            let mut mm = MetricMap::new();
-            f(&mut mm);
-            monitor_ch.send((desc, TrMetrics(mm), ~[]));
-            return;
-        }
-        StaticMetricFn(f) => {
-            let mut mm = MetricMap::new();
-            f(&mut mm);
-            monitor_ch.send((desc, TrMetrics(mm), ~[]));
-            return;
-        }
-        DynTestFn(f) => run_test_inner(desc, monitor_ch, f),
-        StaticTestFn(f) => run_test_inner(desc, monitor_ch, proc() f())
-    }
-}
-
-fn calc_result(desc: &TestDesc, task_succeeded: bool) -> TestResult {
-    if task_succeeded {
-        if desc.should_fail { TrFailed }
-        else { TrOk }
-    } else {
-        if desc.should_fail { TrOk }
-        else { TrFailed }
-    }
-}
-
-
-impl ToJson for Metric {
-    fn to_json(&self) -> json::Json {
-        let mut map = ~TreeMap::new();
-        map.insert(~"value", json::Number(self.value));
-        map.insert(~"noise", json::Number(self.noise));
-        json::Object(map)
-    }
-}
-
-impl MetricMap {
-
-    pub fn new() -> MetricMap {
-        MetricMap(TreeMap::new())
-    }
-
-    /// Load MetricDiff from a file.
-    ///
-    /// # Failure
-    ///
-    /// This function will fail if the path does not exist or the path does not
-    /// contain a valid metric map.
-    pub fn load(p: &Path) -> MetricMap {
-        assert!(p.exists());
-        let mut f = File::open(p).unwrap();
-        let value = json::from_reader(&mut f as &mut io::Reader).unwrap();
-        let mut decoder = json::Decoder::new(value);
-        MetricMap(Decodable::decode(&mut decoder))
-    }
-
-    /// Write MetricDiff to a file.
-    pub fn save(&self, p: &Path) -> io::IoResult<()> {
-        let mut file = if_ok!(File::create(p));
-        let MetricMap(ref map) = *self;
-        map.to_json().to_pretty_writer(&mut file)
-    }
-
-    /// Compare against another MetricMap. Optionally compare all
-    /// measurements in the maps using the provided `noise_pct` as a
-    /// percentage of each value to consider noise. If `None`, each
-    /// measurement's noise threshold is independently chosen as the
-    /// maximum of that measurement's recorded noise quantity in either
-    /// map.
-    pub fn compare_to_old(&self, old: &MetricMap,
-                          noise_pct: Option<f64>) -> MetricDiff {
-        let mut diff : MetricDiff = TreeMap::new();
-        let MetricMap(ref selfmap) = *self;
-        let MetricMap(ref old) = *old;
-        for (k, vold) in old.iter() {
-            let r = match selfmap.find(k) {
-                None => MetricRemoved,
-                Some(v) => {
-                    let delta = (v.value - vold.value);
-                    let noise = match noise_pct {
-                        None => f64::max(vold.noise.abs(), v.noise.abs()),
-                        Some(pct) => vold.value * pct / 100.0
-                    };
-                    if delta.abs() <= noise {
-                        LikelyNoise
-                    } else {
-                        let pct = delta.abs() / cmp::max(vold.value, f64::EPSILON) * 100.0;
-                        if vold.noise < 0.0 {
-                            // When 'noise' is negative, it means we want
-                            // to see deltas that go up over time, and can
-                            // only tolerate slight negative movement.
-                            if delta < 0.0 {
-                                Regression(pct)
-                            } else {
-                                Improvement(pct)
-                            }
-                        } else {
-                            // When 'noise' is positive, it means we want
-                            // to see deltas that go down over time, and
-                            // can only tolerate slight positive movements.
-                            if delta < 0.0 {
-                                Improvement(pct)
-                            } else {
-                                Regression(pct)
-                            }
-                        }
-                    }
-                }
-            };
-            diff.insert((*k).clone(), r);
-        }
-        let MetricMap(ref map) = *self;
-        for (k, _) in map.iter() {
-            if !diff.contains_key(k) {
-                diff.insert((*k).clone(), MetricAdded);
-            }
-        }
-        diff
-    }
-
-    /// Insert a named `value` (+/- `noise`) metric into the map. The value
-    /// must be non-negative. The `noise` indicates the uncertainty of the
-    /// metric, which doubles as the "noise range" of acceptable
-    /// pairwise-regressions on this named value, when comparing from one
-    /// metric to the next using `compare_to_old`.
-    ///
-    /// If `noise` is positive, then it means this metric is of a value
-    /// you want to see grow smaller, so a change larger than `noise` in the
-    /// positive direction represents a regression.
-    ///
-    /// If `noise` is negative, then it means this metric is of a value
-    /// you want to see grow larger, so a change larger than `noise` in the
-    /// negative direction represents a regression.
-    pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
-        let m = Metric {
-            value: value,
-            noise: noise
-        };
-        let MetricMap(ref mut map) = *self;
-        map.insert(name.to_owned(), m);
-    }
-
-    /// Attempt to "ratchet" an external metric file. This involves loading
-    /// metrics from a metric file (if it exists), comparing against
-    /// the metrics in `self` using `compare_to_old`, and rewriting the
-    /// file to contain the metrics in `self` if none of the
-    /// `MetricChange`s are `Regression`. Returns the diff as well
-    /// as a boolean indicating whether the ratchet succeeded.
-    pub fn ratchet(&self, p: &Path, pct: Option<f64>) -> (MetricDiff, bool) {
-        let old = if p.exists() {
-            MetricMap::load(p)
-        } else {
-            MetricMap::new()
-        };
-
-        let diff : MetricDiff = self.compare_to_old(&old, pct);
-        let ok = diff.iter().all(|(_, v)| {
-            match *v {
-                Regression(_) => false,
-                _ => true
-            }
-        });
-
-        if ok {
-            debug!("rewriting file '{:?}' with updated metrics", p);
-            self.save(p).unwrap();
-        }
-        return (diff, ok)
-    }
-}
-
-
-// Benchmarking
-
-/// A function that is opaque to the optimizer, to allow benchmarks to
-/// pretend to use outputs to assist in avoiding dead-code
-/// elimination.
-///
-/// This function is a no-op, and does not even read from `dummy`.
-pub fn black_box<T>(dummy: T) {
-    // we need to "use" the argument in some way LLVM can't
-    // introspect.
-    unsafe {asm!("" : : "r"(&dummy))}
-}
-
-
-impl BenchHarness {
-    /// Callback for benchmark functions to run in their body.
-    pub fn iter<T>(&mut self, inner: || -> T) {
-        self.ns_start = precise_time_ns();
-        let k = self.iterations;
-        for _ in range(0u64, k) {
-            black_box(inner());
-        }
-        self.ns_end = precise_time_ns();
-    }
-
-    pub fn ns_elapsed(&mut self) -> u64 {
-        if self.ns_start == 0 || self.ns_end == 0 {
-            0
-        } else {
-            self.ns_end - self.ns_start
-        }
-    }
-
-    pub fn ns_per_iter(&mut self) -> u64 {
-        if self.iterations == 0 {
-            0
-        } else {
-            self.ns_elapsed() / cmp::max(self.iterations, 1)
-        }
-    }
-
-    pub fn bench_n(&mut self, n: u64, f: |&mut BenchHarness|) {
-        self.iterations = n;
-        debug!("running benchmark for {} iterations",
-               n as uint);
-        f(self);
-    }
-
-    // This is a more statistics-driven benchmark algorithm
-    pub fn auto_bench(&mut self, f: |&mut BenchHarness|) -> stats::Summary {
-
-        // Initial bench run to get ballpark figure.
-        let mut n = 1_u64;
-        self.bench_n(n, |x| f(x));
-
-        // Try to estimate iter count for 1ms falling back to 1m
-        // iterations if first run took < 1ns.
-        if self.ns_per_iter() == 0 {
-            n = 1_000_000;
-        } else {
-            n = 1_000_000 / cmp::max(self.ns_per_iter(), 1);
-        }
-        // if the first run took more than 1ms we don't want to just
-        // be left doing 0 iterations on every loop. The unfortunate
-        // side effect of not being able to do as many runs is
-        // automatically handled by the statistical analysis below
-        // (i.e. larger error bars).
-        if n == 0 { n = 1; }
-
-        debug!("Initial run took {} ns, iter count that takes 1ms estimated as {}",
-               self.ns_per_iter(), n);
-
-        let mut total_run = 0;
-        let samples : &mut [f64] = [0.0_f64, ..50];
-        loop {
-            let loop_start = precise_time_ns();
-
-            for p in samples.mut_iter() {
-                self.bench_n(n, |x| f(x));
-                *p = self.ns_per_iter() as f64;
-            };
-
-            stats::winsorize(samples, 5.0);
-            let summ = stats::Summary::new(samples);
-
-            for p in samples.mut_iter() {
-                self.bench_n(5 * n, |x| f(x));
-                *p = self.ns_per_iter() as f64;
-            };
-
-            stats::winsorize(samples, 5.0);
-            let summ5 = stats::Summary::new(samples);
-
-            debug!("{} samples, median {}, MAD={}, MADP={}",
-                   samples.len(),
-                   summ.median as f64,
-                   summ.median_abs_dev as f64,
-                   summ.median_abs_dev_pct as f64);
-
-            let now = precise_time_ns();
-            let loop_run = now - loop_start;
-
-            // If we've run for 100ms and seem to have converged to a
-            // stable median.
-            if loop_run > 100_000_000 &&
-                summ.median_abs_dev_pct < 1.0 &&
-                summ.median - summ5.median < summ5.median_abs_dev {
-                return summ5;
-            }
-
-            total_run += loop_run;
-            // Longest we ever run for is 3s.
-            if total_run > 3_000_000_000 {
-                return summ5;
-            }
-
-            n *= 2;
-        }
-    }
-
-
-
-
-}
-
-pub mod bench {
-    use std::cmp;
-    use test::{BenchHarness, BenchSamples};
-
-    pub fn benchmark(f: |&mut BenchHarness|) -> BenchSamples {
-        let mut bs = BenchHarness {
-            iterations: 0,
-            ns_start: 0,
-            ns_end: 0,
-            bytes: 0
-        };
-
-        let ns_iter_summ = bs.auto_bench(f);
-
-        let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
-        let iter_s = 1_000_000_000 / ns_iter;
-        let mb_s = (bs.bytes * iter_s) / 1_000_000;
-
-        BenchSamples {
-            ns_iter_summ: ns_iter_summ,
-            mb_s: mb_s as uint
-        }
-    }
-}
-
-#[cfg(test)]
-mod tests {
-    use test::{TrFailed, TrIgnored, TrOk, filter_tests, parse_opts,
-               TestDesc, TestDescAndFn,
-               Metric, MetricMap, MetricAdded, MetricRemoved,
-               Improvement, Regression, LikelyNoise,
-               StaticTestName, DynTestName, DynTestFn};
-    use test::{TestOpts, run_test};
-
-    use tempfile::TempDir;
-
-    #[test]
-    pub fn do_not_run_ignored_tests() {
-        fn f() { fail!(); }
-        let desc = TestDescAndFn {
-            desc: TestDesc {
-                name: StaticTestName("whatever"),
-                ignore: true,
-                should_fail: false
-            },
-            testfn: DynTestFn(proc() f()),
-        };
-        let (p, ch) = Chan::new();
-        run_test(false, desc, ch);
-        let (_, res, _) = p.recv();
-        assert!(res != TrOk);
-    }
-
-    #[test]
-    pub fn ignored_tests_result_in_ignored() {
-        fn f() { }
-        let desc = TestDescAndFn {
-            desc: TestDesc {
-                name: StaticTestName("whatever"),
-                ignore: true,
-                should_fail: false
-            },
-            testfn: DynTestFn(proc() f()),
-        };
-        let (p, ch) = Chan::new();
-        run_test(false, desc, ch);
-        let (_, res, _) = p.recv();
-        assert_eq!(res, TrIgnored);
-    }
-
-    #[test]
-    fn test_should_fail() {
-        fn f() { fail!(); }
-        let desc = TestDescAndFn {
-            desc: TestDesc {
-                name: StaticTestName("whatever"),
-                ignore: false,
-                should_fail: true
-            },
-            testfn: DynTestFn(proc() f()),
-        };
-        let (p, ch) = Chan::new();
-        run_test(false, desc, ch);
-        let (_, res, _) = p.recv();
-        assert_eq!(res, TrOk);
-    }
-
-    #[test]
-    fn test_should_fail_but_succeeds() {
-        fn f() { }
-        let desc = TestDescAndFn {
-            desc: TestDesc {
-                name: StaticTestName("whatever"),
-                ignore: false,
-                should_fail: true
-            },
-            testfn: DynTestFn(proc() f()),
-        };
-        let (p, ch) = Chan::new();
-        run_test(false, desc, ch);
-        let (_, res, _) = p.recv();
-        assert_eq!(res, TrFailed);
-    }
-
-    #[test]
-    fn first_free_arg_should_be_a_filter() {
-        let args = ~[~"progname", ~"filter"];
-        let opts = match parse_opts(args) {
-            Some(Ok(o)) => o,
-            _ => fail!("Malformed arg in first_free_arg_should_be_a_filter")
-        };
-        assert!("filter" == opts.filter.clone().unwrap());
-    }
-
-    #[test]
-    fn parse_ignored_flag() {
-        let args = ~[~"progname", ~"filter", ~"--ignored"];
-        let opts = match parse_opts(args) {
-            Some(Ok(o)) => o,
-            _ => fail!("Malformed arg in parse_ignored_flag")
-        };
-        assert!((opts.run_ignored));
-    }
-
-    #[test]
-    pub fn filter_for_ignored_option() {
-        // When we run ignored tests the test filter should filter out all the
-        // unignored tests and flip the ignore flag on the rest to false
-
-        let opts = TestOpts {
-            filter: None,
-            run_ignored: true,
-            logfile: None,
-            run_tests: true,
-            run_benchmarks: false,
-            ratchet_noise_percent: None,
-            ratchet_metrics: None,
-            save_metrics: None,
-            test_shard: None
-        };
-
-        let tests = ~[
-            TestDescAndFn {
-                desc: TestDesc {
-                    name: StaticTestName("1"),
-                    ignore: true,
-                    should_fail: false,
-                },
-                testfn: DynTestFn(proc() {}),
-            },
-            TestDescAndFn {
-                desc: TestDesc {
-                    name: StaticTestName("2"),
-                    ignore: false,
-                    should_fail: false
-                },
-                testfn: DynTestFn(proc() {}),
-            },
-        ];
-        let filtered = filter_tests(&opts, tests);
-
-        assert_eq!(filtered.len(), 1);
-        assert_eq!(filtered[0].desc.name.to_str(), ~"1");
-        assert!(filtered[0].desc.ignore == false);
-    }
-
-    #[test]
-    pub fn sort_tests() {
-        let opts = TestOpts {
-            filter: None,
-            run_ignored: false,
-            logfile: None,
-            run_tests: true,
-            run_benchmarks: false,
-            ratchet_noise_percent: None,
-            ratchet_metrics: None,
-            save_metrics: None,
-            test_shard: None
-        };
-
-        let names =
-            ~[~"sha1::test", ~"int::test_to_str", ~"int::test_pow",
-             ~"test::do_not_run_ignored_tests",
-             ~"test::ignored_tests_result_in_ignored",
-             ~"test::first_free_arg_should_be_a_filter",
-             ~"test::parse_ignored_flag", ~"test::filter_for_ignored_option",
-             ~"test::sort_tests"];
-        let tests =
-        {
-            fn testfn() { }
-            let mut tests = ~[];
-            for name in names.iter() {
-                let test = TestDescAndFn {
-                    desc: TestDesc {
-                        name: DynTestName((*name).clone()),
-                        ignore: false,
-                        should_fail: false
-                    },
-                    testfn: DynTestFn(testfn),
-                };
-                tests.push(test);
-            }
-            tests
-        };
-        let filtered = filter_tests(&opts, tests);
-
-        let expected =
-            ~[~"int::test_pow", ~"int::test_to_str", ~"sha1::test",
-              ~"test::do_not_run_ignored_tests",
-              ~"test::filter_for_ignored_option",
-              ~"test::first_free_arg_should_be_a_filter",
-              ~"test::ignored_tests_result_in_ignored",
-              ~"test::parse_ignored_flag",
-              ~"test::sort_tests"];
-
-        for (a, b) in expected.iter().zip(filtered.iter()) {
-            assert!(*a == b.desc.name.to_str());
-        }
-    }
-
-    #[test]
-    pub fn test_metricmap_compare() {
-        let mut m1 = MetricMap::new();
-        let mut m2 = MetricMap::new();
-        m1.insert_metric("in-both-noise", 1000.0, 200.0);
-        m2.insert_metric("in-both-noise", 1100.0, 200.0);
-
-        m1.insert_metric("in-first-noise", 1000.0, 2.0);
-        m2.insert_metric("in-second-noise", 1000.0, 2.0);
-
-        m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
-        m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);
-
-        m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
-        m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);
-
-        m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
-        m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);
-
-        m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
-        m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);
-
-        let diff1 = m2.compare_to_old(&m1, None);
-
-        assert_eq!(*(diff1.find(&~"in-both-noise").unwrap()), LikelyNoise);
-        assert_eq!(*(diff1.find(&~"in-first-noise").unwrap()), MetricRemoved);
-        assert_eq!(*(diff1.find(&~"in-second-noise").unwrap()), MetricAdded);
-        assert_eq!(*(diff1.find(&~"in-both-want-downwards-but-regressed").unwrap()),
-                   Regression(100.0));
-        assert_eq!(*(diff1.find(&~"in-both-want-downwards-and-improved").unwrap()),
-                   Improvement(50.0));
-        assert_eq!(*(diff1.find(&~"in-both-want-upwards-but-regressed").unwrap()),
-                   Regression(50.0));
-        assert_eq!(*(diff1.find(&~"in-both-want-upwards-and-improved").unwrap()),
-                   Improvement(100.0));
-        assert_eq!(diff1.len(), 7);
-
-        let diff2 = m2.compare_to_old(&m1, Some(200.0));
-
-        assert_eq!(*(diff2.find(&~"in-both-noise").unwrap()), LikelyNoise);
-        assert_eq!(*(diff2.find(&~"in-first-noise").unwrap()), MetricRemoved);
-        assert_eq!(*(diff2.find(&~"in-second-noise").unwrap()), MetricAdded);
-        assert_eq!(*(diff2.find(&~"in-both-want-downwards-but-regressed").unwrap()), LikelyNoise);
-        assert_eq!(*(diff2.find(&~"in-both-want-downwards-and-improved").unwrap()), LikelyNoise);
-        assert_eq!(*(diff2.find(&~"in-both-want-upwards-but-regressed").unwrap()), LikelyNoise);
-        assert_eq!(*(diff2.find(&~"in-both-want-upwards-and-improved").unwrap()), LikelyNoise);
-        assert_eq!(diff2.len(), 7);
-    }
-
-    #[test]
-    pub fn ratchet_test() {
-
-        let dpth = TempDir::new("test-ratchet").expect("missing test for ratchet");
-        let pth = dpth.path().join("ratchet.json");
-
-        let mut m1 = MetricMap::new();
-        m1.insert_metric("runtime", 1000.0, 2.0);
-        m1.insert_metric("throughput", 50.0, 2.0);
-
-        let mut m2 = MetricMap::new();
-        m2.insert_metric("runtime", 1100.0, 2.0);
-        m2.insert_metric("throughput", 50.0, 2.0);
-
-        m1.save(&pth).unwrap();
-
-        // Ask for a ratchet that should fail to advance.
-        let (diff1, ok1) = m2.ratchet(&pth, None);
-        assert_eq!(ok1, false);
-        assert_eq!(diff1.len(), 2);
-        assert_eq!(*(diff1.find(&~"runtime").unwrap()), Regression(10.0));
-        assert_eq!(*(diff1.find(&~"throughput").unwrap()), LikelyNoise);
-
-        // Check that it was not rewritten.
-        let m3 = MetricMap::load(&pth);
-        let MetricMap(m3) = m3;
-        assert_eq!(m3.len(), 2);
-        assert_eq!(*(m3.find(&~"runtime").unwrap()), Metric { value: 1000.0, noise: 2.0 });
-        assert_eq!(*(m3.find(&~"throughput").unwrap()), Metric { value: 50.0, noise: 2.0 });
-
-        // Ask for a ratchet with an explicit noise-percentage override,
-        // that should advance.
-        let (diff2, ok2) = m2.ratchet(&pth, Some(10.0));
-        assert_eq!(ok2, true);
-        assert_eq!(diff2.len(), 2);
-        assert_eq!(*(diff2.find(&~"runtime").unwrap()), LikelyNoise);
-        assert_eq!(*(diff2.find(&~"throughput").unwrap()), LikelyNoise);
-
-        // Check that it was rewritten.
-        let m4 = MetricMap::load(&pth);
-        let MetricMap(m4) = m4;
-        assert_eq!(m4.len(), 2);
-        assert_eq!(*(m4.find(&~"runtime").unwrap()), Metric { value: 1100.0, noise: 2.0 });
-        assert_eq!(*(m4.find(&~"throughput").unwrap()), Metric { value: 50.0, noise: 2.0 });
-    }
-}
index 7197b0dba1d0118445b6a35ade4c20533b2d1a86..6b833804e9ba49c3d22ef71277478e9168bb9743 100644 (file)
@@ -2546,11 +2546,12 @@ fn test_negative_rand_range() {
 
 #[cfg(test)]
 mod bench {
-    use super::{BigInt, BigUint};
+    extern crate test;
+    use self::test::BenchHarness;
+    use super::BigUint;
     use std::iter;
     use std::mem::replace;
     use std::num::{FromPrimitive, Zero, One};
-    use extra::test::BenchHarness;
 
     fn factorial(n: uint) -> BigUint {
         let mut f: BigUint = One::one();
index a41996d044f82db877f9fa611658c29ca92e6d5e..a483946322f804a89104aff53a356c249cd8dc5a 100644 (file)
@@ -430,8 +430,7 @@ fn test_is_integer() {
 
     mod arith {
         use super::{_0, _1, _2, _1_2, _3_2, _neg1_2, to_big};
-        use super::super::{Ratio, Rational, BigRational};
-
+        use super::super::{Ratio, Rational};
 
         #[test]
         fn test_add() {
index be9fcf4a1e9ae87ef608e4f192d106dc3372f9c5..45b1a42898c2a77d1b36919aa89332a35a579233 100644 (file)
@@ -1,4 +1,4 @@
-// Copyright 2012 The Rust Project Developers. See the COPYRIGHT
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
 // file at the top-level directory of this distribution and at
 // http://rust-lang.org/COPYRIGHT.
 //
@@ -46,7 +46,7 @@ struct TestCtxt<'a> {
     path: RefCell<~[ast::Ident]>,
     ext_cx: ExtCtxt<'a>,
     testfns: RefCell<~[Test]>,
-    is_extra: bool,
+    is_test_crate: bool,
     config: ast::CrateConfig,
 }
 
@@ -164,7 +164,7 @@ fn generate_test_harness(sess: session::Session, krate: ast::Crate)
         ext_cx: ExtCtxt::new(sess.parse_sess, sess.opts.cfg.clone(), loader),
         path: RefCell::new(~[]),
         testfns: RefCell::new(~[]),
-        is_extra: is_extra(&krate),
+        is_test_crate: is_test_crate(&krate),
         config: krate.config.clone(),
     };
 
@@ -275,13 +275,12 @@ fn add_test_module(cx: &TestCtxt, m: &ast::Mod) -> ast::Mod {
 
 mod __test {
   #[!resolve_unexported]
-  extern crate extra (name = "extra", vers = "...");
+  extern crate test (name = "test", vers = "...");
   fn main() {
-    #[main];
-    extra::test::test_main_static(::os::args(), tests)
+    test::test_main_static(::os::args(), tests)
   }
 
-  static tests : &'static [extra::test::TestDescAndFn] = &[
+  static tests : &'static [test::TestDescAndFn] = &[
     ... the list of tests in the crate ...
   ];
 }
@@ -289,15 +288,15 @@ fn main() {
 */
 
 fn mk_std(cx: &TestCtxt) -> ast::ViewItem {
-    let id_extra = token::str_to_ident("extra");
-    let vi = if cx.is_extra {
+    let id_test = token::str_to_ident("test");
+    let vi = if cx.is_test_crate {
         ast::ViewItemUse(
-            ~[@nospan(ast::ViewPathSimple(id_extra,
-                                          path_node(~[id_extra]),
+            ~[@nospan(ast::ViewPathSimple(id_test,
+                                          path_node(~[id_test]),
                                           ast::DUMMY_NODE_ID))])
     } else {
-        ast::ViewItemExternMod(id_extra,
-                               with_version("extra"),
+        ast::ViewItemExternMod(id_test,
+                               with_version("test"),
                                ast::DUMMY_NODE_ID)
     };
     ast::ViewItem {
@@ -310,7 +309,7 @@ fn mk_std(cx: &TestCtxt) -> ast::ViewItem {
 
 fn mk_test_module(cx: &TestCtxt) -> @ast::Item {
 
-    // Link to extra
+    // Link to test crate
     let view_items = ~[mk_std(cx)];
 
     // A constant vector of test descriptors.
@@ -321,7 +320,7 @@ fn mk_test_module(cx: &TestCtxt) -> @ast::Item {
     let mainfn = (quote_item!(&cx.ext_cx,
         pub fn main() {
             #[main];
-            extra::test::test_main_static(::std::os::args(), TESTS);
+            test::test_main_static(::std::os::args(), TESTS);
         }
     )).unwrap();
 
@@ -383,15 +382,15 @@ fn mk_tests(cx: &TestCtxt) -> @ast::Item {
     let test_descs = mk_test_descs(cx);
 
     (quote_item!(&cx.ext_cx,
-        pub static TESTS : &'static [self::extra::test::TestDescAndFn] =
+        pub static TESTS : &'static [self::test::TestDescAndFn] =
             $test_descs
         ;
     )).unwrap()
 }
 
-fn is_extra(krate: &ast::Crate) -> bool {
+fn is_test_crate(krate: &ast::Crate) -> bool {
     match attr::find_crateid(krate.attrs) {
-        Some(ref s) if "extra" == s.name => true,
+        Some(ref s) if "test" == s.name => true,
         _ => false
     }
 }
@@ -444,9 +443,9 @@ fn mk_test_desc_and_fn_rec(cx: &TestCtxt, test: &Test) -> @ast::Expr {
     };
 
     let t_expr = if test.bench {
-        quote_expr!(&cx.ext_cx, self::extra::test::StaticBenchFn($fn_expr) )
+        quote_expr!(&cx.ext_cx, self::test::StaticBenchFn($fn_expr) )
     } else {
-        quote_expr!(&cx.ext_cx, self::extra::test::StaticTestFn($fn_expr) )
+        quote_expr!(&cx.ext_cx, self::test::StaticTestFn($fn_expr) )
     };
 
     let ignore_expr = if test.ignore {
@@ -462,9 +461,9 @@ fn mk_test_desc_and_fn_rec(cx: &TestCtxt, test: &Test) -> @ast::Expr {
     };
 
     let e = quote_expr!(&cx.ext_cx,
-        self::extra::test::TestDescAndFn {
-            desc: self::extra::test::TestDesc {
-                name: self::extra::test::StaticTestName($name_expr),
+        self::test::TestDescAndFn {
+            desc: self::test::TestDesc {
+                name: self::test::StaticTestName($name_expr),
                 ignore: $ignore_expr,
                 should_fail: $fail_expr
             },
index 940cebf78471ee6b9c6d96b7ed4807fb1319e4ad..bd17f6b581483c74eb5213b92f797e12ccdea4ff 100644 (file)
@@ -635,7 +635,8 @@ fn test_1million_random_sha256() {
 
 #[cfg(test)]
 mod bench {
-    use extra::test::BenchHarness;
+    extern crate test;
+    use self::test::BenchHarness;
     use super::{Sha256, FixedBuffer, Digest};
 
     #[bench]
index 4194f5e47293628610fe8eaf809c49e42c96ece5..de992e68fe0ba064146c84ebd4ae5bcd0f775551 100644 (file)
@@ -23,6 +23,7 @@
 extern crate sync;
 extern crate getopts;
 extern crate collections;
+extern crate testing = "test";
 
 use std::local_data;
 use std::io;
index 2fcf5527a341d2d62dda3cf730e5db6e75ee35ca..104cec9c814f44e70d6141b9e078ad2dfe37f17f 100644 (file)
 
 use clean;
 
-use extra;
+use extra::json;
 use dl = std::unstable::dynamic_lib;
 
-pub type PluginJson = Option<(~str, extra::json::Json)>;
+pub type PluginJson = Option<(~str, json::Json)>;
 pub type PluginResult = (clean::Crate, PluginJson);
 pub type plugin_callback = extern fn (clean::Crate) -> PluginResult;
 
index db4842b79a04bbd23a7599d1f2b2755460418adf..c0f8d2696caf07984aee13fdff694230b5f4659c 100644 (file)
@@ -15,8 +15,8 @@
 use std::run;
 use std::str;
 
+use testing;
 use extra::tempfile::TempDir;
-use extra::test;
 use rustc::back::link;
 use rustc::driver::driver;
 use rustc::driver::session;
@@ -89,7 +89,7 @@ pub fn run(input: &str, matches: &getopts::Matches) -> int {
     let mut args = args.to_owned_vec();
     args.unshift(~"rustdoctest");
 
-    test::test_main(args, collector.tests);
+    testing::test_main(args, collector.tests);
 
     0
 }
@@ -164,7 +164,7 @@ fn maketest(s: &str, cratename: &str) -> ~str {
 }
 
 pub struct Collector {
-    priv tests: ~[test::TestDescAndFn],
+    priv tests: ~[testing::TestDescAndFn],
     priv names: ~[~str],
     priv libs: @RefCell<HashSet<Path>>,
     priv cnt: uint,
@@ -180,13 +180,13 @@ pub fn add_test(&mut self, test: &str, should_fail: bool) {
         let libs = (*libs.get()).clone();
         let cratename = self.cratename.to_owned();
         debug!("Creating test {}: {}", name, test);
-        self.tests.push(test::TestDescAndFn {
-            desc: test::TestDesc {
-                name: test::DynTestName(name),
+        self.tests.push(testing::TestDescAndFn {
+            desc: testing::TestDesc {
+                name: testing::DynTestName(name),
                 ignore: false,
                 should_fail: false, // compiler failures are test failures
             },
-            testfn: test::DynTestFn(proc() {
+            testfn: testing::DynTestFn(proc() {
                 runtest(test, cratename, libs, should_fail);
             }),
         });
index 918c5e85857db2b6d950c6f50fd61829e4601d42..839450ce57cc32e3792c34e72f543b9693037918 100644 (file)
@@ -260,8 +260,9 @@ fn from_base64(&self) -> Result<~[u8], FromBase64Error> {
 }
 
 #[cfg(test)]
-mod test {
-    use extra::test::BenchHarness;
+mod tests {
+    extern crate test;
+    use self::test::BenchHarness;
     use base64::{Config, FromBase64, ToBase64, STANDARD, URL_SAFE};
 
     #[test]
index 3d57a32a830b2c867162c9ca85c6ae8dadb96bf9..6dc23f586dbea2ff64ddd5be3b0aad13ed408601 100644 (file)
@@ -1037,8 +1037,9 @@ fn test_v(v: Option<int>) {
 
 #[cfg(test)]
 mod bench {
+    extern crate test;
+    use self::test::BenchHarness;
     use ebml::reader;
-    use extra::test::BenchHarness;
 
     #[bench]
     pub fn vuint_at_A_aligned(bh: &mut BenchHarness) {
index 08a3dda854e3aed0ed93c62a2a3b2e8b8e2dcc4c..223a586a5a0dd8d38d907e3401ef0033cc447ec7 100644 (file)
@@ -139,7 +139,8 @@ fn from_hex(&self) -> Result<~[u8], FromHexError> {
 
 #[cfg(test)]
 mod tests {
-    use extra::test::BenchHarness;
+    extern crate test;
+    use self::test::BenchHarness;
     use hex::{FromHex, ToHex};
 
     #[test]
index 7a4b1cff6e47c0643e61867d0c65e699979cd551..f89f56c0f344e2afcd475852654720784b6d2ea5 100644 (file)
@@ -24,7 +24,7 @@
 
 // test harness access
 #[cfg(test)]
-extern crate extra;
+extern crate test;
 
 pub use self::serialize::{Decoder, Encoder, Decodable, Encodable,
     DecoderHelpers, EncoderHelpers};
index adbd4be316c8b78419834f1b38a4e2b2b86fa1be..e6b0958617e38d6b0db4584ae3fffdbb24fa302a 100644 (file)
@@ -664,7 +664,8 @@ fn test_clone_eq_null() {
 
 #[cfg(test)]
 mod bench {
-    use extra::test::BenchHarness;
+    extern crate test;
+    use self::test::BenchHarness;
     use libc;
     use prelude::*;
 
index fbed53ee30d6ec18ff18ecd5a7ba46dc30d423ea..227f3a0a083627fc216febe4c561ad293f6020cb 100644 (file)
@@ -346,11 +346,12 @@ fn flush(&mut self) -> IoResult<()> {
 
 #[cfg(test)]
 mod test {
+    extern crate test;
     use io;
     use prelude::*;
     use super::*;
     use super::super::mem::{MemReader, MemWriter, BufReader};
-    use Harness = extra::test::BenchHarness;
+    use Harness = self::test::BenchHarness;
 
     /// A type, free to create, primarily intended for benchmarking creation of
     /// wrappers that, just for construction, don't need a Reader/Writer that
index da4697d0e48804491a51e8801058a6044bac11b7..ee366e96f23c596b2e3ba68f93367e94f0523995 100644 (file)
@@ -456,7 +456,8 @@ fn test_u64_from_be_bytes() {
 
 #[cfg(test)]
 mod bench {
-    use extra::test::BenchHarness;
+    extern crate test;
+    use self::test::BenchHarness;
     use container::Container;
 
     macro_rules! u64_from_be_bytes_bench_impl(
index c322e9bf5726fb1a8f1f8ad05497c6504f72b8a6..11053f01ded09222d8653d9bb173a40b425832b3 100644 (file)
@@ -267,8 +267,8 @@ fn test_replace() {
 /// Completely miscellaneous language-construct benchmarks.
 #[cfg(test)]
 mod bench {
-
-    use extra::test::BenchHarness;
+    extern crate test;
+    use self::test::BenchHarness;
     use option::{Some,None};
 
     // Static/dynamic method dispatch
index 688e8347d9ad5e962545e4bde91febcd0618d2e6..da3f2c1636fe010e8aff00e039cc2746710645b6 100644 (file)
@@ -865,7 +865,6 @@ fn from_str_radix(val: &str, rdx: uint) -> Option<f32> {
 #[cfg(test)]
 mod tests {
     use f32::*;
-
     use num::*;
     use num;
 
index dafb3187ff8487ad5d0e10925dec228ba63943e2..24165cbef50b9a76b68c9b3bbd494e901208a9bd 100644 (file)
@@ -867,7 +867,6 @@ fn from_str_radix(val: &str, rdx: uint) -> Option<f64> {
 #[cfg(test)]
 mod tests {
     use f64::*;
-
     use num::*;
     use num;
 
index 026e7ebbd48d59aa003d9e7f02ad9a653ac065ff..8a417096c3ea7522aaf98ccafe7e55cfdfde8117 100644 (file)
@@ -1734,10 +1734,11 @@ macro_rules! assert_pow(
 
 #[cfg(test)]
 mod bench {
+    extern crate test;
+    use self::test::BenchHarness;
     use num;
     use vec;
     use prelude::*;
-    use extra::test::BenchHarness;
 
     #[bench]
     fn bench_pow_function(b: &mut BenchHarness) {
index 1ecabff875872d90dace565ff20f4ced22311c23..6be829f51d73c858d164a207859aa8866f9005f7 100644 (file)
@@ -803,7 +803,8 @@ fn from_str_issue7588() {
 
 #[cfg(test)]
 mod bench {
-    use extra::test::BenchHarness;
+    extern crate test;
+    use self::test::BenchHarness;
     use rand::{XorShiftRng, Rng};
     use to_str::ToStr;
     use f64;
index a15ce4f0102b74f4b9dd1f6b7210c5d16d54593f..ac329e6fe839510466e533cefed0428961d7fbfd 100644 (file)
@@ -466,8 +466,8 @@ pub trait Index<Index,Result> {
 
 #[cfg(test)]
 mod bench {
-
-    use extra::test::BenchHarness;
+    extern crate test;
+    use self::test::BenchHarness;
     use ops::Drop;
 
     // Overhead of dtors
index f8e9d0ae344047304060d44552ce82066f41b7fc..9aaa86c4cfe217f60cf0e6bf4a0676583b1cdd28 100644 (file)
@@ -1250,7 +1250,8 @@ macro_rules! t(
 
 #[cfg(test)]
 mod bench {
-    use extra::test::BenchHarness;
+    extern crate test;
+    use self::test::BenchHarness;
     use super::*;
     use prelude::*;
 
index 09c36d945eb1a19fd876494ebefdcd765181d180..2fa9cf8bd48b2882e56f887ee16364e2744cb6c9 100644 (file)
@@ -119,7 +119,8 @@ fn test_exp_invalid_lambda_neg() {
 
 #[cfg(test)]
 mod bench {
-    use extra::test::BenchHarness;
+    extern crate test;
+    use self::test::BenchHarness;
     use mem::size_of;
     use prelude::*;
     use rand::{XorShiftRng, RAND_BENCH_N};
index a14b58188bd784eef6c244204cb6d7b2bfe87bc1..b9702ccd48da2bcb93bb145f1698ec3ecfeb8d54 100644 (file)
@@ -371,7 +371,8 @@ fn test_t() {
 
 #[cfg(test)]
 mod bench {
-    use extra::test::BenchHarness;
+    extern crate test;
+    use self::test::BenchHarness;
     use mem::size_of;
     use prelude::*;
     use rand::distributions::IndependentSample;
index c9dc3c8abc1abc7ee00b6973f7894bba773ebfbf..b2f952e2a4c98b1f26dee486029f8fde1500d65f 100644 (file)
@@ -187,7 +187,8 @@ fn test_log_normal_invalid_sd() {
 
 #[cfg(test)]
 mod bench {
-    use extra::test::BenchHarness;
+    extern crate test;
+    use self::test::BenchHarness;
     use mem::size_of;
     use prelude::*;
     use rand::{XorShiftRng, RAND_BENCH_N};
index 4496268430034a319b7f217a03d89c7e6798f2ce..7218f83d66203031fe0cbc9ee54637c74fc06261 100644 (file)
@@ -845,8 +845,9 @@ fn test_std_rng_reseed() {
 
 #[cfg(test)]
 mod bench {
+    extern crate test;
+    use self::test::BenchHarness;
     use prelude::*;
-    use extra::test::BenchHarness;
     use rand::{XorShiftRng, StdRng, IsaacRng, Isaac64Rng, Rng, RAND_BENCH_N};
     use mem::size_of;
 
index ab279fd3102d219fabee23846d12165d20a618ec..8128bb021487d50a1c577b0bc281b8f6b67d6841 100644 (file)
@@ -107,7 +107,8 @@ pub unsafe fn exchange_free(ptr: *u8) {
 
 #[cfg(test)]
 mod bench {
-    use extra::test::BenchHarness;
+    extern crate test;
+    use self::test::BenchHarness;
 
     #[bench]
     fn alloc_owned_small(bh: &mut BenchHarness) {
index f3474b9401e1a2562db2549d7e24abc08930a0d5..8a42cd73565446f5c1700d948014af638b3c7cea 100644 (file)
@@ -308,7 +308,8 @@ pub fn live_allocs() -> *mut Box {
 
 #[cfg(test)]
 mod bench {
-    use extra::test::BenchHarness;
+    extern crate test;
+    use self::test::BenchHarness;
 
     #[bench]
     fn alloc_managed_small(bh: &mut BenchHarness) {
index 2ac3a9817872e24b0b08452cd90e9d245cf45350..c3f79ff7139a6de652708802341cacdbedc7a51d 100644 (file)
@@ -4498,7 +4498,8 @@ fn test_into_maybe_owned() {
 
 #[cfg(test)]
 mod bench {
-    use extra::test::BenchHarness;
+    extern crate test;
+    use self::test::BenchHarness;
     use super::*;
     use prelude::*;
 
index f57c5bc649a91ae7fb58312e18c808d98c8be6f2..d17d59f8665e90e30cf5284e42a092fb9d2f7de1 100644 (file)
@@ -902,10 +902,11 @@ fn test_mut_bound() {
 
 #[cfg(test)]
 mod bench_map {
+    extern crate test;
+    use self::test::BenchHarness;
     use super::*;
     use prelude::*;
     use rand::{weak_rng, Rng};
-    use extra::test::BenchHarness;
 
     #[bench]
     fn bench_iter_small(bh: &mut BenchHarness) {
index 9d9ebad1c69b2934b4ce7b1ef24d740b6f107ea0..d16ad54a25d2aac770d7fcbfa5fbedd3a89d4ad6 100644 (file)
@@ -4360,7 +4360,8 @@ fn test_mut_last() {
 
 #[cfg(test)]
 mod bench {
-    use extra::test::BenchHarness;
+    extern crate test;
+    use self::test::BenchHarness;
     use mem;
     use prelude::*;
     use ptr;
index a3025d394dab5a99983e33d2f1a6544bac7673ee..e8edc1a0dfc0e4ac53d2c87c6d6950bf8e96c19e 100644 (file)
@@ -1196,8 +1196,9 @@ pub enum InlinedItem {
 
 #[cfg(test)]
 mod test {
+    extern crate extra;
+    use self::extra::json;
     use serialize;
-    use extra;
     use codemap::*;
     use super::*;
 
@@ -1223,6 +1224,6 @@ fn check_asts_encodable() {
             },
         };
         // doesn't matter which encoder we use....
-        let _f = (&e as &serialize::Encodable<extra::json::Encoder>);
+        let _f = (&e as &serialize::Encodable<json::Encoder>);
     }
 }
index 3cbdad9a71d7e29532b96f9e380f16fc3f04eccf..42c9ab461aa009be8ee037a1059e9c34165a3e89 100644 (file)
@@ -32,7 +32,6 @@
 
 #[deny(non_camel_case_types)];
 
-#[cfg(test)] extern crate extra;
 extern crate serialize;
 extern crate term;
 extern crate collections;
index b4139714a2edf90c1b5a770ecaf25b04e182055b..08aec07577098561e4bac74978d21ed4dc7e23ee 100644 (file)
@@ -283,9 +283,10 @@ pub fn maybe_aborted<T>(result: T, mut p: Parser) -> T {
 
 #[cfg(test)]
 mod test {
+    extern crate extra;
+    use self::extra::json;
     use super::*;
     use serialize::Encodable;
-    use extra;
     use std::io;
     use std::io::MemWriter;
     use std::str;
@@ -300,9 +301,9 @@ mod test {
     use util::parser_testing::string_to_stmt;
 
     #[cfg(test)]
-    fn to_json_str<'a, E: Encodable<extra::json::Encoder<'a>>>(val: &E) -> ~str {
+    fn to_json_str<'a, E: Encodable<json::Encoder<'a>>>(val: &E) -> ~str {
         let mut writer = MemWriter::new();
-        let mut encoder = extra::json::Encoder::new(&mut writer as &mut io::Writer);
+        let mut encoder = json::Encoder::new(&mut writer as &mut io::Writer);
         val.encode(&mut encoder);
         str::from_utf8_owned(writer.unwrap()).unwrap()
     }
diff --git a/src/libtest/lib.rs b/src/libtest/lib.rs
new file mode 100644 (file)
index 0000000..226dd75
--- /dev/null
@@ -0,0 +1,1577 @@
+// Copyright 2012-2014 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+// Support code for rustc's built-in test runner generator. Currently,
+// none of this is meant for users. It is intended to support the
+// simplest interface possible for representing and running tests
+// while providing a base that other test frameworks may build off of.
+
+#[crate_id = "test#0.10-pre"];
+#[comment = "Rust internal test library only used by rustc"];
+#[license = "MIT/ASL2"];
+#[crate_type = "rlib"];
+#[crate_type = "dylib"];
+
+#[feature(asm)];
+
+extern crate collections;
+extern crate extra;
+extern crate getopts;
+extern crate serialize;
+extern crate term;
+
+use collections::TreeMap;
+use extra::json::ToJson;
+use extra::json;
+use extra::stats::Stats;
+use extra::stats;
+use extra::time::precise_time_ns;
+use getopts::{OptGroup, optflag, optopt};
+use serialize::Decodable;
+use term::Terminal;
+use term::color::{Color, RED, YELLOW, GREEN, CYAN};
+
+use std::cmp;
+use std::io;
+use std::io::{File, PortReader, ChanWriter};
+use std::io::stdio::StdWriter;
+use std::str;
+use std::task;
+use std::to_str::ToStr;
+use std::f64;
+use std::os;
+
+// to be used by rustc to compile tests in libtest
+pub mod test {
+    pub use {BenchHarness, TestName, TestResult, TestDesc,
+             TestDescAndFn, TestOpts, TrFailed, TrIgnored, TrOk,
+             Metric, MetricMap, MetricAdded, MetricRemoved,
+             MetricChange, Improvement, Regression, LikelyNoise,
+             StaticTestFn, StaticTestName, DynTestName, DynTestFn,
+             run_test, test_main, test_main_static, filter_tests,
+             parse_opts};
+}
+
+// The name of a test. By convention this follows the rules for rust
+// paths; i.e. it should be a series of identifiers separated by double
+// colons. This way if some test runner wants to arrange the tests
+// hierarchically it may.
+
+#[deriving(Clone)]
+pub enum TestName {
+    StaticTestName(&'static str),
+    DynTestName(~str)
+}
+impl ToStr for TestName {
+    fn to_str(&self) -> ~str {
+        match (*self).clone() {
+            StaticTestName(s) => s.to_str(),
+            DynTestName(s) => s.to_str()
+        }
+    }
+}
+
+#[deriving(Clone)]
+enum NamePadding { PadNone, PadOnLeft, PadOnRight }
+
+impl TestDesc {
+    fn padded_name(&self, column_count: uint, align: NamePadding) -> ~str {
+        use std::num::Saturating;
+        let name = self.name.to_str();
+        let fill = column_count.saturating_sub(name.len());
+        let pad = " ".repeat(fill);
+        match align {
+            PadNone => name,
+            PadOnLeft => pad.append(name),
+            PadOnRight => name.append(pad),
+        }
+    }
+}
+
+/// Represents a benchmark function.
+pub trait TDynBenchFn {
+    fn run(&self, harness: &mut BenchHarness);
+}
+
+// A function that runs a test. If the function returns successfully,
+// the test succeeds; if the function fails then the test fails. We
+// may need to come up with a more clever definition of test in order
+// to support isolation of tests into tasks.
+pub enum TestFn {
+    StaticTestFn(extern fn()),
+    StaticBenchFn(extern fn(&mut BenchHarness)),
+    StaticMetricFn(proc(&mut MetricMap)),
+    DynTestFn(proc()),
+    DynMetricFn(proc(&mut MetricMap)),
+    DynBenchFn(~TDynBenchFn)
+}
+
+impl TestFn {
+    fn padding(&self) -> NamePadding {
+        match self {
+            &StaticTestFn(..)   => PadNone,
+            &StaticBenchFn(..)  => PadOnRight,
+            &StaticMetricFn(..) => PadOnRight,
+            &DynTestFn(..)      => PadNone,
+            &DynMetricFn(..)    => PadOnRight,
+            &DynBenchFn(..)     => PadOnRight,
+        }
+    }
+}
+
+// Structure passed to BenchFns
+pub struct BenchHarness {
+    priv iterations: u64,
+    priv ns_start: u64,
+    priv ns_end: u64,
+    bytes: u64
+}
+
+// The definition of a single test. A test runner will run a list of
+// these.
+#[deriving(Clone)]
+pub struct TestDesc {
+    name: TestName,
+    ignore: bool,
+    should_fail: bool
+}
+
+pub struct TestDescAndFn {
+    desc: TestDesc,
+    testfn: TestFn,
+}
+
+#[deriving(Clone, Encodable, Decodable, Eq)]
+pub struct Metric {
+    priv value: f64,
+    priv noise: f64
+}
+
+impl Metric {
+    pub fn new(value: f64, noise: f64) -> Metric {
+        Metric {value: value, noise: noise}
+    }
+}
+
+#[deriving(Eq)]
+pub struct MetricMap(TreeMap<~str,Metric>);
+
+impl Clone for MetricMap {
+    fn clone(&self) -> MetricMap {
+        let MetricMap(ref map) = *self;
+        MetricMap(map.clone())
+    }
+}
+
+/// Analysis of a single change in metric
+#[deriving(Eq)]
+pub enum MetricChange {
+    LikelyNoise,
+    MetricAdded,
+    MetricRemoved,
+    Improvement(f64),
+    Regression(f64)
+}
+
+pub type MetricDiff = TreeMap<~str,MetricChange>;
+
+// The default console test runner. It accepts the command line
+// arguments and a vector of test_descs.
+pub fn test_main(args: &[~str], tests: ~[TestDescAndFn]) {
+    let opts =
+        match parse_opts(args) {
+            Some(Ok(o)) => o,
+            Some(Err(msg)) => fail!("{}", msg),
+            None => return
+        };
+    match run_tests_console(&opts, tests) {
+        Ok(true) => {}
+        Ok(false) => fail!("Some tests failed"),
+        Err(e) => fail!("io error when running tests: {}", e),
+    }
+}
+
+// A variant optimized for invocation with a static test vector.
+// This will fail (intentionally) when fed any dynamic tests, because
+// it is copying the static values out into a dynamic vector and cannot
+// copy dynamic values. It is doing this because from this point on
+// a ~[TestDescAndFn] is used in order to effect ownership-transfer
+// semantics into parallel test runners, which in turn requires a ~[]
+// rather than a &[].
+pub fn test_main_static(args: &[~str], tests: &[TestDescAndFn]) {
+    let owned_tests = tests.map(|t| {
+        match t.testfn {
+            StaticTestFn(f) =>
+            TestDescAndFn { testfn: StaticTestFn(f), desc: t.desc.clone() },
+
+            StaticBenchFn(f) =>
+            TestDescAndFn { testfn: StaticBenchFn(f), desc: t.desc.clone() },
+
+            _ => {
+                fail!("non-static tests passed to test::test_main_static");
+            }
+        }
+    });
+    test_main(args, owned_tests)
+}
+
+pub struct TestOpts {
+    filter: Option<~str>,
+    run_ignored: bool,
+    run_tests: bool,
+    run_benchmarks: bool,
+    ratchet_metrics: Option<Path>,
+    ratchet_noise_percent: Option<f64>,
+    save_metrics: Option<Path>,
+    test_shard: Option<(uint,uint)>,
+    logfile: Option<Path>
+}
+
+/// Result of parsing the options.
+pub type OptRes = Result<TestOpts, ~str>;
+
+fn optgroups() -> ~[getopts::OptGroup] {
+    ~[getopts::optflag("", "ignored", "Run ignored tests"),
+      getopts::optflag("", "test", "Run tests and not benchmarks"),
+      getopts::optflag("", "bench", "Run benchmarks instead of tests"),
+      getopts::optflag("h", "help", "Display this message (longer with --help)"),
+      getopts::optopt("", "save-metrics", "Location to save bench metrics",
+                     "PATH"),
+      getopts::optopt("", "ratchet-metrics",
+                     "Location to load and save metrics from. The metrics \
+                      loaded are used to cause benchmarks to fail if they \
+                      run too slowly", "PATH"),
+      getopts::optopt("", "ratchet-noise-percent",
+                     "Tests within N% of the recorded metrics will be \
+                      considered passing", "PERCENTAGE"),
+      getopts::optopt("", "logfile", "Write logs to the specified file instead \
+                          of stdout", "PATH"),
+      getopts::optopt("", "test-shard", "Run shard A, of B shards, of the testsuite",
+                     "A.B")]
+}
+
+fn usage(binary: &str, helpstr: &str) {
+    let message = format!("Usage: {} [OPTIONS] [FILTER]", binary);
+    println!("{}", getopts::usage(message, optgroups()));
+    println!("");
+    if helpstr == "help" {
+        println!("{}", "\
+The FILTER is matched against the name of all tests to run, and if any tests
+have a substring match, only those tests are run.
+
+By default, all tests are run in parallel. This can be altered with the
+RUST_TEST_TASKS environment variable when running tests (set it to 1).
+
+Test Attributes:
+
+    #[test]        - Indicates a function is a test to be run. This function
+                     takes no arguments.
+    #[bench]       - Indicates a function is a benchmark to be run. This
+                     function takes one argument (test::BenchHarness).
+    #[should_fail] - This function (also labeled with #[test]) will only pass if
+                     the code causes a failure (an assertion failure or fail!)
+    #[ignore]      - When applied to a function which is already attributed as a
+                     test, then the test runner will ignore these tests during
+                     normal test runs. Running with --ignored will run these
+                     tests. This may also be written as #[ignore(cfg(...))] to
+                     ignore the test on certain configurations.");
+    }
+}
+
+// Parses command line arguments into test options
+pub fn parse_opts(args: &[~str]) -> Option<OptRes> {
+    let args_ = args.tail();
+    let matches =
+        match getopts::getopts(args_, optgroups()) {
+          Ok(m) => m,
+          Err(f) => return Some(Err(f.to_err_msg()))
+        };
+
+    if matches.opt_present("h") { usage(args[0], "h"); return None; }
+    if matches.opt_present("help") { usage(args[0], "help"); return None; }
+
+    let filter =
+        if matches.free.len() > 0 {
+            Some(matches.free[0].clone())
+        } else {
+            None
+        };
+
+    let run_ignored = matches.opt_present("ignored");
+
+    let logfile = matches.opt_str("logfile");
+    let logfile = logfile.map(|s| Path::new(s));
+
+    let run_benchmarks = matches.opt_present("bench");
+    let run_tests = ! run_benchmarks ||
+        matches.opt_present("test");
+
+    let ratchet_metrics = matches.opt_str("ratchet-metrics");
+    let ratchet_metrics = ratchet_metrics.map(|s| Path::new(s));
+
+    let ratchet_noise_percent = matches.opt_str("ratchet-noise-percent");
+    let ratchet_noise_percent = ratchet_noise_percent.map(|s| from_str::<f64>(s).unwrap());
+
+    let save_metrics = matches.opt_str("save-metrics");
+    let save_metrics = save_metrics.map(|s| Path::new(s));
+
+    let test_shard = matches.opt_str("test-shard");
+    let test_shard = opt_shard(test_shard);
+
+    let test_opts = TestOpts {
+        filter: filter,
+        run_ignored: run_ignored,
+        run_tests: run_tests,
+        run_benchmarks: run_benchmarks,
+        ratchet_metrics: ratchet_metrics,
+        ratchet_noise_percent: ratchet_noise_percent,
+        save_metrics: save_metrics,
+        test_shard: test_shard,
+        logfile: logfile
+    };
+
+    Some(Ok(test_opts))
+}
+
+pub fn opt_shard(maybestr: Option<~str>) -> Option<(uint,uint)> {
+    match maybestr {
+        None => None,
+        Some(s) => {
+            let vector = s.split('.').to_owned_vec();
+            if vector.len() == 2 {
+                match (from_str::<uint>(vector[0]),
+                       from_str::<uint>(vector[1])) {
+                    (Some(a), Some(b)) => Some((a, b)),
+                    _ => None
+                }
+            } else {
+                None
+            }
+        }
+    }
+}
+
+
+#[deriving(Clone, Eq)]
+pub struct BenchSamples {
+    priv ns_iter_summ: stats::Summary,
+    priv mb_s: uint
+}
+
+#[deriving(Clone, Eq)]
+pub enum TestResult {
+    TrOk,
+    TrFailed,
+    TrIgnored,
+    TrMetrics(MetricMap),
+    TrBench(BenchSamples),
+}
+
+enum OutputLocation<T> {
+    Pretty(term::Terminal<T>),
+    Raw(T),
+}
+
+struct ConsoleTestState<T> {
+    log_out: Option<File>,
+    out: OutputLocation<T>,
+    use_color: bool,
+    total: uint,
+    passed: uint,
+    failed: uint,
+    ignored: uint,
+    measured: uint,
+    metrics: MetricMap,
+    failures: ~[(TestDesc, ~[u8])],
+    max_name_len: uint, // number of columns to fill when aligning names
+}
+
+impl<T: Writer> ConsoleTestState<T> {
+    pub fn new(opts: &TestOpts,
+               _: Option<T>) -> io::IoResult<ConsoleTestState<StdWriter>> {
+        let log_out = match opts.logfile {
+            Some(ref path) => Some(if_ok!(File::create(path))),
+            None => None
+        };
+        let out = match term::Terminal::new(io::stdout()) {
+            Err(_) => Raw(io::stdout()),
+            Ok(t) => Pretty(t)
+        };
+        Ok(ConsoleTestState {
+            out: out,
+            log_out: log_out,
+            use_color: use_color(),
+            total: 0u,
+            passed: 0u,
+            failed: 0u,
+            ignored: 0u,
+            measured: 0u,
+            metrics: MetricMap::new(),
+            failures: ~[],
+            max_name_len: 0u,
+        })
+    }
+
+    pub fn write_ok(&mut self) -> io::IoResult<()> {
+        self.write_pretty("ok", term::color::GREEN)
+    }
+
+    pub fn write_failed(&mut self) -> io::IoResult<()> {
+        self.write_pretty("FAILED", term::color::RED)
+    }
+
+    pub fn write_ignored(&mut self) -> io::IoResult<()> {
+        self.write_pretty("ignored", term::color::YELLOW)
+    }
+
+    pub fn write_metric(&mut self) -> io::IoResult<()> {
+        self.write_pretty("metric", term::color::CYAN)
+    }
+
+    pub fn write_bench(&mut self) -> io::IoResult<()> {
+        self.write_pretty("bench", term::color::CYAN)
+    }
+
+    pub fn write_added(&mut self) -> io::IoResult<()> {
+        self.write_pretty("added", term::color::GREEN)
+    }
+
+    pub fn write_improved(&mut self) -> io::IoResult<()> {
+        self.write_pretty("improved", term::color::GREEN)
+    }
+
+    pub fn write_removed(&mut self) -> io::IoResult<()> {
+        self.write_pretty("removed", term::color::YELLOW)
+    }
+
+    pub fn write_regressed(&mut self) -> io::IoResult<()> {
+        self.write_pretty("regressed", term::color::RED)
+    }
+
+    pub fn write_pretty(&mut self,
+                        word: &str,
+                        color: term::color::Color) -> io::IoResult<()> {
+        match self.out {
+            Pretty(ref mut term) => {
+                if self.use_color {
+                    if_ok!(term.fg(color));
+                }
+                if_ok!(term.write(word.as_bytes()));
+                if self.use_color {
+                    if_ok!(term.reset());
+                }
+                Ok(())
+            }
+            Raw(ref mut stdout) => stdout.write(word.as_bytes())
+        }
+    }
+
+    pub fn write_plain(&mut self, s: &str) -> io::IoResult<()> {
+        match self.out {
+            Pretty(ref mut term) => term.write(s.as_bytes()),
+            Raw(ref mut stdout) => stdout.write(s.as_bytes())
+        }
+    }
+
+    pub fn write_run_start(&mut self, len: uint) -> io::IoResult<()> {
+        self.total = len;
+        let noun = if len != 1 { &"tests" } else { &"test" };
+        self.write_plain(format!("\nrunning {} {}\n", len, noun))
+    }
+
+    pub fn write_test_start(&mut self, test: &TestDesc,
+                            align: NamePadding) -> io::IoResult<()> {
+        let name = test.padded_name(self.max_name_len, align);
+        self.write_plain(format!("test {} ... ", name))
+    }
+
+    pub fn write_result(&mut self, result: &TestResult) -> io::IoResult<()> {
+        if_ok!(match *result {
+            TrOk => self.write_ok(),
+            TrFailed => self.write_failed(),
+            TrIgnored => self.write_ignored(),
+            TrMetrics(ref mm) => {
+                if_ok!(self.write_metric());
+                self.write_plain(format!(": {}", fmt_metrics(mm)))
+            }
+            TrBench(ref bs) => {
+                if_ok!(self.write_bench());
+                self.write_plain(format!(": {}", fmt_bench_samples(bs)))
+            }
+        });
+        self.write_plain("\n")
+    }
+
+    pub fn write_log(&mut self, test: &TestDesc,
+                     result: &TestResult) -> io::IoResult<()> {
+        match self.log_out {
+            None => Ok(()),
+            Some(ref mut o) => {
+                let s = format!("{} {}\n", match *result {
+                        TrOk => ~"ok",
+                        TrFailed => ~"failed",
+                        TrIgnored => ~"ignored",
+                        TrMetrics(ref mm) => fmt_metrics(mm),
+                        TrBench(ref bs) => fmt_bench_samples(bs)
+                    }, test.name.to_str());
+                o.write(s.as_bytes())
+            }
+        }
+    }
+
+    pub fn write_failures(&mut self) -> io::IoResult<()> {
+        if_ok!(self.write_plain("\nfailures:\n"));
+        let mut failures = ~[];
+        let mut fail_out = ~"";
+        for &(ref f, ref stdout) in self.failures.iter() {
+            failures.push(f.name.to_str());
+            if stdout.len() > 0 {
+                fail_out.push_str(format!("---- {} stdout ----\n\t",
+                                  f.name.to_str()));
+                let output = str::from_utf8_lossy(*stdout);
+                fail_out.push_str(output.as_slice().replace("\n", "\n\t"));
+                fail_out.push_str("\n");
+            }
+        }
+        if fail_out.len() > 0 {
+            if_ok!(self.write_plain("\n"));
+            if_ok!(self.write_plain(fail_out));
+        }
+
+        if_ok!(self.write_plain("\nfailures:\n"));
+        failures.sort();
+        for name in failures.iter() {
+            if_ok!(self.write_plain(format!("    {}\n", name.to_str())));
+        }
+        Ok(())
+    }
+
+    pub fn write_metric_diff(&mut self, diff: &MetricDiff) -> io::IoResult<()> {
+        let mut noise = 0;
+        let mut improved = 0;
+        let mut regressed = 0;
+        let mut added = 0;
+        let mut removed = 0;
+
+        for (k, v) in diff.iter() {
+            match *v {
+                LikelyNoise => noise += 1,
+                MetricAdded => {
+                    added += 1;
+                    if_ok!(self.write_added());
+                    if_ok!(self.write_plain(format!(": {}\n", *k)));
+                }
+                MetricRemoved => {
+                    removed += 1;
+                    if_ok!(self.write_removed());
+                    if_ok!(self.write_plain(format!(": {}\n", *k)));
+                }
+                Improvement(pct) => {
+                    improved += 1;
+                    if_ok!(self.write_plain(format!(": {}", *k)));
+                    if_ok!(self.write_improved());
+                    if_ok!(self.write_plain(format!(" by {:.2f}%\n", pct as f64)));
+                }
+                Regression(pct) => {
+                    regressed += 1;
+                    if_ok!(self.write_plain(format!(": {}", *k)));
+                    if_ok!(self.write_regressed());
+                    if_ok!(self.write_plain(format!(" by {:.2f}%\n", pct as f64)));
+                }
+            }
+        }
+        if_ok!(self.write_plain(format!("result of ratchet: {} metrics added, \
+                                        {} removed, {} improved, {} regressed, \
+                                        {} noise\n",
+                                       added, removed, improved, regressed,
+                                       noise)));
+        if regressed == 0 {
+            if_ok!(self.write_plain("updated ratchet file\n"));
+        } else {
+            if_ok!(self.write_plain("left ratchet file untouched\n"));
+        }
+        Ok(())
+    }
+
+    pub fn write_run_finish(&mut self,
+                            ratchet_metrics: &Option<Path>,
+                            ratchet_pct: Option<f64>) -> io::IoResult<bool> {
+        assert!(self.passed + self.failed + self.ignored + self.measured == self.total);
+
+        let ratchet_success = match *ratchet_metrics {
+            None => true,
+            Some(ref pth) => {
+                if_ok!(self.write_plain(format!("\nusing metrics ratchet: {}\n",
+                                        pth.display())));
+                match ratchet_pct {
+                    None => (),
+                    Some(pct) =>
+                        if_ok!(self.write_plain(format!("with noise-tolerance \
+                                                         forced to: {}%\n",
+                                                        pct)))
+                }
+                let (diff, ok) = self.metrics.ratchet(pth, ratchet_pct);
+                if_ok!(self.write_metric_diff(&diff));
+                ok
+            }
+        };
+
+        let test_success = self.failed == 0u;
+        if !test_success {
+            if_ok!(self.write_failures());
+        }
+
+        let success = ratchet_success && test_success;
+
+        if_ok!(self.write_plain("\ntest result: "));
+        if success {
+            // There's no parallelism at this point so it's safe to use color
+            if_ok!(self.write_ok());
+        } else {
+            if_ok!(self.write_failed());
+        }
+        let s = format!(". {} passed; {} failed; {} ignored; {} measured\n\n",
+                        self.passed, self.failed, self.ignored, self.measured);
+        if_ok!(self.write_plain(s));
+        return Ok(success);
+    }
+}
+
+pub fn fmt_metrics(mm: &MetricMap) -> ~str {
+    let MetricMap(ref mm) = *mm;
+    let v : ~[~str] = mm.iter()
+        .map(|(k,v)| format!("{}: {} (+/- {})",
+                          *k,
+                          v.value as f64,
+                          v.noise as f64))
+        .collect();
+    v.connect(", ")
+}
+
+pub fn fmt_bench_samples(bs: &BenchSamples) -> ~str {
+    if bs.mb_s != 0 {
+        format!("{:>9} ns/iter (+/- {}) = {} MB/s",
+             bs.ns_iter_summ.median as uint,
+             (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint,
+             bs.mb_s)
+    } else {
+        format!("{:>9} ns/iter (+/- {})",
+             bs.ns_iter_summ.median as uint,
+             (bs.ns_iter_summ.max - bs.ns_iter_summ.min) as uint)
+    }
+}
+
+// A simple console test runner
+pub fn run_tests_console(opts: &TestOpts,
+                         tests: ~[TestDescAndFn]) -> io::IoResult<bool> {
+    fn callback<T: Writer>(event: &TestEvent,
+                           st: &mut ConsoleTestState<T>) -> io::IoResult<()> {
+        debug!("callback(event={:?})", event);
+        match (*event).clone() {
+            TeFiltered(ref filtered_tests) => st.write_run_start(filtered_tests.len()),
+            TeWait(ref test, padding) => st.write_test_start(test, padding),
+            TeResult(test, result, stdout) => {
+                if_ok!(st.write_log(&test, &result));
+                if_ok!(st.write_result(&result));
+                match result {
+                    TrOk => st.passed += 1,
+                    TrIgnored => st.ignored += 1,
+                    TrMetrics(mm) => {
+                        let tname = test.name.to_str();
+                        let MetricMap(mm) = mm;
+                        for (k,v) in mm.iter() {
+                            st.metrics.insert_metric(tname + "." + *k,
+                                                     v.value, v.noise);
+                        }
+                        st.measured += 1
+                    }
+                    TrBench(bs) => {
+                        st.metrics.insert_metric(test.name.to_str(),
+                                                 bs.ns_iter_summ.median,
+                                                 bs.ns_iter_summ.max - bs.ns_iter_summ.min);
+                        st.measured += 1
+                    }
+                    TrFailed => {
+                        st.failed += 1;
+                        st.failures.push((test, stdout));
+                    }
+                }
+                Ok(())
+            }
+        }
+    }
+    let mut st = if_ok!(ConsoleTestState::new(opts, None::<StdWriter>));
+    fn len_if_padded(t: &TestDescAndFn) -> uint {
+        match t.testfn.padding() {
+            PadNone => 0u,
+            PadOnLeft | PadOnRight => t.desc.name.to_str().len(),
+        }
+    }
+    match tests.iter().max_by(|t|len_if_padded(*t)) {
+        Some(t) => {
+            let n = t.desc.name.to_str();
+            debug!("Setting max_name_len from: {}", n);
+            st.max_name_len = n.len();
+        },
+        None => {}
+    }
+    if_ok!(run_tests(opts, tests, |x| callback(&x, &mut st)));
+    match opts.save_metrics {
+        None => (),
+        Some(ref pth) => {
+            if_ok!(st.metrics.save(pth));
+            if_ok!(st.write_plain(format!("\nmetrics saved to: {}",
+                                          pth.display())));
+        }
+    }
+    return st.write_run_finish(&opts.ratchet_metrics, opts.ratchet_noise_percent);
+}
+
+#[test]
+fn should_sort_failures_before_printing_them() {
+    use std::io::MemWriter;
+    use std::str;
+
+    let test_a = TestDesc {
+        name: StaticTestName("a"),
+        ignore: false,
+        should_fail: false
+    };
+
+    let test_b = TestDesc {
+        name: StaticTestName("b"),
+        ignore: false,
+        should_fail: false
+    };
+
+    let mut st = ConsoleTestState {
+        log_out: None,
+        out: Raw(MemWriter::new()),
+        use_color: false,
+        total: 0u,
+        passed: 0u,
+        failed: 0u,
+        ignored: 0u,
+        measured: 0u,
+        max_name_len: 10u,
+        metrics: MetricMap::new(),
+        failures: ~[(test_b, ~[]), (test_a, ~[])]
+    };
+
+    st.write_failures().unwrap();
+    let s = match st.out {
+        Raw(ref m) => str::from_utf8_lossy(m.get_ref()),
+        Pretty(_) => unreachable!()
+    };
+
+    let apos = s.as_slice().find_str("a").unwrap();
+    let bpos = s.as_slice().find_str("b").unwrap();
+    assert!(apos < bpos);
+}
+
+fn use_color() -> bool { return get_concurrency() == 1; }
+
+#[deriving(Clone)]
+enum TestEvent {
+    TeFiltered(~[TestDesc]),
+    TeWait(TestDesc, NamePadding),
+    TeResult(TestDesc, TestResult, ~[u8] /* stdout */),
+}
+
+pub type MonitorMsg = (TestDesc, TestResult, ~[u8] /* stdout */);
+
+fn run_tests(opts: &TestOpts,
+             tests: ~[TestDescAndFn],
+             callback: |e: TestEvent| -> io::IoResult<()>) -> io::IoResult<()> {
+    let filtered_tests = filter_tests(opts, tests);
+    let filtered_descs = filtered_tests.map(|t| t.desc.clone());
+
+    if_ok!(callback(TeFiltered(filtered_descs)));
+
+    let (filtered_tests, filtered_benchs_and_metrics) =
+        filtered_tests.partition(|e| {
+            match e.testfn {
+                StaticTestFn(_) | DynTestFn(_) => true,
+                _ => false
+            }
+        });
+
+    // It's tempting to just spawn all the tests at once, but since we have
+    // many tests that run in other processes we would be making a big mess.
+    let concurrency = get_concurrency();
+    debug!("using {} test tasks", concurrency);
+
+    let mut remaining = filtered_tests;
+    remaining.reverse();
+    let mut pending = 0;
+
+    let (p, ch) = Chan::<MonitorMsg>::new();
+
+    while pending > 0 || !remaining.is_empty() {
+        while pending < concurrency && !remaining.is_empty() {
+            let test = remaining.pop().unwrap();
+            if concurrency == 1 {
+                // We are doing one test at a time so we can print the name
+                // of the test before we run it. Useful for debugging tests
+                // that hang forever.
+                if_ok!(callback(TeWait(test.desc.clone(), test.testfn.padding())));
+            }
+            run_test(!opts.run_tests, test, ch.clone());
+            pending += 1;
+        }
+
+        let (desc, result, stdout) = p.recv();
+        if concurrency != 1 {
+            if_ok!(callback(TeWait(desc.clone(), PadNone)));
+        }
+        if_ok!(callback(TeResult(desc, result, stdout)));
+        pending -= 1;
+    }
+
+    // All benchmarks run at the end, in serial.
+    // (this includes metric fns)
+    for b in filtered_benchs_and_metrics.move_iter() {
+        if_ok!(callback(TeWait(b.desc.clone(), b.testfn.padding())));
+        run_test(!opts.run_benchmarks, b, ch.clone());
+        let (test, result, stdout) = p.recv();
+        if_ok!(callback(TeResult(test, result, stdout)));
+    }
+    Ok(())
+}
+
+fn get_concurrency() -> uint {
+    use std::rt;
+    match os::getenv("RUST_TEST_TASKS") {
+        Some(s) => {
+            let opt_n: Option<uint> = FromStr::from_str(s);
+            match opt_n {
+                Some(n) if n > 0 => n,
+                _ => fail!("RUST_TEST_TASKS is `{}`, should be a positive integer.", s)
+            }
+        }
+        None => {
+            rt::default_sched_threads()
+        }
+    }
+}
+
+pub fn filter_tests(
+    opts: &TestOpts,
+    tests: ~[TestDescAndFn]) -> ~[TestDescAndFn]
+{
+    let mut filtered = tests;
+
+    // Remove tests that don't match the test filter
+    filtered = if opts.filter.is_none() {
+        filtered
+    } else {
+        let filter_str = match opts.filter {
+          Some(ref f) => (*f).clone(),
+          None => ~""
+        };
+
+        fn filter_fn(test: TestDescAndFn, filter_str: &str) ->
+            Option<TestDescAndFn> {
+            if test.desc.name.to_str().contains(filter_str) {
+                return Some(test);
+            } else {
+                return None;
+            }
+        }
+
+        filtered.move_iter().filter_map(|x| filter_fn(x, filter_str)).collect()
+    };
+
+    // Maybe pull out the ignored tests and unignore them
+    filtered = if !opts.run_ignored {
+        filtered
+    } else {
+        fn filter(test: TestDescAndFn) -> Option<TestDescAndFn> {
+            if test.desc.ignore {
+                let TestDescAndFn {desc, testfn} = test;
+                Some(TestDescAndFn {
+                    desc: TestDesc {ignore: false, ..desc},
+                    testfn: testfn
+                })
+            } else {
+                None
+            }
+        };
+        filtered.move_iter().filter_map(|x| filter(x)).collect()
+    };
+
+    // Sort the tests alphabetically
+    filtered.sort_by(|t1, t2| t1.desc.name.to_str().cmp(&t2.desc.name.to_str()));
+
+    // Shard the remaining tests, if sharding requested.
+    match opts.test_shard {
+        None => filtered,
+        Some((a,b)) =>
+            filtered.move_iter().enumerate()
+            .filter(|&(i,_)| i % b == a)
+            .map(|(_,t)| t)
+            .to_owned_vec()
+    }
+}
+
+pub fn run_test(force_ignore: bool,
+                test: TestDescAndFn,
+                monitor_ch: Chan<MonitorMsg>) {
+
+    let TestDescAndFn {desc, testfn} = test;
+
+    if force_ignore || desc.ignore {
+        monitor_ch.send((desc, TrIgnored, ~[]));
+        return;
+    }
+
+    fn run_test_inner(desc: TestDesc,
+                      monitor_ch: Chan<MonitorMsg>,
+                      testfn: proc()) {
+        spawn(proc() {
+            let (p, c) = Chan::new();
+            let mut reader = PortReader::new(p);
+            let stdout = ChanWriter::new(c.clone());
+            let stderr = ChanWriter::new(c);
+            let mut task = task::task().named(match desc.name {
+                DynTestName(ref name) => name.clone().into_maybe_owned(),
+                StaticTestName(name) => name.into_maybe_owned(),
+            });
+            task.opts.stdout = Some(~stdout as ~Writer);
+            task.opts.stderr = Some(~stderr as ~Writer);
+            let result_future = task.future_result();
+            task.spawn(testfn);
+
+            let stdout = reader.read_to_end().unwrap();
+            let task_result = result_future.recv();
+            let test_result = calc_result(&desc, task_result.is_ok());
+            monitor_ch.send((desc.clone(), test_result, stdout));
+        })
+    }
+
+    match testfn {
+        DynBenchFn(bencher) => {
+            let bs = ::bench::benchmark(|harness| bencher.run(harness));
+            monitor_ch.send((desc, TrBench(bs), ~[]));
+            return;
+        }
+        StaticBenchFn(benchfn) => {
+            let bs = ::bench::benchmark(|harness| benchfn(harness));
+            monitor_ch.send((desc, TrBench(bs), ~[]));
+            return;
+        }
+        DynMetricFn(f) => {
+            let mut mm = MetricMap::new();
+            f(&mut mm);
+            monitor_ch.send((desc, TrMetrics(mm), ~[]));
+            return;
+        }
+        StaticMetricFn(f) => {
+            let mut mm = MetricMap::new();
+            f(&mut mm);
+            monitor_ch.send((desc, TrMetrics(mm), ~[]));
+            return;
+        }
+        DynTestFn(f) => run_test_inner(desc, monitor_ch, f),
+        StaticTestFn(f) => run_test_inner(desc, monitor_ch, proc() f())
+    }
+}
+
+fn calc_result(desc: &TestDesc, task_succeeded: bool) -> TestResult {
+    if task_succeeded {
+        if desc.should_fail { TrFailed }
+        else { TrOk }
+    } else {
+        if desc.should_fail { TrOk }
+        else { TrFailed }
+    }
+}
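+// In effect, `calc_result` maps a passing test to TrOk and a failing one to
+// TrFailed, with the two outcomes swapped when `should_fail` is set.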
+
+
+impl ToJson for Metric {
+    fn to_json(&self) -> json::Json {
+        let mut map = ~TreeMap::new();
+        map.insert(~"value", json::Number(self.value));
+        map.insert(~"noise", json::Number(self.noise));
+        json::Object(map)
+    }
+}
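+// A Metric therefore serializes as a small JSON object with "value" and
+// "noise" fields inside the saved metric file.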
+
+impl MetricMap {
+
+    pub fn new() -> MetricMap {
+        MetricMap(TreeMap::new())
+    }
+
+    /// Load a MetricMap from a file.
+    ///
+    /// # Failure
+    ///
+    /// This function will fail if the path does not exist or the path does not
+    /// contain a valid metric map.
+    pub fn load(p: &Path) -> MetricMap {
+        assert!(p.exists());
+        let mut f = File::open(p).unwrap();
+        let value = json::from_reader(&mut f as &mut io::Reader).unwrap();
+        let mut decoder = json::Decoder::new(value);
+        MetricMap(Decodable::decode(&mut decoder))
+    }
+
+    /// Write a MetricMap to a file.
+    pub fn save(&self, p: &Path) -> io::IoResult<()> {
+        let mut file = if_ok!(File::create(p));
+        let MetricMap(ref map) = *self;
+        map.to_json().to_pretty_writer(&mut file)
+    }
+
+    /// Compare against another MetricMap. If `noise_pct` is provided, it is
+    /// interpreted as a percentage of each old value and used as that
+    /// measurement's noise threshold; if `None`, each measurement's noise
+    /// threshold is independently chosen as the maximum of that measurement's
+    /// recorded noise quantity in either map.
+    pub fn compare_to_old(&self, old: &MetricMap,
+                          noise_pct: Option<f64>) -> MetricDiff {
+        let mut diff : MetricDiff = TreeMap::new();
+        let MetricMap(ref selfmap) = *self;
+        let MetricMap(ref old) = *old;
+        for (k, vold) in old.iter() {
+            let r = match selfmap.find(k) {
+                None => MetricRemoved,
+                Some(v) => {
+                    let delta = v.value - vold.value;
+                    let noise = match noise_pct {
+                        None => f64::max(vold.noise.abs(), v.noise.abs()),
+                        Some(pct) => vold.value * pct / 100.0
+                    };
+                    if delta.abs() <= noise {
+                        LikelyNoise
+                    } else {
+                        let pct = delta.abs() / cmp::max(vold.value, f64::EPSILON) * 100.0;
+                        if vold.noise < 0.0 {
+                            // When 'noise' is negative, it means we want
+                            // to see deltas that go up over time, and can
+                            // only tolerate slight negative movement.
+                            if delta < 0.0 {
+                                Regression(pct)
+                            } else {
+                                Improvement(pct)
+                            }
+                        } else {
+                            // When 'noise' is positive, it means we want
+                            // to see deltas that go down over time, and
+                            // can only tolerate slight positive movements.
+                            if delta < 0.0 {
+                                Improvement(pct)
+                            } else {
+                                Regression(pct)
+                            }
+                        }
+                    }
+                }
+            };
+            diff.insert((*k).clone(), r);
+        }
+        let MetricMap(ref map) = *self;
+        for (k, _) in map.iter() {
+            if !diff.contains_key(k) {
+                diff.insert((*k).clone(), MetricAdded);
+            }
+        }
+        diff
+    }
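+    // Illustratively (numbers mirror `test_metricmap_compare` below), a
+    // positive-noise metric that grows from 1000.0 to 2000.0 comes back from
+    // `compare_to_old` as Regression(100.0), while one that shrinks from
+    // 2000.0 to 1000.0 comes back as Improvement(50.0):
+    //
+    //     let diff = new_metrics.compare_to_old(&old_metrics, None);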
+
+    /// Insert a named `value` (+/- `noise`) metric into the map. The value
+    /// must be non-negative. The `noise` indicates the uncertainty of the
+    /// metric; it doubles as the "noise range" of acceptable
+    /// pairwise-regressions on this named value when comparing one metric
+    /// map to the next using `compare_to_old`.
+    ///
+    /// If `noise` is positive, then it means this metric is of a value
+    /// you want to see grow smaller, so a change larger than `noise` in the
+    /// positive direction represents a regression.
+    ///
+    /// If `noise` is negative, then it means this metric is of a value
+    /// you want to see grow larger, so a change larger than `noise` in the
+    /// negative direction represents a regression.
+    pub fn insert_metric(&mut self, name: &str, value: f64, noise: f64) {
+        let m = Metric {
+            value: value,
+            noise: noise
+        };
+        let MetricMap(ref mut map) = *self;
+        map.insert(name.to_owned(), m);
+    }
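+    // For instance (illustrative names), a time-like metric should shrink and
+    // a throughput-like metric should grow, so their noise signs differ:
+    //
+    //     m.insert_metric("runtime", 1000.0, 2.0);    // lower is better
+    //     m.insert_metric("throughput", 50.0, -2.0);  // higher is better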
+
+    /// Attempt to "ratchet" an external metric file. This involves loading
+    /// metrics from a metric file (if it exists), comparing against
+    /// the metrics in `self` using `compare_to_old`, and rewriting the
+    /// file to contain the metrics in `self` if none of the
+    /// `MetricChange`s are `Regression`. Returns the diff as well
+    /// as a boolean indicating whether the ratchet succeeded.
+    pub fn ratchet(&self, p: &Path, pct: Option<f64>) -> (MetricDiff, bool) {
+        let old = if p.exists() {
+            MetricMap::load(p)
+        } else {
+            MetricMap::new()
+        };
+
+        let diff : MetricDiff = self.compare_to_old(&old, pct);
+        let ok = diff.iter().all(|(_, v)| {
+            match *v {
+                Regression(_) => false,
+                _ => true
+            }
+        });
+
+        if ok {
+            debug!("rewriting file '{:?}' with updated metrics", p);
+            self.save(p).unwrap();
+        }
+        return (diff, ok)
+    }
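+    // A sketch of the intended flow (cf. `ratchet_test` below), assuming a
+    // saved baseline map and a freshly measured one:
+    //
+    //     baseline.save(&path).unwrap();
+    //     let (diff, ok) = current.ratchet(&path, None);
+    //
+    // `ok` is false if any metric regressed; the file is rewritten with the
+    // new values only when `ok` is true.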
+}
+
+
+// Benchmarking
+
+/// A function that is opaque to the optimizer, allowing benchmarks to pretend
+/// to use their outputs and thereby keep the computation from being optimized
+/// away as dead code.
+///
+/// This function is a no-op, and does not even read from `dummy`.
+pub fn black_box<T>(dummy: T) {
+    // we need to "use" the argument in some way LLVM can't
+    // introspect.
+    unsafe {asm!("" : : "r"(&dummy))}
+}
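+// The harness already routes each `iter` closure's return value through this
+// function (see `BenchHarness::iter` below), so a typical benchmark only needs
+// to return the value it wants kept alive, e.g. (illustrative):
+//
+//     bh.iter(|| compute(input));   // `compute`'s result is black_box'ed
+//
+// where `compute` and `input` stand in for the code and data under measurement.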
+
+
+impl BenchHarness {
+    /// Callback for benchmark functions to run in their body.
+    pub fn iter<T>(&mut self, inner: || -> T) {
+        self.ns_start = precise_time_ns();
+        let k = self.iterations;
+        for _ in range(0u64, k) {
+            black_box(inner());
+        }
+        self.ns_end = precise_time_ns();
+    }
+
+    pub fn ns_elapsed(&mut self) -> u64 {
+        if self.ns_start == 0 || self.ns_end == 0 {
+            0
+        } else {
+            self.ns_end - self.ns_start
+        }
+    }
+
+    pub fn ns_per_iter(&mut self) -> u64 {
+        if self.iterations == 0 {
+            0
+        } else {
+            self.ns_elapsed() / cmp::max(self.iterations, 1)
+        }
+    }
+
+    pub fn bench_n(&mut self, n: u64, f: |&mut BenchHarness|) {
+        self.iterations = n;
+        debug!("running benchmark for {} iterations",
+               n as uint);
+        f(self);
+    }
+
+    // A more statistics-driven benchmark algorithm than `bench_n`: it keeps
+    // resampling with a growing iteration count until the measured median
+    // stabilizes or a 3s time budget is exhausted.
+    pub fn auto_bench(&mut self, f: |&mut BenchHarness|) -> stats::Summary {
+
+        // Initial bench run to get ballpark figure.
+        let mut n = 1_u64;
+        self.bench_n(n, |x| f(x));
+
+        // Try to estimate the iteration count needed for a 1ms run, falling
+        // back to 1,000,000 iterations if the first run measured under 1ns.
+        if self.ns_per_iter() == 0 {
+            n = 1_000_000;
+        } else {
+            n = 1_000_000 / cmp::max(self.ns_per_iter(), 1);
+        }
+        // If the first run took more than 1ms we don't want to just be left
+        // doing 0 iterations on every loop. The unfortunate side effect of not
+        // being able to do as many runs is automatically handled by the
+        // statistical analysis below (i.e. larger error bars).
+        if n == 0 { n = 1; }
+
+        debug!("Initial run took {} ns, iter count that takes 1ms estimated as {}",
+               self.ns_per_iter(), n);
+
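+        // Each pass of the loop below takes 50 samples at n iterations and
+        // another 50 at 5 * n, winsorizing both sets at 5% to clip outliers
+        // before summarizing them.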
+        let mut total_run = 0;
+        let samples : &mut [f64] = [0.0_f64, ..50];
+        loop {
+            let loop_start = precise_time_ns();
+
+            for p in samples.mut_iter() {
+                self.bench_n(n, |x| f(x));
+                *p = self.ns_per_iter() as f64;
+            };
+
+            stats::winsorize(samples, 5.0);
+            let summ = stats::Summary::new(samples);
+
+            for p in samples.mut_iter() {
+                self.bench_n(5 * n, |x| f(x));
+                *p = self.ns_per_iter() as f64;
+            };
+
+            stats::winsorize(samples, 5.0);
+            let summ5 = stats::Summary::new(samples);
+
+            debug!("{} samples, median {}, MAD={}, MADP={}",
+                   samples.len(),
+                   summ.median as f64,
+                   summ.median_abs_dev as f64,
+                   summ.median_abs_dev_pct as f64);
+
+            let now = precise_time_ns();
+            let loop_run = now - loop_start;
+
+            // If we've run for 100ms and seem to have converged to a
+            // stable median.
+            if loop_run > 100_000_000 &&
+                summ.median_abs_dev_pct < 1.0 &&
+                summ.median - summ5.median < summ5.median_abs_dev {
+                return summ5;
+            }
+
+            total_run += loop_run;
+            // Longest we ever run for is 3s.
+            if total_run > 3_000_000_000 {
+                return summ5;
+            }
+
+            n *= 2;
+        }
+    }
+}
+
+pub mod bench {
+    use std::cmp;
+    use super::{BenchHarness, BenchSamples};
+
+    pub fn benchmark(f: |&mut BenchHarness|) -> BenchSamples {
+        let mut bs = BenchHarness {
+            iterations: 0,
+            ns_start: 0,
+            ns_end: 0,
+            bytes: 0
+        };
+
+        let ns_iter_summ = bs.auto_bench(f);
+
+        let ns_iter = cmp::max(ns_iter_summ.median as u64, 1);
+        let iter_s = 1_000_000_000 / ns_iter;
+        let mb_s = (bs.bytes * iter_s) / 1_000_000;
+
+        BenchSamples {
+            ns_iter_summ: ns_iter_summ,
+            mb_s: mb_s as uint
+        }
+    }
+}
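+// The MB/s figure is only meaningful when the benchmark body sets `bh.bytes`
+// to the number of bytes processed per iteration, e.g. (illustrative):
+//
+//     bh.bytes = input.len() as u64;
+//     bh.iter(|| process(input));
+//
+// where `input` and `process` stand in for the data and code being measured.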
+
+#[cfg(test)]
+mod tests {
+    use test::{TrFailed, TrIgnored, TrOk, filter_tests, parse_opts,
+               TestDesc, TestDescAndFn, TestOpts, run_test,
+               Metric, MetricMap, MetricAdded, MetricRemoved,
+               Improvement, Regression, LikelyNoise,
+               StaticTestName, DynTestName, DynTestFn};
+    use extra::tempfile::TempDir;
+
+    #[test]
+    pub fn do_not_run_ignored_tests() {
+        fn f() { fail!(); }
+        let desc = TestDescAndFn {
+            desc: TestDesc {
+                name: StaticTestName("whatever"),
+                ignore: true,
+                should_fail: false
+            },
+            testfn: DynTestFn(proc() f()),
+        };
+        let (p, ch) = Chan::new();
+        run_test(false, desc, ch);
+        let (_, res, _) = p.recv();
+        assert!(res != TrOk);
+    }
+
+    #[test]
+    pub fn ignored_tests_result_in_ignored() {
+        fn f() { }
+        let desc = TestDescAndFn {
+            desc: TestDesc {
+                name: StaticTestName("whatever"),
+                ignore: true,
+                should_fail: false
+            },
+            testfn: DynTestFn(proc() f()),
+        };
+        let (p, ch) = Chan::new();
+        run_test(false, desc, ch);
+        let (_, res, _) = p.recv();
+        assert_eq!(res, TrIgnored);
+    }
+
+    #[test]
+    fn test_should_fail() {
+        fn f() { fail!(); }
+        let desc = TestDescAndFn {
+            desc: TestDesc {
+                name: StaticTestName("whatever"),
+                ignore: false,
+                should_fail: true
+            },
+            testfn: DynTestFn(proc() f()),
+        };
+        let (p, ch) = Chan::new();
+        run_test(false, desc, ch);
+        let (_, res, _) = p.recv();
+        assert_eq!(res, TrOk);
+    }
+
+    #[test]
+    fn test_should_fail_but_succeeds() {
+        fn f() { }
+        let desc = TestDescAndFn {
+            desc: TestDesc {
+                name: StaticTestName("whatever"),
+                ignore: false,
+                should_fail: true
+            },
+            testfn: DynTestFn(proc() f()),
+        };
+        let (p, ch) = Chan::new();
+        run_test(false, desc, ch);
+        let (_, res, _) = p.recv();
+        assert_eq!(res, TrFailed);
+    }
+
+    #[test]
+    fn first_free_arg_should_be_a_filter() {
+        let args = ~[~"progname", ~"filter"];
+        let opts = match parse_opts(args) {
+            Some(Ok(o)) => o,
+            _ => fail!("Malformed arg in first_free_arg_should_be_a_filter")
+        };
+        assert!("filter" == opts.filter.clone().unwrap());
+    }
+
+    #[test]
+    fn parse_ignored_flag() {
+        let args = ~[~"progname", ~"filter", ~"--ignored"];
+        let opts = match parse_opts(args) {
+            Some(Ok(o)) => o,
+            _ => fail!("Malformed arg in parse_ignored_flag")
+        };
+        assert!((opts.run_ignored));
+    }
+
+    #[test]
+    pub fn filter_for_ignored_option() {
+        // When we run ignored tests the test filter should filter out all the
+        // unignored tests and flip the ignore flag on the rest to false
+
+        let opts = TestOpts {
+            filter: None,
+            run_ignored: true,
+            logfile: None,
+            run_tests: true,
+            run_benchmarks: false,
+            ratchet_noise_percent: None,
+            ratchet_metrics: None,
+            save_metrics: None,
+            test_shard: None
+        };
+
+        let tests = ~[
+            TestDescAndFn {
+                desc: TestDesc {
+                    name: StaticTestName("1"),
+                    ignore: true,
+                    should_fail: false,
+                },
+                testfn: DynTestFn(proc() {}),
+            },
+            TestDescAndFn {
+                desc: TestDesc {
+                    name: StaticTestName("2"),
+                    ignore: false,
+                    should_fail: false
+                },
+                testfn: DynTestFn(proc() {}),
+            },
+        ];
+        let filtered = filter_tests(&opts, tests);
+
+        assert_eq!(filtered.len(), 1);
+        assert_eq!(filtered[0].desc.name.to_str(), ~"1");
+        assert!(filtered[0].desc.ignore == false);
+    }
+
+    #[test]
+    pub fn sort_tests() {
+        let opts = TestOpts {
+            filter: None,
+            run_ignored: false,
+            logfile: None,
+            run_tests: true,
+            run_benchmarks: false,
+            ratchet_noise_percent: None,
+            ratchet_metrics: None,
+            save_metrics: None,
+            test_shard: None
+        };
+
+        let names =
+            ~[~"sha1::test", ~"int::test_to_str", ~"int::test_pow",
+             ~"test::do_not_run_ignored_tests",
+             ~"test::ignored_tests_result_in_ignored",
+             ~"test::first_free_arg_should_be_a_filter",
+             ~"test::parse_ignored_flag", ~"test::filter_for_ignored_option",
+             ~"test::sort_tests"];
+        let tests =
+        {
+            fn testfn() { }
+            let mut tests = ~[];
+            for name in names.iter() {
+                let test = TestDescAndFn {
+                    desc: TestDesc {
+                        name: DynTestName((*name).clone()),
+                        ignore: false,
+                        should_fail: false
+                    },
+                    testfn: DynTestFn(testfn),
+                };
+                tests.push(test);
+            }
+            tests
+        };
+        let filtered = filter_tests(&opts, tests);
+
+        let expected =
+            ~[~"int::test_pow", ~"int::test_to_str", ~"sha1::test",
+              ~"test::do_not_run_ignored_tests",
+              ~"test::filter_for_ignored_option",
+              ~"test::first_free_arg_should_be_a_filter",
+              ~"test::ignored_tests_result_in_ignored",
+              ~"test::parse_ignored_flag",
+              ~"test::sort_tests"];
+
+        for (a, b) in expected.iter().zip(filtered.iter()) {
+            assert!(*a == b.desc.name.to_str());
+        }
+    }
+
+    #[test]
+    pub fn test_metricmap_compare() {
+        let mut m1 = MetricMap::new();
+        let mut m2 = MetricMap::new();
+        m1.insert_metric("in-both-noise", 1000.0, 200.0);
+        m2.insert_metric("in-both-noise", 1100.0, 200.0);
+
+        m1.insert_metric("in-first-noise", 1000.0, 2.0);
+        m2.insert_metric("in-second-noise", 1000.0, 2.0);
+
+        m1.insert_metric("in-both-want-downwards-but-regressed", 1000.0, 10.0);
+        m2.insert_metric("in-both-want-downwards-but-regressed", 2000.0, 10.0);
+
+        m1.insert_metric("in-both-want-downwards-and-improved", 2000.0, 10.0);
+        m2.insert_metric("in-both-want-downwards-and-improved", 1000.0, 10.0);
+
+        m1.insert_metric("in-both-want-upwards-but-regressed", 2000.0, -10.0);
+        m2.insert_metric("in-both-want-upwards-but-regressed", 1000.0, -10.0);
+
+        m1.insert_metric("in-both-want-upwards-and-improved", 1000.0, -10.0);
+        m2.insert_metric("in-both-want-upwards-and-improved", 2000.0, -10.0);
+
+        let diff1 = m2.compare_to_old(&m1, None);
+
+        assert_eq!(*(diff1.find(&~"in-both-noise").unwrap()), LikelyNoise);
+        assert_eq!(*(diff1.find(&~"in-first-noise").unwrap()), MetricRemoved);
+        assert_eq!(*(diff1.find(&~"in-second-noise").unwrap()), MetricAdded);
+        assert_eq!(*(diff1.find(&~"in-both-want-downwards-but-regressed").unwrap()),
+                   Regression(100.0));
+        assert_eq!(*(diff1.find(&~"in-both-want-downwards-and-improved").unwrap()),
+                   Improvement(50.0));
+        assert_eq!(*(diff1.find(&~"in-both-want-upwards-but-regressed").unwrap()),
+                   Regression(50.0));
+        assert_eq!(*(diff1.find(&~"in-both-want-upwards-and-improved").unwrap()),
+                   Improvement(100.0));
+        assert_eq!(diff1.len(), 7);
+
+        let diff2 = m2.compare_to_old(&m1, Some(200.0));
+
+        assert_eq!(*(diff2.find(&~"in-both-noise").unwrap()), LikelyNoise);
+        assert_eq!(*(diff2.find(&~"in-first-noise").unwrap()), MetricRemoved);
+        assert_eq!(*(diff2.find(&~"in-second-noise").unwrap()), MetricAdded);
+        assert_eq!(*(diff2.find(&~"in-both-want-downwards-but-regressed").unwrap()), LikelyNoise);
+        assert_eq!(*(diff2.find(&~"in-both-want-downwards-and-improved").unwrap()), LikelyNoise);
+        assert_eq!(*(diff2.find(&~"in-both-want-upwards-but-regressed").unwrap()), LikelyNoise);
+        assert_eq!(*(diff2.find(&~"in-both-want-upwards-and-improved").unwrap()), LikelyNoise);
+        assert_eq!(diff2.len(), 7);
+    }
+
+    #[test]
+    pub fn ratchet_test() {
+
+        let dpth = TempDir::new("test-ratchet").expect("failed to create temp dir");
+        let pth = dpth.path().join("ratchet.json");
+
+        let mut m1 = MetricMap::new();
+        m1.insert_metric("runtime", 1000.0, 2.0);
+        m1.insert_metric("throughput", 50.0, 2.0);
+
+        let mut m2 = MetricMap::new();
+        m2.insert_metric("runtime", 1100.0, 2.0);
+        m2.insert_metric("throughput", 50.0, 2.0);
+
+        m1.save(&pth).unwrap();
+
+        // Ask for a ratchet that should fail to advance.
+        let (diff1, ok1) = m2.ratchet(&pth, None);
+        assert_eq!(ok1, false);
+        assert_eq!(diff1.len(), 2);
+        assert_eq!(*(diff1.find(&~"runtime").unwrap()), Regression(10.0));
+        assert_eq!(*(diff1.find(&~"throughput").unwrap()), LikelyNoise);
+
+        // Check that it was not rewritten.
+        let m3 = MetricMap::load(&pth);
+        let MetricMap(m3) = m3;
+        assert_eq!(m3.len(), 2);
+        assert_eq!(*(m3.find(&~"runtime").unwrap()), Metric::new(1000.0, 2.0));
+        assert_eq!(*(m3.find(&~"throughput").unwrap()), Metric::new(50.0, 2.0));
+
+        // Ask for a ratchet with an explicit noise-percentage override,
+        // that should advance.
+        let (diff2, ok2) = m2.ratchet(&pth, Some(10.0));
+        assert_eq!(ok2, true);
+        assert_eq!(diff2.len(), 2);
+        assert_eq!(*(diff2.find(&~"runtime").unwrap()), LikelyNoise);
+        assert_eq!(*(diff2.find(&~"throughput").unwrap()), LikelyNoise);
+
+        // Check that it was rewritten.
+        let m4 = MetricMap::load(&pth);
+        let MetricMap(m4) = m4;
+        assert_eq!(m4.len(), 2);
+        assert_eq!(*(m4.find(&~"runtime").unwrap()), Metric::new(1100.0, 2.0));
+        assert_eq!(*(m4.find(&~"throughput").unwrap()), Metric::new(50.0, 2.0));
+    }
+}
+
index 180838d41325c38d95a56d82f9c3f13e3f4c1c56..5941afb7d7573845d8ade0c2aec39bc355fa8114 100644 (file)
@@ -61,7 +61,7 @@ fn main() {
 
 // test harness access
 #[cfg(test)]
-extern crate extra;
+extern crate test;
 extern crate serialize;
 
 use std::str;
@@ -812,8 +812,9 @@ fn test_iterbytes_impl_for_uuid() {
 
 #[cfg(test)]
 mod bench {
+    extern crate test;
+    use self::test::BenchHarness;
     use super::Uuid;
-    use extra::test::BenchHarness;
 
     #[bench]
     pub fn create_uuids(bh: &mut BenchHarness) {