pub enum TestFn {
    StaticTestFn(extern fn()),
    StaticBenchFn(extern fn(&mut BenchHarness)),
+    StaticMetricFn(~fn(&mut MetricMap)),
    DynTestFn(~fn()),
+    DynMetricFn(~fn(&mut MetricMap)),
    DynBenchFn(~fn(&mut BenchHarness))
}
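+// A metric fn reports named measurements into a MetricMap instead of
+// passing or failing. A hypothetical metric fn might look like this
+// (illustrative only; `chunks_per_sec` is not part of this patch):
+//
+//     fn chunks_per_sec(mm: &mut MetricMap) {
+//         mm.insert_metric("chunks-per-sec", 128.0, 2.5); // value, noise
+//     }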
    noise: f64
}
+#[deriving(Eq)]
pub struct MetricMap(TreeMap<~str,Metric>);
/// Analysis of a single change in a metric
}
#[deriving(Eq)]
-pub enum TestResult { TrOk, TrFailed, TrIgnored, TrBench(BenchSamples) }
+pub enum TestResult {
+    TrOk,
+    TrFailed,
+    TrIgnored,
+    TrMetrics(MetricMap),
+    TrBench(BenchSamples)
+}
struct ConsoleTestState {
    out: @io::Writer,
    passed: uint,
    failed: uint,
    ignored: uint,
-    benchmarked: uint,
+    measured: uint,
    metrics: MetricMap,
    failures: ~[TestDesc]
}
        passed: 0u,
        failed: 0u,
        ignored: 0u,
-        benchmarked: 0u,
+        measured: 0u,
        metrics: MetricMap::new(),
        failures: ~[]
    }
        self.write_pretty("ignored", term::color::YELLOW);
    }
+    pub fn write_metric(&self) {
+        self.write_pretty("metric", term::color::CYAN);
+    }
+
    pub fn write_bench(&self) {
        self.write_pretty("bench", term::color::CYAN);
    }
-
    pub fn write_added(&self) {
        self.write_pretty("added", term::color::GREEN);
    }
            TrOk => self.write_ok(),
            TrFailed => self.write_failed(),
            TrIgnored => self.write_ignored(),
+            TrMetrics(ref mm) => {
+                self.write_metric();
+                self.out.write_str(": " + fmt_metrics(mm));
+            }
            TrBench(ref bs) => {
                self.write_bench();
                self.out.write_str(": " + fmt_bench_samples(bs))
                TrOk => ~"ok",
                TrFailed => ~"failed",
                TrIgnored => ~"ignored",
+                TrMetrics(ref mm) => fmt_metrics(mm),
                TrBench(ref bs) => fmt_bench_samples(bs)
            }, test.name.to_str()));
}
    pub fn write_run_finish(&self,
                            ratchet_metrics: &Option<Path>,
                            ratchet_pct: Option<f64>) -> bool {
-        assert!(self.passed + self.failed + self.ignored + self.benchmarked == self.total);
+        assert!(self.passed + self.failed + self.ignored + self.measured == self.total);
        let ratchet_success = match *ratchet_metrics {
            None => true,
        } else {
            self.write_failed();
        }
-        self.out.write_str(fmt!(". %u passed; %u failed; %u ignored, %u benchmarked\n\n",
-                                self.passed, self.failed, self.ignored, self.benchmarked));
+        self.out.write_str(fmt!(". %u passed; %u failed; %u ignored; %u measured\n\n",
+                                self.passed, self.failed, self.ignored, self.measured));
        return success;
    }
}
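+/// Render a MetricMap as "name: value (+/- noise)" pairs joined by ", ".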
+pub fn fmt_metrics(mm: &MetricMap) -> ~str {
+    use std::iterator::IteratorUtil;
+    let v: ~[~str] = mm.iter()
+        .transform(|(k,v)| fmt!("%s: %f (+/- %f)",
+                                *k,
+                                v.value as float,
+                                v.noise as float))
+        .collect();
+    v.connect(", ")
+}
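+// For example, a single metric "speed" with value 50.0 and noise 2.0
+// renders along the lines of "speed: 50 (+/- 2)" (exact digits depend
+// on fmt!'s %f formatting).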
+
pub fn fmt_bench_samples(bs: &BenchSamples) -> ~str {
    if bs.mb_s != 0 {
        fmt!("%u ns/iter (+/- %u) = %u MB/s",
        match result {
            TrOk => st.passed += 1,
            TrIgnored => st.ignored += 1,
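+            // Fold each reported metric into the session-wide MetricMap,
+            // namespaced by the test's name ("testname.metric").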
+            TrMetrics(mm) => {
+                let tname = test.name.to_str();
+                for mm.iter().advance |(k,v)| {
+                    st.metrics.insert_metric(tname + "." + *k,
+                                             v.value, v.noise);
+                }
+                st.measured += 1
+            }
            TrBench(bs) => {
                st.metrics.insert_metric(test.name.to_str(),
                                         bs.ns_iter_summ.median,
                                         bs.ns_iter_summ.max - bs.ns_iter_summ.min);
-                st.benchmarked += 1
+                st.measured += 1
            }
            TrFailed => {
                st.failed += 1;
        passed: 0u,
        failed: 0u,
        ignored: 0u,
-        benchmarked: 0u,
+        measured: 0u,
        metrics: MetricMap::new(),
        failures: ~[test_b, test_a]
    };
    callback(TeFiltered(filtered_descs));
-    let (filtered_tests, filtered_benchs) =
+    let (filtered_tests, filtered_benchs_and_metrics) =
        do filtered_tests.partition |e| {
            match e.testfn {
                StaticTestFn(_) | DynTestFn(_) => true,
-                StaticBenchFn(_) | DynBenchFn(_) => false
+                _ => false
            }
        };
}
    // All benchmarks run at the end, in serial.
-    for filtered_benchs.consume_iter().advance |b| {
+    // (this includes metric fns)
+    for filtered_benchs_and_metrics.consume_iter().advance |b| {
        callback(TeWait(copy b.desc));
        run_test(!opts.run_benchmarks, b, ch.clone());
        let (test, result) = p.recv();
            monitor_ch.send((desc, TrBench(bs)));
            return;
        }
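+        // Metric fns run directly on the current task: give them a
+        // fresh MetricMap to fill in, then send the whole map back as
+        // the test's result.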
+        DynMetricFn(f) => {
+            let mut mm = MetricMap::new();
+            f(&mut mm);
+            monitor_ch.send((desc, TrMetrics(mm)));
+            return;
+        }
+        StaticMetricFn(f) => {
+            let mut mm = MetricMap::new();
+            f(&mut mm);
+            monitor_ch.send((desc, TrMetrics(mm)));
+            return;
+        }
        DynTestFn(f) => run_test_inner(desc, monitor_ch, f),
        StaticTestFn(f) => run_test_inner(desc, monitor_ch, || f())
    }
impl MetricMap {
-    fn new() -> MetricMap {
+    pub fn new() -> MetricMap {
        MetricMap(TreeMap::new())
    }
    /// Load a MetricMap from a file.
-    fn load(p: &Path) -> MetricMap {
+    pub fn load(p: &Path) -> MetricMap {
        assert!(os::path_exists(p));
        let f = io::file_reader(p).get();
        let mut decoder = json::Decoder(json::from_reader(f).get());
            None => f64::max(vold.noise.abs(), v.noise.abs()),
            Some(pct) => vold.value * pct / 100.0
        };
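+        // Note: <= rather than <, so a delta exactly equal to the noise
+        // threshold still counts as noise rather than a real change.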
-        if delta.abs() < noise {
+        if delta.abs() <= noise {
            LikelyNoise
        } else {
            let pct = delta.abs() / vold.value * 100.0;
    assert_eq!(*(diff1.find(&~"in-both-noise").get()), LikelyNoise);
    assert_eq!(*(diff1.find(&~"in-first-noise").get()), MetricRemoved);
    assert_eq!(*(diff1.find(&~"in-second-noise").get()), MetricAdded);
-    assert_eq!(*(diff1.find(&~"in-both-want-downwards-but-regressed").get()), Regression(100.0));
-    assert_eq!(*(diff1.find(&~"in-both-want-downwards-and-improved").get()), Improvement(50.0));
-    assert_eq!(*(diff1.find(&~"in-both-want-upwards-but-regressed").get()), Regression(50.0));
-    assert_eq!(*(diff1.find(&~"in-both-want-upwards-and-improved").get()), Improvement(100.0));
+    assert_eq!(*(diff1.find(&~"in-both-want-downwards-but-regressed").get()),
+               Regression(100.0));
+    assert_eq!(*(diff1.find(&~"in-both-want-downwards-and-improved").get()),
+               Improvement(50.0));
+    assert_eq!(*(diff1.find(&~"in-both-want-upwards-but-regressed").get()),
+               Regression(50.0));
+    assert_eq!(*(diff1.find(&~"in-both-want-upwards-and-improved").get()),
+               Improvement(100.0));
    assert_eq!(diff1.len(), 7);
    let diff2 = m2.compare_to_old(&m1, Some(200.0));