//! Support code for rustc's built-in unit-test and micro-benchmarking
//! framework.
//!
//! Almost all user code will only be interested in `Bencher` and
//! `black_box`. All other interactions (such as writing tests and
//! benchmarks themselves) should be done via the `#[test]` and
//! `#[bench]` attributes.
//!
//! See the [Testing Chapter](../book/ch11-00-testing.html) of the book for more details.
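//!
//! # Examples
//!
//! A minimal sketch of how the attributes above are used in a test crate.
//! The example is illustrative only; it assumes the nightly `test` feature
//! is enabled, since `Bencher` and `#[bench]` are unstable:
//!
//! ```ignore
//! #![feature(test)]
//! extern crate test;
//!
//! use test::{black_box, Bencher};
//!
//! #[test]
//! fn arithmetic_works() {
//!     assert_eq!(2 + 2, 4);
//! }
//!
//! #[bench]
//! fn bench_addition(b: &mut Bencher) {
//!     // `black_box` keeps the optimizer from removing the measured work.
//!     b.iter(|| black_box(2) + black_box(2));
//! }
//! ```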

// Currently, not much of this is meant for users. It is intended to
// support the simplest interface possible for representing and
// running tests while providing a base that other test frameworks may
// build off of.

// N.B., this is also specified in this crate's Cargo.toml, but libsyntax contains logic specific to
// this crate, which relies on this attribute (rather than the value of `--crate-name` passed by
// cargo) to detect this crate.

#![crate_name = "test"]
#![unstable(feature = "test", issue = "50297")]
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/", test(attr(deny(warnings))))]
#![feature(asm)]
#![cfg_attr(any(unix, target_os = "cloudabi"), feature(libc))]
#![feature(rustc_private)]
#![feature(nll)]
#![feature(bool_to_option)]
#![feature(set_stdio)]
#![feature(panic_unwind)]
#![feature(staged_api)]
#![feature(termination_trait_lib)]
#![feature(test)]

// Public reexports
pub use self::bench::{black_box, Bencher};
pub use self::console::run_tests_console;
pub use self::options::{ColorConfig, Options, OutputFormat, RunIgnored, ShouldPanic};
pub use self::types::TestName::*;
pub use self::types::*;
pub use self::ColorConfig::*;
pub use cli::TestOpts;

// Module to be used by rustc to compile tests in libtest
pub mod test {
    pub use crate::{
        assert_test_result,
        bench::Bencher,
        cli::{parse_opts, TestOpts},
        filter_tests,
        helpers::metrics::{Metric, MetricMap},
        options::{Options, RunIgnored, RunStrategy, ShouldPanic},
        run_test, test_main, test_main_static,
        test_result::{TestResult, TrFailed, TrFailedMsg, TrIgnored, TrOk},
        time::{TestExecTime, TestTimeOptions},
        types::{
            DynTestFn, DynTestName, StaticBenchFn, StaticTestFn, StaticTestName, TestDesc,
            TestDescAndFn, TestName, TestType,
        },
    };
}

use std::{
    env, io,
    io::prelude::Write,
    panic::{self, catch_unwind, AssertUnwindSafe, PanicInfo},
    process::{self, Command, Termination},
    sync::mpsc::{channel, Sender},
    sync::{Arc, Mutex},
    thread,
    time::{Duration, Instant},
};

pub mod bench;
mod cli;
mod console;
mod event;
mod formatters;
mod helpers;
mod options;
pub mod stats;
mod test_result;
mod time;
mod types;

#[cfg(test)]
mod tests;

use event::{CompletedTest, TestEvent};
use helpers::concurrency::get_concurrency;
use helpers::exit_code::get_exit_code;
use helpers::sink::Sink;
use options::{Concurrent, RunStrategy};
use test_result::*;
use time::TestExecTime;

// Process exit code to be used to indicate test failures.
const ERROR_EXIT_CODE: i32 = 101;

const SECONDARY_TEST_INVOKER_VAR: &'static str = "__RUST_TEST_INVOKE";

/// The default console test runner. It accepts the command-line
/// arguments and a vector of tests (`TestDescAndFn`).
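///
/// # Examples
///
/// A hedged sketch of how an external harness might drive this entry point
/// with a dynamically constructed test. The exact `TestDesc` field set
/// (including `allow_fail`, `test_type`, and the `TestType::UnitTest`
/// variant) is an assumption for this revision and may differ on other
/// nightlies:
///
/// ```ignore
/// use test::{test_main, DynTestFn, DynTestName, ShouldPanic, TestDesc, TestDescAndFn, TestType};
///
/// let tests = vec![TestDescAndFn {
///     desc: TestDesc {
///         name: DynTestName(String::from("example::smoke")),
///         ignore: false,
///         should_panic: ShouldPanic::No,
///         allow_fail: false,
///         test_type: TestType::UnitTest,
///     },
///     // Dynamic tests are boxed closures; rustc-generated tests use `StaticTestFn` instead.
///     testfn: DynTestFn(Box::new(|| assert_eq!(1 + 1, 2))),
/// }];
///
/// let args: Vec<String> = std::env::args().collect();
/// test_main(&args, tests, None);
/// ```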
pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>, options: Option<Options>) {
    let mut opts = match cli::parse_opts(args) {
        Some(Ok(o)) => o,
        Some(Err(msg)) => {
            eprintln!("error: {}", msg);
            process::exit(ERROR_EXIT_CODE);
        }
        None => return,
    };
    if let Some(options) = options {
        opts.options = options;
    }
    if opts.list {
        if let Err(e) = console::list_tests_console(&opts, tests) {
            eprintln!("error: io error when listing tests: {:?}", e);
            process::exit(ERROR_EXIT_CODE);
        }
    } else {
        match console::run_tests_console(&opts, tests) {
            Ok(true) => {}
            Ok(false) => process::exit(ERROR_EXIT_CODE),
            Err(e) => {
                eprintln!("error: io error when running tests: {:?}", e);
                process::exit(ERROR_EXIT_CODE);
            }
        }
    }
}

/// A variant optimized for invocation with a static test vector.
/// This will panic (intentionally) when fed any dynamic tests.
///
/// This is the entry point for the main function generated by `rustc --test`
/// when panic=unwind.
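///
/// # Examples
///
/// Conceptually, the harness `main` that rustc emits boils down to something
/// like the sketch below. This is illustrative only, not the literal
/// expansion; `TEST_1` and `TEST_2` are placeholders for the per-test
/// descriptors rustc generates:
///
/// ```ignore
/// fn main() {
///     extern crate test;
///     test::test_main_static(&[&TEST_1, &TEST_2]);
/// }
/// ```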
pub fn test_main_static(tests: &[&TestDescAndFn]) {
    let args = env::args().collect::<Vec<_>>();
    let owned_tests: Vec<_> = tests.iter().map(make_owned_test).collect();
    test_main(&args, owned_tests, None)
}

/// A variant optimized for invocation with a static test vector.
/// This will panic (intentionally) when fed any dynamic tests.
///
/// Runs tests in panic=abort mode, which involves spawning subprocesses for
/// tests.
///
/// This is the entry point for the main function generated by `rustc --test`
/// when panic=abort.
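///
/// # How the subprocess hand-off works
///
/// Each test is re-run by spawning the current executable with the test name
/// passed through the `__RUST_TEST_INVOKE` environment variable, roughly as
/// sketched below (see `spawn_test_subprocess` further down for the real
/// code; this fragment is illustrative only and `test_name` is a placeholder):
///
/// ```ignore
/// use std::process::Command;
///
/// let output = Command::new(std::env::current_exe().unwrap())
///     .env("__RUST_TEST_INVOKE", test_name)
///     .output();
/// ```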
pub fn test_main_static_abort(tests: &[&TestDescAndFn]) {
    // If we're being run in SpawnedSecondary mode, run the test here. run_test
    // will then exit the process.
    if let Ok(name) = env::var(SECONDARY_TEST_INVOKER_VAR) {
        env::remove_var(SECONDARY_TEST_INVOKER_VAR);
        let test = tests
            .iter()
            .filter(|test| test.desc.name.as_slice() == name)
            .map(make_owned_test)
            .next()
            .expect(&format!("couldn't find a test with the provided name '{}'", name));
        let TestDescAndFn { desc, testfn } = test;
        let testfn = match testfn {
            StaticTestFn(f) => f,
            _ => panic!("only static tests are supported"),
        };
        run_test_in_spawned_subprocess(desc, Box::new(testfn));
    }

    let args = env::args().collect::<Vec<_>>();
    let owned_tests: Vec<_> = tests.iter().map(make_owned_test).collect();
    test_main(&args, owned_tests, Some(Options::new().panic_abort(true)))
}

/// Clones static values for putting into a dynamic vector, which test_main()
/// needs to hand out ownership of tests to parallel test runners.
///
/// This will panic when fed any dynamic tests, because they cannot be cloned.
fn make_owned_test(test: &&TestDescAndFn) -> TestDescAndFn {
    match test.testfn {
        StaticTestFn(f) => TestDescAndFn { testfn: StaticTestFn(f), desc: test.desc.clone() },
        StaticBenchFn(f) => TestDescAndFn { testfn: StaticBenchFn(f), desc: test.desc.clone() },
        _ => panic!("non-static tests passed to test::test_main_static"),
    }
}

/// Invoked when unit tests terminate. Should panic if the unit
/// test is considered a failure. By default, invokes `report()`
/// and checks for a `0` result.
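///
/// # Examples
///
/// A minimal sketch (fenced as `ignore` because this crate is unstable): a
/// unit return value reports success, while an `Err` reports a non-zero
/// status and therefore panics here:
///
/// ```ignore
/// use test::assert_test_result;
///
/// // `()` reports exit code 0, so this passes.
/// assert_test_result(());
///
/// // An `Err` reports a non-zero code, so this would panic:
/// // assert_test_result(Err::<(), &str>("boom"));
/// ```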
pub fn assert_test_result<T: Termination>(result: T) {
    let code = result.report();
    assert_eq!(
        code, 0,
        "the test returned a termination value with a non-zero status code ({}) \
         which indicates a failure",
        code
    );
}

pub fn run_tests<F>(
    opts: &TestOpts,
    tests: Vec<TestDescAndFn>,
    mut notify_about_test_event: F,
) -> io::Result<()>
where
    F: FnMut(TestEvent) -> io::Result<()>,
{
    use std::collections::{self, HashMap};
    use std::hash::BuildHasherDefault;
    use std::sync::mpsc::RecvTimeoutError;
    // Use a deterministic hasher
    type TestMap =
        HashMap<TestDesc, Instant, BuildHasherDefault<collections::hash_map::DefaultHasher>>;

    let tests_len = tests.len();

    let mut filtered_tests = filter_tests(opts, tests);
    if !opts.bench_benchmarks {
        filtered_tests = convert_benchmarks_to_tests(filtered_tests);
    }

    let filtered_tests = {
        let mut filtered_tests = filtered_tests;
        for test in filtered_tests.iter_mut() {
            test.desc.name = test.desc.name.with_padding(test.testfn.padding());
        }

        filtered_tests
    };

    let filtered_out = tests_len - filtered_tests.len();
    let event = TestEvent::TeFilteredOut(filtered_out);
    notify_about_test_event(event)?;

    let filtered_descs = filtered_tests.iter().map(|t| t.desc.clone()).collect();

    let event = TestEvent::TeFiltered(filtered_descs);
    notify_about_test_event(event)?;

    let (filtered_tests, filtered_benchs): (Vec<_>, _) =
        filtered_tests.into_iter().partition(|e| match e.testfn {
            StaticTestFn(_) | DynTestFn(_) => true,
            _ => false,
        });

    let concurrency = opts.test_threads.unwrap_or_else(get_concurrency);

    let mut remaining = filtered_tests;
    remaining.reverse();
    let mut pending = 0;

    let (tx, rx) = channel::<CompletedTest>();
    let run_strategy = if opts.options.panic_abort && !opts.force_run_in_process {
        RunStrategy::SpawnPrimary
    } else {
        RunStrategy::InProcess
    };

    let mut running_tests: TestMap = HashMap::default();

    fn get_timed_out_tests(running_tests: &mut TestMap) -> Vec<TestDesc> {
        let now = Instant::now();
        let timed_out = running_tests
            .iter()
            .filter_map(|(desc, timeout)| if &now >= timeout { Some(desc.clone()) } else { None })
            .collect();
        for test in &timed_out {
            running_tests.remove(test);
        }
        timed_out
    }

    fn calc_timeout(running_tests: &TestMap) -> Option<Duration> {
        running_tests.values().min().map(|next_timeout| {
            let now = Instant::now();
            if *next_timeout >= now { *next_timeout - now } else { Duration::new(0, 0) }
        })
    }

    if concurrency == 1 {
        while !remaining.is_empty() {
            let test = remaining.pop().unwrap();
            let event = TestEvent::TeWait(test.desc.clone());
            notify_about_test_event(event)?;
            run_test(opts, !opts.run_tests, test, run_strategy, tx.clone(), Concurrent::No);
            let completed_test = rx.recv().unwrap();

            let event = TestEvent::TeResult(completed_test);
            notify_about_test_event(event)?;
        }
    } else {
        while pending > 0 || !remaining.is_empty() {
            while pending < concurrency && !remaining.is_empty() {
                let test = remaining.pop().unwrap();
                let timeout = time::get_default_test_timeout();
                running_tests.insert(test.desc.clone(), timeout);

                let event = TestEvent::TeWait(test.desc.clone());
                notify_about_test_event(event)?;
                run_test(opts, !opts.run_tests, test, run_strategy, tx.clone(), Concurrent::Yes);
                pending += 1;
            }

            let mut res;
            loop {
                if let Some(timeout) = calc_timeout(&running_tests) {
                    res = rx.recv_timeout(timeout);
                    for test in get_timed_out_tests(&mut running_tests) {
                        let event = TestEvent::TeTimeout(test);
                        notify_about_test_event(event)?;
                    }

                    match res {
                        Err(RecvTimeoutError::Timeout) => {
                            // Result is not yet ready, continue waiting.
                        }
                        _ => {
                            // We've got a result, stop the loop.
                            break;
                        }
                    }
                } else {
                    res = rx.recv().map_err(|_| RecvTimeoutError::Disconnected);
                    break;
                }
            }

            let completed_test = res.unwrap();
            running_tests.remove(&completed_test.desc);

            let event = TestEvent::TeResult(completed_test);
            notify_about_test_event(event)?;
            pending -= 1;
        }
    }

    if opts.bench_benchmarks {
        // All benchmarks run at the end, in serial.
        for b in filtered_benchs {
            let event = TestEvent::TeWait(b.desc.clone());
            notify_about_test_event(event)?;
            run_test(opts, false, b, run_strategy, tx.clone(), Concurrent::No);
            let completed_test = rx.recv().unwrap();

            let event = TestEvent::TeResult(completed_test);
            notify_about_test_event(event)?;
        }
    }
    Ok(())
}

pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
    let mut filtered = tests;
    let matches_filter = |test: &TestDescAndFn, filter: &str| {
        let test_name = test.desc.name.as_slice();

        match opts.filter_exact {
            true => test_name == filter,
            false => test_name.contains(filter),
        }
    };

    // Remove tests that don't match the test filter
    if let Some(ref filter) = opts.filter {
        filtered.retain(|test| matches_filter(test, filter));
    }

    // Skip tests that match any of the skip filters
    filtered.retain(|test| !opts.skip.iter().any(|sf| matches_filter(test, sf)));

    // Exclude #[should_panic] tests
    if opts.exclude_should_panic {
        filtered.retain(|test| test.desc.should_panic == ShouldPanic::No);
    }

    // Maybe unignore tests
    match opts.run_ignored {
        RunIgnored::Yes => {
            filtered.iter_mut().for_each(|test| test.desc.ignore = false);
        }
        RunIgnored::Only => {
            filtered.retain(|test| test.desc.ignore);
            filtered.iter_mut().for_each(|test| test.desc.ignore = false);
        }
        RunIgnored::No => {}
    }

    // Sort the tests alphabetically
    filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));

    filtered
}

pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
    // Convert benchmarks to tests, if we're not benchmarking them
    tests
        .into_iter()
        .map(|x| {
            let testfn = match x.testfn {
                DynBenchFn(bench) => DynTestFn(Box::new(move || {
                    bench::run_once(|b| __rust_begin_short_backtrace(|| bench.run(b)))
                })),
                StaticBenchFn(benchfn) => DynTestFn(Box::new(move || {
                    bench::run_once(|b| __rust_begin_short_backtrace(|| benchfn(b)))
                })),
                f => f,
            };
            TestDescAndFn { desc: x.desc, testfn }
        })
        .collect()
}

pub fn run_test(
    opts: &TestOpts,
    force_ignore: bool,
    test: TestDescAndFn,
    strategy: RunStrategy,
    monitor_ch: Sender<CompletedTest>,
    concurrency: Concurrent,
) {
    let TestDescAndFn { desc, testfn } = test;

    // Emscripten can catch panics but other wasm targets cannot
    let ignore_because_no_process_support = desc.should_panic != ShouldPanic::No
        && cfg!(target_arch = "wasm32")
        && !cfg!(target_os = "emscripten");

    if force_ignore || desc.ignore || ignore_because_no_process_support {
        let message = CompletedTest::new(desc, TrIgnored, None, Vec::new());
        monitor_ch.send(message).unwrap();
        return;
    }

    struct TestRunOpts {
        pub strategy: RunStrategy,
        pub nocapture: bool,
        pub concurrency: Concurrent,
        pub time: Option<time::TestTimeOptions>,
    }

    fn run_test_inner(
        desc: TestDesc,
        monitor_ch: Sender<CompletedTest>,
        testfn: Box<dyn FnOnce() + Send>,
        opts: TestRunOpts,
    ) {
        let concurrency = opts.concurrency;
        let name = desc.name.clone();

        let runtest = move || match opts.strategy {
            RunStrategy::InProcess => run_test_in_process(
                desc,
                opts.nocapture,
                opts.time.is_some(),
                testfn,
                monitor_ch,
                opts.time,
            ),
            RunStrategy::SpawnPrimary => spawn_test_subprocess(
                desc,
                opts.nocapture,
                opts.time.is_some(),
                monitor_ch,
                opts.time,
            ),
        };

        // If the platform is single-threaded we're just going to run
        // the test synchronously, regardless of the concurrency
        // level.
        let supports_threads = !cfg!(target_os = "emscripten") && !cfg!(target_arch = "wasm32");
        if concurrency == Concurrent::Yes && supports_threads {
            let cfg = thread::Builder::new().name(name.as_slice().to_owned());
            cfg.spawn(runtest).unwrap();
        } else {
            runtest();
        }
    }

    let test_run_opts =
        TestRunOpts { strategy, nocapture: opts.nocapture, concurrency, time: opts.time_options };

    match testfn {
        DynBenchFn(bencher) => {
            // Benchmarks aren't expected to panic, so we run them all in-process.
            crate::bench::benchmark(desc, monitor_ch, opts.nocapture, |harness| {
                bencher.run(harness)
            });
        }
        StaticBenchFn(benchfn) => {
            // Benchmarks aren't expected to panic, so we run them all in-process.
            crate::bench::benchmark(desc, monitor_ch, opts.nocapture, benchfn);
        }
        DynTestFn(f) => {
            match strategy {
                RunStrategy::InProcess => (),
                _ => panic!("Cannot run dynamic test fn out-of-process"),
            };
            run_test_inner(
                desc,
                monitor_ch,
                Box::new(move || __rust_begin_short_backtrace(f)),
                test_run_opts,
            );
        }
        StaticTestFn(f) => run_test_inner(
            desc,
            monitor_ch,
            Box::new(move || __rust_begin_short_backtrace(f)),
            test_run_opts,
        ),
    }
}

/// Fixed frame used to clean the backtrace with `RUST_BACKTRACE=1`.
#[inline(never)]
fn __rust_begin_short_backtrace<F: FnOnce()>(f: F) {
    f()
}

fn run_test_in_process(
    desc: TestDesc,
    nocapture: bool,
    report_time: bool,
    testfn: Box<dyn FnOnce() + Send>,
    monitor_ch: Sender<CompletedTest>,
    time_opts: Option<time::TestTimeOptions>,
) {
    // Buffer for capturing standard I/O
    let data = Arc::new(Mutex::new(Vec::new()));

    let oldio = if !nocapture {
        Some((
            io::set_print(Some(Sink::new_boxed(&data))),
            io::set_panic(Some(Sink::new_boxed(&data))),
        ))
    } else {
        None
    };

    let start = report_time.then(Instant::now);
    let result = catch_unwind(AssertUnwindSafe(testfn));
    let exec_time = start.map(|start| {
        let duration = start.elapsed();
        TestExecTime(duration)
    });

    if let Some((printio, panicio)) = oldio {
        io::set_print(printio);
        io::set_panic(panicio);
    }

    let test_result = match result {
        Ok(()) => calc_result(&desc, Ok(()), &time_opts, &exec_time),
        Err(e) => calc_result(&desc, Err(e.as_ref()), &time_opts, &exec_time),
    };
    let stdout = data.lock().unwrap().to_vec();
    let message = CompletedTest::new(desc, test_result, exec_time, stdout);
    monitor_ch.send(message).unwrap();
}

fn spawn_test_subprocess(
    desc: TestDesc,
    nocapture: bool,
    report_time: bool,
    monitor_ch: Sender<CompletedTest>,
    time_opts: Option<time::TestTimeOptions>,
) {
    let (result, test_output, exec_time) = (|| {
        let args = env::args().collect::<Vec<_>>();
        let current_exe = &args[0];

        let mut command = Command::new(current_exe);
        command.env(SECONDARY_TEST_INVOKER_VAR, desc.name.as_slice());
        if nocapture {
            command.stdout(process::Stdio::inherit());
            command.stderr(process::Stdio::inherit());
        }

        let start = report_time.then(Instant::now);
        let output = match command.output() {
            Ok(out) => out,
            Err(e) => {
                let err = format!("Failed to spawn {} as child for test: {:?}", args[0], e);
                return (TrFailed, err.into_bytes(), None);
            }
        };
        let exec_time = start.map(|start| {
            let duration = start.elapsed();
            TestExecTime(duration)
        });

        let std::process::Output { stdout, stderr, status } = output;
        let mut test_output = stdout;
        formatters::write_stderr_delimiter(&mut test_output, &desc.name);
        test_output.extend_from_slice(&stderr);

        let result = match (|| -> Result<TestResult, String> {
            let exit_code = get_exit_code(status)?;
            Ok(get_result_from_exit_code(&desc, exit_code, &time_opts, &exec_time))
        })() {
            Ok(r) => r,
            Err(e) => {
                write!(&mut test_output, "Unexpected error: {}", e).unwrap();
                TrFailed
            }
        };

        (result, test_output, exec_time)
    })();

    let message = CompletedTest::new(desc, result, exec_time, test_output);
    monitor_ch.send(message).unwrap();
}

fn run_test_in_spawned_subprocess(desc: TestDesc, testfn: Box<dyn FnOnce() + Send>) -> ! {
    let builtin_panic_hook = panic::take_hook();
    let record_result = Arc::new(move |panic_info: Option<&'_ PanicInfo<'_>>| {
        let test_result = match panic_info {
            Some(info) => calc_result(&desc, Err(info.payload()), &None, &None),
            None => calc_result(&desc, Ok(()), &None, &None),
        };

        // We don't support serializing TrFailedMsg, so just
        // print the message out to stderr.
        if let TrFailedMsg(msg) = &test_result {
            eprintln!("{}", msg);
        }

        if let Some(info) = panic_info {
            builtin_panic_hook(info);
        }

        if let TrOk = test_result {
            process::exit(test_result::TR_OK);
        } else {
            process::exit(test_result::TR_FAILED);
        }
    });
    let record_result2 = record_result.clone();
    panic::set_hook(Box::new(move |info| record_result2(Some(&info))));
    testfn();
    record_result(None);
    unreachable!("panic=abort callback should have exited the process")
}