//! Support code for rustc's built in unit-test and micro-benchmarking
//! framework.
//!
//! Almost all user code will only be interested in `Bencher` and
//! `black_box`. All other interactions (such as writing tests and
//! benchmarks themselves) should be done via the `#[test]` and
//! `#[bench]` attributes.
//!
//! See the [Testing Chapter](../book/ch11-00-testing.html) of the book for more details.
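//!
//! For example, a user crate typically interacts with this framework only through
//! those attributes; a minimal sketch of such a crate (nightly-only, since
//! `#[bench]` and the `test` crate are unstable) might look like this:
//!
//! ```ignore
//! #![feature(test)]
//! extern crate test;
//!
//! use test::{black_box, Bencher};
//!
//! #[test]
//! fn addition_works() {
//!     assert_eq!(2 + 2, 4);
//! }
//!
//! #[bench]
//! fn bench_addition(b: &mut Bencher) {
//!     // `black_box` keeps the optimizer from deleting the benchmarked work.
//!     b.iter(|| black_box(2 + 2));
//! }
//! ```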

// Currently, not much of this is meant for users. It is intended to
// support the simplest interface possible for representing and
// running tests while providing a base that other test frameworks may
// build off of.

// N.B., this is also specified in this crate's Cargo.toml, but librustc_ast contains logic specific to
// this crate, which relies on this attribute (rather than the value of `--crate-name` passed by
// cargo) to detect this crate.

#![crate_name = "test"]
#![unstable(feature = "test", issue = "50297")]
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/", test(attr(deny(warnings))))]
#![cfg_attr(unix, feature(libc))]
#![feature(rustc_private)]
#![feature(nll)]
#![feature(available_concurrency)]
#![feature(internal_output_capture)]
#![feature(panic_unwind)]
#![feature(staged_api)]
#![feature(termination_trait_lib)]
#![feature(test)]
#![feature(total_cmp)]
#![feature(str_split_once)]

// Public reexports
pub use self::bench::{black_box, Bencher};
pub use self::console::run_tests_console;
pub use self::options::{ColorConfig, Options, OutputFormat, RunIgnored, ShouldPanic};
pub use self::types::TestName::*;
pub use self::types::*;
pub use self::ColorConfig::*;
pub use cli::TestOpts;

// Module to be used by rustc to compile tests in libtest
pub mod test {
    pub use crate::{
        assert_test_result,
        bench::Bencher,
        cli::{parse_opts, TestOpts},
        filter_tests,
        helpers::metrics::{Metric, MetricMap},
        options::{Options, RunIgnored, RunStrategy, ShouldPanic},
        run_test, test_main, test_main_static,
        test_result::{TestResult, TrFailed, TrFailedMsg, TrIgnored, TrOk},
        time::{TestExecTime, TestTimeOptions},
        types::{
            DynTestFn, DynTestName, StaticBenchFn, StaticTestFn, StaticTestName, TestDesc,
            TestDescAndFn, TestName, TestType,
        },
    };
}

use std::{
    env, io,
    io::prelude::Write,
    panic::{self, catch_unwind, AssertUnwindSafe, PanicInfo},
    process::{self, Command, Termination},
    sync::mpsc::{channel, Sender},
    sync::{Arc, Mutex},
    thread,
    time::{Duration, Instant},
};

pub mod bench;
mod cli;
mod console;
mod event;
mod formatters;
mod helpers;
mod options;
pub mod stats;
mod test_result;
mod time;
mod types;

#[cfg(test)]
mod tests;

use event::{CompletedTest, TestEvent};
use helpers::concurrency::get_concurrency;
use helpers::exit_code::get_exit_code;
use options::{Concurrent, RunStrategy};
use test_result::*;
use time::TestExecTime;

// Process exit code to be used to indicate test failures.
const ERROR_EXIT_CODE: i32 = 101;

// Environment variable a primary test process sets (in panic=abort mode) to tell a
// re-executed copy of the test binary which single test to run.
const SECONDARY_TEST_INVOKER_VAR: &str = "__RUST_TEST_INVOKE";

// The default console test runner. It accepts the command line
// arguments and a vector of test_descs.
pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>, options: Option<Options>) {
    let mut opts = match cli::parse_opts(args) {
        Some(Ok(o)) => o,
        Some(Err(msg)) => {
            eprintln!("error: {}", msg);
            process::exit(ERROR_EXIT_CODE);
        }
        None => return,
    };
    if let Some(options) = options {
        opts.options = options;
    }
    if opts.list {
        if let Err(e) = console::list_tests_console(&opts, tests) {
            eprintln!("error: io error when listing tests: {:?}", e);
            process::exit(ERROR_EXIT_CODE);
        }
    } else {
        match console::run_tests_console(&opts, tests) {
            Ok(true) => {}
            Ok(false) => process::exit(ERROR_EXIT_CODE),
            Err(e) => {
                eprintln!("error: io error when running tests: {:?}", e);
                process::exit(ERROR_EXIT_CODE);
            }
        }
    }
}

/// A variant optimized for invocation with a static test vector.
/// This will panic (intentionally) when fed any dynamic tests.
///
/// This is the entry point for the main function generated by `rustc --test`
/// when panic=unwind.
pub fn test_main_static(tests: &[&TestDescAndFn]) {
    let args = env::args().collect::<Vec<_>>();
    let owned_tests: Vec<_> = tests.iter().map(make_owned_test).collect();
    test_main(&args, owned_tests, None)
}
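
// As a rough sketch (the constant names are made up for illustration), the entry
// point that `rustc --test` generates for a crate with two `#[test]` functions is
// approximately:
//
//     extern crate test;
//
//     pub fn main() {
//         test::test_main_static(&[&FOO_TEST, &BAR_TEST]);
//     }
//
// where `FOO_TEST` and `BAR_TEST` are the `&'static TestDescAndFn` values the
// compiler synthesizes for each `#[test]` function.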

/// A variant optimized for invocation with a static test vector.
/// This will panic (intentionally) when fed any dynamic tests.
///
/// Runs tests in panic=abort mode, which involves spawning subprocesses for
/// tests.
///
/// This is the entry point for the main function generated by `rustc --test`
/// when panic=abort.
pub fn test_main_static_abort(tests: &[&TestDescAndFn]) {
    // If we're being run in SpawnedSecondary mode, run the test here. run_test
    // will then exit the process.
    if let Ok(name) = env::var(SECONDARY_TEST_INVOKER_VAR) {
        env::remove_var(SECONDARY_TEST_INVOKER_VAR);
        let test = tests
            .iter()
            .filter(|test| test.desc.name.as_slice() == name)
            .map(make_owned_test)
            .next()
            .unwrap_or_else(|| panic!("couldn't find a test with the provided name '{}'", name));
        let TestDescAndFn { desc, testfn } = test;
        let testfn = match testfn {
            StaticTestFn(f) => f,
            _ => panic!("only static tests are supported"),
        };
        run_test_in_spawned_subprocess(desc, Box::new(testfn));
    }

    let args = env::args().collect::<Vec<_>>();
    let owned_tests: Vec<_> = tests.iter().map(make_owned_test).collect();
    test_main(&args, owned_tests, Some(Options::new().panic_abort(true)))
}
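
// The primary/secondary handshake above boils down to re-running the current test
// binary with `__RUST_TEST_INVOKE` set to a single test name. For a hypothetical
// binary `my_tests` containing a test `my_mod::my_test`, the primary process
// effectively runs:
//
//     __RUST_TEST_INVOKE=my_mod::my_test ./my_tests
//
// The secondary copy sees the variable, removes it, runs only that test
// in-process, and exits with a status code encoding the result.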

/// Clones static values for putting into a dynamic vector, which `test_main()`
/// needs so that it can hand out ownership of tests to parallel test runners.
///
/// This will panic when fed any dynamic tests, because they cannot be cloned.
fn make_owned_test(test: &&TestDescAndFn) -> TestDescAndFn {
    match test.testfn {
        StaticTestFn(f) => TestDescAndFn { testfn: StaticTestFn(f), desc: test.desc.clone() },
        StaticBenchFn(f) => TestDescAndFn { testfn: StaticBenchFn(f), desc: test.desc.clone() },
        _ => panic!("non-static tests passed to test::test_main_static"),
    }
}

/// Invoked when unit tests terminate. Should panic if the unit
/// test is considered a failure. By default, invokes `report()`
/// and checks for a `0` result.
pub fn assert_test_result<T: Termination>(result: T) {
    let code = result.report();
    assert_eq!(
        code, 0,
        "the test returned a termination value with a non-zero status code ({}) \
         which indicates a failure",
        code
    );
}
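
// A `#[test]` that returns a `Result` goes through this check: an `Err` makes
// `report()` return a non-zero code, so the assertion above fails the test. An
// illustrative example (the probed path is arbitrary):
//
//     #[test]
//     fn manifest_is_readable() -> Result<(), std::io::Error> {
//         std::fs::metadata("Cargo.toml")?; // an Err here fails the test
//         Ok(())
//     }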

pub fn run_tests<F>(
    opts: &TestOpts,
    tests: Vec<TestDescAndFn>,
    mut notify_about_test_event: F,
) -> io::Result<()>
where
    F: FnMut(TestEvent) -> io::Result<()>,
{
    use std::collections::{self, HashMap};
    use std::hash::BuildHasherDefault;
    use std::sync::mpsc::RecvTimeoutError;
    // Use a deterministic hasher
    type TestMap =
        HashMap<TestDesc, Instant, BuildHasherDefault<collections::hash_map::DefaultHasher>>;

    let tests_len = tests.len();

    let mut filtered_tests = filter_tests(opts, tests);
    if !opts.bench_benchmarks {
        filtered_tests = convert_benchmarks_to_tests(filtered_tests);
    }

    let filtered_tests = {
        let mut filtered_tests = filtered_tests;
        for test in filtered_tests.iter_mut() {
            test.desc.name = test.desc.name.with_padding(test.testfn.padding());
        }

        filtered_tests
    };

    let filtered_out = tests_len - filtered_tests.len();
    let event = TestEvent::TeFilteredOut(filtered_out);
    notify_about_test_event(event)?;

    let filtered_descs = filtered_tests.iter().map(|t| t.desc.clone()).collect();

    let event = TestEvent::TeFiltered(filtered_descs);
    notify_about_test_event(event)?;

    let (filtered_tests, filtered_benchs): (Vec<_>, _) = filtered_tests
        .into_iter()
        .partition(|e| matches!(e.testfn, StaticTestFn(_) | DynTestFn(_)));

    let concurrency = opts.test_threads.unwrap_or_else(get_concurrency);

    let mut remaining = filtered_tests;
    remaining.reverse();
    let mut pending = 0;

    let (tx, rx) = channel::<CompletedTest>();
    let run_strategy = if opts.options.panic_abort && !opts.force_run_in_process {
        RunStrategy::SpawnPrimary
    } else {
        RunStrategy::InProcess
    };

    let mut running_tests: TestMap = HashMap::default();

    fn get_timed_out_tests(running_tests: &mut TestMap) -> Vec<TestDesc> {
        let now = Instant::now();
        let timed_out = running_tests
            .iter()
            .filter_map(|(desc, timeout)| if &now >= timeout { Some(desc.clone()) } else { None })
            .collect();
        for test in &timed_out {
            running_tests.remove(test);
        }
        timed_out
    }

    fn calc_timeout(running_tests: &TestMap) -> Option<Duration> {
        running_tests.values().min().map(|next_timeout| {
            let now = Instant::now();
            if *next_timeout >= now { *next_timeout - now } else { Duration::new(0, 0) }
        })
    }

    if concurrency == 1 {
        while !remaining.is_empty() {
            let test = remaining.pop().unwrap();
            let event = TestEvent::TeWait(test.desc.clone());
            notify_about_test_event(event)?;
            run_test(opts, !opts.run_tests, test, run_strategy, tx.clone(), Concurrent::No);
            let completed_test = rx.recv().unwrap();

            let event = TestEvent::TeResult(completed_test);
            notify_about_test_event(event)?;
        }
    } else {
        while pending > 0 || !remaining.is_empty() {
            while pending < concurrency && !remaining.is_empty() {
                let test = remaining.pop().unwrap();
                let timeout = time::get_default_test_timeout();
                running_tests.insert(test.desc.clone(), timeout);

                let event = TestEvent::TeWait(test.desc.clone());
                notify_about_test_event(event)?; // here no pad
                run_test(opts, !opts.run_tests, test, run_strategy, tx.clone(), Concurrent::Yes);
                pending += 1;
            }

            let mut res;
            loop {
                if let Some(timeout) = calc_timeout(&running_tests) {
                    res = rx.recv_timeout(timeout);
                    for test in get_timed_out_tests(&mut running_tests) {
                        let event = TestEvent::TeTimeout(test);
                        notify_about_test_event(event)?;
                    }

                    match res {
                        Err(RecvTimeoutError::Timeout) => {
                            // Result is not yet ready, continue waiting.
                        }
                        _ => {
                            // We've got a result, stop the loop.
                            break;
                        }
                    }
                } else {
                    res = rx.recv().map_err(|_| RecvTimeoutError::Disconnected);
                    break;
                }
            }

            let completed_test = res.unwrap();
            running_tests.remove(&completed_test.desc);

            let event = TestEvent::TeResult(completed_test);
            notify_about_test_event(event)?;
            pending -= 1;
        }
    }

    if opts.bench_benchmarks {
        // All benchmarks run at the end, in serial.
        for b in filtered_benchs {
            let event = TestEvent::TeWait(b.desc.clone());
            notify_about_test_event(event)?;
            run_test(opts, false, b, run_strategy, tx.clone(), Concurrent::No);
            let completed_test = rx.recv().unwrap();

            let event = TestEvent::TeResult(completed_test);
            notify_about_test_event(event)?;
        }
    }
    Ok(())
}
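
// The scheduling above is driven by the effective concurrency: with a single test
// thread the serial in-process path is taken, otherwise up to `concurrency` tests
// are in flight at once and results come back over the channel. For a hypothetical
// binary `my_tests`, the thread count can be pinned on the command line or via the
// environment:
//
//     ./my_tests --test-threads=1    # serial path
//     ./my_tests --test-threads=8    # up to 8 tests running concurrently
//     RUST_TEST_THREADS=4 ./my_tests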

pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
    let mut filtered = tests;
    let matches_filter = |test: &TestDescAndFn, filter: &str| {
        let test_name = test.desc.name.as_slice();

        match opts.filter_exact {
            true => test_name == filter,
            false => test_name.contains(filter),
        }
    };

    // Remove tests that don't match the test filter
    if let Some(ref filter) = opts.filter {
        filtered.retain(|test| matches_filter(test, filter));
    }

    // Skip tests that match any of the skip filters
    filtered.retain(|test| !opts.skip.iter().any(|sf| matches_filter(test, sf)));

    // Exclude #[should_panic] tests if requested
    if opts.exclude_should_panic {
        filtered.retain(|test| test.desc.should_panic == ShouldPanic::No);
    }

    // Maybe un-ignore tests, depending on how ignored tests should be handled
    match opts.run_ignored {
        RunIgnored::Yes => {
            filtered.iter_mut().for_each(|test| test.desc.ignore = false);
        }
        RunIgnored::Only => {
            filtered.retain(|test| test.desc.ignore);
            filtered.iter_mut().for_each(|test| test.desc.ignore = false);
        }
        RunIgnored::No => {}
    }

    // Sort the tests alphabetically
    filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));

    filtered
}
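
// These filters map directly onto the test binary's command line; for a
// hypothetical binary `my_tests`:
//
//     ./my_tests parse            # run tests whose names contain "parse"
//     ./my_tests parse --exact    # run only the test named exactly "parse"
//     ./my_tests --skip slow      # skip tests whose names contain "slow"
//     ./my_tests --ignored        # run only the #[ignore] tests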

pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
    // convert benchmarks to tests, if we're not benchmarking them
    tests
        .into_iter()
        .map(|x| {
            let testfn = match x.testfn {
                DynBenchFn(bench) => DynTestFn(Box::new(move || {
                    bench::run_once(|b| __rust_begin_short_backtrace(|| bench.run(b)))
                })),
                StaticBenchFn(benchfn) => DynTestFn(Box::new(move || {
                    bench::run_once(|b| __rust_begin_short_backtrace(|| benchfn(b)))
                })),
                f => f,
            };
            TestDescAndFn { desc: x.desc, testfn }
        })
        .collect()
}
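
// This conversion is what makes `#[bench]` functions behave as ordinary tests when
// benchmarks are not being measured: each benchmark body runs exactly once via
// `bench::run_once` instead of being iterated and timed. In CLI terms, for a
// hypothetical binary `my_tests`:
//
//     ./my_tests            # benches are converted and run once each
//     ./my_tests --bench    # benches are measured; no conversion happens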

pub fn run_test(
    opts: &TestOpts,
    force_ignore: bool,
    test: TestDescAndFn,
    strategy: RunStrategy,
    monitor_ch: Sender<CompletedTest>,
    concurrency: Concurrent,
) {
    let TestDescAndFn { desc, testfn } = test;

    // Emscripten can catch panics but other wasm targets cannot
    let ignore_because_no_process_support = desc.should_panic != ShouldPanic::No
        && cfg!(target_arch = "wasm32")
        && !cfg!(target_os = "emscripten");

    if force_ignore || desc.ignore || ignore_because_no_process_support {
        let message = CompletedTest::new(desc, TrIgnored, None, Vec::new());
        monitor_ch.send(message).unwrap();
        return;
    }

    struct TestRunOpts {
        pub strategy: RunStrategy,
        pub nocapture: bool,
        pub concurrency: Concurrent,
        pub time: Option<time::TestTimeOptions>,
    }

    fn run_test_inner(
        desc: TestDesc,
        monitor_ch: Sender<CompletedTest>,
        testfn: Box<dyn FnOnce() + Send>,
        opts: TestRunOpts,
    ) {
        let concurrency = opts.concurrency;
        let name = desc.name.clone();

        let runtest = move || match opts.strategy {
            RunStrategy::InProcess => run_test_in_process(
                desc,
                opts.nocapture,
                opts.time.is_some(),
                testfn,
                monitor_ch,
                opts.time,
            ),
            RunStrategy::SpawnPrimary => spawn_test_subprocess(
                desc,
                opts.nocapture,
                opts.time.is_some(),
                monitor_ch,
                opts.time,
            ),
        };

        // If the platform is single-threaded we're just going to run
        // the test synchronously, regardless of the concurrency
        // level.
        let supports_threads = !cfg!(target_os = "emscripten") && !cfg!(target_arch = "wasm32");
        if concurrency == Concurrent::Yes && supports_threads {
            let cfg = thread::Builder::new().name(name.as_slice().to_owned());
            cfg.spawn(runtest).unwrap();
        } else {
            runtest();
        }
    }

    let test_run_opts =
        TestRunOpts { strategy, nocapture: opts.nocapture, concurrency, time: opts.time_options };

    match testfn {
        DynBenchFn(bencher) => {
            // Benchmarks aren't expected to panic, so we run them all in-process.
            crate::bench::benchmark(desc, monitor_ch, opts.nocapture, |harness| {
                bencher.run(harness)
            });
        }
        StaticBenchFn(benchfn) => {
            // Benchmarks aren't expected to panic, so we run them all in-process.
            crate::bench::benchmark(desc, monitor_ch, opts.nocapture, benchfn);
        }
        DynTestFn(f) => {
            match strategy {
                RunStrategy::InProcess => (),
                _ => panic!("Cannot run dynamic test fn out-of-process"),
            };
            run_test_inner(
                desc,
                monitor_ch,
                Box::new(move || __rust_begin_short_backtrace(f)),
                test_run_opts,
            );
        }
        StaticTestFn(f) => run_test_inner(
            desc,
            monitor_ch,
            Box::new(move || __rust_begin_short_backtrace(f)),
            test_run_opts,
        ),
    }
}

/// Fixed frame used to clean the backtrace with `RUST_BACKTRACE=1`.
#[inline(never)]
fn __rust_begin_short_backtrace<F: FnOnce()>(f: F) {
    f();

    // prevent this frame from being tail-call optimised away
    black_box(());
}
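
// The standard library's default panic output trims backtraces at this symbol, so
// the frames a user sees with `RUST_BACKTRACE=1` stop at the test function instead
// of descending into harness plumbing, e.g. for a hypothetical binary:
//
//     RUST_BACKTRACE=1 ./my_tests some_failing_test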

fn run_test_in_process(
    desc: TestDesc,
    nocapture: bool,
    report_time: bool,
    testfn: Box<dyn FnOnce() + Send>,
    monitor_ch: Sender<CompletedTest>,
    time_opts: Option<time::TestTimeOptions>,
) {
    // Buffer for capturing standard I/O
    let data = Arc::new(Mutex::new(Vec::new()));

    if !nocapture {
        io::set_output_capture(Some(data.clone()));
    }

    let start = report_time.then(Instant::now);
    let result = catch_unwind(AssertUnwindSafe(testfn));
    let exec_time = start.map(|start| {
        let duration = start.elapsed();
        TestExecTime(duration)
    });

    io::set_output_capture(None);

    let test_result = match result {
        Ok(()) => calc_result(&desc, Ok(()), &time_opts, &exec_time),
        Err(e) => calc_result(&desc, Err(e.as_ref()), &time_opts, &exec_time),
    };
    let stdout = data.lock().unwrap_or_else(|e| e.into_inner()).to_vec();
    let message = CompletedTest::new(desc, test_result, exec_time, stdout);
    monitor_ch.send(message).unwrap();
}
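
// Output capture is what hides a passing test's prints; when debugging it can be
// disabled from the command line or the environment, e.g. for a hypothetical
// binary `my_tests`:
//
//     cargo test -- --nocapture     # forwarded to the test binary by cargo
//     ./my_tests --nocapture        # invoking the test binary directly
//     RUST_TEST_NOCAPTURE=1 ./my_tests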

fn spawn_test_subprocess(
    desc: TestDesc,
    nocapture: bool,
    report_time: bool,
    monitor_ch: Sender<CompletedTest>,
    time_opts: Option<time::TestTimeOptions>,
) {
    let (result, test_output, exec_time) = (|| {
        let args = env::args().collect::<Vec<_>>();
        let current_exe = &args[0];

        let mut command = Command::new(current_exe);
        command.env(SECONDARY_TEST_INVOKER_VAR, desc.name.as_slice());
        if nocapture {
            command.stdout(process::Stdio::inherit());
            command.stderr(process::Stdio::inherit());
        }

        let start = report_time.then(Instant::now);
        let output = match command.output() {
            Ok(out) => out,
            Err(e) => {
                let err = format!("Failed to spawn {} as child for test: {:?}", args[0], e);
                return (TrFailed, err.into_bytes(), None);
            }
        };
        let exec_time = start.map(|start| {
            let duration = start.elapsed();
            TestExecTime(duration)
        });

        let std::process::Output { stdout, stderr, status } = output;
        let mut test_output = stdout;
        formatters::write_stderr_delimiter(&mut test_output, &desc.name);
        test_output.extend_from_slice(&stderr);

        let result = match (|| -> Result<TestResult, String> {
            let exit_code = get_exit_code(status)?;
            Ok(get_result_from_exit_code(&desc, exit_code, &time_opts, &exec_time))
        })() {
            Ok(r) => r,
            Err(e) => {
                write!(&mut test_output, "Unexpected error: {}", e).unwrap();
                TrFailed
            }
        };

        (result, test_output, exec_time)
    })();

    let message = CompletedTest::new(desc, result, exec_time, test_output);
    monitor_ch.send(message).unwrap();
}

fn run_test_in_spawned_subprocess(desc: TestDesc, testfn: Box<dyn FnOnce() + Send>) -> ! {
    let builtin_panic_hook = panic::take_hook();
    let record_result = Arc::new(move |panic_info: Option<&'_ PanicInfo<'_>>| {
        let test_result = match panic_info {
            Some(info) => calc_result(&desc, Err(info.payload()), &None, &None),
            None => calc_result(&desc, Ok(()), &None, &None),
        };

        // We don't support serializing TrFailedMsg, so just
        // print the message out to stderr.
        if let TrFailedMsg(msg) = &test_result {
            eprintln!("{}", msg);
        }

        if let Some(info) = panic_info {
            builtin_panic_hook(info);
        }

        if let TrOk = test_result {
            process::exit(test_result::TR_OK);
        } else {
            process::exit(test_result::TR_FAILED);
        }
    });
    let record_result2 = record_result.clone();
    panic::set_hook(Box::new(move |info| record_result2(Some(&info))));
    testfn();
    record_result(None);
    unreachable!("panic=abort callback should have exited the process")
}