//! Support code for rustc's built-in unit-test and micro-benchmarking
//! framework.
//!
//! Almost all user code will only be interested in `Bencher` and
//! `black_box`. All other interactions (such as writing tests and
//! benchmarks themselves) should be done via the `#[test]` and
//! `#[bench]` attributes.
//!
//! See the [Testing Chapter](../book/ch11-00-testing.html) of the book for more details.
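//!
//! # Example
//!
//! An illustrative sketch of how tests and benchmarks are typically written
//! through the attribute interface (the function names below are made up):
//!
//! ```rust,ignore
//! #![feature(test)]
//! extern crate test;
//!
//! use test::{black_box, Bencher};
//!
//! #[test]
//! fn arithmetic_works() {
//!     assert_eq!(2 + 2, 4);
//! }
//!
//! #[bench]
//! fn bench_addition(b: &mut Bencher) {
//!     // `black_box` hides the value from the optimizer so the work isn't removed.
//!     b.iter(|| black_box(2 + 2));
//! }
//! ```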

// Currently, not much of this is meant for users. It is intended to
// support the simplest interface possible for representing and
// running tests while providing a base that other test frameworks may
// build off of.

// N.B., this is also specified in this crate's Cargo.toml, but librustc_ast contains logic specific to
// this crate, which relies on this attribute (rather than the value of `--crate-name` passed by
// cargo) to detect this crate.

#![crate_name = "test"]
#![unstable(feature = "test", issue = "50297")]
#![doc(html_root_url = "https://doc.rust-lang.org/nightly/", test(attr(deny(warnings))))]
#![cfg_attr(any(unix, target_os = "cloudabi"), feature(libc))]
#![feature(rustc_private)]
#![feature(nll)]
#![feature(bool_to_option)]
#![feature(set_stdio)]
#![feature(panic_unwind)]
#![feature(staged_api)]
#![feature(termination_trait_lib)]
#![feature(test)]

// Public reexports
pub use self::bench::{black_box, Bencher};
pub use self::console::run_tests_console;
pub use self::options::{ColorConfig, Options, OutputFormat, RunIgnored, ShouldPanic};
pub use self::types::TestName::*;
pub use self::types::*;
pub use self::ColorConfig::*;
pub use cli::TestOpts;

// Module to be used by rustc to compile tests in libtest
pub mod test {
    pub use crate::{
        assert_test_result,
        bench::Bencher,
        cli::{parse_opts, TestOpts},
        filter_tests,
        helpers::metrics::{Metric, MetricMap},
        options::{Options, RunIgnored, RunStrategy, ShouldPanic},
        run_test, test_main, test_main_static,
        test_result::{TestResult, TrFailed, TrFailedMsg, TrIgnored, TrOk},
        time::{TestExecTime, TestTimeOptions},
        types::{
            DynTestFn, DynTestName, StaticBenchFn, StaticTestFn, StaticTestName, TestDesc,
            TestDescAndFn, TestName, TestType,
        },
    };
}

use std::{
    env, io,
    io::prelude::Write,
    panic::{self, catch_unwind, AssertUnwindSafe, PanicInfo},
    process::{self, Command, Termination},
    sync::mpsc::{channel, Sender},
    sync::{Arc, Mutex},
    thread,
    time::{Duration, Instant},
};

pub mod bench;
mod cli;
mod console;
mod event;
mod formatters;
mod helpers;
mod options;
pub mod stats;
mod test_result;
mod time;
mod types;

#[cfg(test)]
mod tests;

use event::{CompletedTest, TestEvent};
use helpers::concurrency::get_concurrency;
use helpers::exit_code::get_exit_code;
use helpers::sink::Sink;
use options::{Concurrent, RunStrategy};
use test_result::*;
use time::TestExecTime;

// Process exit code to be used to indicate test failures.
const ERROR_EXIT_CODE: i32 = 101;

// Environment variable used by the panic=abort protocol: the primary process
// sets it to the name of the test a spawned secondary process should run (see
// `spawn_test_subprocess` and `test_main_static_abort`).
const SECONDARY_TEST_INVOKER_VAR: &str = "__RUST_TEST_INVOKE";

// The default console test runner. It accepts the command-line
// arguments and a vector of tests (`TestDescAndFn`).
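//
// An illustrative sketch of a direct call (the test name and body are made up,
// and the `TestDesc` fields assume the layout defined in this crate's `types`
// module at the time of writing):
//
//     let args: Vec<String> = std::env::args().collect();
//     let tests = vec![TestDescAndFn {
//         desc: TestDesc {
//             name: StaticTestName("example"),
//             ignore: false,
//             should_panic: ShouldPanic::No,
//             allow_fail: false,
//             test_type: TestType::UnitTest,
//         },
//         testfn: StaticTestFn(|| assert_eq!(2 + 2, 4)),
//     }];
//     test_main(&args, tests, None);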
pub fn test_main(args: &[String], tests: Vec<TestDescAndFn>, options: Option<Options>) {
    let mut opts = match cli::parse_opts(args) {
        Some(Ok(o)) => o,
        Some(Err(msg)) => {
            eprintln!("error: {}", msg);
            process::exit(ERROR_EXIT_CODE);
        }
        None => return,
    };
    if let Some(options) = options {
        opts.options = options;
    }
    if opts.list {
        if let Err(e) = console::list_tests_console(&opts, tests) {
            eprintln!("error: io error when listing tests: {:?}", e);
            process::exit(ERROR_EXIT_CODE);
        }
    } else {
        match console::run_tests_console(&opts, tests) {
            Ok(true) => {}
            Ok(false) => process::exit(ERROR_EXIT_CODE),
            Err(e) => {
                eprintln!("error: io error when running tests: {:?}", e);
                process::exit(ERROR_EXIT_CODE);
            }
        }
    }
}

/// A variant optimized for invocation with a static test vector.
/// This will panic (intentionally) when fed any dynamic tests.
///
/// This is the entry point for the main function generated by `rustc --test`
/// when panic=unwind.
pub fn test_main_static(tests: &[&TestDescAndFn]) {
    let args = env::args().collect::<Vec<_>>();
    let owned_tests: Vec<_> = tests.iter().map(make_owned_test).collect();
    test_main(&args, owned_tests, None)
}
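
// Schematically, the `main` that `rustc --test` generates for the
// panic=unwind case boils down to something like the following (illustrative
// only, not the exact expansion; `TEST_A`/`TEST_B` are placeholders):
//
//     fn main() {
//         test::test_main_static(&[&TEST_A, &TEST_B]);
//     }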

/// A variant optimized for invocation with a static test vector.
/// This will panic (intentionally) when fed any dynamic tests.
///
/// Runs tests in panic=abort mode, which involves spawning subprocesses for
/// tests.
///
/// This is the entry point for the main function generated by `rustc --test`
/// when panic=abort.
pub fn test_main_static_abort(tests: &[&TestDescAndFn]) {
    // If we're being run as the spawned secondary process, run the requested
    // test here; run_test_in_spawned_subprocess will then exit the process.
    if let Ok(name) = env::var(SECONDARY_TEST_INVOKER_VAR) {
        env::remove_var(SECONDARY_TEST_INVOKER_VAR);
        let test = tests
            .iter()
            .filter(|test| test.desc.name.as_slice() == name)
            .map(make_owned_test)
            .next()
            .unwrap_or_else(|| panic!("couldn't find a test with the provided name '{}'", name));
        let TestDescAndFn { desc, testfn } = test;
        let testfn = match testfn {
            StaticTestFn(f) => f,
            _ => panic!("only static tests are supported"),
        };
        run_test_in_spawned_subprocess(desc, Box::new(testfn));
    }

    let args = env::args().collect::<Vec<_>>();
    let owned_tests: Vec<_> = tests.iter().map(make_owned_test).collect();
    test_main(&args, owned_tests, Some(Options::new().panic_abort(true)))
}
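
// The panic=abort handshake, schematically (a summary of the code above and of
// `spawn_test_subprocess` below, not additional behavior):
//
//     primary:   sets `__RUST_TEST_INVOKE` to the test's name and re-runs the
//                current executable, capturing its output
//                (see `spawn_test_subprocess`);
//     secondary: finds the variable here, runs that single test via
//                `run_test_in_spawned_subprocess`, and exits with `TR_OK` or
//                `TR_FAILED`, which the primary maps back to a `TestResult`.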

/// Clones static test values into a dynamic vector, which `test_main()` needs
/// in order to hand out ownership of tests to parallel test runners.
///
/// This will panic when fed any dynamic tests, because they cannot be cloned.
fn make_owned_test(test: &&TestDescAndFn) -> TestDescAndFn {
    match test.testfn {
        StaticTestFn(f) => TestDescAndFn { testfn: StaticTestFn(f), desc: test.desc.clone() },
        StaticBenchFn(f) => TestDescAndFn { testfn: StaticBenchFn(f), desc: test.desc.clone() },
        _ => panic!("non-static tests passed to test::test_main_static"),
    }
}

/// Invoked when unit tests terminate. Should panic if the unit
/// test is considered a failure. By default, invokes `report()`
/// and checks for a `0` result.
pub fn assert_test_result<T: Termination>(result: T) {
    let code = result.report();
    assert_eq!(
        code, 0,
        "the test returned a termination value with a non-zero status code ({}) \
         which indicates a failure",
        code
    );
}
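
// Illustrative sketch of how a `Result`-returning test might be routed through
// this check (schematic, not the exact code rustc emits; `my_test` is made up):
//
//     fn my_test() -> Result<(), String> {
//         Ok(())
//     }
//
//     // The closure below is what would end up inside a `StaticTestFn`.
//     let wrapper = || assert_test_result(my_test());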

pub fn run_tests<F>(
    opts: &TestOpts,
    tests: Vec<TestDescAndFn>,
    mut notify_about_test_event: F,
) -> io::Result<()>
where
    F: FnMut(TestEvent) -> io::Result<()>,
{
    use std::collections::{self, HashMap};
    use std::hash::BuildHasherDefault;
    use std::sync::mpsc::RecvTimeoutError;
    // Use a deterministic hasher
    type TestMap =
        HashMap<TestDesc, Instant, BuildHasherDefault<collections::hash_map::DefaultHasher>>;

    let tests_len = tests.len();

    let mut filtered_tests = filter_tests(opts, tests);
    if !opts.bench_benchmarks {
        filtered_tests = convert_benchmarks_to_tests(filtered_tests);
    }

    let filtered_tests = {
        let mut filtered_tests = filtered_tests;
        for test in filtered_tests.iter_mut() {
            test.desc.name = test.desc.name.with_padding(test.testfn.padding());
        }

        filtered_tests
    };

    let filtered_out = tests_len - filtered_tests.len();
    let event = TestEvent::TeFilteredOut(filtered_out);
    notify_about_test_event(event)?;

    let filtered_descs = filtered_tests.iter().map(|t| t.desc.clone()).collect();

    let event = TestEvent::TeFiltered(filtered_descs);
    notify_about_test_event(event)?;

    let (filtered_tests, filtered_benchs): (Vec<_>, _) =
        filtered_tests.into_iter().partition(|e| match e.testfn {
            StaticTestFn(_) | DynTestFn(_) => true,
            _ => false,
        });

    let concurrency = opts.test_threads.unwrap_or_else(get_concurrency);

    let mut remaining = filtered_tests;
    remaining.reverse();
    let mut pending = 0;

    let (tx, rx) = channel::<CompletedTest>();
    let run_strategy = if opts.options.panic_abort && !opts.force_run_in_process {
        RunStrategy::SpawnPrimary
    } else {
        RunStrategy::InProcess
    };

    let mut running_tests: TestMap = HashMap::default();

    fn get_timed_out_tests(running_tests: &mut TestMap) -> Vec<TestDesc> {
        let now = Instant::now();
        let timed_out = running_tests
            .iter()
            .filter_map(|(desc, timeout)| if &now >= timeout { Some(desc.clone()) } else { None })
            .collect();
        for test in &timed_out {
            running_tests.remove(test);
        }
        timed_out
    }

    fn calc_timeout(running_tests: &TestMap) -> Option<Duration> {
        running_tests.values().min().map(|next_timeout| {
            let now = Instant::now();
            if *next_timeout >= now { *next_timeout - now } else { Duration::new(0, 0) }
        })
    }

    if concurrency == 1 {
        while !remaining.is_empty() {
            let test = remaining.pop().unwrap();
            let event = TestEvent::TeWait(test.desc.clone());
            notify_about_test_event(event)?;
            run_test(opts, !opts.run_tests, test, run_strategy, tx.clone(), Concurrent::No);
            let completed_test = rx.recv().unwrap();

            let event = TestEvent::TeResult(completed_test);
            notify_about_test_event(event)?;
        }
    } else {
        while pending > 0 || !remaining.is_empty() {
            while pending < concurrency && !remaining.is_empty() {
                let test = remaining.pop().unwrap();
                let timeout = time::get_default_test_timeout();
                running_tests.insert(test.desc.clone(), timeout);

                let event = TestEvent::TeWait(test.desc.clone());
                notify_about_test_event(event)?;
                run_test(opts, !opts.run_tests, test, run_strategy, tx.clone(), Concurrent::Yes);
                pending += 1;
            }

            let mut res;
            loop {
                if let Some(timeout) = calc_timeout(&running_tests) {
                    res = rx.recv_timeout(timeout);
                    for test in get_timed_out_tests(&mut running_tests) {
                        let event = TestEvent::TeTimeout(test);
                        notify_about_test_event(event)?;
                    }

                    match res {
                        Err(RecvTimeoutError::Timeout) => {
                            // Result is not yet ready, continue waiting.
                        }
                        _ => {
                            // We've got a result, stop the loop.
                            break;
                        }
                    }
                } else {
                    res = rx.recv().map_err(|_| RecvTimeoutError::Disconnected);
                    break;
                }
            }

            let completed_test = res.unwrap();
            running_tests.remove(&completed_test.desc);

            let event = TestEvent::TeResult(completed_test);
            notify_about_test_event(event)?;
            pending -= 1;
        }
    }

    if opts.bench_benchmarks {
        // All benchmarks run at the end, in serial.
        for b in filtered_benchs {
            let event = TestEvent::TeWait(b.desc.clone());
            notify_about_test_event(event)?;
            run_test(opts, false, b, run_strategy, tx.clone(), Concurrent::No);
            let completed_test = rx.recv().unwrap();

            let event = TestEvent::TeResult(completed_test);
            notify_about_test_event(event)?;
        }
    }
    Ok(())
}
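
// A minimal sketch of driving `run_tests` from a custom front end (the
// event-collecting closure is only illustrative):
//
//     let mut events = Vec::new();
//     run_tests(&opts, tests, |event| {
//         events.push(event);
//         Ok(())
//     })?;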

pub fn filter_tests(opts: &TestOpts, tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
    let mut filtered = tests;
    let matches_filter = |test: &TestDescAndFn, filter: &str| {
        let test_name = test.desc.name.as_slice();

        match opts.filter_exact {
            true => test_name == filter,
            false => test_name.contains(filter),
        }
    };

    // Remove tests that don't match the test filter
    if let Some(ref filter) = opts.filter {
        filtered.retain(|test| matches_filter(test, filter));
    }

    // Skip tests that match any of the skip filters
    filtered.retain(|test| !opts.skip.iter().any(|sf| matches_filter(test, sf)));

    // Exclude #[should_panic] tests
    if opts.exclude_should_panic {
        filtered.retain(|test| test.desc.should_panic == ShouldPanic::No);
    }

    // Maybe unignore tests
    match opts.run_ignored {
        RunIgnored::Yes => {
            filtered.iter_mut().for_each(|test| test.desc.ignore = false);
        }
        RunIgnored::Only => {
            filtered.retain(|test| test.desc.ignore);
            filtered.iter_mut().for_each(|test| test.desc.ignore = false);
        }
        RunIgnored::No => {}
    }

    // Sort the tests alphabetically
    filtered.sort_by(|t1, t2| t1.desc.name.as_slice().cmp(t2.desc.name.as_slice()));

    filtered
}
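
// Illustrative examples of the resulting filter semantics (binary and test
// names are made up; the flags are the standard libtest flags parsed in `cli`):
//
//     ./mytests foo            # keep tests whose name contains "foo"
//     ./mytests foo --exact    # keep only the test named exactly "foo"
//     ./mytests --skip slow    # drop tests whose name contains "slow"
//     ./mytests --ignored      # run only #[ignore] tests, un-ignoring them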

pub fn convert_benchmarks_to_tests(tests: Vec<TestDescAndFn>) -> Vec<TestDescAndFn> {
    // convert benchmarks to tests, if we're not benchmarking them
    tests
        .into_iter()
        .map(|x| {
            let testfn = match x.testfn {
                DynBenchFn(bench) => DynTestFn(Box::new(move || {
                    bench::run_once(|b| __rust_begin_short_backtrace(|| bench.run(b)))
                })),
                StaticBenchFn(benchfn) => DynTestFn(Box::new(move || {
                    bench::run_once(|b| __rust_begin_short_backtrace(|| benchfn(b)))
                })),
                f => f,
            };
            TestDescAndFn { desc: x.desc, testfn }
        })
        .collect()
}

pub fn run_test(
    opts: &TestOpts,
    force_ignore: bool,
    test: TestDescAndFn,
    strategy: RunStrategy,
    monitor_ch: Sender<CompletedTest>,
    concurrency: Concurrent,
) {
    let TestDescAndFn { desc, testfn } = test;

    // Emscripten can catch panics, but other wasm targets cannot, so
    // `should_panic` tests have to be ignored there.
    let ignore_because_no_process_support = desc.should_panic != ShouldPanic::No
        && cfg!(target_arch = "wasm32")
        && !cfg!(target_os = "emscripten");

    if force_ignore || desc.ignore || ignore_because_no_process_support {
        let message = CompletedTest::new(desc, TrIgnored, None, Vec::new());
        monitor_ch.send(message).unwrap();
        return;
    }

    struct TestRunOpts {
        pub strategy: RunStrategy,
        pub nocapture: bool,
        pub concurrency: Concurrent,
        pub time: Option<time::TestTimeOptions>,
    }

    fn run_test_inner(
        desc: TestDesc,
        monitor_ch: Sender<CompletedTest>,
        testfn: Box<dyn FnOnce() + Send>,
        opts: TestRunOpts,
    ) {
        let concurrency = opts.concurrency;
        let name = desc.name.clone();

        let runtest = move || match opts.strategy {
            RunStrategy::InProcess => run_test_in_process(
                desc,
                opts.nocapture,
                opts.time.is_some(),
                testfn,
                monitor_ch,
                opts.time,
            ),
            RunStrategy::SpawnPrimary => spawn_test_subprocess(
                desc,
                opts.nocapture,
                opts.time.is_some(),
                monitor_ch,
                opts.time,
            ),
        };

        // If the platform is single-threaded we're just going to run
        // the test synchronously, regardless of the concurrency
        // level.
        let supports_threads = !cfg!(target_os = "emscripten") && !cfg!(target_arch = "wasm32");
        if concurrency == Concurrent::Yes && supports_threads {
            let cfg = thread::Builder::new().name(name.as_slice().to_owned());
            cfg.spawn(runtest).unwrap();
        } else {
            runtest();
        }
    }

    let test_run_opts =
        TestRunOpts { strategy, nocapture: opts.nocapture, concurrency, time: opts.time_options };

    match testfn {
        DynBenchFn(bencher) => {
            // Benchmarks aren't expected to panic, so we run them all in-process.
            crate::bench::benchmark(desc, monitor_ch, opts.nocapture, |harness| {
                bencher.run(harness)
            });
        }
        StaticBenchFn(benchfn) => {
            // Benchmarks aren't expected to panic, so we run them all in-process.
            crate::bench::benchmark(desc, monitor_ch, opts.nocapture, benchfn);
        }
        DynTestFn(f) => {
            match strategy {
                RunStrategy::InProcess => (),
                _ => panic!("Cannot run dynamic test fn out-of-process"),
            };
            run_test_inner(
                desc,
                monitor_ch,
                Box::new(move || __rust_begin_short_backtrace(f)),
                test_run_opts,
            );
        }
        StaticTestFn(f) => run_test_inner(
            desc,
            monitor_ch,
            Box::new(move || __rust_begin_short_backtrace(f)),
            test_run_opts,
        ),
    }
}

/// Fixed frame used to clean the backtrace with `RUST_BACKTRACE=1`.
#[inline(never)]
fn __rust_begin_short_backtrace<F: FnOnce()>(f: F) {
    f()
}

fn run_test_in_process(
    desc: TestDesc,
    nocapture: bool,
    report_time: bool,
    testfn: Box<dyn FnOnce() + Send>,
    monitor_ch: Sender<CompletedTest>,
    time_opts: Option<time::TestTimeOptions>,
) {
    // Buffer for capturing standard I/O
    let data = Arc::new(Mutex::new(Vec::new()));

    let oldio = if !nocapture {
        Some((
            io::set_print(Some(Sink::new_boxed(&data))),
            io::set_panic(Some(Sink::new_boxed(&data))),
        ))
    } else {
        None
    };

    let start = report_time.then(Instant::now);
    let result = catch_unwind(AssertUnwindSafe(testfn));
    let exec_time = start.map(|start| {
        let duration = start.elapsed();
        TestExecTime(duration)
    });

    if let Some((printio, panicio)) = oldio {
        io::set_print(printio);
        io::set_panic(panicio);
    }

    let test_result = match result {
        Ok(()) => calc_result(&desc, Ok(()), &time_opts, &exec_time),
        Err(e) => calc_result(&desc, Err(e.as_ref()), &time_opts, &exec_time),
    };
    let stdout = data.lock().unwrap().to_vec();
    let message = CompletedTest::new(desc, test_result, exec_time, stdout);
    monitor_ch.send(message).unwrap();
}

fn spawn_test_subprocess(
    desc: TestDesc,
    nocapture: bool,
    report_time: bool,
    monitor_ch: Sender<CompletedTest>,
    time_opts: Option<time::TestTimeOptions>,
) {
    let (result, test_output, exec_time) = (|| {
        let args = env::args().collect::<Vec<_>>();
        let current_exe = &args[0];

        let mut command = Command::new(current_exe);
        command.env(SECONDARY_TEST_INVOKER_VAR, desc.name.as_slice());
        if nocapture {
            command.stdout(process::Stdio::inherit());
            command.stderr(process::Stdio::inherit());
        }

        let start = report_time.then(Instant::now);
        let output = match command.output() {
            Ok(out) => out,
            Err(e) => {
                let err = format!("Failed to spawn {} as child for test: {:?}", args[0], e);
                return (TrFailed, err.into_bytes(), None);
            }
        };
        let exec_time = start.map(|start| {
            let duration = start.elapsed();
            TestExecTime(duration)
        });

        let std::process::Output { stdout, stderr, status } = output;
        let mut test_output = stdout;
        formatters::write_stderr_delimiter(&mut test_output, &desc.name);
        test_output.extend_from_slice(&stderr);

        let result = match (|| -> Result<TestResult, String> {
            let exit_code = get_exit_code(status)?;
            Ok(get_result_from_exit_code(&desc, exit_code, &time_opts, &exec_time))
        })() {
            Ok(r) => r,
            Err(e) => {
                write!(&mut test_output, "Unexpected error: {}", e).unwrap();
                TrFailed
            }
        };

        (result, test_output, exec_time)
    })();

    let message = CompletedTest::new(desc, result, exec_time, test_output);
    monitor_ch.send(message).unwrap();
}

fn run_test_in_spawned_subprocess(desc: TestDesc, testfn: Box<dyn FnOnce() + Send>) -> ! {
    let builtin_panic_hook = panic::take_hook();
    let record_result = Arc::new(move |panic_info: Option<&'_ PanicInfo<'_>>| {
        let test_result = match panic_info {
            Some(info) => calc_result(&desc, Err(info.payload()), &None, &None),
            None => calc_result(&desc, Ok(()), &None, &None),
        };

        // We don't support serializing TrFailedMsg, so just
        // print the message out to stderr.
        if let TrFailedMsg(msg) = &test_result {
            eprintln!("{}", msg);
        }

        if let Some(info) = panic_info {
            builtin_panic_hook(info);
        }

        if let TrOk = test_result {
            process::exit(test_result::TR_OK);
        } else {
            process::exit(test_result::TR_FAILED);
        }
    });
    let record_result2 = record_result.clone();
    panic::set_hook(Box::new(move |info| record_result2(Some(&info))));
    testfn();
    record_result(None);
    unreachable!("panic=abort callback should have exited the process")
}