tweak failure output a little
diff --git a/ui_test/src/lib.rs b/ui_test/src/lib.rs
index 6052efe02e06af3929a6fda2e116fe55574dc402..917e382379abe34443d2dba3ed6713eff12020c4 100644
--- a/ui_test/src/lib.rs
+++ b/ui_test/src/lib.rs
@@ -1,17 +1,23 @@
+#![allow(clippy::enum_variant_names, clippy::useless_format, clippy::too_many_arguments)]
+
+use std::collections::VecDeque;
 use std::fmt::Write;
 use std::path::{Path, PathBuf};
 use std::process::{Command, ExitStatus};
 use std::sync::atomic::{AtomicUsize, Ordering};
 use std::sync::Mutex;
 
+pub use color_eyre;
+use color_eyre::eyre::Result;
 use colored::*;
-use comments::ErrorMatch;
-use crossbeam::queue::SegQueue;
+use parser::{ErrorMatch, Pattern};
 use regex::Regex;
+use rustc_stderr::{Level, Message};
 
-use crate::comments::Comments;
+use crate::parser::{Comments, Condition};
 
-mod comments;
+mod parser;
+mod rustc_stderr;
 #[cfg(test)]
 mod tests;
 
@@ -47,15 +53,14 @@ pub enum OutputConflictHandling {
 
 pub type Filter = Vec<(Regex, &'static str)>;
 
-pub fn run_tests(config: Config) {
+pub fn run_tests(config: Config) -> Result<()> {
     eprintln!("   Compiler flags: {:?}", config.args);
 
     // Get the triple with which to run the tests
     let target = config.target.clone().unwrap_or_else(|| config.get_host());
 
-    // A queue for files or folders to process
-    let todo = SegQueue::new();
-    todo.push(config.root_dir.clone());
+    // A channel for files to process
+    let (submit, receive) = crossbeam::channel::unbounded();
 
     // Some statistics and failure reports.
     let failures = Mutex::new(vec![]);
@@ -63,21 +68,38 @@ pub fn run_tests(config: Config) {
     let ignored = AtomicUsize::default();
     let filtered = AtomicUsize::default();
 
-    crossbeam::scope(|s| {
-        for _ in 0..std::thread::available_parallelism().unwrap().get() {
-            s.spawn(|_| {
-                while let Some(path) = todo.pop() {
-                    // Collect everything inside directories
-                    if path.is_dir() {
-                        for entry in std::fs::read_dir(path).unwrap() {
-                            todo.push(entry.unwrap().path());
-                        }
-                        continue;
-                    }
-                    // Only look at .rs files
-                    if !path.extension().map(|ext| ext == "rs").unwrap_or(false) {
-                        continue;
+    crossbeam::scope(|s| -> Result<()> {
+        // Create a thread that is in charge of walking the directory and submitting jobs.
+        // It closes the channel when it is done.
+        s.spawn(|_| {
+            let mut todo = VecDeque::new();
+            todo.push_back(config.root_dir.clone());
+            while let Some(path) = todo.pop_front() {
+                if path.is_dir() {
+                    // Enqueue everything inside this directory.
+                    // We want it sorted, to have some control over scheduling of slow tests.
+                    let mut entries =
+                        std::fs::read_dir(path).unwrap().collect::<Result<Vec<_>, _>>().unwrap();
+                    entries.sort_by_key(|e| e.file_name());
+                    for entry in entries {
+                        todo.push_back(entry.path());
                     }
+                } else if path.extension().map(|ext| ext == "rs").unwrap_or(false) {
+                    // Forward .rs files to the test workers.
+                    submit.send(path).unwrap();
+                }
+            }
+            // There will be no more jobs. This signals the workers to quit.
+            // (This also ensures `submit` is moved into this closure.)
+            drop(submit);
+        });
+
+        let mut threads = vec![];
+
+        // Create N worker threads that receive files to test.
+        for _ in 0..std::thread::available_parallelism().unwrap().get() {
+            threads.push(s.spawn(|_| -> Result<()> {
+                for path in &receive {
                     if !config.path_filter.is_empty() {
                         let path_display = path.display().to_string();
                         if !config.path_filter.iter().any(|filter| path_display.contains(filter)) {
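
The two hunks above replace the shared `SegQueue` (where every worker also did directory traversal) with a dedicated walker thread feeding a `crossbeam` channel; the workers drain the channel until the walker drops `submit`, which disconnects it. A minimal standalone sketch of the same pattern, assuming a hardcoded `tests` root and a placeholder worker body in place of the real `run_test`:

```rust
use std::collections::VecDeque;
use std::path::PathBuf;

fn main() {
    let (submit, receive) = crossbeam::channel::unbounded::<PathBuf>();

    crossbeam::scope(|s| {
        // Walker: breadth-first, sorted traversal for deterministic scheduling.
        // `submit` is moved in and dropped at the end, closing the channel.
        s.spawn(move |_| {
            let mut todo = VecDeque::new();
            todo.push_back(PathBuf::from("tests"));
            while let Some(path) = todo.pop_front() {
                if path.is_dir() {
                    let mut entries: Vec<PathBuf> = std::fs::read_dir(&path)
                        .unwrap()
                        .map(|entry| entry.unwrap().path())
                        .collect();
                    entries.sort();
                    todo.extend(entries);
                } else if path.extension().map(|ext| ext == "rs").unwrap_or(false) {
                    submit.send(path).unwrap();
                }
            }
        });

        // Workers: each loop ends once the channel is disconnected and drained.
        for _ in 0..std::thread::available_parallelism().unwrap().get() {
            let receive = receive.clone();
            s.spawn(move |_| {
                for path in receive {
                    println!("would test {}", path.display());
                }
            });
        }
    })
    .unwrap();
}
```
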
@@ -85,9 +107,9 @@ pub fn run_tests(config: Config) {
                             continue;
                         }
                     }
-                    let comments = Comments::parse_file(&path);
+                    let comments = Comments::parse_file(&path)?;
                     // Ignore file if only/ignore rules do (not) apply
-                    if ignore_file(&comments, &target) {
+                    if !test_file_conditions(&comments, &target, &config) {
                         ignored.fetch_add(1, Ordering::Relaxed);
                         eprintln!(
                             "{} ... {}",
@@ -100,7 +122,8 @@ pub fn run_tests(config: Config) {
                     for revision in
                         comments.revisions.clone().unwrap_or_else(|| vec![String::new()])
                     {
-                        let (m, errors) = run_test(&path, &config, &target, &revision, &comments);
+                        let (m, errors, stderr) =
+                            run_test(&path, &config, &target, &revision, &comments);
 
                         // Use a single `eprintln!` to prevent messages from different threads from getting intermingled.
                         let mut msg = format!("{} ", path.display());
@@ -113,14 +136,25 @@ pub fn run_tests(config: Config) {
                             succeeded.fetch_add(1, Ordering::Relaxed);
                         } else {
                             eprintln!("{msg}{}", "FAILED".red().bold());
-                            failures.lock().unwrap().push((path.clone(), m, revision, errors));
+                            failures.lock().unwrap().push((
+                                path.clone(),
+                                m,
+                                revision,
+                                errors,
+                                stderr,
+                            ));
                         }
                     }
                 }
-            });
+                Ok(())
+            }));
+        }
+        for thread in threads {
+            thread.join().unwrap()?;
         }
+        Ok(())
     })
-    .unwrap();
+    .unwrap()?;
 
     // Print all errors in a single thread to show reliable output
     let failures = failures.into_inner().unwrap();
@@ -128,47 +162,72 @@ pub fn run_tests(config: Config) {
     let ignored = ignored.load(Ordering::Relaxed);
     let filtered = filtered.load(Ordering::Relaxed);
     if !failures.is_empty() {
-        for (path, miri, revision, errors) in &failures {
+        for (path, miri, revision, errors, stderr) in &failures {
             eprintln!();
-            eprint!("{}", path.display().to_string().underline());
+            eprint!("{}", path.display().to_string().underline().bold());
             if !revision.is_empty() {
                 eprint!(" (revision `{}`)", revision);
             }
-            eprint!(" {}", "FAILED".red());
+            eprint!(" {}", "FAILED:".red().bold());
             eprintln!();
             eprintln!("command: {:?}", miri);
             eprintln!();
-            let mut dump_stderr = None;
             for error in errors {
                 match error {
                     Error::ExitStatus(mode, exit_status) => eprintln!("{mode:?} got {exit_status}"),
-                    Error::PatternNotFound { stderr, pattern, definition_line } => {
-                        eprintln!("`{pattern}` {} in stderr output", "not found".red());
+                    Error::PatternNotFound { pattern, definition_line } => {
+                        match pattern {
+                            Pattern::SubString(s) =>
+                                eprintln!("substring `{s}` {} in stderr output", "not found".red()),
+                            Pattern::Regex(r) =>
+                                eprintln!("`/{r}/` does {} stderr output", "not match".red()),
+                        }
                         eprintln!(
                             "expected because of pattern here: {}:{definition_line}",
                             path.display().to_string().bold()
                         );
-                        dump_stderr = Some(stderr.clone())
                     }
-                    Error::NoPatternsFound =>
-                        eprintln!("{}", "no error patterns found in failure test".red()),
+                    Error::NoPatternsFound => {
+                        eprintln!("{}", "no error patterns found in failure test".red());
+                    }
                     Error::PatternFoundInPassTest =>
                         eprintln!("{}", "error pattern found in success test".red()),
                     Error::OutputDiffers { path, actual, expected } => {
-                        dump_stderr = None;
                         eprintln!("actual output differed from expected {}", path.display());
                         eprintln!("{}", pretty_assertions::StrComparison::new(expected, actual));
                         eprintln!()
                     }
+                    Error::ErrorsWithoutPattern { path: None, msgs } => {
+                        eprintln!(
+                            "There were {} unmatched diagnostics that occurred outside the testfile and had no pattern",
+                            msgs.len(),
+                        );
+                        for Message { level, message } in msgs {
+                            eprintln!("    {level:?}: {message}")
+                        }
+                    }
+                    Error::ErrorsWithoutPattern { path: Some((path, line)), msgs } => {
+                        eprintln!(
+                            "There were {} unmatched diagnostics at {}:{line}",
+                            msgs.len(),
+                            path.display()
+                        );
+                        for Message { level, message } in msgs {
+                            eprintln!("    {level:?}: {message}")
+                        }
+                    }
                 }
                 eprintln!();
             }
-            if let Some(stderr) = dump_stderr {
-                eprintln!("actual stderr:");
-                eprintln!("{}", stderr);
-                eprintln!();
-            }
+            eprintln!("full stderr:");
+            eprintln!("{}", stderr);
+            eprintln!();
         }
+        eprintln!("{}", "FAILURES:".red().underline().bold());
+        for (path, _miri, _revision, _errors, _stderr) in &failures {
+            eprintln!("    {}", path.display());
+        }
+        eprintln!();
         eprintln!(
             "test result: {}. {} tests failed, {} tests passed, {} ignored, {} filtered out",
             "FAIL".red(),
@@ -188,6 +247,7 @@ pub fn run_tests(config: Config) {
         filtered.to_string().yellow(),
     );
     eprintln!();
+    Ok(())
 }
 
 #[derive(Debug)]
@@ -195,8 +255,7 @@ enum Error {
     /// Got an invalid exit status for the given mode.
     ExitStatus(Mode, ExitStatus),
     PatternNotFound {
-        stderr: String,
-        pattern: String,
+        pattern: Pattern,
         definition_line: usize,
     },
     /// A ui test checking for failure does not have any failure patterns
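
`pattern` is now a structured `Pattern` from the new `parser` module rather than a raw `String`. The module itself is not part of this diff; judging from the `SubString`/`Regex` variants printed in the failure report above and the `pattern.matches(...)` calls in `check_annotations` below, a plausible minimal shape is:

```rust
use regex::Regex;

#[derive(Debug, Clone)]
enum Pattern {
    SubString(String),
    Regex(Regex),
}

impl Pattern {
    fn matches(&self, message: &str) -> bool {
        match self {
            Pattern::SubString(s) => message.contains(s),
            Pattern::Regex(r) => r.is_match(message),
        }
    }
}

fn main() {
    assert!(Pattern::SubString("dangling".into()).matches("dereferencing a dangling pointer"));
    assert!(Pattern::Regex(Regex::new("NULL|null").unwrap()).matches("NULL pointer"));
}
```
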
@@ -209,6 +268,10 @@ enum Error {
         actual: String,
         expected: String,
     },
+    ErrorsWithoutPattern {
+        msgs: Vec<Message>,
+        path: Option<(PathBuf, usize)>,
+    },
 }
 
 type Errors = Vec<Error>;
@@ -219,7 +282,7 @@ fn run_test(
     target: &str,
     revision: &str,
     comments: &Comments,
-) -> (Command, Errors) {
+) -> (Command, Errors, String) {
     // Run miri
     let mut miri = Command::new(&config.program);
     miri.args(config.args.iter());
@@ -227,6 +290,7 @@ fn run_test(
     if !revision.is_empty() {
         miri.arg(format!("--cfg={revision}"));
     }
+    miri.arg("--error-format=json");
     for arg in &comments.compile_flags {
         miri.arg(arg);
     }
@@ -235,7 +299,7 @@ fn run_test(
     }
     let output = miri.output().expect("could not execute miri");
     let mut errors = config.mode.ok(output.status);
-    check_test_result(
+    let stderr = check_test_result(
         path,
         config,
         target,
@@ -245,7 +309,7 @@ fn run_test(
         &output.stdout,
         &output.stderr,
     );
-    (miri, errors)
+    (miri, errors, stderr)
 }
 
 fn check_test_result(
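
Passing `--error-format=json` makes rustc emit each diagnostic as a single-line JSON object on stderr; the new `rustc_stderr` module (also not shown in this diff) parses those into the `messages`, `messages_from_unknown_file_or_line`, and `rendered` values used below. A hedged sketch of that parsing step with `serde_json`: the keys `message`, `level`, `spans`, `file_name`, `line_start`, and `rendered` are rustc's actual JSON fields, but the flat return type is a simplification of the real module's output.

```rust
use std::path::Path;

use serde::Deserialize;

/// The subset of rustc's JSON diagnostic format needed here; other keys are ignored.
#[derive(Deserialize)]
struct Diagnostic {
    message: String,
    level: String,
    spans: Vec<Span>,
    rendered: Option<String>,
}

#[derive(Deserialize)]
struct Span {
    file_name: String,
    line_start: usize,
}

/// Split raw stderr into structured messages, keyed by the line in `path` that
/// a span points at (`None` meaning "unknown file or line"), plus a
/// human-readable rendering of everything for the `full stderr` dump.
fn process(path: &Path, stderr: &[u8]) -> (Vec<(Option<usize>, String, String)>, String) {
    let mut messages = Vec::new();
    let mut rendered = String::new();
    for line in std::str::from_utf8(stderr).unwrap().lines() {
        if let Ok(diag) = serde_json::from_str::<Diagnostic>(line) {
            let line_nr = diag
                .spans
                .iter()
                .find(|span| Path::new(&span.file_name) == path)
                .map(|span| span.line_start);
            messages.push((line_nr, diag.level, diag.message));
            if let Some(r) = diag.rendered {
                rendered.push_str(&r);
            }
        } else {
            // Anything that is not JSON (e.g. ICE backtraces) is kept verbatim.
            rendered.push_str(line);
            rendered.push('\n');
        }
    }
    (messages, rendered)
}

fn main() {
    let json = r#"{"message":"mismatched types","level":"error","spans":[{"file_name":"tests/fail/a.rs","line_start":3}],"rendered":"error: mismatched types\n"}"#;
    let (messages, rendered) = process(Path::new("tests/fail/a.rs"), json.as_bytes());
    assert_eq!(
        messages[0],
        (Some(3), "error".to_string(), "mismatched types".to_string())
    );
    print!("{rendered}");
}
```
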
@@ -257,11 +321,9 @@ fn check_test_result(
     errors: &mut Errors,
     stdout: &[u8],
     stderr: &[u8],
-) {
+) -> String {
     // Always remove annotation comments from stderr.
-    let annotations = Regex::new(r"\s*//~.*").unwrap();
-    let stderr = std::str::from_utf8(stderr).unwrap();
-    let stderr = annotations.replace_all(stderr, "");
+    let diagnostics = rustc_stderr::process(path, stderr);
     let stdout = std::str::from_utf8(stdout).unwrap();
     // Check output files (if any)
     let revised = |extension: &str| {
@@ -273,70 +335,121 @@ fn check_test_result(
     };
     // Check output files against actual output
     check_output(
-        &stderr,
+        &diagnostics.rendered,
         path,
         errors,
         revised("stderr"),
         target,
         &config.stderr_filters,
-        &config,
+        config,
         comments,
     );
     check_output(
-        &stdout,
+        stdout,
         path,
         errors,
         revised("stdout"),
         target,
         &config.stdout_filters,
-        &config,
+        config,
         comments,
     );
     // Check error annotations in the source against output
-    check_annotations(&stderr, errors, config, revision, comments);
+    check_annotations(
+        diagnostics.messages,
+        diagnostics.messages_from_unknown_file_or_line,
+        path,
+        errors,
+        config,
+        revision,
+        comments,
+    );
+    diagnostics.rendered
 }
 
 fn check_annotations(
-    unnormalized_stderr: &str,
+    mut messages: Vec<Vec<Message>>,
+    mut messages_from_unknown_file_or_line: Vec<Message>,
+    path: &Path,
     errors: &mut Errors,
     config: &Config,
     revision: &str,
     comments: &Comments,
 ) {
-    let mut found_annotation = false;
     if let Some((ref error_pattern, definition_line)) = comments.error_pattern {
-        if !unnormalized_stderr.contains(error_pattern) {
-            errors.push(Error::PatternNotFound {
-                stderr: unnormalized_stderr.to_string(),
-                pattern: error_pattern.to_string(),
-                definition_line,
-            });
+        // first check the diagnostic messages outside of our file. We check this first, so that
+        // you can mix in-file annotations with //@error-pattern annotations, even if there is overlap
+        // in the messages.
+        if let Some(i) = messages_from_unknown_file_or_line
+            .iter()
+            .position(|msg| error_pattern.matches(&msg.message))
+        {
+            messages_from_unknown_file_or_line.remove(i);
+        } else {
+            errors.push(Error::PatternNotFound { pattern: error_pattern.clone(), definition_line });
         }
-        found_annotation = true;
     }
-    for &ErrorMatch { ref matched, revision: ref rev, definition_line } in &comments.error_matches {
-        // FIXME: check that the error happens on the marked line
 
+    // The order on `Level` is such that `Error` is the highest level.
+    // We will ensure that *all* diagnostics of level at least `lowest_annotation_level`
+    // are matched.
+    let mut lowest_annotation_level = Level::Error;
+    for &ErrorMatch { ref pattern, revision: ref rev, definition_line, line, level } in
+        &comments.error_matches
+    {
         if let Some(rev) = rev {
             if rev != revision {
                 continue;
             }
         }
 
-        if !unnormalized_stderr.contains(matched) {
-            errors.push(Error::PatternNotFound {
-                stderr: unnormalized_stderr.to_string(),
-                pattern: matched.to_string(),
-                definition_line,
-            });
+        // If we found a diagnostic with a level annotation, make sure that all
+        // diagnostics of that level have annotations, even if we don't end up finding a matching diagnostic
+        // for this pattern.
+        lowest_annotation_level = std::cmp::min(lowest_annotation_level, level);
+
+        if let Some(msgs) = messages.get_mut(line) {
+            let found =
+                msgs.iter().position(|msg| pattern.matches(&msg.message) && msg.level == level);
+            if let Some(found) = found {
+                msgs.remove(found);
+                continue;
+            }
         }
-        found_annotation = true;
+
+        errors.push(Error::PatternNotFound { pattern: pattern.clone(), definition_line });
     }
-    match (config.mode, found_annotation) {
+
+    let filter = |msgs: Vec<Message>| -> Vec<_> {
+        msgs.into_iter()
+            .filter(|msg| {
+                msg.level
+                    >= comments.require_annotations_for_level.unwrap_or(lowest_annotation_level)
+            })
+            .collect()
+    };
+
+    let messages_from_unknown_file_or_line = filter(messages_from_unknown_file_or_line);
+    if !messages_from_unknown_file_or_line.is_empty() {
+        errors.push(Error::ErrorsWithoutPattern {
+            path: None,
+            msgs: messages_from_unknown_file_or_line,
+        });
+    }
+
+    for (line, msgs) in messages.into_iter().enumerate() {
+        let msgs = filter(msgs);
+        if !msgs.is_empty() {
+            errors
+                .push(Error::ErrorsWithoutPattern { path: Some((path.to_path_buf(), line)), msgs });
+        }
+    }
+
+    match (config.mode, comments.error_pattern.is_some() || !comments.error_matches.is_empty()) {
         (Mode::Pass, true) | (Mode::Panic, true) => errors.push(Error::PatternFoundInPassTest),
         (Mode::Fail, false) => errors.push(Error::NoPatternsFound),
         _ => {}
-    };
+    }
 }
 
 fn check_output(
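
The annotation matching above leans on `Level`'s derived `Ord` (defined in `rustc_stderr`, not shown) in which `Error` compares highest: every annotation lowers `lowest_annotation_level`, and any leftover diagnostic at or above that threshold (or above `require_annotations_for_level`, if set) is reported as `ErrorsWithoutPattern`. A small self-contained illustration; the variant set is abbreviated and the declaration order producing this `Ord` is an assumption:

```rust
use std::cmp::min;

// With a derived `Ord`, later variants compare greater, so `Error` is highest.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
enum Level {
    Note,
    Warn,
    Error,
}

fn main() {
    assert!(Level::Error > Level::Warn && Level::Warn > Level::Note);

    // Two annotations in a test file: one at ERROR level, one at WARN level.
    let annotation_levels = [Level::Error, Level::Warn];
    let mut lowest_annotation_level = Level::Error;
    for &level in &annotation_levels {
        lowest_annotation_level = min(lowest_annotation_level, level);
    }
    // Annotating any warning means *all* warnings and errors must be matched.
    assert_eq!(lowest_annotation_level, Level::Warn);

    // Leftover diagnostics below the threshold are tolerated; the rest are errors.
    let unmatched = [Level::Note, Level::Warn];
    let must_be_annotated: Vec<_> =
        unmatched.into_iter().filter(|&l| l >= lowest_annotation_level).collect();
    assert_eq!(must_be_annotated, [Level::Warn]);
}
```
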
@@ -374,41 +487,37 @@ fn check_output(
 
 fn output_path(path: &Path, comments: &Comments, kind: String, target: &str) -> PathBuf {
     if comments.stderr_per_bitwidth {
-        return path.with_extension(format!("{}.{kind}", get_pointer_width(target)));
+        return path.with_extension(format!("{}bit.{kind}", get_pointer_width(target)));
     }
     path.with_extension(kind)
 }
 
-fn ignore_file(comments: &Comments, target: &str) -> bool {
-    for s in &comments.ignore {
-        if target.contains(s) {
-            return true;
-        }
-        if get_pointer_width(target) == s {
-            return true;
-        }
+fn test_condition(condition: &Condition, target: &str, config: &Config) -> bool {
+    match condition {
+        Condition::Bitwidth(bits) => get_pointer_width(target) == *bits,
+        Condition::Target(t) => target.contains(t),
+        Condition::OnHost => config.target.is_none(),
     }
-    for s in &comments.only {
-        if !target.contains(s) {
-            return true;
-        }
-        if get_pointer_width(target) != s {
-            return true;
-        }
+}
+
+/// Returns whether this file should be run, according to its in-file conditions.
+fn test_file_conditions(comments: &Comments, target: &str, config: &Config) -> bool {
+    if comments.ignore.iter().any(|c| test_condition(c, target, config)) {
+        return false;
     }
-    false
+    comments.only.iter().all(|c| test_condition(c, target, config))
 }
 
 // Taken 1:1 from compiletest-rs
-fn get_pointer_width(triple: &str) -> &'static str {
+fn get_pointer_width(triple: &str) -> u8 {
     if (triple.contains("64") && !triple.ends_with("gnux32") && !triple.ends_with("gnu_ilp32"))
         || triple.starts_with("s390x")
     {
-        "64bit"
+        64
     } else if triple.starts_with("avr") {
-        "16bit"
+        16
     } else {
-        "32bit"
+        32
     }
 }
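
With `get_pointer_width` returning `u8`, the same value both formats into per-bitwidth file names (`foo.64bit.stderr` via the `{}bit.{kind}` pattern above) and compares directly against `Condition::Bitwidth`. A few concrete checks of the function exactly as it stands in this diff:

```rust
fn get_pointer_width(triple: &str) -> u8 {
    if (triple.contains("64") && !triple.ends_with("gnux32") && !triple.ends_with("gnu_ilp32"))
        || triple.starts_with("s390x")
    {
        64
    } else if triple.starts_with("avr") {
        16
    } else {
        32
    }
}

fn main() {
    assert_eq!(get_pointer_width("x86_64-unknown-linux-gnu"), 64);
    assert_eq!(get_pointer_width("s390x-unknown-linux-gnu"), 64);
    assert_eq!(get_pointer_width("i686-pc-windows-msvc"), 32);
    // ILP32 ABIs on 64-bit targets are deliberately excluded from the "64" match.
    assert_eq!(get_pointer_width("x86_64-unknown-linux-gnux32"), 32);
    assert_eq!(get_pointer_width("avr-unknown-gnu-atmega328"), 16);
}
```
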