]> git.lizzy.rs Git - rust.git/blob - crates/ra_cargo_watch/src/lib.rs
Fix shutdown behavior of main cargo-watch thread.
[rust.git] / crates / ra_cargo_watch / src / lib.rs
1 //! cargo_check provides the functionality needed to run `cargo check` or
//! another compatible command (e.g. clippy) in a background thread and provide
3 //! LSP diagnostics based on the output of the command.
4 use cargo_metadata::Message;
5 use crossbeam_channel::{select, unbounded, Receiver, RecvError, Sender};
6 use lsp_types::{
7     Diagnostic, Url, WorkDoneProgress, WorkDoneProgressBegin, WorkDoneProgressEnd,
8     WorkDoneProgressReport,
9 };
10 use parking_lot::RwLock;
11 use std::{
12     collections::HashMap,
13     path::PathBuf,
14     process::{Command, Stdio},
15     sync::Arc,
16     thread::JoinHandle,
17     time::Instant,
18 };
19
20 mod conv;
21
22 use crate::conv::{map_rust_diagnostic_to_lsp, MappedRustDiagnostic, SuggestedFix};
23
/// Configuration for the background `cargo check` watcher.
#[derive(Clone, Debug)]
pub struct CheckOptions {
    /// Whether the watcher should run at all; when false the worker thread
    /// exits immediately without spawning cargo.
    pub enable: bool,
    /// Extra arguments appended to the cargo invocation, after the built-in ones.
    pub args: Vec<String>,
    /// The cargo sub-command to run (e.g. "check"; per the module docs, a
    /// compatible command such as clippy also works).
    pub command: String,
    /// When true, `--all-targets` is passed to cargo.
    pub all_targets: bool,
}
31
/// CheckWatcher wraps the shared state and communication machinery used for
/// running `cargo check` (or other compatible command) and providing
/// diagnostics based on the output.
/// The spawned thread is shut down when this struct is dropped.
#[derive(Debug)]
pub struct CheckWatcher {
    /// Receives diagnostic/progress updates produced by the worker thread.
    pub task_recv: Receiver<CheckTask>,
    /// Sends commands to the worker thread. Taken out (and dropped) during
    /// `Drop` — closing this channel is what tells the worker loop to exit.
    pub cmd_send: Option<Sender<CheckCommand>>,
    /// Diagnostics and suggested fixes collected so far, shared with the
    /// worker thread.
    pub shared: Arc<RwLock<CheckWatcherSharedState>>,
    /// Join handle of the worker thread; `None` once it has been joined.
    handle: Option<JoinHandle<()>>,
}
43
44 impl CheckWatcher {
45     pub fn new(options: &CheckOptions, workspace_root: PathBuf) -> CheckWatcher {
46         let options = options.clone();
47         let shared = Arc::new(RwLock::new(CheckWatcherSharedState::new()));
48
49         let (task_send, task_recv) = unbounded::<CheckTask>();
50         let (cmd_send, cmd_recv) = unbounded::<CheckCommand>();
51         let shared_ = shared.clone();
52         let handle = std::thread::spawn(move || {
53             let mut check = CheckWatcherState::new(options, workspace_root, shared_);
54             check.run(&task_send, &cmd_recv);
55         });
56         CheckWatcher { task_recv, cmd_send: Some(cmd_send), handle: Some(handle), shared }
57     }
58
59     /// Schedule a re-start of the cargo check worker.
60     pub fn update(&self) {
61         if let Some(cmd_send) = &self.cmd_send {
62             cmd_send.send(CheckCommand::Update).unwrap();
63         }
64     }
65 }
66
67 impl std::ops::Drop for CheckWatcher {
68     fn drop(&mut self) {
69         if let Some(handle) = self.handle.take() {
70             // Take the sender out of the option
71             let recv = self.cmd_send.take();
72
73             // Dropping the sender finishes the thread loop
74             drop(recv);
75
76             // Join the thread, it should finish shortly. We don't really care
77             // whether it panicked, so it is safe to ignore the result
78             let _ = handle.join();
79         }
80     }
81 }
82
/// Diagnostics and suggested fixes collected from check runs, keyed by the
/// file (`Url`) they apply to. Written by the worker thread and read by the
/// owner of `CheckWatcher`, hence wrapped in `Arc<RwLock<..>>` by its users.
#[derive(Debug)]
pub struct CheckWatcherSharedState {
    // Diagnostics per file; duplicates are filtered out on insertion.
    diagnostic_collection: HashMap<Url, Vec<Diagnostic>>,
    // Suggested fixes per file; one fix may apply to several diagnostics.
    suggested_fix_collection: HashMap<Url, Vec<SuggestedFix>>,
}
88
89 impl CheckWatcherSharedState {
90     fn new() -> CheckWatcherSharedState {
91         CheckWatcherSharedState {
92             diagnostic_collection: HashMap::new(),
93             suggested_fix_collection: HashMap::new(),
94         }
95     }
96
97     /// Clear the cached diagnostics, and schedule updating diagnostics by the
98     /// server, to clear stale results.
99     pub fn clear(&mut self, task_send: &Sender<CheckTask>) {
100         let cleared_files: Vec<Url> = self.diagnostic_collection.keys().cloned().collect();
101
102         self.diagnostic_collection.clear();
103         self.suggested_fix_collection.clear();
104
105         for uri in cleared_files {
106             task_send.send(CheckTask::Update(uri.clone())).unwrap();
107         }
108     }
109
110     pub fn diagnostics_for(&self, uri: &Url) -> Option<&[Diagnostic]> {
111         self.diagnostic_collection.get(uri).map(|d| d.as_slice())
112     }
113
114     pub fn fixes_for(&self, uri: &Url) -> Option<&[SuggestedFix]> {
115         self.suggested_fix_collection.get(uri).map(|d| d.as_slice())
116     }
117
118     fn add_diagnostic(&mut self, file_uri: Url, diagnostic: Diagnostic) {
119         let diagnostics = self.diagnostic_collection.entry(file_uri).or_default();
120
121         // If we're building multiple targets it's possible we've already seen this diagnostic
122         let is_duplicate = diagnostics.iter().any(|d| are_diagnostics_equal(d, &diagnostic));
123         if is_duplicate {
124             return;
125         }
126
127         diagnostics.push(diagnostic);
128     }
129
130     fn add_suggested_fix_for_diagnostic(
131         &mut self,
132         mut suggested_fix: SuggestedFix,
133         diagnostic: &Diagnostic,
134     ) {
135         let file_uri = suggested_fix.location.uri.clone();
136         let file_suggestions = self.suggested_fix_collection.entry(file_uri).or_default();
137
138         let existing_suggestion: Option<&mut SuggestedFix> =
139             file_suggestions.iter_mut().find(|s| s == &&suggested_fix);
140         if let Some(existing_suggestion) = existing_suggestion {
141             // The existing suggestion also applies to this new diagnostic
142             existing_suggestion.diagnostics.push(diagnostic.clone());
143         } else {
144             // We haven't seen this suggestion before
145             suggested_fix.diagnostics.push(diagnostic.clone());
146             file_suggestions.push(suggested_fix);
147         }
148     }
149 }
150
/// Messages sent from the check worker to the owner of `CheckWatcher`.
#[derive(Debug)]
pub enum CheckTask {
    /// Request an update of the given file's diagnostics
    Update(Url),

    /// Request check progress notification to client
    Status(WorkDoneProgress),
}
159
/// Commands sent from the owner of `CheckWatcher` to the worker thread.
pub enum CheckCommand {
    /// Request re-start of check thread
    Update,
}
164
/// State owned by the long-running watcher thread spawned by `CheckWatcher`.
struct CheckWatcherState {
    // Options used to (re)spawn the cargo sub-process.
    options: CheckOptions,
    // Root of the workspace whose Cargo.toml is checked.
    workspace_root: PathBuf,
    // Current cargo sub-process wrapper; replaced wholesale on re-check.
    watcher: WatchThread,
    // Time of the last `CheckCommand::Update`; `Some` means a re-check is due.
    last_update_req: Option<Instant>,
    // Diagnostics shared with the `CheckWatcher` owner.
    shared: Arc<RwLock<CheckWatcherSharedState>>,
}
172
173 impl CheckWatcherState {
174     pub fn new(
175         options: CheckOptions,
176         workspace_root: PathBuf,
177         shared: Arc<RwLock<CheckWatcherSharedState>>,
178     ) -> CheckWatcherState {
179         let watcher = WatchThread::new(&options, &workspace_root);
180         CheckWatcherState { options, workspace_root, watcher, last_update_req: None, shared }
181     }
182
183     pub fn run(&mut self, task_send: &Sender<CheckTask>, cmd_recv: &Receiver<CheckCommand>) {
184         loop {
185             select! {
186                 recv(&cmd_recv) -> cmd => match cmd {
187                     Ok(cmd) => self.handle_command(cmd),
188                     Err(RecvError) => {
189                         // Command channel has closed, so shut down
190                         break;
191                     },
192                 },
193                 recv(self.watcher.message_recv) -> msg => match msg {
194                     Ok(msg) => self.handle_message(msg, task_send),
195                     Err(RecvError) => {
196                         // Task channel has closed, so shut down
197                         break;
198                     },
199                 }
200             };
201
202             if self.should_recheck() {
203                 self.last_update_req.take();
204                 self.shared.write().clear(task_send);
205
206                 // By replacing the watcher, we drop the previous one which
207                 // causes it to shut down automatically.
208                 self.watcher = WatchThread::new(&self.options, &self.workspace_root);
209             }
210         }
211     }
212
213     fn should_recheck(&mut self) -> bool {
214         if let Some(_last_update_req) = &self.last_update_req {
215             // We currently only request an update on save, as we need up to
216             // date source on disk for cargo check to do it's magic, so we
217             // don't really need to debounce the requests at this point.
218             return true;
219         }
220         false
221     }
222
223     fn handle_command(&mut self, cmd: CheckCommand) {
224         match cmd {
225             CheckCommand::Update => self.last_update_req = Some(Instant::now()),
226         }
227     }
228
229     fn handle_message(&mut self, msg: CheckEvent, task_send: &Sender<CheckTask>) {
230         match msg {
231             CheckEvent::Begin => {
232                 task_send
233                     .send(CheckTask::Status(WorkDoneProgress::Begin(WorkDoneProgressBegin {
234                         title: "Running 'cargo check'".to_string(),
235                         cancellable: Some(false),
236                         message: None,
237                         percentage: None,
238                     })))
239                     .unwrap();
240             }
241
242             CheckEvent::End => {
243                 task_send
244                     .send(CheckTask::Status(WorkDoneProgress::End(WorkDoneProgressEnd {
245                         message: None,
246                     })))
247                     .unwrap();
248             }
249
250             CheckEvent::Msg(Message::CompilerArtifact(msg)) => {
251                 task_send
252                     .send(CheckTask::Status(WorkDoneProgress::Report(WorkDoneProgressReport {
253                         cancellable: Some(false),
254                         message: Some(msg.target.name),
255                         percentage: None,
256                     })))
257                     .unwrap();
258             }
259
260             CheckEvent::Msg(Message::CompilerMessage(msg)) => {
261                 let map_result =
262                     match map_rust_diagnostic_to_lsp(&msg.message, &self.workspace_root) {
263                         Some(map_result) => map_result,
264                         None => return,
265                     };
266
267                 let MappedRustDiagnostic { location, diagnostic, suggested_fixes } = map_result;
268                 let file_uri = location.uri.clone();
269
270                 if !suggested_fixes.is_empty() {
271                     for suggested_fix in suggested_fixes {
272                         self.shared
273                             .write()
274                             .add_suggested_fix_for_diagnostic(suggested_fix, &diagnostic);
275                     }
276                 }
277                 self.shared.write().add_diagnostic(file_uri, diagnostic);
278
279                 task_send.send(CheckTask::Update(location.uri)).unwrap();
280             }
281
282             CheckEvent::Msg(Message::BuildScriptExecuted(_msg)) => {}
283             CheckEvent::Msg(Message::Unknown) => {}
284         }
285     }
286 }
287
/// WatchThread exists to wrap around the communication needed to be able to
/// run `cargo check` without blocking. Currently the Rust standard library
/// doesn't provide a way to read sub-process output without blocking, so we
/// have to wrap sub-processes output handling in a thread and pass messages
/// back over a channel.
/// The correct way to dispose of the thread is to drop it, on which the
/// sub-process will be killed, and the thread will be joined.
struct WatchThread {
    /// Join handle of the output-reading thread; `None` once joined in `Drop`.
    handle: Option<JoinHandle<()>>,
    /// Receives `CheckEvent`s produced by the sub-process thread. Dropping it
    /// (see `Drop`) is what signals that thread to shut down.
    message_recv: Receiver<CheckEvent>,
}
299
/// Events produced by the cargo sub-process worker thread.
enum CheckEvent {
    /// The cargo process was spawned; output parsing is about to begin.
    Begin,
    /// One parsed message from cargo's `--message-format=json` output.
    Msg(cargo_metadata::Message),
    /// The cargo process' output stream is exhausted.
    End,
}
305
306 impl WatchThread {
307     fn new(options: &CheckOptions, workspace_root: &PathBuf) -> WatchThread {
308         let mut args: Vec<String> = vec![
309             options.command.clone(),
310             "--message-format=json".to_string(),
311             "--manifest-path".to_string(),
312             format!("{}/Cargo.toml", workspace_root.to_string_lossy()),
313         ];
314         if options.all_targets {
315             args.push("--all-targets".to_string());
316         }
317         args.extend(options.args.iter().cloned());
318
319         let (message_send, message_recv) = unbounded();
320         let enabled = options.enable;
321         let handle = std::thread::spawn(move || {
322             if !enabled {
323                 return;
324             }
325
326             let mut command = Command::new("cargo")
327                 .args(&args)
328                 .stdout(Stdio::piped())
329                 .stderr(Stdio::null())
330                 .spawn()
331                 .expect("couldn't launch cargo");
332
333             // If we trigger an error here, we will do so in the loop instead,
334             // which will break out of the loop, and continue the shutdown
335             let _ = message_send.send(CheckEvent::Begin);
336
337             for message in cargo_metadata::parse_messages(command.stdout.take().unwrap()) {
338                 let message = match message {
339                     Ok(message) => message,
340                     Err(err) => {
341                         log::error!("Invalid json from cargo check, ignoring: {}", err);
342                         continue;
343                     }
344                 };
345
346                 match message_send.send(CheckEvent::Msg(message)) {
347                     Ok(()) => {}
348                     Err(_err) => {
349                         // The send channel was closed, so we want to shutdown
350                         break;
351                     }
352                 }
353             }
354
355             // We can ignore any error here, as we are already in the progress
356             // of shutting down.
357             let _ = message_send.send(CheckEvent::End);
358
359             // It is okay to ignore the result, as it only errors if the process is already dead
360             let _ = command.kill();
361
362             // Again, we don't care about the exit status so just ignore the result
363             let _ = command.wait();
364         });
365         WatchThread { handle: Some(handle), message_recv }
366     }
367 }
368
369 impl std::ops::Drop for WatchThread {
370     fn drop(&mut self) {
371         if let Some(handle) = self.handle.take() {
372             // Replace our reciever with dummy one, so we can drop and close the
373             // one actually communicating with the thread
374             let recv = std::mem::replace(&mut self.message_recv, crossbeam_channel::never());
375
376             // Dropping the original reciever initiates thread sub-process shutdown
377             drop(recv);
378
379             // Join the thread, it should finish shortly. We don't really care
380             // whether it panicked, so it is safe to ignore the result
381             let _ = handle.join();
382         }
383     }
384 }
385
386 fn are_diagnostics_equal(left: &Diagnostic, right: &Diagnostic) -> bool {
387     left.source == right.source
388         && left.severity == right.severity
389         && left.range == right.range
390         && left.message == right.message
391 }