Add toggle to disable cache priming
diff --git a/crates/rust-analyzer/src/main_loop.rs b/crates/rust-analyzer/src/main_loop.rs
index 38e0e9c65b36a113f82d941320ece4fddf731b18..979b3b1522159679d2c1de1d81e5ca0bf2c9453e 100644
--- a/crates/rust-analyzer/src/main_loop.rs
+++ b/crates/rust-analyzer/src/main_loop.rs
@@ -21,7 +21,7 @@
     handlers, lsp_ext,
     lsp_utils::{apply_document_changes, is_cancelled, notification_is, Progress},
     mem_docs::DocumentData,
-    reload::{BuildDataProgress, ProjectWorkspaceProgress},
+    reload::{self, BuildDataProgress, ProjectWorkspaceProgress},
     Result,
 };
 
@@ -112,7 +112,7 @@ fn run(mut self, inbox: Receiver<lsp_server::Message>) -> Result<()> {
             && self.config.notifications().cargo_toml_not_found
         {
             self.show_message(
-                lsp_types::MessageType::Error,
+                lsp_types::MessageType::ERROR,
                 "rust-analyzer failed to discover workspace".to_string(),
             );
         };
@@ -152,8 +152,10 @@ fn run(mut self, inbox: Receiver<lsp_server::Message>) -> Result<()> {
             );
         }
 
-        self.fetch_workspaces_request();
-        self.fetch_workspaces_if_needed();
+        self.fetch_workspaces_queue.request_op();
+        if self.fetch_workspaces_queue.should_start_op() {
+            self.fetch_workspaces();
+        }
 
         while let Some(event) = self.next_event(&inbox) {
             if let Event::Lsp(lsp_server::Message::Notification(not)) = &event {
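The old `fetch_workspaces_request`/`fetch_workspaces_if_needed` wrappers are replaced by direct calls on the queue: `request_op` files a request and `should_start_op` decides whether to actually start the work. As a hedged sketch of that pattern (the real queue type in rust-analyzer, an `OpQueue`, may differ in detail, e.g. it also keeps the last operation result):

```rust
// Minimal sketch of the request/start/complete queue protocol used above.
#[derive(Default)]
pub(crate) struct OpQueue<Output> {
    op_requested: bool,     // set by `request_op`
    op_in_progress: bool,   // set by `should_start_op`, cleared by `op_completed`
    last_op_result: Output, // stored by `op_completed`
}

impl<Output: Default> OpQueue<Output> {
    pub(crate) fn request_op(&mut self) {
        self.op_requested = true;
    }

    /// Returns `true` at most once per request, and only while no operation
    /// of this kind is already running.
    pub(crate) fn should_start_op(&mut self) -> bool {
        if self.op_in_progress || !self.op_requested {
            return false;
        }
        self.op_requested = false;
        self.op_in_progress = true;
        true
    }

    pub(crate) fn op_completed(&mut self, result: Output) {
        assert!(self.op_in_progress);
        self.op_in_progress = false;
        self.last_op_result = result;
    }
}
```

Separating "request" from "start" is what lets the main loop defer all heavy work to a single dispatch point at the end of `handle_event` (see the later hunks), while still guaranteeing that only one operation of each kind runs at a time.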
@@ -164,7 +166,7 @@ fn run(mut self, inbox: Receiver<lsp_server::Message>) -> Result<()> {
             self.handle_event(event)?
         }
 
-        Err("client exited without proper shutdown sequence")?
+        return Err("client exited without proper shutdown sequence".into());
     }
 
     fn next_event(&self, inbox: &Receiver<lsp_server::Message>) -> Option<Event> {
@@ -234,14 +236,14 @@ fn handle_event(&mut self, event: Event) -> Result<()> {
                                     (Progress::Report, Some(msg))
                                 }
                                 ProjectWorkspaceProgress::End(workspaces) => {
-                                    self.fetch_workspaces_completed(workspaces);
+                                    self.fetch_workspaces_queue.op_completed(workspaces);
 
                                     let old = Arc::clone(&self.workspaces);
                                     self.switch_workspaces();
                                     let workspaces_updated = !Arc::ptr_eq(&old, &self.workspaces);
 
                                     if self.config.run_build_scripts() && workspaces_updated {
-                                        self.fetch_build_data_request()
+                                        self.fetch_build_data_queue.request_op()
                                     }
 
                                     (Progress::End, None)
@@ -257,7 +259,7 @@ fn handle_event(&mut self, event: Event) -> Result<()> {
                                     (Some(Progress::Report), Some(msg))
                                 }
                                 BuildDataProgress::End(build_data_result) => {
-                                    self.fetch_build_data_completed(build_data_result);
+                                    self.fetch_build_data_queue.op_completed(build_data_result);
 
                                     self.switch_workspaces();
 
@@ -294,12 +296,15 @@ fn handle_event(&mut self, event: Event) -> Result<()> {
                             ));
                             fraction = Progress::fraction(report.n_done, report.n_total);
                         }
-                        PrimeCachesProgress::End { cancelled: _ } => {
+                        PrimeCachesProgress::End { cancelled } => {
                             state = Progress::End;
                             message = None;
                             fraction = 1.0;
 
                             self.prime_caches_queue.op_completed(());
+                            if cancelled {
+                                self.prime_caches_queue.request_op();
+                            }
                         }
                     };
 
@@ -389,7 +394,10 @@ fn handle_event(&mut self, event: Event) -> Result<()> {
                                 flycheck::Progress::DidCancel => (Progress::End, None),
                                 flycheck::Progress::DidFinish(result) => {
                                     if let Err(err) = result {
-                                        tracing::error!("cargo check failed: {}", err)
+                                        self.show_message(
+                                            lsp_types::MessageType::ERROR,
+                                            format!("cargo check failed: {}", err),
+                                        );
                                     }
                                     (Progress::End, None)
                                 }
@@ -398,7 +406,10 @@ fn handle_event(&mut self, event: Event) -> Result<()> {
                             // When we're running multiple flychecks, we have to include a disambiguator in
                             // the title, or the editor complains. Note that this is a user-facing string.
                             let title = if self.flycheck.len() == 1 {
-                                "cargo check".to_string()
+                                match self.config.flycheck() {
+                                    Some(config) => format!("{}", config),
+                                    None => "cargo check".to_string(),
+                                }
                             } else {
                                 format!("cargo check (#{})", id + 1)
                             };
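When a single flycheck is running, the progress title now comes from the flycheck configuration's `Display` rendering instead of the hard-coded `"cargo check"`, so a custom override command shows up in the editor. For illustration only (the real `FlycheckConfig` lives in the flycheck crate and may render differently), such a `Display` impl could look like:

```rust
use std::fmt;

// Illustrative sketch: distinguishes a cargo-based check from a custom command.
enum FlycheckConfig {
    CargoCommand { command: String },
    CustomCommand { command: String },
}

impl fmt::Display for FlycheckConfig {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            FlycheckConfig::CargoCommand { command } => write!(f, "cargo {}", command),
            FlycheckConfig::CustomCommand { command } => write!(f, "{}", command),
        }
    }
}
```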
@@ -422,29 +433,12 @@ fn handle_event(&mut self, event: Event) -> Result<()> {
                 for flycheck in &self.flycheck {
                     flycheck.update();
                 }
+                if self.config.prefill_caches() {
+                    self.prime_caches_queue.request_op();
+                }
             }
 
             if !was_quiescent || state_changed {
-                // Ensure that only one cache priming task can run at a time
-                self.prime_caches_queue.request_op();
-                if self.prime_caches_queue.should_start_op() {
-                    self.task_pool.handle.spawn_with_sender({
-                        let analysis = self.snapshot().analysis;
-                        move |sender| {
-                            sender.send(Task::PrimeCaches(PrimeCachesProgress::Begin)).unwrap();
-                            let res = analysis.prime_caches(|progress| {
-                                let report = PrimeCachesProgress::Report(progress);
-                                sender.send(Task::PrimeCaches(report)).unwrap();
-                            });
-                            sender
-                                .send(Task::PrimeCaches(PrimeCachesProgress::End {
-                                    cancelled: res.is_err(),
-                                }))
-                                .unwrap();
-                        }
-                    });
-                }
-
                 // Refresh semantic tokens if the client supports it.
                 if self.config.semantic_tokens_refresh() {
                     self.semantic_tokens_cache.lock().clear();
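This hunk is the toggle itself: once the VFS settles, cache priming is no longer spawned inline; the loop merely files a request, and only if the new `prefill_caches()` config accessor returns true. The spawn moves to the end of `handle_event` (next hunk). A minimal sketch of such an accessor, with an illustrative field name since the actual setting key is not visible in this diff:

```rust
// Hypothetical shape of the config accessor used above; the real Config in
// rust-analyzer is generated from the settings schema.
pub(crate) struct Config {
    cache_priming_enabled: bool, // illustrative name for the client-facing setting
}

impl Config {
    pub(crate) fn prefill_caches(&self) -> bool {
        self.cache_priming_enabled
    }
}
```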
@@ -490,11 +484,43 @@ fn handle_event(&mut self, event: Event) -> Result<()> {
         }
 
         if self.config.cargo_autoreload() {
-            self.fetch_workspaces_if_needed();
+            if self.fetch_workspaces_queue.should_start_op() {
+                self.fetch_workspaces();
+            }
         }
-        self.fetch_build_data_if_needed();
+        if self.fetch_build_data_queue.should_start_op() {
+            self.fetch_build_data();
+        }
+        if self.prime_caches_queue.should_start_op() {
+            self.task_pool.handle.spawn_with_sender({
+                let analysis = self.snapshot().analysis;
+                move |sender| {
+                    sender.send(Task::PrimeCaches(PrimeCachesProgress::Begin)).unwrap();
+                    let res = analysis.prime_caches(|progress| {
+                        let report = PrimeCachesProgress::Report(progress);
+                        sender.send(Task::PrimeCaches(report)).unwrap();
+                    });
+                    sender
+                        .send(Task::PrimeCaches(PrimeCachesProgress::End {
+                            cancelled: res.is_err(),
+                        }))
+                        .unwrap();
+                }
+            });
+        }
+
+        let status = self.current_status();
+        if self.last_reported_status.as_ref() != Some(&status) {
+            self.last_reported_status = Some(status.clone());
 
-        self.report_new_status_if_needed();
+            if let (lsp_ext::Health::Error, Some(message)) = (status.health, &status.message) {
+                self.show_message(lsp_types::MessageType::ERROR, message.clone());
+            }
+
+            if self.config.server_status_notification() {
+                self.send_notification::<lsp_ext::ServerStatusNotification>(status);
+            }
+        }
 
         let loop_duration = loop_start.elapsed();
         if loop_duration > Duration::from_millis(100) {
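All deferred work is now started from one place at the end of `handle_event`: each queue is polled with `should_start_op`, and the server status notification is sent only when it differs from the last one reported. That deduplication step follows a small generic pattern, sketched here for illustration (rust-analyzer inlines it rather than using a helper):

```rust
// Generic "report only on change" helper mirroring the status logic above.
fn report_if_changed<T: PartialEq>(
    last_reported: &mut Option<T>,
    current: T,
    mut send: impl FnMut(&T),
) {
    if last_reported.as_ref() != Some(&current) {
        send(&current);
        *last_reported = Some(current);
    }
}

fn main() {
    let mut last = None;
    for status in ["ok", "ok", "warning"] {
        report_if_changed(&mut last, status, |s| println!("notify client: {s}"));
    }
    // Prints "ok" and "warning" once each; the duplicate "ok" is suppressed.
}
```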
@@ -533,15 +559,14 @@ fn on_request(&mut self, request_received: Instant, req: Request) -> Result<()>
 
         RequestDispatcher { req: Some(req), global_state: self }
             .on_sync_mut::<lsp_ext::ReloadWorkspace>(|s, ()| {
-                s.fetch_workspaces_request();
-                s.fetch_workspaces_if_needed();
+                s.fetch_workspaces_queue.request_op();
                 Ok(())
             })?
             .on_sync_mut::<lsp_types::request::Shutdown>(|s, ()| {
                 s.shutdown_requested = true;
                 Ok(())
             })?
-            .on_sync_mut::<lsp_ext::MemoryUsage>(|s, p| handlers::handle_memory_usage(s, p))?
+            .on_sync_mut::<lsp_ext::MemoryUsage>(handlers::handle_memory_usage)?
             .on_sync::<lsp_ext::JoinLines>(handlers::handle_join_lines)?
             .on_sync::<lsp_ext::OnEnter>(handlers::handle_on_enter)?
             .on_sync::<lsp_types::request::SelectionRangeRequest>(handlers::handle_selection_range)?
@@ -643,7 +668,7 @@ fn on_notification(&mut self, not: Notification) -> Result<()> {
                             doc.version = params.text_document.version;
                         }
                         None => {
-                            tracing::error!("expected DidChangeTextDocument: {}", path);
+                            tracing::error!("unexpected DidChangeTextDocument: {}; send DidOpenTextDocument first", path);
                             return Ok(());
                         }
                     };
@@ -676,7 +701,9 @@ fn on_notification(&mut self, not: Notification) -> Result<()> {
                     flycheck.update();
                 }
                 if let Ok(abs_path) = from_proto::abs_path(&params.text_document.uri) {
-                    this.maybe_refresh(&[(abs_path, ChangeKind::Modify)]);
+                    if reload::should_refresh_for_change(&abs_path, ChangeKind::Modify) {
+                        this.fetch_workspaces_queue.request_op();
+                    }
                 }
                 Ok(())
             })?
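The blanket `maybe_refresh` call on save is replaced by an explicit predicate: a saved file queues a workspace re-fetch only if it can actually change the project layout. A hedged sketch of what such a predicate checks (the real `reload::should_refresh_for_change` covers more cases, e.g. build scripts and `rust-toolchain` files):

```rust
use std::path::Path;

// Illustrative ChangeKind; the real one comes from the vfs crate.
#[derive(Clone, Copy)]
enum ChangeKind {
    Create,
    Modify,
    Delete,
}

// Sketch only: returns true for files whose changes may invalidate the
// cargo workspace layout.
fn should_refresh_for_change(path: &Path, _change_kind: ChangeKind) -> bool {
    matches!(
        path.file_name().and_then(|name| name.to_str()),
        Some("Cargo.toml") | Some("Cargo.lock")
    )
}
```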
@@ -747,7 +774,6 @@ fn update_diagnostics(&mut self) {
                             if !is_cancelled(&*err) {
                                 tracing::error!("failed to compute diagnostics: {:?}", err);
                             }
-                            ()
                         })
                         .ok()
                         .map(|diags| (file_id, diags))