Add toggle to disable cache priming
diff --git a/crates/rust-analyzer/src/main_loop.rs b/crates/rust-analyzer/src/main_loop.rs
index 0518a17f3073885f31ce24d23dadd67a9a9b02c7..979b3b1522159679d2c1de1d81e5ca0bf2c9453e 100644
--- a/crates/rust-analyzer/src/main_loop.rs
+++ b/crates/rust-analyzer/src/main_loop.rs
@@ -1,18 +1,17 @@
 //! The main loop of `rust-analyzer` responsible for dispatching LSP
 //! requests/replies and notifications back to the client.
 use std::{
-    env, fmt,
+    fmt,
     sync::Arc,
     time::{Duration, Instant},
 };
 
 use always_assert::always;
 use crossbeam_channel::{select, Receiver};
-use ide::{FileId, PrimeCachesProgress};
-use ide_db::base_db::VfsPath;
+use ide_db::base_db::{SourceDatabaseExt, VfsPath};
 use lsp_server::{Connection, Notification, Request};
 use lsp_types::notification::Notification as _;
-use vfs::ChangeKind;
+use vfs::{ChangeKind, FileId};
 
 use crate::{
     config::Config,
     handlers, lsp_ext,
     lsp_utils::{apply_document_changes, is_cancelled, notification_is, Progress},
     mem_docs::DocumentData,
-    reload::{BuildDataProgress, ProjectWorkspaceProgress},
+    reload::{self, BuildDataProgress, ProjectWorkspaceProgress},
     Result,
 };
 
 pub fn main_loop(config: Config, connection: Connection) -> Result<()> {
-    log::info!("initial config: {:#?}", config);
+    tracing::info!("initial config: {:#?}", config);
 
     // Windows scheduler implements priority boosts: if thread waits for an
     // event (like a condvar), and event fires, priority of the thread is
@@ -67,6 +66,13 @@ pub(crate) enum Task {
     FetchBuildData(BuildDataProgress),
 }
 
+#[derive(Debug)]
+pub(crate) enum PrimeCachesProgress {
+    Begin,
+    Report(ide::PrimeCachesProgress),
+    End { cancelled: bool },
+}
+
 impl fmt::Debug for Event {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         let debug_verbose_not = |not: &Notification, f: &mut fmt::Formatter| {
@@ -106,7 +112,7 @@ fn run(mut self, inbox: Receiver<lsp_server::Message>) -> Result<()> {
             && self.config.notifications().cargo_toml_not_found
         {
             self.show_message(
-                lsp_types::MessageType::Error,
+                lsp_types::MessageType::ERROR,
                 "rust-analyzer failed to discover workspace".to_string(),
             );
         };
@@ -146,8 +152,10 @@ fn run(mut self, inbox: Receiver<lsp_server::Message>) -> Result<()> {
             );
         }
 
-        self.fetch_workspaces_request();
-        self.fetch_workspaces_if_needed();
+        self.fetch_workspaces_queue.request_op();
+        if self.fetch_workspaces_queue.should_start_op() {
+            self.fetch_workspaces();
+        }
 
         while let Some(event) = self.next_event(&inbox) {
             if let Event::Lsp(lsp_server::Message::Notification(not)) = &event {
@@ -158,7 +166,7 @@ fn run(mut self, inbox: Receiver<lsp_server::Message>) -> Result<()> {
             self.handle_event(event)?
         }
 
-        Err("client exited without proper shutdown sequence")?
+        return Err("client exited without proper shutdown sequence".into());
     }
 
     fn next_event(&self, inbox: &Receiver<lsp_server::Message>) -> Option<Event> {
@@ -182,10 +190,10 @@ fn handle_event(&mut self, event: Event) -> Result<()> {
         // NOTE: don't count blocking select! call as a loop-turn time
         let _p = profile::span("GlobalState::handle_event");
 
-        log::info!("handle_event({:?})", event);
+        tracing::info!("handle_event({:?})", event);
         let task_queue_len = self.task_pool.handle.len();
         if task_queue_len > 0 {
-            log::info!("task queue len: {}", task_queue_len);
+            tracing::info!("task queue len: {}", task_queue_len);
         }
 
         let was_quiescent = self.is_quiescent();
@@ -209,17 +217,17 @@ fn handle_event(&mut self, event: Event) -> Result<()> {
                             }
                         }
                         Task::PrimeCaches(progress) => match progress {
-                            PrimeCachesProgress::Started => prime_caches_progress.push(progress),
-                            PrimeCachesProgress::StartedOnCrate { .. } => {
+                            PrimeCachesProgress::Begin => prime_caches_progress.push(progress),
+                            PrimeCachesProgress::Report(_) => {
                                 match prime_caches_progress.last_mut() {
-                                    Some(last @ PrimeCachesProgress::StartedOnCrate { .. }) => {
+                                    Some(last @ PrimeCachesProgress::Report(_)) => {
                                         // Coalesce subsequent update events.
                                         *last = progress;
                                     }
                                     _ => prime_caches_progress.push(progress),
                                 }
                             }
-                            PrimeCachesProgress::Finished => prime_caches_progress.push(progress),
+                            PrimeCachesProgress::End { .. } => prime_caches_progress.push(progress),
                         },
                         Task::FetchWorkspace(progress) => {
                             let (state, msg) = match progress {
@@ -228,14 +236,14 @@ fn handle_event(&mut self, event: Event) -> Result<()> {
                                     (Progress::Report, Some(msg))
                                 }
                                 ProjectWorkspaceProgress::End(workspaces) => {
-                                    self.fetch_workspaces_completed(workspaces);
+                                    self.fetch_workspaces_queue.op_completed(workspaces);
 
                                     let old = Arc::clone(&self.workspaces);
                                     self.switch_workspaces();
                                     let workspaces_updated = !Arc::ptr_eq(&old, &self.workspaces);
 
                                     if self.config.run_build_scripts() && workspaces_updated {
-                                        self.fetch_build_data_request()
+                                        self.fetch_build_data_queue.request_op()
                                     }
 
                                     (Progress::End, None)
@@ -251,7 +259,7 @@ fn handle_event(&mut self, event: Event) -> Result<()> {
                                     (Some(Progress::Report), Some(msg))
                                 }
                                 BuildDataProgress::End(build_data_result) => {
-                                    self.fetch_build_data_completed(build_data_result);
+                                    self.fetch_build_data_queue.op_completed(build_data_result);
 
                                     self.switch_workspaces();
 
@@ -275,22 +283,28 @@ fn handle_event(&mut self, event: Event) -> Result<()> {
                 for progress in prime_caches_progress {
                     let (state, message, fraction);
                     match progress {
-                        PrimeCachesProgress::Started => {
+                        PrimeCachesProgress::Begin => {
                             state = Progress::Begin;
                             message = None;
                             fraction = 0.0;
                         }
-                        PrimeCachesProgress::StartedOnCrate { on_crate, n_done, n_total } => {
+                        PrimeCachesProgress::Report(report) => {
                             state = Progress::Report;
-                            message = Some(format!("{}/{} ({})", n_done, n_total, on_crate));
-                            fraction = Progress::fraction(n_done, n_total);
+                            message = Some(format!(
+                                "{}/{} ({})",
+                                report.n_done, report.n_total, report.on_crate
+                            ));
+                            fraction = Progress::fraction(report.n_done, report.n_total);
                         }
-                        PrimeCachesProgress::Finished => {
+                        PrimeCachesProgress::End { cancelled } => {
                             state = Progress::End;
                             message = None;
                             fraction = 1.0;
 
                             self.prime_caches_queue.op_completed(());
+                            if cancelled {
+                                self.prime_caches_queue.request_op();
+                            }
                         }
                     };
 
@@ -359,7 +373,7 @@ fn handle_event(&mut self, event: Event) -> Result<()> {
                                         diag.fixes,
                                     ),
                                     Err(err) => {
-                                        log::error!(
+                                        tracing::error!(
                                             "File with cargo diagnostic not found in VFS: {}",
                                             err
                                         );
@@ -380,7 +394,10 @@ fn handle_event(&mut self, event: Event) -> Result<()> {
                                 flycheck::Progress::DidCancel => (Progress::End, None),
                                 flycheck::Progress::DidFinish(result) => {
                                     if let Err(err) = result {
-                                        log::error!("cargo check failed: {}", err)
+                                        self.show_message(
+                                            lsp_types::MessageType::ERROR,
+                                            format!("cargo check failed: {}", err),
+                                        );
                                     }
                                     (Progress::End, None)
                                 }
@@ -389,7 +406,10 @@ fn handle_event(&mut self, event: Event) -> Result<()> {
                             // When we're running multiple flychecks, we have to include a disambiguator in
                             // the title, or the editor complains. Note that this is a user-facing string.
                             let title = if self.flycheck.len() == 1 {
-                                "cargo check".to_string()
+                                match self.config.flycheck() {
+                                    Some(config) => format!("{}", config),
+                                    None => "cargo check".to_string(),
+                                }
                             } else {
                                 format!("cargo check (#{})", id + 1)
                             };
@@ -406,30 +426,51 @@ fn handle_event(&mut self, event: Event) -> Result<()> {
         }
 
         let state_changed = self.process_changes();
+        let memdocs_added_or_removed = self.mem_docs.take_changes();
 
-        if self.is_quiescent() && !was_quiescent {
-            for flycheck in &self.flycheck {
-                flycheck.update();
+        if self.is_quiescent() {
+            if !was_quiescent {
+                for flycheck in &self.flycheck {
+                    flycheck.update();
+                }
+                if self.config.prefill_caches() {
+                    self.prime_caches_queue.request_op();
+                }
             }
-        }
 
-        if self.is_quiescent() && (!was_quiescent || state_changed) {
-            self.update_file_notifications_on_threadpool();
+            if !was_quiescent || state_changed {
+                // Refresh semantic tokens if the client supports it.
+                if self.config.semantic_tokens_refresh() {
+                    self.semantic_tokens_cache.lock().clear();
+                    self.send_request::<lsp_types::request::SemanticTokensRefesh>((), |_, _| ());
+                }
 
-            // Refresh semantic tokens if the client supports it.
-            if self.config.semantic_tokens_refresh() {
-                self.semantic_tokens_cache.lock().clear();
-                self.send_request::<lsp_types::request::SemanticTokensRefesh>((), |_, _| ());
+                // Refresh code lens if the client supports it.
+                if self.config.code_lens_refresh() {
+                    self.send_request::<lsp_types::request::CodeLensRefresh>((), |_, _| ());
+                }
             }
 
-            // Refresh code lens if the client supports it.
-            if self.config.code_lens_refresh() {
-                self.send_request::<lsp_types::request::CodeLensRefresh>((), |_, _| ());
+            if !was_quiescent || state_changed || memdocs_added_or_removed {
+                if self.config.publish_diagnostics() {
+                    self.update_diagnostics()
+                }
             }
         }
 
         if let Some(diagnostic_changes) = self.diagnostics.take_changes() {
             for file_id in diagnostic_changes {
+                let db = self.analysis_host.raw_database();
+                let source_root = db.file_source_root(file_id);
+                if db.source_root(source_root).is_library {
+                    // Only publish diagnostics for files in the workspace, not from crates.io deps
+                    // or the sysroot.
+                    // While theoretically these should never have errors, we have quite a few false
+                    // positives particularly in the stdlib, and those diagnostics would stay around
+                    // forever if we emitted them here.
+                    continue;
+                }
+
                 let url = file_id_to_url(&self.vfs.read().0, file_id);
                 let diagnostics = self.diagnostics.diagnostics_for(file_id).cloned().collect();
                 let version = from_proto::vfs_path(&url)
@@ -443,21 +484,51 @@ fn handle_event(&mut self, event: Event) -> Result<()> {
         }
 
         if self.config.cargo_autoreload() {
-            self.fetch_workspaces_if_needed();
+            if self.fetch_workspaces_queue.should_start_op() {
+                self.fetch_workspaces();
+            }
         }
-        self.fetch_build_data_if_needed();
+        if self.fetch_build_data_queue.should_start_op() {
+            self.fetch_build_data();
+        }
+        if self.prime_caches_queue.should_start_op() {
+            self.task_pool.handle.spawn_with_sender({
+                let analysis = self.snapshot().analysis;
+                move |sender| {
+                    sender.send(Task::PrimeCaches(PrimeCachesProgress::Begin)).unwrap();
+                    let res = analysis.prime_caches(|progress| {
+                        let report = PrimeCachesProgress::Report(progress);
+                        sender.send(Task::PrimeCaches(report)).unwrap();
+                    });
+                    sender
+                        .send(Task::PrimeCaches(PrimeCachesProgress::End {
+                            cancelled: res.is_err(),
+                        }))
+                        .unwrap();
+                }
+            });
+        }
+
+        let status = self.current_status();
+        if self.last_reported_status.as_ref() != Some(&status) {
+            self.last_reported_status = Some(status.clone());
 
-        self.report_new_status_if_needed();
+            if let (lsp_ext::Health::Error, Some(message)) = (status.health, &status.message) {
+                self.show_message(lsp_types::MessageType::ERROR, message.clone());
+            }
+
+            if self.config.server_status_notification() {
+                self.send_notification::<lsp_ext::ServerStatusNotification>(status);
+            }
+        }
 
         let loop_duration = loop_start.elapsed();
         if loop_duration > Duration::from_millis(100) {
-            log::warn!("overly long loop turn: {:?}", loop_duration);
-            if env::var("RA_PROFILE").is_ok() {
-                self.show_message(
-                    lsp_types::MessageType::Error,
-                    format!("overly long loop turn: {:?}", loop_duration),
-                )
-            }
+            tracing::warn!("overly long loop turn: {:?}", loop_duration);
+            self.poke_rust_analyzer_developer(format!(
+                "overly long loop turn: {:?}",
+                loop_duration
+            ));
         }
         Ok(())
     }
@@ -487,24 +558,19 @@ fn on_request(&mut self, request_received: Instant, req: Request) -> Result<()>
         }
 
         RequestDispatcher { req: Some(req), global_state: self }
-            .on_sync::<lsp_ext::ReloadWorkspace>(|s, ()| {
-                s.fetch_workspaces_request();
-                s.fetch_workspaces_if_needed();
+            .on_sync_mut::<lsp_ext::ReloadWorkspace>(|s, ()| {
+                s.fetch_workspaces_queue.request_op();
                 Ok(())
             })?
-            .on_sync::<lsp_ext::JoinLines>(|s, p| handlers::handle_join_lines(s.snapshot(), p))?
-            .on_sync::<lsp_ext::OnEnter>(|s, p| handlers::handle_on_enter(s.snapshot(), p))?
-            .on_sync::<lsp_types::request::Shutdown>(|s, ()| {
+            .on_sync_mut::<lsp_types::request::Shutdown>(|s, ()| {
                 s.shutdown_requested = true;
                 Ok(())
             })?
-            .on_sync::<lsp_types::request::SelectionRangeRequest>(|s, p| {
-                handlers::handle_selection_range(s.snapshot(), p)
-            })?
-            .on_sync::<lsp_ext::MatchingBrace>(|s, p| {
-                handlers::handle_matching_brace(s.snapshot(), p)
-            })?
-            .on_sync::<lsp_ext::MemoryUsage>(|s, p| handlers::handle_memory_usage(s, p))?
+            .on_sync_mut::<lsp_ext::MemoryUsage>(handlers::handle_memory_usage)?
+            .on_sync::<lsp_ext::JoinLines>(handlers::handle_join_lines)?
+            .on_sync::<lsp_ext::OnEnter>(handlers::handle_on_enter)?
+            .on_sync::<lsp_types::request::SelectionRangeRequest>(handlers::handle_selection_range)?
+            .on_sync::<lsp_ext::MatchingBrace>(handlers::handle_matching_brace)?
             .on::<lsp_ext::AnalyzerStatus>(handlers::handle_analyzer_status)
             .on::<lsp_ext::SyntaxTree>(handlers::handle_syntax_tree)
             .on::<lsp_ext::ViewHir>(handlers::handle_view_hir)
@@ -584,52 +650,42 @@ fn on_notification(&mut self, not: Notification) -> Result<()> {
                         .insert(path.clone(), DocumentData::new(params.text_document.version))
                         .is_err()
                     {
-                        log::error!("duplicate DidOpenTextDocument: {}", path)
+                        tracing::error!("duplicate DidOpenTextDocument: {}", path)
                     }
-                    let changed = this
-                        .vfs
+                    this.vfs
                         .write()
                         .0
                         .set_file_contents(path, Some(params.text_document.text.into_bytes()));
-
-                    // If the VFS contents are unchanged, update diagnostics, since `handle_event`
-                    // won't see any changes. This avoids missing diagnostics when opening a file.
-                    //
-                    // If the file *was* changed, `handle_event` will already recompute and send
-                    // diagnostics. We can't do it here, since the *current* file contents might be
-                    // unset in salsa, since the VFS change hasn't been applied to the database yet.
-                    if !changed {
-                        this.maybe_update_diagnostics();
-                    }
                 }
                 Ok(())
             })?
             .on::<lsp_types::notification::DidChangeTextDocument>(|this, params| {
                 if let Ok(path) = from_proto::vfs_path(&params.text_document.uri) {
-                    let doc = match this.mem_docs.get_mut(&path) {
-                        Some(doc) => doc,
+                    match this.mem_docs.get_mut(&path) {
+                        Some(doc) => {
+                            // The version passed in DidChangeTextDocument is the version after all edits are applied
+                            // so we should apply it before the vfs is notified.
+                            doc.version = params.text_document.version;
+                        }
                         None => {
-                            log::error!("expected DidChangeTextDocument: {}", path);
+                            tracing::error!("unexpected DidChangeTextDocument: {}; send DidOpenTextDocument first", path);
                             return Ok(());
                         }
                     };
+
                     let vfs = &mut this.vfs.write().0;
                     let file_id = vfs.file_id(&path).unwrap();
                     let mut text = String::from_utf8(vfs.file_contents(file_id).to_vec()).unwrap();
                     apply_document_changes(&mut text, params.content_changes);
 
-                    // The version passed in DidChangeTextDocument is the version after all edits are applied
-                    // so we should apply it before the vfs is notified.
-                    doc.version = params.text_document.version;
-
-                    vfs.set_file_contents(path.clone(), Some(text.into_bytes()));
+                    vfs.set_file_contents(path, Some(text.into_bytes()));
                 }
                 Ok(())
             })?
             .on::<lsp_types::notification::DidCloseTextDocument>(|this, params| {
                 if let Ok(path) = from_proto::vfs_path(&params.text_document.uri) {
                     if this.mem_docs.remove(&path).is_err() {
-                        log::error!("orphan DidCloseTextDocument: {}", path);
+                        tracing::error!("orphan DidCloseTextDocument: {}", path);
                     }
 
                     this.semantic_tokens_cache.lock().remove(&params.text_document.uri);
@@ -645,7 +701,9 @@ fn on_notification(&mut self, not: Notification) -> Result<()> {
                     flycheck.update();
                 }
                 if let Ok(abs_path) = from_proto::abs_path(&params.text_document.uri) {
-                    this.maybe_refresh(&[(abs_path, ChangeKind::Modify)]);
+                    if reload::should_refresh_for_change(&abs_path, ChangeKind::Modify) {
+                        this.fetch_workspaces_queue.request_op();
+                    }
                 }
                 Ok(())
             })?
@@ -660,12 +718,12 @@ fn on_notification(&mut self, not: Notification) -> Result<()> {
                         }],
                     },
                     |this, resp| {
-                        log::debug!("config update response: '{:?}", resp);
+                        tracing::debug!("config update response: '{:?}", resp);
                         let lsp_server::Response { error, result, .. } = resp;
 
                         match (error, result) {
                             (Some(err), _) => {
-                                log::error!("failed to fetch the server settings: {:?}", err)
+                                tracing::error!("failed to fetch the server settings: {:?}", err)
                             }
                             (None, Some(mut configs)) => {
                                 if let Some(json) = configs.get_mut(0) {
@@ -676,7 +734,7 @@ fn on_notification(&mut self, not: Notification) -> Result<()> {
                                     this.update_configuration(config);
                                 }
                             }
-                            (None, None) => log::error!(
+                            (None, None) => tracing::error!(
                                 "received empty server settings response from the client"
                             ),
                         }
@@ -696,53 +754,32 @@ fn on_notification(&mut self, not: Notification) -> Result<()> {
             .finish();
         Ok(())
     }
-    fn update_file_notifications_on_threadpool(&mut self) {
-        self.maybe_update_diagnostics();
 
-        // Ensure that only one cache priming task can run at a time
-        self.prime_caches_queue.request_op();
-        if self.prime_caches_queue.should_start_op() {
-            self.task_pool.handle.spawn_with_sender({
-                let snap = self.snapshot();
-                move |sender| {
-                    let cb = |progress| {
-                        sender.send(Task::PrimeCaches(progress)).unwrap();
-                    };
-                    match snap.analysis.prime_caches(cb) {
-                        Ok(()) => (),
-                        Err(_canceled) => (),
-                    }
-                }
-            });
-        }
-    }
-    fn maybe_update_diagnostics(&mut self) {
+    fn update_diagnostics(&mut self) {
         let subscriptions = self
             .mem_docs
             .iter()
             .map(|path| self.vfs.read().0.file_id(path).unwrap())
             .collect::<Vec<_>>();
 
-        log::trace!("updating notifications for {:?}", subscriptions);
-        if self.config.publish_diagnostics() {
-            let snapshot = self.snapshot();
-            self.task_pool.handle.spawn(move || {
-                let diagnostics = subscriptions
-                    .into_iter()
-                    .filter_map(|file_id| {
-                        handlers::publish_diagnostics(&snapshot, file_id)
-                            .map_err(|err| {
-                                if !is_cancelled(&*err) {
-                                    log::error!("failed to compute diagnostics: {:?}", err);
-                                }
-                                ()
-                            })
-                            .ok()
-                            .map(|diags| (file_id, diags))
-                    })
-                    .collect::<Vec<_>>();
-                Task::Diagnostics(diagnostics)
-            })
-        }
+        tracing::trace!("updating notifications for {:?}", subscriptions);
+
+        let snapshot = self.snapshot();
+        self.task_pool.handle.spawn(move || {
+            let diagnostics = subscriptions
+                .into_iter()
+                .filter_map(|file_id| {
+                    handlers::publish_diagnostics(&snapshot, file_id)
+                        .map_err(|err| {
+                            if !is_cancelled(&*err) {
+                                tracing::error!("failed to compute diagnostics: {:?}", err);
+                            }
+                        })
+                        .ok()
+                        .map(|diags| (file_id, diags))
+                })
+                .collect::<Vec<_>>();
+            Task::Diagnostics(diagnostics)
+        })
     }
 }
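
The core pattern in this change is an operation queue: becoming quiescent requests a cache-priming op (only when the new prefill_caches toggle is on), each main-loop turn starts at most one queued op, the background task streams Begin/Report/End progress back over the task channel, and a cancelled run is simply re-requested for the next quiescent turn. Below is a minimal, self-contained sketch of that flow, not rust-analyzer's actual code: the names OpQueue, PrimeCachesProgress and prefill_caches mirror the diff, while the std::sync::mpsc channel, the spawned thread and the fake three-step work loop are illustrative stand-ins for the task pool and analysis.prime_caches().

use std::sync::mpsc;
use std::thread;

/// Ensures at most one instance of an operation runs at a time, while
/// remembering that another run was requested in the meantime.
#[derive(Default)]
struct OpQueue {
    op_requested: bool,
    op_in_progress: bool,
}

impl OpQueue {
    fn request_op(&mut self) {
        self.op_requested = true;
    }
    fn should_start_op(&mut self) -> bool {
        if self.op_in_progress || !self.op_requested {
            return false;
        }
        self.op_in_progress = true;
        self.op_requested = false;
        true
    }
    fn op_completed(&mut self) {
        self.op_in_progress = false;
    }
}

#[derive(Debug)]
enum PrimeCachesProgress {
    Begin,
    Report { n_done: usize, n_total: usize },
    End { cancelled: bool },
}

fn main() {
    let prefill_caches = true; // the new toggle; `false` skips priming entirely
    let mut queue = OpQueue::default();
    let (tx, rx) = mpsc::channel();

    // Becoming quiescent requests the op (handle_event does this in the diff).
    if prefill_caches {
        queue.request_op();
    }

    // Main-loop turn: start the op only if one was requested and none is running.
    if queue.should_start_op() {
        let tx = tx.clone();
        thread::spawn(move || {
            tx.send(PrimeCachesProgress::Begin).unwrap();
            for n_done in 1..=3 {
                // The real task calls `analysis.prime_caches(..)`, which may be
                // cancelled by incoming edits; here we just pretend to work.
                tx.send(PrimeCachesProgress::Report { n_done, n_total: 3 }).unwrap();
            }
            tx.send(PrimeCachesProgress::End { cancelled: false }).unwrap();
        });
    }
    drop(tx);

    // Event handling: translate progress into $/progress-style updates and
    // requeue the op if it was cancelled before finishing.
    for progress in rx {
        match progress {
            PrimeCachesProgress::Begin => println!("begin priming"),
            PrimeCachesProgress::Report { n_done, n_total } => {
                println!("primed {}/{}", n_done, n_total)
            }
            PrimeCachesProgress::End { cancelled } => {
                queue.op_completed();
                if cancelled {
                    queue.request_op(); // retried on the next quiescent turn
                }
                println!("done (cancelled: {})", cancelled);
            }
        }
    }
}

The same request_op / should_start_op / op_completed shape is what the diff switches fetch_workspaces_queue and fetch_build_data_queue to as well, which is why the old fetch_*_request / fetch_*_if_needed / fetch_*_completed helpers disappear.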