X-Git-Url: https://git.lizzy.rs/?a=blobdiff_plain;f=crates%2Frust-analyzer%2Fsrc%2Fmain_loop.rs;h=979b3b1522159679d2c1de1d81e5ca0bf2c9453e;hb=a68ce62f6a33d3e5777c1d64c946e0f1d9c7f457;hp=62ff922e1ffe1221ef123a2dcdecf1f55b97a85d;hpb=4cc961007f97705f85fc22580247b236d14cf37a;p=rust.git

diff --git a/crates/rust-analyzer/src/main_loop.rs b/crates/rust-analyzer/src/main_loop.rs
index 62ff922e1ff..979b3b15221 100644
--- a/crates/rust-analyzer/src/main_loop.rs
+++ b/crates/rust-analyzer/src/main_loop.rs
@@ -1,18 +1,17 @@
 //! The main loop of `rust-analyzer` responsible for dispatching LSP
 //! requests/replies and notifications back to the client.
 use std::{
-    env, fmt,
+    fmt,
     sync::Arc,
     time::{Duration, Instant},
 };
 
 use always_assert::always;
 use crossbeam_channel::{select, Receiver};
-use ide::{FileId, PrimeCachesProgress};
-use ide_db::base_db::VfsPath;
+use ide_db::base_db::{SourceDatabaseExt, VfsPath};
 use lsp_server::{Connection, Notification, Request};
 use lsp_types::notification::Notification as _;
-use vfs::ChangeKind;
+use vfs::{ChangeKind, FileId};
 
 use crate::{
     config::Config,
@@ -22,12 +21,12 @@
     handlers, lsp_ext,
     lsp_utils::{apply_document_changes, is_cancelled, notification_is, Progress},
     mem_docs::DocumentData,
-    reload::{BuildDataProgress, ProjectWorkspaceProgress},
+    reload::{self, BuildDataProgress, ProjectWorkspaceProgress},
     Result,
 };
 
 pub fn main_loop(config: Config, connection: Connection) -> Result<()> {
-    log::info!("initial config: {:#?}", config);
+    tracing::info!("initial config: {:#?}", config);
 
     // Windows scheduler implements priority boosts: if thread waits for an
     // event (like a condvar), and event fires, priority of the thread is
@@ -67,6 +66,13 @@ pub(crate) enum Task {
     FetchBuildData(BuildDataProgress),
 }
 
+#[derive(Debug)]
+pub(crate) enum PrimeCachesProgress {
+    Begin,
+    Report(ide::PrimeCachesProgress),
+    End { cancelled: bool },
+}
+
 impl fmt::Debug for Event {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         let debug_verbose_not = |not: &Notification, f: &mut fmt::Formatter| {
@@ -106,7 +112,7 @@ fn run(mut self, inbox: Receiver<lsp_server::Message>) -> Result<()> {
             && self.config.notifications().cargo_toml_not_found
         {
             self.show_message(
-                lsp_types::MessageType::Error,
+                lsp_types::MessageType::ERROR,
                 "rust-analyzer failed to discover workspace".to_string(),
             );
         };
@@ -146,8 +152,10 @@ fn run(mut self, inbox: Receiver<lsp_server::Message>) -> Result<()> {
             );
         }
 
-        self.fetch_workspaces_request();
-        self.fetch_workspaces_if_needed();
+        self.fetch_workspaces_queue.request_op();
+        if self.fetch_workspaces_queue.should_start_op() {
+            self.fetch_workspaces();
+        }
 
         while let Some(event) = self.next_event(&inbox) {
             if let Event::Lsp(lsp_server::Message::Notification(not)) = &event {
@@ -158,7 +166,7 @@ fn run(mut self, inbox: Receiver<lsp_server::Message>) -> Result<()> {
             self.handle_event(event)?
         }
 
-        Err("client exited without proper shutdown sequence")?
+        return Err("client exited without proper shutdown sequence".into());
     }
 
     fn next_event(&self, inbox: &Receiver<lsp_server::Message>) -> Option<Event> {
@@ -182,10 +190,10 @@ fn handle_event(&mut self, event: Event) -> Result<()> {
         // NOTE: don't count blocking select! call as a loop-turn time
         let _p = profile::span("GlobalState::handle_event");
 
-        log::info!("handle_event({:?})", event);
+        tracing::info!("handle_event({:?})", event);
         let task_queue_len = self.task_pool.handle.len();
         if task_queue_len > 0 {
-            log::info!("task queue len: {}", task_queue_len);
+            tracing::info!("task queue len: {}", task_queue_len);
         }
 
         let was_quiescent = self.is_quiescent();
@@ -209,17 +217,17 @@ fn handle_event(&mut self, event: Event) -> Result<()> {
                             }
                         }
                         Task::PrimeCaches(progress) => match progress {
-                            PrimeCachesProgress::Started => prime_caches_progress.push(progress),
-                            PrimeCachesProgress::StartedOnCrate { .. } => {
+                            PrimeCachesProgress::Begin => prime_caches_progress.push(progress),
+                            PrimeCachesProgress::Report(_) => {
                                 match prime_caches_progress.last_mut() {
-                                    Some(last @ PrimeCachesProgress::StartedOnCrate { .. }) => {
+                                    Some(last @ PrimeCachesProgress::Report(_)) => {
                                         // Coalesce subsequent update events.
                                         *last = progress;
                                     }
                                     _ => prime_caches_progress.push(progress),
                                 }
                             }
-                            PrimeCachesProgress::Finished => prime_caches_progress.push(progress),
+                            PrimeCachesProgress::End { .. } => prime_caches_progress.push(progress),
                         },
                         Task::FetchWorkspace(progress) => {
                             let (state, msg) = match progress {
@@ -228,14 +236,14 @@ fn handle_event(&mut self, event: Event) -> Result<()> {
                                     (Progress::Report, Some(msg))
                                 }
                                 ProjectWorkspaceProgress::End(workspaces) => {
-                                    self.fetch_workspaces_completed(workspaces);
+                                    self.fetch_workspaces_queue.op_completed(workspaces);
 
                                     let old = Arc::clone(&self.workspaces);
                                     self.switch_workspaces();
                                     let workspaces_updated = !Arc::ptr_eq(&old, &self.workspaces);
 
                                     if self.config.run_build_scripts() && workspaces_updated {
-                                        self.fetch_build_data_request()
+                                        self.fetch_build_data_queue.request_op()
                                     }
 
                                     (Progress::End, None)
@@ -251,7 +259,7 @@ fn handle_event(&mut self, event: Event) -> Result<()> {
                                     (Some(Progress::Report), Some(msg))
                                 }
                                 BuildDataProgress::End(build_data_result) => {
-                                    self.fetch_build_data_completed(build_data_result);
+                                    self.fetch_build_data_queue.op_completed(build_data_result);
 
                                     self.switch_workspaces();
 
@@ -275,22 +283,28 @@ fn handle_event(&mut self, event: Event) -> Result<()> {
                 for progress in prime_caches_progress {
                     let (state, message, fraction);
                     match progress {
-                        PrimeCachesProgress::Started => {
+                        PrimeCachesProgress::Begin => {
                             state = Progress::Begin;
                             message = None;
                             fraction = 0.0;
                         }
-                        PrimeCachesProgress::StartedOnCrate { on_crate, n_done, n_total } => {
+                        PrimeCachesProgress::Report(report) => {
                             state = Progress::Report;
-                            message = Some(format!("{}/{} ({})", n_done, n_total, on_crate));
-                            fraction = Progress::fraction(n_done, n_total);
+                            message = Some(format!(
+                                "{}/{} ({})",
+                                report.n_done, report.n_total, report.on_crate
+                            ));
+                            fraction = Progress::fraction(report.n_done, report.n_total);
                         }
-                        PrimeCachesProgress::Finished => {
+                        PrimeCachesProgress::End { cancelled } => {
                             state = Progress::End;
                             message = None;
                             fraction = 1.0;
                             self.prime_caches_queue.op_completed(());
+                            if cancelled {
+                                self.prime_caches_queue.request_op();
+                            }
                         }
                     };
@@ -359,7 +373,7 @@ fn handle_event(&mut self, event: Event) -> Result<()> {
                                 diag.fixes,
                             ),
                             Err(err) => {
-                                log::error!(
+                                tracing::error!(
                                     "File with cargo diagnostic not found in VFS: {}", err
                                 );
                             }
@@ -380,7 +394,10 @@ fn handle_event(&mut self, event: Event) -> Result<()> {
                         flycheck::Progress::DidCancel => (Progress::End, None),
                         flycheck::Progress::DidFinish(result) => {
                             if let Err(err) = result {
-                                log::error!("cargo check failed: {}", err)
+                                self.show_message(
+                                    lsp_types::MessageType::ERROR,
+                                    format!("cargo check failed: {}", err),
+                                );
                             }
                             (Progress::End, None)
                         }
@@ -389,7 +406,10 @@ fn handle_event(&mut self, event: Event) -> Result<()> {
                     // When we're running multiple flychecks, we have to include a disambiguator in
                     // the title, or the editor complains. Note that this is a user-facing string.
                     let title = if self.flycheck.len() == 1 {
-                        "cargo check".to_string()
+                        match self.config.flycheck() {
+                            Some(config) => format!("{}", config),
+                            None => "cargo check".to_string(),
+                        }
                     } else {
                         format!("cargo check (#{})", id + 1)
                     };
@@ -413,26 +433,12 @@ fn handle_event(&mut self, event: Event) -> Result<()> {
             for flycheck in &self.flycheck {
                 flycheck.update();
             }
+            if self.config.prefill_caches() {
+                self.prime_caches_queue.request_op();
+            }
         }
 
         if !was_quiescent || state_changed {
-            // Ensure that only one cache priming task can run at a time
-            self.prime_caches_queue.request_op();
-            if self.prime_caches_queue.should_start_op() {
-                self.task_pool.handle.spawn_with_sender({
-                    let snap = self.snapshot();
-                    move |sender| {
-                        let cb = |progress| {
-                            sender.send(Task::PrimeCaches(progress)).unwrap();
-                        };
-                        match snap.analysis.prime_caches(cb) {
-                            Ok(()) => (),
-                            Err(_canceled) => (),
-                        }
-                    }
-                });
-            }
-
             // Refresh semantic tokens if the client supports it.
             if self.config.semantic_tokens_refresh() {
                 self.semantic_tokens_cache.lock().clear();
@@ -454,6 +460,17 @@ fn handle_event(&mut self, event: Event) -> Result<()> {
 
         if let Some(diagnostic_changes) = self.diagnostics.take_changes() {
             for file_id in diagnostic_changes {
+                let db = self.analysis_host.raw_database();
+                let source_root = db.file_source_root(file_id);
+                if db.source_root(source_root).is_library {
+                    // Only publish diagnostics for files in the workspace, not from crates.io deps
+                    // or the sysroot.
+                    // While theoretically these should never have errors, we have quite a few false
+                    // positives particularly in the stdlib, and those diagnostics would stay around
+                    // forever if we emitted them here.
+                    continue;
+                }
+
                 let url = file_id_to_url(&self.vfs.read().0, file_id);
                 let diagnostics = self.diagnostics.diagnostics_for(file_id).cloned().collect();
                 let version = from_proto::vfs_path(&url)
@@ -467,21 +484,51 @@ fn handle_event(&mut self, event: Event) -> Result<()> {
         }
 
         if self.config.cargo_autoreload() {
-            self.fetch_workspaces_if_needed();
+            if self.fetch_workspaces_queue.should_start_op() {
+                self.fetch_workspaces();
+            }
+        }
+        if self.fetch_build_data_queue.should_start_op() {
+            self.fetch_build_data();
+        }
+        if self.prime_caches_queue.should_start_op() {
+            self.task_pool.handle.spawn_with_sender({
+                let analysis = self.snapshot().analysis;
+                move |sender| {
+                    sender.send(Task::PrimeCaches(PrimeCachesProgress::Begin)).unwrap();
+                    let res = analysis.prime_caches(|progress| {
+                        let report = PrimeCachesProgress::Report(progress);
+                        sender.send(Task::PrimeCaches(report)).unwrap();
+                    });
+                    sender
+                        .send(Task::PrimeCaches(PrimeCachesProgress::End {
+                            cancelled: res.is_err(),
+                        }))
+                        .unwrap();
+                }
+            });
         }
 
-        self.fetch_build_data_if_needed();
-        self.report_new_status_if_needed();
+        let status = self.current_status();
+        if self.last_reported_status.as_ref() != Some(&status) {
+            self.last_reported_status = Some(status.clone());
+
+            if let (lsp_ext::Health::Error, Some(message)) = (status.health, &status.message) {
+                self.show_message(lsp_types::MessageType::ERROR, message.clone());
+            }
+
+            if self.config.server_status_notification() {
+                self.send_notification::<lsp_ext::ServerStatusNotification>(status);
+            }
+        }
 
         let loop_duration = loop_start.elapsed();
         if loop_duration > Duration::from_millis(100) {
-            log::warn!("overly long loop turn: {:?}", loop_duration);
-            if env::var("RA_PROFILE").is_ok() {
-                self.show_message(
-                    lsp_types::MessageType::Error,
-                    format!("overly long loop turn: {:?}", loop_duration),
-                )
-            }
+            tracing::warn!("overly long loop turn: {:?}", loop_duration);
+            self.poke_rust_analyzer_developer(format!(
+                "overly long loop turn: {:?}",
+                loop_duration
+            ));
         }
         Ok(())
     }
@@ -511,24 +558,19 @@ fn on_request(&mut self, request_received: Instant, req: Request) -> Result<()>
         }
 
         RequestDispatcher { req: Some(req), global_state: self }
-            .on_sync::<lsp_ext::ReloadWorkspace>(|s, ()| {
-                s.fetch_workspaces_request();
-                s.fetch_workspaces_if_needed();
+            .on_sync_mut::<lsp_ext::ReloadWorkspace>(|s, ()| {
+                s.fetch_workspaces_queue.request_op();
                 Ok(())
             })?
-            .on_sync::<lsp_ext::JoinLines>(|s, p| handlers::handle_join_lines(s.snapshot(), p))?
-            .on_sync::<lsp_ext::OnEnter>(|s, p| handlers::handle_on_enter(s.snapshot(), p))?
-            .on_sync::<lsp_types::request::Shutdown>(|s, ()| {
+            .on_sync_mut::<lsp_types::request::Shutdown>(|s, ()| {
                 s.shutdown_requested = true;
                 Ok(())
             })?
-            .on_sync::<lsp_types::request::SelectionRangeRequest>(|s, p| {
-                handlers::handle_selection_range(s.snapshot(), p)
-            })?
-            .on_sync::<lsp_ext::MatchingBrace>(|s, p| {
-                handlers::handle_matching_brace(s.snapshot(), p)
-            })?
-            .on_sync::<lsp_ext::MemoryUsage>(|s, p| handlers::handle_memory_usage(s, p))?
+            .on_sync_mut::<lsp_ext::MemoryUsage>(handlers::handle_memory_usage)?
+            .on_sync::<lsp_ext::JoinLines>(handlers::handle_join_lines)?
+            .on_sync::<lsp_ext::OnEnter>(handlers::handle_on_enter)?
+            .on_sync::<lsp_types::request::SelectionRangeRequest>(handlers::handle_selection_range)?
+            .on_sync::<lsp_ext::MatchingBrace>(handlers::handle_matching_brace)?
             .on::<lsp_ext::AnalyzerStatus>(handlers::handle_analyzer_status)
             .on::<lsp_ext::SyntaxTree>(handlers::handle_syntax_tree)
             .on::<lsp_ext::ViewHir>(handlers::handle_view_hir)
@@ -608,7 +650,7 @@ fn on_notification(&mut self, not: Notification) -> Result<()> {
                         .insert(path.clone(), DocumentData::new(params.text_document.version))
                         .is_err()
                     {
-                        log::error!("duplicate DidOpenTextDocument: {}", path)
+                        tracing::error!("duplicate DidOpenTextDocument: {}", path)
                     }
                     this.vfs
                         .write()
@@ -626,7 +668,7 @@ fn on_notification(&mut self, not: Notification) -> Result<()> {
                             doc.version = params.text_document.version;
                         }
                         None => {
-                            log::error!("expected DidChangeTextDocument: {}", path);
+                            tracing::error!("unexpected DidChangeTextDocument: {}; send DidOpenTextDocument first", path);
                             return Ok(());
                         }
                     };
@@ -643,7 +685,7 @@ fn on_notification(&mut self, not: Notification) -> Result<()> {
             .on::<lsp_types::notification::DidCloseTextDocument>(|this, params| {
                 if let Ok(path) = from_proto::vfs_path(&params.text_document.uri) {
                     if this.mem_docs.remove(&path).is_err() {
-                        log::error!("orphan DidCloseTextDocument: {}", path);
+                        tracing::error!("orphan DidCloseTextDocument: {}", path);
                     }
                     this.semantic_tokens_cache.lock().remove(&params.text_document.uri);
@@ -659,7 +701,9 @@ fn on_notification(&mut self, not: Notification) -> Result<()> {
                     flycheck.update();
                 }
                 if let Ok(abs_path) = from_proto::abs_path(&params.text_document.uri) {
-                    this.maybe_refresh(&[(abs_path, ChangeKind::Modify)]);
+                    if reload::should_refresh_for_change(&abs_path, ChangeKind::Modify) {
+                        this.fetch_workspaces_queue.request_op();
+                    }
                 }
                 Ok(())
             })?
@@ -674,12 +718,12 @@ fn on_notification(&mut self, not: Notification) -> Result<()> {
                         }],
                     },
                     |this, resp| {
-                        log::debug!("config update response: '{:?}", resp);
+                        tracing::debug!("config update response: '{:?}", resp);
                         let lsp_server::Response { error, result, .. } = resp;
 
                         match (error, result) {
                             (Some(err), _) => {
-                                log::error!("failed to fetch the server settings: {:?}", err)
+                                tracing::error!("failed to fetch the server settings: {:?}", err)
                             }
                             (None, Some(mut configs)) => {
                                 if let Some(json) = configs.get_mut(0) {
@@ -690,7 +734,7 @@ fn on_notification(&mut self, not: Notification) -> Result<()> {
                                     this.update_configuration(config);
                                 }
                             }
-                            (None, None) => log::error!(
+                            (None, None) => tracing::error!(
                                 "received empty server settings response from the client"
                             ),
                         }
@@ -718,7 +762,7 @@ fn update_diagnostics(&mut self) {
             .map(|path| self.vfs.read().0.file_id(path).unwrap())
             .collect::<Vec<_>>();
 
-        log::trace!("updating notifications for {:?}", subscriptions);
+        tracing::trace!("updating notifications for {:?}", subscriptions);
 
         let snapshot = self.snapshot();
         self.task_pool.handle.spawn(move || {
@@ -728,9 +772,8 @@ fn update_diagnostics(&mut self) {
                     handlers::publish_diagnostics(&snapshot, file_id)
                         .map_err(|err| {
                             if !is_cancelled(&*err) {
-                                log::error!("failed to compute diagnostics: {:?}", err);
+                                tracing::error!("failed to compute diagnostics: {:?}", err);
                             }
-                            ()
                         })
                         .ok()
                         .map(|diags| (file_id, diags))
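
// The hunks above replace the hand-rolled `fetch_workspaces_request()` /
// `fetch_workspaces_if_needed()` / `fetch_workspaces_completed()` trio (and the
// matching build-data and cache-priming helpers) with a small "op queue":
// callers mark work as requested, the main loop starts it at a quiescent point,
// and the completion handler re-arms it. The sketch below is a minimal,
// hypothetical illustration of that idea only; it is not rust-analyzer's actual
// `OpQueue` type, and every name here is invented for the example.

/// Tracks one kind of background operation: whether a run was requested and
/// whether one is currently in flight.
pub struct OpQueueSketch<Output> {
    op_requested: bool,
    op_in_progress: bool,
    last_op_result: Option<Output>,
}

impl<Output> OpQueueSketch<Output> {
    pub fn new() -> Self {
        OpQueueSketch { op_requested: false, op_in_progress: false, last_op_result: None }
    }

    /// Record that the operation should run (again) as soon as possible.
    pub fn request_op(&mut self) {
        self.op_requested = true;
    }

    /// Returns true only when a run was requested and none is in flight, so the
    /// main loop can start the operation at a safe point; repeated calls return
    /// false until the current run completes and a new request arrives.
    pub fn should_start_op(&mut self) -> bool {
        if self.op_in_progress || !self.op_requested {
            return false;
        }
        self.op_requested = false;
        self.op_in_progress = true;
        true
    }

    /// Store the result and allow the next requested run to start.
    pub fn op_completed(&mut self, result: Output) {
        self.op_in_progress = false;
        self.last_op_result = Some(result);
    }

    /// Expose the result of the most recent completed run, if any.
    pub fn last_op_result(&self) -> Option<&Output> {
        self.last_op_result.as_ref()
    }
}

fn main() {
    // Usage mirrors the main-loop flow in the diff: request, start, complete.
    let mut fetch_workspaces = OpQueueSketch::new();
    fetch_workspaces.request_op();
    if fetch_workspaces.should_start_op() {
        // ... kick off the background fetch here ...
        fetch_workspaces.op_completed("workspaces loaded");
    }
    assert!(!fetch_workspaces.should_start_op()); // idle until requested again
    println!("{:?}", fetch_workspaces.last_op_result());
}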
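
// A second sketch, assuming only std threads and channels, of the
// progress-reporting shape the diff introduces for cache priming: the worker
// sends `Begin`, a stream of `Report`s, and a final `End { cancelled }` through
// a sender, and the receiving loop turns them into user-visible progress. The
// worker body and all names are made up for illustration; only the message
// shape mirrors the diff.

use std::sync::mpsc;
use std::thread;

#[derive(Debug)]
enum PrimeCachesProgress {
    Begin,
    Report { n_done: usize, n_total: usize, on_crate: String },
    End { cancelled: bool },
}

fn main() {
    let (sender, receiver) = mpsc::channel();

    // Stand-in for `task_pool.handle.spawn_with_sender(...)`: the worker owns
    // the sender and reports progress as it goes.
    let worker = thread::spawn(move || {
        sender.send(PrimeCachesProgress::Begin).unwrap();
        let crates = ["core", "alloc", "std"];
        for (i, krate) in crates.iter().enumerate() {
            // Real code would run the (cancellable) analysis here; this sketch
            // pretends each crate is primed successfully.
            sender
                .send(PrimeCachesProgress::Report {
                    n_done: i + 1,
                    n_total: crates.len(),
                    on_crate: krate.to_string(),
                })
                .unwrap();
        }
        sender.send(PrimeCachesProgress::End { cancelled: false }).unwrap();
    });

    // Stand-in for the main loop: translate events into progress updates. The
    // loop ends once the worker drops its sender.
    for event in receiver {
        match event {
            PrimeCachesProgress::Begin => println!("indexing: begin"),
            PrimeCachesProgress::Report { n_done, n_total, on_crate } => {
                println!("indexing: {}/{} ({})", n_done, n_total, on_crate)
            }
            PrimeCachesProgress::End { cancelled } => {
                println!("indexing: end (cancelled: {})", cancelled)
            }
        }
    }

    worker.join().unwrap();
}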