//! The main loop of `rust-analyzer` responsible for dispatching LSP
//! requests/replies and notifications back to the client.
use std::{
- env, fmt,
+ fmt,
+ sync::Arc,
time::{Duration, Instant},
};
use always_assert::always;
use crossbeam_channel::{select, Receiver};
-use ide::PrimeCachesProgress;
-use ide::{Canceled, FileId};
-use ide_db::base_db::VfsPath;
-use lsp_server::{Connection, Notification, Request, Response};
+use ide_db::base_db::{SourceDatabaseExt, VfsPath};
+use lsp_server::{Connection, Notification, Request};
use lsp_types::notification::Notification as _;
-use vfs::ChangeKind;
+use vfs::{ChangeKind, FileId};
use crate::{
config::Config,
dispatch::{NotificationDispatcher, RequestDispatcher},
- document::DocumentData,
from_proto,
- global_state::{file_id_to_url, url_to_file_id, GlobalState, Status},
+ global_state::{file_id_to_url, url_to_file_id, GlobalState},
handlers, lsp_ext,
- lsp_utils::{apply_document_changes, is_canceled, notification_is, Progress},
- reload::{BuildDataProgress, ProjectWorkspaceProgress},
+ lsp_utils::{apply_document_changes, is_cancelled, notification_is, Progress},
+ mem_docs::DocumentData,
+ reload::{self, BuildDataProgress, ProjectWorkspaceProgress},
Result,
};
pub fn main_loop(config: Config, connection: Connection) -> Result<()> {
- log::info!("initial config: {:#?}", config);
+ tracing::info!("initial config: {:#?}", config);
// Windows scheduler implements priority boosts: if thread waits for an
// event (like a condvar), and event fires, priority of the thread is
#[derive(Debug)]
/// Messages sent from background (task-pool) threads back to the main loop.
/// Each variant carries the result or progress report of one async job.
pub(crate) enum Task {
- Response(Response),
+ Response(lsp_server::Response),
 Diagnostics(Vec<(FileId, Vec<lsp_types::Diagnostic>)>),
 PrimeCaches(PrimeCachesProgress),
 FetchWorkspace(ProjectWorkspaceProgress),
 FetchBuildData(BuildDataProgress),
}
+#[derive(Debug)]
+/// Progress of the background cache-priming (indexing) job, forwarded to the
+/// main loop via `Task::PrimeCaches`. `Begin`/`Report`/`End` mirror the LSP
+/// progress states; `cancelled` signals the job was interrupted by a change
+/// and should be re-requested.
+pub(crate) enum PrimeCachesProgress {
+ Begin,
+ Report(ide::PrimeCachesProgress),
+ End { cancelled: bool },
+}
+
impl fmt::Debug for Event {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let debug_verbose_not = |not: &Notification, f: &mut fmt::Formatter| {
impl GlobalState {
fn run(mut self, inbox: Receiver<lsp_server::Message>) -> Result<()> {
if self.config.linked_projects().is_empty()
+ && self.config.detached_files().is_empty()
&& self.config.notifications().cargo_toml_not_found
{
self.show_message(
- lsp_types::MessageType::Error,
+ lsp_types::MessageType::ERROR,
"rust-analyzer failed to discover workspace".to_string(),
);
};
);
}
- self.fetch_workspaces_request();
- self.fetch_workspaces_if_needed();
+ self.fetch_workspaces_queue.request_op();
+ if self.fetch_workspaces_queue.should_start_op() {
+ self.fetch_workspaces();
+ }
while let Some(event) = self.next_event(&inbox) {
if let Event::Lsp(lsp_server::Message::Notification(not)) = &event {
self.handle_event(event)?
}
- Err("client exited without proper shutdown sequence")?
+ return Err("client exited without proper shutdown sequence".into());
}
fn next_event(&self, inbox: &Receiver<lsp_server::Message>) -> Option<Event> {
// NOTE: don't count blocking select! call as a loop-turn time
let _p = profile::span("GlobalState::handle_event");
- log::info!("handle_event({:?})", event);
+ tracing::info!("handle_event({:?})", event);
let task_queue_len = self.task_pool.handle.len();
if task_queue_len > 0 {
- log::info!("task queue len: {}", task_queue_len);
+ tracing::info!("task queue len: {}", task_queue_len);
}
- let mut new_status = self.status;
+ let was_quiescent = self.is_quiescent();
match event {
Event::Lsp(msg) => match msg {
lsp_server::Message::Request(req) => self.on_request(loop_start, req)?,
}
}
Task::PrimeCaches(progress) => match progress {
- PrimeCachesProgress::Started => prime_caches_progress.push(progress),
- PrimeCachesProgress::StartedOnCrate { .. } => {
+ PrimeCachesProgress::Begin => prime_caches_progress.push(progress),
+ PrimeCachesProgress::Report(_) => {
match prime_caches_progress.last_mut() {
- Some(last @ PrimeCachesProgress::StartedOnCrate { .. }) => {
+ Some(last @ PrimeCachesProgress::Report(_)) => {
// Coalesce subsequent update events.
*last = progress;
}
_ => prime_caches_progress.push(progress),
}
}
- PrimeCachesProgress::Finished => prime_caches_progress.push(progress),
+ PrimeCachesProgress::End { .. } => prime_caches_progress.push(progress),
},
Task::FetchWorkspace(progress) => {
let (state, msg) = match progress {
(Progress::Report, Some(msg))
}
ProjectWorkspaceProgress::End(workspaces) => {
- self.fetch_workspaces_completed();
- self.switch_workspaces(workspaces, None);
+ self.fetch_workspaces_queue.op_completed(workspaces);
+
+ let old = Arc::clone(&self.workspaces);
+ self.switch_workspaces();
+ let workspaces_updated = !Arc::ptr_eq(&old, &self.workspaces);
+
+ if self.config.run_build_scripts() && workspaces_updated {
+ self.fetch_build_data_queue.request_op()
+ }
+
(Progress::End, None)
}
};
- self.report_progress("fetching", state, msg, None);
+
+ self.report_progress("Fetching", state, msg, None);
}
Task::FetchBuildData(progress) => {
let (state, msg) = match progress {
BuildDataProgress::Report(msg) => {
(Some(Progress::Report), Some(msg))
}
- BuildDataProgress::End(collector) => {
- self.fetch_build_data_completed();
- let workspaces =
- (*self.workspaces).clone().into_iter().map(Ok).collect();
- self.switch_workspaces(workspaces, Some(collector));
+ BuildDataProgress::End(build_data_result) => {
+ self.fetch_build_data_queue.op_completed(build_data_result);
+
+ self.switch_workspaces();
+
(Some(Progress::End), None)
}
};
+
if let Some(state) = state {
- self.report_progress("loading", state, msg, None);
+ self.report_progress("Loading", state, msg, None);
}
}
}
+
// Coalesce multiple task events into one loop turn
task = match self.task_pool.receiver.try_recv() {
Ok(task) => task,
for progress in prime_caches_progress {
let (state, message, fraction);
match progress {
- PrimeCachesProgress::Started => {
+ PrimeCachesProgress::Begin => {
state = Progress::Begin;
message = None;
fraction = 0.0;
}
- PrimeCachesProgress::StartedOnCrate { on_crate, n_done, n_total } => {
+ PrimeCachesProgress::Report(report) => {
state = Progress::Report;
- message = Some(format!("{}/{} ({})", n_done, n_total, on_crate));
- fraction = Progress::fraction(n_done, n_total);
+ message = Some(format!(
+ "{}/{} ({})",
+ report.n_done, report.n_total, report.on_crate
+ ));
+ fraction = Progress::fraction(report.n_done, report.n_total);
}
- PrimeCachesProgress::Finished => {
+ PrimeCachesProgress::End { cancelled } => {
state = Progress::End;
message = None;
fraction = 1.0;
+
+ self.prime_caches_queue.op_completed(());
+ if cancelled {
+ self.prime_caches_queue.request_op();
+ }
}
};
- self.report_progress("indexing", state, message, Some(fraction));
+ self.report_progress("Indexing", state, message, Some(fraction));
}
}
Event::Vfs(mut task) => {
let vfs = &mut self.vfs.write().0;
for (path, contents) in files {
let path = VfsPath::from(path);
- if !self.mem_docs.contains_key(&path) {
+ if !self.mem_docs.contains(&path) {
vfs.set_file_contents(path, contents);
}
}
}
vfs::loader::Message::Progress { n_total, n_done, config_version } => {
always!(config_version <= self.vfs_config_version);
- if n_total == 0 {
- new_status = Status::Invalid;
+
+ self.vfs_progress_config_version = config_version;
+ self.vfs_progress_n_total = n_total;
+ self.vfs_progress_n_done = n_done;
+
+ let state = if n_done == 0 {
+ Progress::Begin
+ } else if n_done < n_total {
+ Progress::Report
} else {
- let state = if n_done == 0 {
- new_status = Status::Loading;
- Progress::Begin
- } else if n_done < n_total {
- Progress::Report
- } else {
- assert_eq!(n_done, n_total);
- new_status = Status::Ready {
- partial: self.config.run_build_scripts()
- && self.workspace_build_data.is_none()
- || config_version < self.vfs_config_version,
- };
- Progress::End
- };
- self.report_progress(
- "roots scanned",
- state,
- Some(format!("{}/{}", n_done, n_total)),
- Some(Progress::fraction(n_done, n_total)),
- )
- }
+ assert_eq!(n_done, n_total);
+ Progress::End
+ };
+ self.report_progress(
+ "Roots Scanned",
+ state,
+ Some(format!("{}/{}", n_done, n_total)),
+ Some(Progress::fraction(n_done, n_total)),
+ )
}
}
// Coalesce many VFS event into a single loop turn
diag.fixes,
),
Err(err) => {
- log::error!(
+ tracing::error!(
"File with cargo diagnostic not found in VFS: {}",
err
);
flycheck::Progress::DidCancel => (Progress::End, None),
flycheck::Progress::DidFinish(result) => {
if let Err(err) = result {
- log::error!("cargo check failed: {}", err)
+ self.show_message(
+ lsp_types::MessageType::ERROR,
+ format!("cargo check failed: {}", err),
+ );
}
(Progress::End, None)
}
// When we're running multiple flychecks, we have to include a disambiguator in
// the title, or the editor complains. Note that this is a user-facing string.
let title = if self.flycheck.len() == 1 {
- "cargo check".to_string()
+ match self.config.flycheck() {
+ Some(config) => format!("{}", config),
+ None => "cargo check".to_string(),
+ }
} else {
format!("cargo check (#{})", id + 1)
};
}
let state_changed = self.process_changes();
- let prev_status = self.status;
- if prev_status != new_status {
- self.transition(new_status);
- }
- let is_ready = matches!(self.status, Status::Ready { .. });
- if prev_status == Status::Loading && is_ready {
- for flycheck in &self.flycheck {
- flycheck.update();
+ let memdocs_added_or_removed = self.mem_docs.take_changes();
+
+ if self.is_quiescent() {
+ if !was_quiescent {
+ for flycheck in &self.flycheck {
+ flycheck.update();
+ }
+ if self.config.prefill_caches() {
+ self.prime_caches_queue.request_op();
+ }
}
- }
- if is_ready && (state_changed || prev_status == Status::Loading) {
- self.update_file_notifications_on_threadpool();
+ if !was_quiescent || state_changed {
+ // Refresh semantic tokens if the client supports it.
+ if self.config.semantic_tokens_refresh() {
+ self.semantic_tokens_cache.lock().clear();
+ self.send_request::<lsp_types::request::SemanticTokensRefesh>((), |_, _| ());
+ }
- // Refresh semantic tokens if the client supports it.
- if self.config.semantic_tokens_refresh() {
- self.semantic_tokens_cache.lock().clear();
- self.send_request::<lsp_types::request::SemanticTokensRefesh>((), |_, _| ());
+ // Refresh code lens if the client supports it.
+ if self.config.code_lens_refresh() {
+ self.send_request::<lsp_types::request::CodeLensRefresh>((), |_, _| ());
+ }
}
- // Refresh code lens if the client supports it.
- if self.config.code_lens_refresh() {
- self.send_request::<lsp_types::request::CodeLensRefresh>((), |_, _| ());
+ if !was_quiescent || state_changed || memdocs_added_or_removed {
+ if self.config.publish_diagnostics() {
+ self.update_diagnostics()
+ }
}
}
if let Some(diagnostic_changes) = self.diagnostics.take_changes() {
for file_id in diagnostic_changes {
+ let db = self.analysis_host.raw_database();
+ let source_root = db.file_source_root(file_id);
+ if db.source_root(source_root).is_library {
+ // Only publish diagnostics for files in the workspace, not from crates.io deps
+ // or the sysroot.
+ // While theoretically these should never have errors, we have quite a few false
+ // positives particularly in the stdlib, and those diagnostics would stay around
+ // forever if we emitted them here.
+ continue;
+ }
+
let url = file_id_to_url(&self.vfs.read().0, file_id);
let diagnostics = self.diagnostics.diagnostics_for(file_id).cloned().collect();
let version = from_proto::vfs_path(&url)
}
}
- self.fetch_workspaces_if_needed();
- self.fetch_build_data_if_needed();
+ if self.config.cargo_autoreload() {
+ if self.fetch_workspaces_queue.should_start_op() {
+ self.fetch_workspaces();
+ }
+ }
+ if self.fetch_build_data_queue.should_start_op() {
+ self.fetch_build_data();
+ }
+ if self.prime_caches_queue.should_start_op() {
+ self.task_pool.handle.spawn_with_sender({
+ let analysis = self.snapshot().analysis;
+ move |sender| {
+ sender.send(Task::PrimeCaches(PrimeCachesProgress::Begin)).unwrap();
+ let res = analysis.prime_caches(|progress| {
+ let report = PrimeCachesProgress::Report(progress);
+ sender.send(Task::PrimeCaches(report)).unwrap();
+ });
+ sender
+ .send(Task::PrimeCaches(PrimeCachesProgress::End {
+ cancelled: res.is_err(),
+ }))
+ .unwrap();
+ }
+ });
+ }
+
+ let status = self.current_status();
+ if self.last_reported_status.as_ref() != Some(&status) {
+ self.last_reported_status = Some(status.clone());
+
+ if let (lsp_ext::Health::Error, Some(message)) = (status.health, &status.message) {
+ self.show_message(lsp_types::MessageType::ERROR, message.clone());
+ }
+
+ if self.config.server_status_notification() {
+ self.send_notification::<lsp_ext::ServerStatusNotification>(status);
+ }
+ }
let loop_duration = loop_start.elapsed();
if loop_duration > Duration::from_millis(100) {
- log::warn!("overly long loop turn: {:?}", loop_duration);
- if env::var("RA_PROFILE").is_ok() {
- self.show_message(
- lsp_types::MessageType::Error,
- format!("overly long loop turn: {:?}", loop_duration),
- )
- }
+ tracing::warn!("overly long loop turn: {:?}", loop_duration);
+ self.poke_rust_analyzer_developer(format!(
+ "overly long loop turn: {:?}",
+ loop_duration
+ ));
}
Ok(())
}
self.register_request(&req, request_received);
if self.shutdown_requested {
- self.respond(Response::new_err(
+ self.respond(lsp_server::Response::new_err(
req.id,
lsp_server::ErrorCode::InvalidRequest as i32,
"Shutdown already requested.".to_owned(),
return Ok(());
}
- if self.status == Status::Loading && req.method != "shutdown" {
+ // Avoid flashing a bunch of unresolved references during initial load.
+ if self.workspaces.is_empty() && !self.is_quiescent() {
self.respond(lsp_server::Response::new_err(
req.id,
// FIXME: i32 should impl From<ErrorCode> (from() guarantees lossless conversion)
lsp_server::ErrorCode::ContentModified as i32,
- "Rust Analyzer is still loading...".to_owned(),
+ "waiting for cargo metadata or cargo check".to_owned(),
));
return Ok(());
}
RequestDispatcher { req: Some(req), global_state: self }
- .on_sync::<lsp_ext::ReloadWorkspace>(|s, ()| Ok(s.fetch_workspaces_request()))?
- .on_sync::<lsp_ext::JoinLines>(|s, p| handlers::handle_join_lines(s.snapshot(), p))?
- .on_sync::<lsp_ext::OnEnter>(|s, p| handlers::handle_on_enter(s.snapshot(), p))?
- .on_sync::<lsp_types::request::Shutdown>(|s, ()| {
- s.shutdown_requested = true;
+ .on_sync_mut::<lsp_ext::ReloadWorkspace>(|s, ()| {
+ s.fetch_workspaces_queue.request_op();
Ok(())
})?
- .on_sync::<lsp_types::request::SelectionRangeRequest>(|s, p| {
- handlers::handle_selection_range(s.snapshot(), p)
- })?
- .on_sync::<lsp_ext::MatchingBrace>(|s, p| {
- handlers::handle_matching_brace(s.snapshot(), p)
+ .on_sync_mut::<lsp_types::request::Shutdown>(|s, ()| {
+ s.shutdown_requested = true;
+ Ok(())
})?
- .on_sync::<lsp_ext::MemoryUsage>(|s, p| handlers::handle_memory_usage(s, p))?
+ .on_sync_mut::<lsp_ext::MemoryUsage>(handlers::handle_memory_usage)?
+ .on_sync::<lsp_ext::JoinLines>(handlers::handle_join_lines)?
+ .on_sync::<lsp_ext::OnEnter>(handlers::handle_on_enter)?
+ .on_sync::<lsp_types::request::SelectionRangeRequest>(handlers::handle_selection_range)?
+ .on_sync::<lsp_ext::MatchingBrace>(handlers::handle_matching_brace)?
.on::<lsp_ext::AnalyzerStatus>(handlers::handle_analyzer_status)
.on::<lsp_ext::SyntaxTree>(handlers::handle_syntax_tree)
.on::<lsp_ext::ViewHir>(handlers::handle_view_hir)
+ .on::<lsp_ext::ViewCrateGraph>(handlers::handle_view_crate_graph)
+ .on::<lsp_ext::ViewItemTree>(handlers::handle_view_item_tree)
.on::<lsp_ext::ExpandMacro>(handlers::handle_expand_macro)
.on::<lsp_ext::ParentModule>(handlers::handle_parent_module)
.on::<lsp_ext::Runnables>(handlers::handle_runnables)
.on::<lsp_ext::ExternalDocs>(handlers::handle_open_docs)
.on::<lsp_ext::OpenCargoToml>(handlers::handle_open_cargo_toml)
.on::<lsp_ext::MoveItem>(handlers::handle_move_item)
+ .on::<lsp_ext::WorkspaceSymbol>(handlers::handle_workspace_symbol)
.on::<lsp_types::request::OnTypeFormatting>(handlers::handle_on_type_formatting)
.on::<lsp_types::request::DocumentSymbolRequest>(handlers::handle_document_symbol)
- .on::<lsp_types::request::WorkspaceSymbol>(handlers::handle_workspace_symbol)
.on::<lsp_types::request::GotoDefinition>(handlers::handle_goto_definition)
+ .on::<lsp_types::request::GotoDeclaration>(handlers::handle_goto_declaration)
.on::<lsp_types::request::GotoImplementation>(handlers::handle_goto_implementation)
.on::<lsp_types::request::GotoTypeDefinition>(handlers::handle_goto_type_definition)
.on::<lsp_types::request::Completion>(handlers::handle_completion)
.on::<lsp_types::request::Rename>(handlers::handle_rename)
.on::<lsp_types::request::References>(handlers::handle_references)
.on::<lsp_types::request::Formatting>(handlers::handle_formatting)
+ .on::<lsp_types::request::RangeFormatting>(handlers::handle_range_formatting)
.on::<lsp_types::request::DocumentHighlightRequest>(handlers::handle_document_highlight)
.on::<lsp_types::request::CallHierarchyPrepare>(handlers::handle_call_hierarchy_prepare)
.on::<lsp_types::request::CallHierarchyIncomingCalls>(
this.cancel(id);
Ok(())
})?
+ .on::<lsp_types::notification::WorkDoneProgressCancel>(|_this, _params| {
+ // Just ignore this. It is OK to continue sending progress
+ // notifications for this token, as the client can't know when
+ // we accepted the notification.
+ Ok(())
+ })?
.on::<lsp_types::notification::DidOpenTextDocument>(|this, params| {
if let Ok(path) = from_proto::vfs_path(¶ms.text_document.uri) {
if this
.mem_docs
.insert(path.clone(), DocumentData::new(params.text_document.version))
- .is_some()
+ .is_err()
{
- log::error!("duplicate DidOpenTextDocument: {}", path)
+ tracing::error!("duplicate DidOpenTextDocument: {}", path)
}
- let changed = this
- .vfs
+ this.vfs
.write()
.0
.set_file_contents(path, Some(params.text_document.text.into_bytes()));
-
- // If the VFS contents are unchanged, update diagnostics, since `handle_event`
- // won't see any changes. This avoids missing diagnostics when opening a file.
- //
- // If the file *was* changed, `handle_event` will already recompute and send
- // diagnostics. We can't do it here, since the *current* file contents might be
- // unset in salsa, since the VFS change hasn't been applied to the database yet.
- if !changed {
- this.maybe_update_diagnostics();
- }
}
Ok(())
})?
.on::<lsp_types::notification::DidChangeTextDocument>(|this, params| {
if let Ok(path) = from_proto::vfs_path(¶ms.text_document.uri) {
- let doc = match this.mem_docs.get_mut(&path) {
- Some(doc) => doc,
+ match this.mem_docs.get_mut(&path) {
+ Some(doc) => {
+ // The version passed in DidChangeTextDocument is the version after all edits are applied
+ // so we should apply it before the vfs is notified.
+ doc.version = params.text_document.version;
+ }
None => {
- log::error!("expected DidChangeTextDocument: {}", path);
+ tracing::error!("unexpected DidChangeTextDocument: {}; send DidOpenTextDocument first", path);
return Ok(());
}
};
+
let vfs = &mut this.vfs.write().0;
let file_id = vfs.file_id(&path).unwrap();
let mut text = String::from_utf8(vfs.file_contents(file_id).to_vec()).unwrap();
apply_document_changes(&mut text, params.content_changes);
- // The version passed in DidChangeTextDocument is the version after all edits are applied
- // so we should apply it before the vfs is notified.
- doc.version = params.text_document.version;
-
- vfs.set_file_contents(path.clone(), Some(text.into_bytes()));
+ vfs.set_file_contents(path, Some(text.into_bytes()));
}
Ok(())
})?
.on::<lsp_types::notification::DidCloseTextDocument>(|this, params| {
- let mut version = None;
if let Ok(path) = from_proto::vfs_path(¶ms.text_document.uri) {
- match this.mem_docs.remove(&path) {
- Some(doc) => version = Some(doc.version),
- None => log::error!("orphan DidCloseTextDocument: {}", path),
+ if this.mem_docs.remove(&path).is_err() {
+ tracing::error!("orphan DidCloseTextDocument: {}", path);
}
this.semantic_tokens_cache.lock().remove(¶ms.text_document.uri);
this.loader.handle.invalidate(path.to_path_buf());
}
}
-
- // Clear the diagnostics for the previously known version of the file.
- // This prevents stale "cargo check" diagnostics if the file is
- // closed, "cargo check" is run and then the file is reopened.
- this.send_notification::<lsp_types::notification::PublishDiagnostics>(
- lsp_types::PublishDiagnosticsParams {
- uri: params.text_document.uri,
- diagnostics: Vec::new(),
- version,
- },
- );
Ok(())
})?
.on::<lsp_types::notification::DidSaveTextDocument>(|this, params| {
flycheck.update();
}
if let Ok(abs_path) = from_proto::abs_path(¶ms.text_document.uri) {
- this.maybe_refresh(&[(abs_path, ChangeKind::Modify)]);
+ if reload::should_refresh_for_change(&abs_path, ChangeKind::Modify) {
+ this.fetch_workspaces_queue.request_op();
+ }
}
Ok(())
})?
}],
},
|this, resp| {
- log::debug!("config update response: '{:?}", resp);
- let Response { error, result, .. } = resp;
+ tracing::debug!("config update response: '{:?}", resp);
+ let lsp_server::Response { error, result, .. } = resp;
match (error, result) {
(Some(err), _) => {
- log::error!("failed to fetch the server settings: {:?}", err)
+ tracing::error!("failed to fetch the server settings: {:?}", err)
}
(None, Some(mut configs)) => {
if let Some(json) = configs.get_mut(0) {
this.update_configuration(config);
}
}
- (None, None) => log::error!(
+ (None, None) => tracing::error!(
"received empty server settings response from the client"
),
}
},
);
- return Ok(());
+ Ok(())
})?
.on::<lsp_types::notification::DidChangeWatchedFiles>(|this, params| {
for change in params.changes {
.finish();
Ok(())
}
- fn update_file_notifications_on_threadpool(&mut self) {
- self.maybe_update_diagnostics();
- self.task_pool.handle.spawn_with_sender({
- let snap = self.snapshot();
- move |sender| {
- snap.analysis
- .prime_caches(|progress| {
- sender.send(Task::PrimeCaches(progress)).unwrap();
- })
- .unwrap_or_else(|_: Canceled| {
- // Pretend that we're done, so that the progress bar is removed. Otherwise
- // the editor may complain about it already existing.
- sender.send(Task::PrimeCaches(PrimeCachesProgress::Finished)).unwrap()
- });
- }
- });
- }
- fn maybe_update_diagnostics(&mut self) {
+
+ fn update_diagnostics(&mut self) {
let subscriptions = self
.mem_docs
- .keys()
- .map(|path| self.vfs.read().0.file_id(&path).unwrap())
+ .iter()
+ .map(|path| self.vfs.read().0.file_id(path).unwrap())
.collect::<Vec<_>>();
- log::trace!("updating notifications for {:?}", subscriptions);
- if self.config.publish_diagnostics() {
- let snapshot = self.snapshot();
- self.task_pool.handle.spawn(move || {
- let diagnostics = subscriptions
- .into_iter()
- .filter_map(|file_id| {
- handlers::publish_diagnostics(&snapshot, file_id)
- .map_err(|err| {
- if !is_canceled(&*err) {
- log::error!("failed to compute diagnostics: {:?}", err);
- }
- ()
- })
- .ok()
- .map(|diags| (file_id, diags))
- })
- .collect::<Vec<_>>();
- Task::Diagnostics(diagnostics)
- })
- }
+ tracing::trace!("updating notifications for {:?}", subscriptions);
+
+ let snapshot = self.snapshot();
+ self.task_pool.handle.spawn(move || {
+ let diagnostics = subscriptions
+ .into_iter()
+ .filter_map(|file_id| {
+ handlers::publish_diagnostics(&snapshot, file_id)
+ .map_err(|err| {
+ if !is_cancelled(&*err) {
+ tracing::error!("failed to compute diagnostics: {:?}", err);
+ }
+ })
+ .ok()
+ .map(|diags| (file_id, diags))
+ })
+ .collect::<Vec<_>>();
+ Task::Diagnostics(diagnostics)
+ })
}
}