1 //! The main loop of `rust-analyzer` responsible for dispatching LSP
2 //! requests/replies and notifications back to the client.
6 time::{Duration, Instant},
9 use always_assert::always;
10 use crossbeam_channel::{select, Receiver};
11 use ide_db::base_db::{SourceDatabaseExt, VfsPath};
12 use lsp_server::{Connection, Notification, Request};
13 use lsp_types::notification::Notification as _;
14 use vfs::{ChangeKind, FileId};
18 dispatch::{NotificationDispatcher, RequestDispatcher},
20 global_state::{file_id_to_url, url_to_file_id, GlobalState},
22 lsp_utils::{apply_document_changes, is_cancelled, notification_is, Progress},
23 mem_docs::DocumentData,
24 reload::{self, BuildDataProgress, ProjectWorkspaceProgress},
// Server entry point: logs the initial config, bumps the main thread's
// scheduling priority on Windows (rationale in the comment below), then
// builds a `GlobalState` and hands control to its `run` event loop.
//
// NOTE(review): this is an elided listing — original line numbers are
// embedded and several lines are missing; comments describe only what is
// visible here.
28 pub fn main_loop(config: Config, connection: Connection) -> Result<()> {
29 tracing::info!("initial config: {:#?}", config);
31 // Windows scheduler implements priority boosts: if thread waits for an
32 // event (like a condvar), and event fires, priority of the thread is
33 // temporary bumped. This optimization backfires in our case: each time the
34 // `main_loop` schedules a task to run on a threadpool, the worker threads
35 // gets a higher priority, and (on a machine with fewer cores) displaces the
36 // main loop! We work-around this by marking the main loop as a
37 // higher-priority thread.
39 // https://docs.microsoft.com/en-us/windows/win32/procthread/scheduling-priorities
40 // https://docs.microsoft.com/en-us/windows/win32/procthread/priority-boosts
41 // https://github.com/rust-analyzer/rust-analyzer/issues/2835
// NOTE(review): the enclosing `#[cfg(windows)]`/`unsafe` block is on elided
// lines — confirm against the full source.
44 use winapi::um::processthreadsapi::*;
45 let thread = GetCurrentThread();
// 1 == THREAD_PRIORITY_ABOVE_NORMAL per the Windows API docs linked above.
46 let thread_priority_above_normal = 1;
47 SetThreadPriority(thread, thread_priority_above_normal);
// Ownership of both channel halves moves into the global state / run loop.
50 GlobalState::new(connection.sender, config).run(connection.receiver)
// Variants of the `Event` enum (the enum declaration line, and apparently a
// `Task(...)` variant used below in `handle_event`, are on elided lines).
// An `Event` is one unit of work for a single turn of the main loop.
// Message received from the LSP client over the connection.
54 Lsp(lsp_server::Message),
// Message from the VFS loader (file contents / load progress).
56 Vfs(vfs::loader::Message),
// Message from a running `cargo check` (flycheck) job.
57 Flycheck(flycheck::Message),
// A `Task` is a result produced on the task pool and routed back to the
// main loop (see `Event::Task` handling in `handle_event`).
61 pub(crate) enum Task {
// A finished LSP response, ready to be sent to the client.
62 Response(lsp_server::Response),
// Freshly computed native diagnostics, per file.
63 Diagnostics(Vec<(FileId, Vec<lsp_types::Diagnostic>)>),
// Progress of the background cache-priming (indexing) job.
64 PrimeCaches(PrimeCachesProgress),
// Progress of `cargo metadata`-style workspace discovery.
65 FetchWorkspace(ProjectWorkspaceProgress),
// Progress of build-script (build data) loading.
66 FetchBuildData(BuildDataProgress),
// Progress reports for the parallel cache-priming job. A `Begin` variant is
// referenced in `handle_event` but its declaration (original line 71) is
// elided from this listing.
70 pub(crate) enum PrimeCachesProgress {
// Periodic report with crates done/total and the crates currently indexing.
72 Report(ide::ParallelPrimeCachesProgress),
// Job finished; `cancelled` is true if priming was interrupted (it is then
// re-requested — see the End handling in `handle_event`).
73 End { cancelled: bool },
76 impl fmt::Debug for Event {
77 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
78 let debug_verbose_not = |not: &Notification, f: &mut fmt::Formatter| {
79 f.debug_struct("Notification").field("method", ¬.method).finish()
83 Event::Lsp(lsp_server::Message::Notification(not)) => {
84 if notification_is::<lsp_types::notification::DidOpenTextDocument>(not)
85 || notification_is::<lsp_types::notification::DidChangeTextDocument>(not)
87 return debug_verbose_not(not, f);
90 Event::Task(Task::Response(resp)) => {
92 .debug_struct("Response")
93 .field("id", &resp.id)
94 .field("error", &resp.error)
100 Event::Lsp(it) => fmt::Debug::fmt(it, f),
101 Event::Task(it) => fmt::Debug::fmt(it, f),
102 Event::Vfs(it) => fmt::Debug::fmt(it, f),
103 Event::Flycheck(it) => fmt::Debug::fmt(it, f),
// Main driver: performs one-time startup work (workspace-discovery warning,
// dynamic `textDocument/didSave` registration, initial workspace fetch),
// then loops over `next_event` until the client sends `exit`.
// Returns Err if the inbox closes without a proper shutdown sequence.
// NOTE(review): elided listing — several lines (closing braces, some fields)
// are missing; comments cover only what is visible.
109 fn run(mut self, inbox: Receiver<lsp_server::Message>) -> Result<()> {
// No linked projects, no detached files, and the user asked to be told
// about a missing Cargo.toml -> surface a discovery error up front.
110 if self.config.linked_projects().is_empty()
111 && self.config.detached_files().is_empty()
112 && self.config.notifications().cargo_toml_not_found
114 self.show_and_log_error("rust-analyzer failed to discover workspace".to_string(), None);
// Dynamically register for save notifications on Rust sources and Cargo
// manifests/lockfiles (include_text=false: we re-read from disk/VFS).
117 if self.config.did_save_text_document_dynamic_registration() {
118 let save_registration_options = lsp_types::TextDocumentSaveRegistrationOptions {
119 include_text: Some(false),
120 text_document_registration_options: lsp_types::TextDocumentRegistrationOptions {
121 document_selector: Some(vec![
122 lsp_types::DocumentFilter {
125 pattern: Some("**/*.rs".into()),
127 lsp_types::DocumentFilter {
130 pattern: Some("**/Cargo.toml".into()),
132 lsp_types::DocumentFilter {
135 pattern: Some("**/Cargo.lock".into()),
141 let registration = lsp_types::Registration {
142 id: "textDocument/didSave".to_string(),
143 method: "textDocument/didSave".to_string(),
144 register_options: Some(serde_json::to_value(save_registration_options).unwrap()),
146 self.send_request::<lsp_types::request::RegisterCapability>(
147 lsp_types::RegistrationParams { registrations: vec![registration] },
// Kick off the initial workspace load before entering the loop.
152 self.fetch_workspaces_queue.request_op("startup".to_string());
153 if let Some(cause) = self.fetch_workspaces_queue.should_start_op() {
154 self.fetch_workspaces(cause);
// Event loop proper; `exit` breaks out (break itself is on an elided line).
157 while let Some(event) = self.next_event(&inbox) {
158 if let Event::Lsp(lsp_server::Message::Notification(not)) = &event {
159 if not.method == lsp_types::notification::Exit::METHOD {
163 self.handle_event(event)?
// Reached only when the inbox closed without `shutdown` + `exit`.
166 Err("client exited without proper shutdown sequence".into())
// Blocks until the next event from any of the four sources: LSP inbox,
// task pool, VFS loader, or flycheck. Returns None only when the client
// channel closes; internal channels are expected to outlive the loop,
// hence the `unwrap()`s (a closed worker channel is a bug).
// NOTE(review): the surrounding `select!` braces are on elided lines.
169 fn next_event(&self, inbox: &Receiver<lsp_server::Message>) -> Option<Event> {
171 recv(inbox) -> msg =>
// `ok()` maps a closed client connection to None -> loop exit.
172 msg.ok().map(Event::Lsp),
174 recv(self.task_pool.receiver) -> task =>
175 Some(Event::Task(task.unwrap())),
177 recv(self.loader.receiver) -> task =>
178 Some(Event::Vfs(task.unwrap())),
180 recv(self.flycheck_receiver) -> task =>
181 Some(Event::Flycheck(task.unwrap())),
// One turn of the main loop: dispatch a single `Event`, then run the
// after-event phase (process VFS changes, publish diagnostics, start queued
// ops, report status) and warn if the turn took too long.
//
// FIX(review): two idiom fixes, byte-identical otherwise —
//  * `format!("workspace updated")` -> `"workspace updated".to_string()`
//    (clippy `useless_format`; matches the other `request_op` call sites).
//  * `format!("{}", config)` -> `config.to_string()` (same Display path).
// NOTE(review): elided listing; comments cover only visible lines.
185 fn handle_event(&mut self, event: Event) -> Result<()> {
186 let loop_start = Instant::now();
187 // NOTE: don't count blocking select! call as a loop-turn time
188 let _p = profile::span("GlobalState::handle_event");
190 tracing::debug!("handle_event({:?})", event);
// Surface task-pool backlog in the logs; helps diagnose slow turns.
191 let task_queue_len = self.task_pool.handle.len();
192 if task_queue_len > 0 {
193 tracing::info!("task queue len: {}", task_queue_len);
// Snapshot quiescence before the event so we can detect the transition.
196 let was_quiescent = self.is_quiescent();
198 Event::Lsp(msg) => match msg {
199 lsp_server::Message::Request(req) => self.on_request(loop_start, req)?,
200 lsp_server::Message::Notification(not) => {
201 self.on_notification(not)?;
203 lsp_server::Message::Response(resp) => self.complete_request(resp),
205 Event::Task(mut task) => {
206 let _p = profile::span("GlobalState::handle_event/task");
// Prime-cache reports are buffered and coalesced; everything else is
// handled inline.
207 let mut prime_caches_progress = Vec::new();
210 Task::Response(response) => self.respond(response),
211 Task::Diagnostics(diagnostics_per_file) => {
212 for (file_id, diagnostics) in diagnostics_per_file {
213 self.diagnostics.set_native_diagnostics(file_id, diagnostics)
216 Task::PrimeCaches(progress) => match progress {
217 PrimeCachesProgress::Begin => prime_caches_progress.push(progress),
218 PrimeCachesProgress::Report(_) => {
219 match prime_caches_progress.last_mut() {
220 Some(last @ PrimeCachesProgress::Report(_)) => {
221 // Coalesce subsequent update events.
224 _ => prime_caches_progress.push(progress),
227 PrimeCachesProgress::End { .. } => prime_caches_progress.push(progress),
228 Task::FetchWorkspace(progress) => {
230 let (state, msg) = match progress {
231 ProjectWorkspaceProgress::Begin => (Progress::Begin, None),
232 ProjectWorkspaceProgress::Report(msg) => {
233 (Progress::Report, Some(msg))
235 ProjectWorkspaceProgress::End(workspaces) => {
236 self.fetch_workspaces_queue.op_completed(workspaces);
// Pointer equality tells us whether switch_workspaces replaced the set.
238 let old = Arc::clone(&self.workspaces);
239 self.switch_workspaces("fetched workspace".to_string());
240 let workspaces_updated = !Arc::ptr_eq(&old, &self.workspaces);
242 if self.config.run_build_scripts() && workspaces_updated {
243 self.fetch_build_data_queue
244 .request_op("workspace updated".to_string());
247 (Progress::End, None)
251 self.report_progress("Fetching", state, msg, None);
253 Task::FetchBuildData(progress) => {
254 let (state, msg) = match progress {
255 BuildDataProgress::Begin => (Some(Progress::Begin), None),
256 BuildDataProgress::Report(msg) => {
257 (Some(Progress::Report), Some(msg))
259 BuildDataProgress::End(build_data_result) => {
260 self.fetch_build_data_queue.op_completed(build_data_result);
262 self.switch_workspaces("fetched build data".to_string());
264 (Some(Progress::End), None)
268 if let Some(state) = state {
269 self.report_progress("Loading", state, msg, None);
274 // Coalesce multiple task events into one loop turn
275 task = match self.task_pool.receiver.try_recv() {
// Flush the buffered prime-cache reports as "Indexing" progress.
281 for progress in prime_caches_progress {
282 let (state, message, fraction);
284 PrimeCachesProgress::Begin => {
285 state = Progress::Begin;
289 PrimeCachesProgress::Report(report) => {
290 state = Progress::Report;
292 message = match &report.crates_currently_indexing[..] {
293 [crate_name] => Some(format!(
295 report.crates_done, report.crates_total, crate_name
297 [crate_name, rest @ ..] => Some(format!(
298 "{}/{} ({} + {} more)",
307 fraction = Progress::fraction(report.crates_done, report.crates_total);
309 PrimeCachesProgress::End { cancelled } => {
310 state = Progress::End;
314 self.prime_caches_queue.op_completed(());
// Cancelled priming (e.g. by an incoming edit) is retried later.
316 self.prime_caches_queue
317 .request_op("restart after cancellation".to_string());
322 self.report_progress("Indexing", state, message, Some(fraction));
325 Event::Vfs(mut task) => {
326 let _p = profile::span("GlobalState::handle_event/vfs");
329 vfs::loader::Message::Loaded { files } => {
330 let vfs = &mut self.vfs.write().0;
331 for (path, contents) in files {
332 let path = VfsPath::from(path);
// Open editor documents own their contents; don't clobber them from disk.
333 if !self.mem_docs.contains(&path) {
334 vfs.set_file_contents(path, contents);
338 vfs::loader::Message::Progress { n_total, n_done, config_version } => {
339 always!(config_version <= self.vfs_config_version);
341 self.vfs_progress_config_version = config_version;
342 self.vfs_progress_n_total = n_total;
343 self.vfs_progress_n_done = n_done;
345 let state = if n_done == 0 {
347 } else if n_done < n_total {
350 assert_eq!(n_done, n_total);
353 self.report_progress(
356 Some(format!("{}/{}", n_done, n_total)),
357 Some(Progress::fraction(n_done, n_total)),
361 // Coalesce many VFS event into a single loop turn
362 task = match self.loader.receiver.try_recv() {
368 Event::Flycheck(mut task) => {
369 let _p = profile::span("GlobalState::handle_event/flycheck");
372 flycheck::Message::AddDiagnostic { workspace_root, diagnostic } => {
373 let snap = self.snapshot();
375 crate::diagnostics::to_proto::map_rust_diagnostic_to_lsp(
376 &self.config.diagnostics_map(),
381 for diag in diagnostics {
382 match url_to_file_id(&self.vfs.read().0, &diag.url) {
383 Ok(file_id) => self.diagnostics.add_check_diagnostic(
390 "File with cargo diagnostic not found in VFS: {}",
398 flycheck::Message::Progress { id, progress } => {
399 let (state, message) = match progress {
400 flycheck::Progress::DidStart => {
401 self.diagnostics.clear_check();
402 (Progress::Begin, None)
404 flycheck::Progress::DidCheckCrate(target) => {
405 (Progress::Report, Some(target))
407 flycheck::Progress::DidCancel => (Progress::End, None),
408 flycheck::Progress::DidFinish(result) => {
409 if let Err(err) = result {
410 self.show_and_log_error(
411 "cargo check failed".to_string(),
412 Some(err.to_string()),
415 (Progress::End, None)
419 // When we're running multiple flychecks, we have to include a disambiguator in
420 // the title, or the editor complains. Note that this is a user-facing string.
421 let title = if self.flycheck.len() == 1 {
422 match self.config.flycheck() {
423 Some(config) => config.to_string(),
424 None => "cargo check".to_string(),
427 format!("cargo check (#{})", id + 1)
429 self.report_progress(&title, state, message, None);
432 // Coalesce many flycheck updates into a single loop turn
433 task = match self.flycheck_receiver.try_recv() {
// --- After-event phase: react to accumulated state changes. ---
441 let state_changed = self.process_changes();
442 let memdocs_added_or_removed = self.mem_docs.take_changes();
444 if self.is_quiescent() {
446 for flycheck in &self.flycheck {
449 if self.config.prefill_caches() {
450 self.prime_caches_queue.request_op("became quiescent".to_string());
454 if !was_quiescent || state_changed {
455 // Refresh semantic tokens if the client supports it.
456 if self.config.semantic_tokens_refresh() {
457 self.semantic_tokens_cache.lock().clear();
458 self.send_request::<lsp_types::request::SemanticTokensRefresh>((), |_, _| ());
461 // Refresh code lens if the client supports it.
462 if self.config.code_lens_refresh() {
463 self.send_request::<lsp_types::request::CodeLensRefresh>((), |_, _| ());
467 if !was_quiescent || state_changed || memdocs_added_or_removed {
468 if self.config.publish_diagnostics() {
469 self.update_diagnostics()
474 if let Some(diagnostic_changes) = self.diagnostics.take_changes() {
475 for file_id in diagnostic_changes {
476 let db = self.analysis_host.raw_database();
477 let source_root = db.file_source_root(file_id);
478 if db.source_root(source_root).is_library {
479 // Only publish diagnostics for files in the workspace, not from crates.io deps
481 // While theoretically these should never have errors, we have quite a few false
482 // positives particularly in the stdlib, and those diagnostics would stay around
483 // forever if we emitted them here.
487 let url = file_id_to_url(&self.vfs.read().0, file_id);
488 let diagnostics = self.diagnostics.diagnostics_for(file_id).cloned().collect();
// Version comes from the in-memory document if the file is open.
489 let version = from_proto::vfs_path(&url)
490 .map(|path| self.mem_docs.get(&path).map(|it| it.version))
491 .unwrap_or_default();
493 self.send_notification::<lsp_types::notification::PublishDiagnostics>(
494 lsp_types::PublishDiagnosticsParams { uri: url, diagnostics, version },
// Start queued background operations, respecting dependencies between them.
499 if self.config.cargo_autoreload() {
500 if let Some(cause) = self.fetch_workspaces_queue.should_start_op() {
501 self.fetch_workspaces(cause);
505 if !self.fetch_workspaces_queue.op_in_progress() {
506 if let Some(cause) = self.fetch_build_data_queue.should_start_op() {
507 self.fetch_build_data(cause);
511 if let Some(cause) = self.prime_caches_queue.should_start_op() {
512 tracing::debug!(%cause, "will prime caches");
513 let num_worker_threads = self.config.prime_caches_num_threads();
515 self.task_pool.handle.spawn_with_sender({
516 let analysis = self.snapshot().analysis;
518 sender.send(Task::PrimeCaches(PrimeCachesProgress::Begin)).unwrap();
519 let res = analysis.parallel_prime_caches(num_worker_threads, |progress| {
520 let report = PrimeCachesProgress::Report(progress);
521 sender.send(Task::PrimeCaches(report)).unwrap();
524 .send(Task::PrimeCaches(PrimeCachesProgress::End {
525 cancelled: res.is_err(),
// Report server status only when it actually changed.
532 let status = self.current_status();
533 if self.last_reported_status.as_ref() != Some(&status) {
534 self.last_reported_status = Some(status.clone());
536 if let (lsp_ext::Health::Error, Some(message)) = (status.health, &status.message) {
537 self.show_message(lsp_types::MessageType::ERROR, message.clone());
540 if self.config.server_status_notification() {
541 self.send_notification::<lsp_ext::ServerStatusNotification>(status);
// A long turn while quiescent indicates a responsiveness bug worth reporting.
545 let loop_duration = loop_start.elapsed();
546 if loop_duration > Duration::from_millis(100) && was_quiescent {
547 tracing::warn!("overly long loop turn: {:?}", loop_duration);
548 self.poke_rust_analyzer_developer(format!(
549 "overly long loop turn: {:?}",
// Handles a single LSP request: registers it (for latency tracking /
// cancellation), short-circuits with an error after `shutdown` or while the
// initial workspace load is still in flight, then routes through the
// `RequestDispatcher` table. NOTE(review): elided listing; `on_sync_mut`
// appears to run on the main thread with `&mut` state, `on_sync`/`on`
// appear snapshot-based — confirm against `dispatch.rs`.
556 fn on_request(&mut self, request_received: Instant, req: Request) -> Result<()> {
557 self.register_request(&req, request_received);
// Per the LSP spec, requests after `shutdown` get InvalidRequest.
559 if self.shutdown_requested {
560 self.respond(lsp_server::Response::new_err(
562 lsp_server::ErrorCode::InvalidRequest as i32,
563 "Shutdown already requested.".to_owned(),
569 // Avoid flashing a bunch of unresolved references during initial load.
570 if self.workspaces.is_empty() && !self.is_quiescent() {
571 self.respond(lsp_server::Response::new_err(
573 lsp_server::ErrorCode::ContentModified as i32,
574 "waiting for cargo metadata or cargo check".to_owned(),
579 RequestDispatcher { req: Some(req), global_state: self }
580 .on_sync_mut::<lsp_types::request::Shutdown>(|s, ()| {
581 s.shutdown_requested = true;
584 .on_sync_mut::<lsp_ext::ReloadWorkspace>(handlers::handle_workspace_reload)?
585 .on_sync_mut::<lsp_ext::MemoryUsage>(handlers::handle_memory_usage)?
586 .on_sync_mut::<lsp_ext::ShuffleCrateGraph>(handlers::handle_shuffle_crate_graph)?
587 .on_sync::<lsp_ext::JoinLines>(handlers::handle_join_lines)?
588 .on_sync::<lsp_ext::OnEnter>(handlers::handle_on_enter)?
589 .on_sync::<lsp_types::request::SelectionRangeRequest>(handlers::handle_selection_range)?
590 .on_sync::<lsp_ext::MatchingBrace>(handlers::handle_matching_brace)?
591 .on::<lsp_ext::AnalyzerStatus>(handlers::handle_analyzer_status)
592 .on::<lsp_ext::SyntaxTree>(handlers::handle_syntax_tree)
593 .on::<lsp_ext::ViewHir>(handlers::handle_view_hir)
594 .on::<lsp_ext::ViewFileText>(handlers::handle_view_file_text)
595 .on::<lsp_ext::ViewCrateGraph>(handlers::handle_view_crate_graph)
596 .on::<lsp_ext::ViewItemTree>(handlers::handle_view_item_tree)
597 .on::<lsp_ext::ExpandMacro>(handlers::handle_expand_macro)
598 .on::<lsp_ext::ParentModule>(handlers::handle_parent_module)
599 .on::<lsp_ext::Runnables>(handlers::handle_runnables)
600 .on::<lsp_ext::RelatedTests>(handlers::handle_related_tests)
601 .on::<lsp_ext::CodeActionRequest>(handlers::handle_code_action)
602 .on::<lsp_ext::CodeActionResolveRequest>(handlers::handle_code_action_resolve)
603 .on::<lsp_ext::HoverRequest>(handlers::handle_hover)
604 .on::<lsp_ext::ExternalDocs>(handlers::handle_open_docs)
605 .on::<lsp_ext::OpenCargoToml>(handlers::handle_open_cargo_toml)
606 .on::<lsp_ext::MoveItem>(handlers::handle_move_item)
607 .on::<lsp_ext::WorkspaceSymbol>(handlers::handle_workspace_symbol)
608 .on::<lsp_types::request::OnTypeFormatting>(handlers::handle_on_type_formatting)
609 .on::<lsp_types::request::DocumentSymbolRequest>(handlers::handle_document_symbol)
610 .on::<lsp_types::request::GotoDefinition>(handlers::handle_goto_definition)
611 .on::<lsp_types::request::GotoDeclaration>(handlers::handle_goto_declaration)
612 .on::<lsp_types::request::GotoImplementation>(handlers::handle_goto_implementation)
613 .on::<lsp_types::request::GotoTypeDefinition>(handlers::handle_goto_type_definition)
614 .on::<lsp_types::request::InlayHintRequest>(handlers::handle_inlay_hints)
615 .on::<lsp_types::request::Completion>(handlers::handle_completion)
616 .on::<lsp_types::request::ResolveCompletionItem>(handlers::handle_completion_resolve)
617 .on::<lsp_types::request::CodeLensRequest>(handlers::handle_code_lens)
618 .on::<lsp_types::request::CodeLensResolve>(handlers::handle_code_lens_resolve)
619 .on::<lsp_types::request::FoldingRangeRequest>(handlers::handle_folding_range)
620 .on::<lsp_types::request::SignatureHelpRequest>(handlers::handle_signature_help)
621 .on::<lsp_types::request::PrepareRenameRequest>(handlers::handle_prepare_rename)
622 .on::<lsp_types::request::Rename>(handlers::handle_rename)
623 .on::<lsp_types::request::References>(handlers::handle_references)
624 .on::<lsp_types::request::Formatting>(handlers::handle_formatting)
625 .on::<lsp_types::request::RangeFormatting>(handlers::handle_range_formatting)
626 .on::<lsp_types::request::DocumentHighlightRequest>(handlers::handle_document_highlight)
627 .on::<lsp_types::request::CallHierarchyPrepare>(handlers::handle_call_hierarchy_prepare)
628 .on::<lsp_types::request::CallHierarchyIncomingCalls>(
629 handlers::handle_call_hierarchy_incoming,
631 .on::<lsp_types::request::CallHierarchyOutgoingCalls>(
632 handlers::handle_call_hierarchy_outgoing,
634 .on::<lsp_types::request::SemanticTokensFullRequest>(
635 handlers::handle_semantic_tokens_full,
637 .on::<lsp_types::request::SemanticTokensFullDeltaRequest>(
638 handlers::handle_semantic_tokens_full_delta,
640 .on::<lsp_types::request::SemanticTokensRangeRequest>(
641 handlers::handle_semantic_tokens_range,
643 .on::<lsp_types::request::WillRenameFiles>(handlers::handle_will_rename_files)
644 .on::<lsp_ext::Ssr>(handlers::handle_ssr)
648 fn on_notification(&mut self, not: Notification) -> Result<()> {
649 NotificationDispatcher { not: Some(not), global_state: self }
650 .on::<lsp_types::notification::Cancel>(|this, params| {
651 let id: lsp_server::RequestId = match params.id {
652 lsp_types::NumberOrString::Number(id) => id.into(),
653 lsp_types::NumberOrString::String(id) => id.into(),
658 .on::<lsp_types::notification::WorkDoneProgressCancel>(|_this, _params| {
659 // Just ignore this. It is OK to continue sending progress
660 // notifications for this token, as the client can't know when
661 // we accepted notification.
664 .on::<lsp_types::notification::DidOpenTextDocument>(|this, params| {
665 if let Ok(path) = from_proto::vfs_path(¶ms.text_document.uri) {
668 .insert(path.clone(), DocumentData::new(params.text_document.version))
671 tracing::error!("duplicate DidOpenTextDocument: {}", path)
676 .set_file_contents(path, Some(params.text_document.text.into_bytes()));
680 .on::<lsp_types::notification::DidChangeTextDocument>(|this, params| {
681 if let Ok(path) = from_proto::vfs_path(¶ms.text_document.uri) {
682 match this.mem_docs.get_mut(&path) {
684 // The version passed in DidChangeTextDocument is the version after all edits are applied
685 // so we should apply it before the vfs is notified.
686 doc.version = params.text_document.version;
689 tracing::error!("unexpected DidChangeTextDocument: {}; send DidOpenTextDocument first", path);
694 let vfs = &mut this.vfs.write().0;
695 let file_id = vfs.file_id(&path).unwrap();
696 let mut text = String::from_utf8(vfs.file_contents(file_id).to_vec()).unwrap();
697 apply_document_changes(&mut text, params.content_changes);
699 vfs.set_file_contents(path, Some(text.into_bytes()));
703 .on::<lsp_types::notification::DidCloseTextDocument>(|this, params| {
704 if let Ok(path) = from_proto::vfs_path(¶ms.text_document.uri) {
705 if this.mem_docs.remove(&path).is_err() {
706 tracing::error!("orphan DidCloseTextDocument: {}", path);
709 this.semantic_tokens_cache.lock().remove(¶ms.text_document.uri);
711 if let Some(path) = path.as_path() {
712 this.loader.handle.invalidate(path.to_path_buf());
717 .on::<lsp_types::notification::DidSaveTextDocument>(|this, params| {
718 for flycheck in &this.flycheck {
721 if let Ok(abs_path) = from_proto::abs_path(¶ms.text_document.uri) {
722 if reload::should_refresh_for_change(&abs_path, ChangeKind::Modify) {
723 this.fetch_workspaces_queue.request_op(format!("DidSaveTextDocument {}", abs_path.display()));
728 .on::<lsp_types::notification::DidChangeConfiguration>(|this, _params| {
729 // As stated in https://github.com/microsoft/language-server-protocol/issues/676,
730 // this notification's parameters should be ignored and the actual config queried separately.
731 this.send_request::<lsp_types::request::WorkspaceConfiguration>(
732 lsp_types::ConfigurationParams {
733 items: vec![lsp_types::ConfigurationItem {
735 section: Some("rust-analyzer".to_string()),
739 tracing::debug!("config update response: '{:?}", resp);
740 let lsp_server::Response { error, result, .. } = resp;
742 match (error, result) {
744 tracing::error!("failed to fetch the server settings: {:?}", err)
746 (None, Some(mut configs)) => {
747 if let Some(json) = configs.get_mut(0) {
748 // Note that json can be null according to the spec if the client can't
749 // provide a configuration. This is handled in Config::update below.
750 let mut config = Config::clone(&*this.config);
751 if let Err(error) = config.update(json.take()) {
752 this.show_message(lsp_types::MessageType::WARNING, error.to_string());
754 this.update_configuration(config);
757 (None, None) => tracing::error!(
758 "received empty server settings response from the client"
766 .on::<lsp_types::notification::DidChangeWatchedFiles>(|this, params| {
767 for change in params.changes {
768 if let Ok(path) = from_proto::abs_path(&change.uri) {
769 this.loader.handle.invalidate(path);
// Recomputes native diagnostics for the subscribed files on the task pool.
// The result comes back to the main loop as `Task::Diagnostics`.
// NOTE(review): elided listing — the definition continues past the end of
// this excerpt; the subscription source (presumably open mem-docs) is on
// elided lines, so it is not documented here.
778 fn update_diagnostics(&mut self) {
779 let subscriptions = self
// `unwrap()`: subscribed paths are expected to already exist in the VFS.
782 .map(|path| self.vfs.read().0.file_id(path).unwrap())
783 .collect::<Vec<_>>();
785 tracing::trace!("updating notifications for {:?}", subscriptions);
// Move a snapshot into the worker so analysis runs off the main thread.
787 let snapshot = self.snapshot();
788 self.task_pool.handle.spawn(move || {
789 let diagnostics = subscriptions
791 .filter_map(|file_id| {
792 handlers::publish_diagnostics(&snapshot, file_id)
// Cancellation is routine (an edit arrived); only log real failures.
794 if !is_cancelled(&*err) {
795 tracing::error!("failed to compute diagnostics: {:?}", err);
799 .map(|diags| (file_id, diags))
801 .collect::<Vec<_>>();
802 Task::Diagnostics(diagnostics)