1 //! The main loop of `rust-analyzer` responsible for dispatching LSP
2 //! requests/replies and notifications back to the client.
7 time::{Duration, Instant},
10 use always_assert::always;
11 use crossbeam_channel::{select, Receiver};
12 use flycheck::FlycheckHandle;
13 use ide_db::base_db::{SourceDatabase, SourceDatabaseExt, VfsPath};
14 use itertools::Itertools;
15 use lsp_server::{Connection, Notification, Request};
16 use lsp_types::notification::Notification as _;
17 use vfs::{ChangeKind, FileId};
21 dispatch::{NotificationDispatcher, RequestDispatcher},
23 global_state::{file_id_to_url, url_to_file_id, GlobalState},
25 lsp_utils::{apply_document_changes, notification_is, Progress},
26 mem_docs::DocumentData,
27 reload::{self, BuildDataProgress, ProjectWorkspaceProgress},
/// Server entry point: logs the effective configuration, raises the main
/// thread's scheduling priority on Windows, then constructs a `GlobalState`
/// around the connection's sender and blocks inside `GlobalState::run` until
/// the client disconnects or completes the shutdown sequence.
31 pub fn main_loop(config: Config, connection: Connection) -> Result<()> {
32 tracing::info!("initial config: {:#?}", config);
34 // Windows scheduler implements priority boosts: if thread waits for an
35 // event (like a condvar), and event fires, priority of the thread is
36 // temporary bumped. This optimization backfires in our case: each time the
37 // `main_loop` schedules a task to run on a threadpool, the worker threads
38 // gets a higher priority, and (on a machine with fewer cores) displaces the
39 // main loop! We work-around this by marking the main loop as a
40 // higher-priority thread.
42 // https://docs.microsoft.com/en-us/windows/win32/procthread/scheduling-priorities
43 // https://docs.microsoft.com/en-us/windows/win32/procthread/priority-boosts
44 // https://github.com/rust-lang/rust-analyzer/issues/2835
// NOTE(review): the `#[cfg(windows)]`/`unsafe` wrapper around the winapi calls
// below appears to be elided from this view — confirm against the full file.
47 use winapi::um::processthreadsapi::*;
48 let thread = GetCurrentThread();
// 1 == THREAD_PRIORITY_ABOVE_NORMAL; spelled as a literal rather than
// importing the winapi constant.
49 let thread_priority_above_normal = 1;
50 SetThreadPriority(thread, thread_priority_above_normal);
// Hand the sender half of the connection to the state; drive the loop on the
// receiver half. This call blocks for the lifetime of the server.
53 GlobalState::new(connection.sender, config).run(connection.receiver)
// `Event` variants (the enum declaration line is elided from this view).
// These are the input sources the main loop multiplexes over in `next_event`:
// LSP messages from the client, VFS loader messages, and flycheck messages.
// A `Task` variant (original line 58) also appears elided here.
57 Lsp(lsp_server::Message),
59 Vfs(vfs::loader::Message),
60 Flycheck(flycheck::Message),
/// Units of work sent from background threads back to the main loop via the
/// task pool channel; consumed in `handle_task`.
64 pub(crate) enum Task {
// A completed response ready to be forwarded to the client.
65 Response(lsp_server::Response),
// A request to run through `on_request` again (see `handle_task`).
66 Retry(lsp_server::Request),
// Freshly computed native diagnostics, grouped per file.
67 Diagnostics(Vec<(FileId, Vec<lsp_types::Diagnostic>)>),
// Progress of the background cache-priming task.
68 PrimeCaches(PrimeCachesProgress),
// Progress of workspace discovery (cargo metadata etc.).
69 FetchWorkspace(ProjectWorkspaceProgress),
// Progress of build-script execution / build-data loading.
70 FetchBuildData(BuildDataProgress),
/// Progress states reported by the cache-priming background task.
/// (A `Begin` variant, original line 75, appears elided from this view.)
74 pub(crate) enum PrimeCachesProgress {
76 Report(ide::ParallelPrimeCachesProgress),
// `cancelled` is true when priming was interrupted; `handle_event` uses it to
// re-queue a "restart after cancellation" op.
77 End { cancelled: bool },
// Manual `Debug` for `Event`: the noisy cases — text-document open/change
// notifications (which carry whole file contents) and task responses — are
// printed in an abbreviated form so traces stay readable; everything else
// defers to the payload's own `Debug`.
80 impl fmt::Debug for Event {
81 fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// Prints only the notification's method name, never its payload.
// NOTE(review): `¬.method` below looks like a mojibake of `&not.method` —
// confirm against the upstream file.
82 let debug_verbose_not = |not: &Notification, f: &mut fmt::Formatter<'_>| {
83 f.debug_struct("Notification").field("method", ¬.method).finish()
87 Event::Lsp(lsp_server::Message::Notification(not)) => {
88 if notification_is::<lsp_types::notification::DidOpenTextDocument>(not)
89 || notification_is::<lsp_types::notification::DidChangeTextDocument>(not)
91 return debug_verbose_not(not, f);
// Responses: show id and error only, not the (possibly large) result.
94 Event::Task(Task::Response(resp)) => {
96 .debug_struct("Response")
97 .field("id", &resp.id)
98 .field("error", &resp.error)
// Fallback: delegate to the payload's Debug implementation.
104 Event::Lsp(it) => fmt::Debug::fmt(it, f),
105 Event::Task(it) => fmt::Debug::fmt(it, f),
106 Event::Vfs(it) => fmt::Debug::fmt(it, f),
107 Event::Flycheck(it) => fmt::Debug::fmt(it, f),
// Main loop body: performs one-time startup work (warning about an
// undiscovered workspace, dynamic didSave registration, the initial workspace
// fetch), then pumps events until an Exit notification arrives. Returning
// without seeing Exit after shutdown is treated as a protocol error.
113 fn run(mut self, inbox: Receiver<lsp_server::Message>) -> Result<()> {
// No linked projects, no detached files, and the user has not suppressed the
// notification: the workspace discovery failed, tell the user.
114 if self.config.linked_projects().is_empty()
115 && self.config.detached_files().is_empty()
116 && self.config.notifications().cargo_toml_not_found
118 self.show_and_log_error("rust-analyzer failed to discover workspace".to_string(), None);
// Dynamically register for didSave only when the client supports dynamic
// registration; `include_text: Some(false)` because we re-read files from
// disk/VFS rather than using the text the client would send.
121 if self.config.did_save_text_document_dynamic_registration() {
122 let save_registration_options = lsp_types::TextDocumentSaveRegistrationOptions {
123 include_text: Some(false),
124 text_document_registration_options: lsp_types::TextDocumentRegistrationOptions {
125 document_selector: Some(vec![
126 lsp_types::DocumentFilter {
129 pattern: Some("**/*.rs".into()),
131 lsp_types::DocumentFilter {
134 pattern: Some("**/Cargo.toml".into()),
136 lsp_types::DocumentFilter {
139 pattern: Some("**/Cargo.lock".into()),
145 let registration = lsp_types::Registration {
146 id: "textDocument/didSave".to_string(),
147 method: "textDocument/didSave".to_string(),
148 register_options: Some(serde_json::to_value(save_registration_options).unwrap()),
150 self.send_request::<lsp_types::request::RegisterCapability>(
151 lsp_types::RegistrationParams { registrations: vec![registration] },
// Kick off the initial workspace load before entering the event loop.
156 self.fetch_workspaces_queue.request_op("startup".to_string());
157 if let Some(cause) = self.fetch_workspaces_queue.should_start_op() {
158 self.fetch_workspaces(cause);
// Pump events until the channel closes or the client sends Exit.
// (The Exit arm's body — presumably `return Ok(())` — is elided here.)
161 while let Some(event) = self.next_event(&inbox) {
162 if let Event::Lsp(lsp_server::Message::Notification(not)) = &event {
163 if not.method == lsp_types::notification::Exit::METHOD {
167 self.handle_event(event)?
// Channel closed without an Exit notification: improper shutdown.
170 Err("client exited without proper shutdown sequence".into())
// Blocks until any of the four input channels has a message. Returns `None`
// only when the client channel (`inbox`) disconnects; the internal channels
// (task pool, VFS loader, flycheck) are owned by `self`, so a receive error
// there is a bug — hence the `unwrap`s.
173 fn next_event(&self, inbox: &Receiver<lsp_server::Message>) -> Option<Event> {
175 recv(inbox) -> msg =>
176 msg.ok().map(Event::Lsp),
178 recv(self.task_pool.receiver) -> task =>
179 Some(Event::Task(task.unwrap())),
181 recv(self.loader.receiver) -> task =>
182 Some(Event::Vfs(task.unwrap())),
184 recv(self.flycheck_receiver) -> task =>
185 Some(Event::Flycheck(task.unwrap())),
// One turn of the main loop: dispatch the event, then run the "after every
// turn" bookkeeping — process VFS changes, publish diagnostics, start queued
// ops (workspace fetch, build data, cache priming), report status, and warn
// if the turn took suspiciously long.
189 fn handle_event(&mut self, event: Event) -> Result<()> {
190 let loop_start = Instant::now();
191 // NOTE: don't count blocking select! call as a loop-turn time
192 let _p = profile::span("GlobalState::handle_event");
194 tracing::debug!("handle_event({:?})", event);
// Surface backpressure on the task pool in the logs.
195 let task_queue_len = self.task_pool.handle.len();
196 if task_queue_len > 0 {
197 tracing::info!("task queue len: {}", task_queue_len);
// Snapshot quiescence before dispatch so we can detect the transition below.
200 let was_quiescent = self.is_quiescent();
202 Event::Lsp(msg) => match msg {
203 lsp_server::Message::Request(req) => self.on_new_request(loop_start, req),
204 lsp_server::Message::Notification(not) => {
205 self.on_notification(not)?;
207 lsp_server::Message::Response(resp) => self.complete_request(resp),
209 Event::Task(task) => {
210 let _p = profile::span("GlobalState::handle_event/task");
// Prime-cache progress is collected here and reported in one pass below,
// after coalescing, so intermediate reports can be merged.
211 let mut prime_caches_progress = Vec::new();
213 self.handle_task(&mut prime_caches_progress, task);
214 // Coalesce multiple task events into one loop turn
215 while let Ok(task) = self.task_pool.receiver.try_recv() {
216 self.handle_task(&mut prime_caches_progress, task);
// Translate each collected priming event into a $/progress report.
219 for progress in prime_caches_progress {
220 let (state, message, fraction);
222 PrimeCachesProgress::Begin => {
223 state = Progress::Begin;
227 PrimeCachesProgress::Report(report) => {
228 state = Progress::Report;
// Human-readable message: single crate name, or "first (+ N more)".
230 message = match &report.crates_currently_indexing[..] {
231 [crate_name] => Some(format!(
233 report.crates_done, report.crates_total, crate_name
235 [crate_name, rest @ ..] => Some(format!(
236 "{}/{} ({} + {} more)",
245 fraction = Progress::fraction(report.crates_done, report.crates_total);
247 PrimeCachesProgress::End { cancelled } => {
248 state = Progress::End;
252 self.prime_caches_queue.op_completed(());
// A cancelled prime (e.g. due to edits) is re-queued so caches still
// eventually get warmed.
254 self.prime_caches_queue
255 .request_op("restart after cancellation".to_string());
260 self.report_progress("Indexing", state, message, Some(fraction));
263 Event::Vfs(message) => {
264 let _p = profile::span("GlobalState::handle_event/vfs");
265 self.handle_vfs_msg(message);
266 // Coalesce many VFS event into a single loop turn
267 while let Ok(message) = self.loader.receiver.try_recv() {
268 self.handle_vfs_msg(message);
271 Event::Flycheck(message) => {
272 let _p = profile::span("GlobalState::handle_event/flycheck");
273 self.handle_flycheck_msg(message);
274 // Coalesce many flycheck updates into a single loop turn
275 while let Ok(message) = self.flycheck_receiver.try_recv() {
276 self.handle_flycheck_msg(message);
// --- end of dispatch; per-turn bookkeeping from here on ---
281 let state_changed = self.process_changes();
282 let memdocs_added_or_removed = self.mem_docs.take_changes();
284 if self.is_quiescent() {
// Quiescent now, and neither previously quiescent nor with fetches pending:
// the project just finished loading.
285 let became_quiescent = !(was_quiescent
286 || self.fetch_workspaces_queue.op_requested()
287 || self.fetch_build_data_queue.op_requested());
289 if became_quiescent {
290 // Project has loaded properly, kick off initial flycheck
291 self.flycheck.iter().for_each(FlycheckHandle::restart);
292 if self.config.prefill_caches() {
293 self.prime_caches_queue.request_op("became quiescent".to_string());
297 if !was_quiescent || state_changed {
298 // Refresh semantic tokens if the client supports it.
299 if self.config.semantic_tokens_refresh() {
300 self.semantic_tokens_cache.lock().clear();
301 self.send_request::<lsp_types::request::SemanticTokensRefresh>((), |_, _| ());
304 // Refresh code lens if the client supports it.
305 if self.config.code_lens_refresh() {
306 self.send_request::<lsp_types::request::CodeLensRefresh>((), |_, _| ());
310 if !was_quiescent || state_changed || memdocs_added_or_removed {
311 if self.config.publish_diagnostics() {
312 self.update_diagnostics()
// Flush changed diagnostics to the client as publishDiagnostics notifications.
317 if let Some(diagnostic_changes) = self.diagnostics.take_changes() {
318 for file_id in diagnostic_changes {
319 let db = self.analysis_host.raw_database();
320 let source_root = db.file_source_root(file_id);
321 if db.source_root(source_root).is_library {
322 // Only publish diagnostics for files in the workspace, not from crates.io deps
324 // While theoretically these should never have errors, we have quite a few false
325 // positives particularly in the stdlib, and those diagnostics would stay around
326 // forever if we emitted them here.
330 let uri = file_id_to_url(&self.vfs.read().0, file_id);
331 let mut diagnostics =
332 self.diagnostics.diagnostics_for(file_id).cloned().collect::<Vec<_>>();
334 // VSCode assumes diagnostic messages to be non-empty strings, so we need to patch
335 // empty diagnostics. Neither the docs of VSCode nor the LSP spec say whether
336 // diagnostic messages are actually allowed to be empty or not and patching this
337 // in the VSCode client does not work as the assertion happens in the protocol
338 // conversion. So this hack is here to stay, and will be considered a hack
339 // until the LSP decides to state that empty messages are allowed.
341 // See https://github.com/rust-lang/rust-analyzer/issues/11404
342 // See https://github.com/rust-lang/rust-analyzer/issues/13130
343 let patch_empty = |message: &mut String| {
344 if message.is_empty() {
345 *message = " ".to_string();
349 for d in &mut diagnostics {
350 patch_empty(&mut d.message);
351 if let Some(dri) = &mut d.related_information {
353 patch_empty(&mut dri.message);
// Version comes from the in-memory doc when the file is open; defaults to
// None for files not open in the editor.
358 let version = from_proto::vfs_path(&uri)
359 .map(|path| self.mem_docs.get(&path).map(|it| it.version))
360 .unwrap_or_default();
362 self.send_notification::<lsp_types::notification::PublishDiagnostics>(
363 lsp_types::PublishDiagnosticsParams { uri, diagnostics, version },
// Start queued operations: workspace fetch (only with autoreload on), then
// build data (only when no workspace fetch is in flight), then cache priming.
368 if self.config.cargo_autoreload() {
369 if let Some(cause) = self.fetch_workspaces_queue.should_start_op() {
370 self.fetch_workspaces(cause);
374 if !self.fetch_workspaces_queue.op_in_progress() {
375 if let Some(cause) = self.fetch_build_data_queue.should_start_op() {
376 self.fetch_build_data(cause);
380 if let Some(cause) = self.prime_caches_queue.should_start_op() {
381 tracing::debug!(%cause, "will prime caches");
382 let num_worker_threads = self.config.prime_caches_num_threads();
// Spawn priming on the task pool; progress flows back as Task::PrimeCaches
// events through the sender. A cancelled analysis surfaces as Err here.
384 self.task_pool.handle.spawn_with_sender({
385 let analysis = self.snapshot().analysis;
387 sender.send(Task::PrimeCaches(PrimeCachesProgress::Begin)).unwrap();
388 let res = analysis.parallel_prime_caches(num_worker_threads, |progress| {
389 let report = PrimeCachesProgress::Report(progress);
390 sender.send(Task::PrimeCaches(report)).unwrap();
393 .send(Task::PrimeCaches(PrimeCachesProgress::End {
394 cancelled: res.is_err(),
// Report server status only when it actually changed since the last report.
401 let status = self.current_status();
402 if self.last_reported_status.as_ref() != Some(&status) {
403 self.last_reported_status = Some(status.clone());
405 if let (lsp_ext::Health::Error, Some(message)) = (status.health, &status.message) {
406 self.show_message(lsp_types::MessageType::ERROR, message.clone());
409 if self.config.server_status_notification() {
410 self.send_notification::<lsp_ext::ServerStatusNotification>(status);
// A long turn while quiescent indicates a latency bug worth reporting.
414 let loop_duration = loop_start.elapsed();
415 if loop_duration > Duration::from_millis(100) && was_quiescent {
416 tracing::warn!("overly long loop turn: {:?}", loop_duration);
417 self.poke_rust_analyzer_developer(format!(
418 "overly long loop turn: {:?}",
// Applies a single `Task` from the pool. Prime-cache progress is not reported
// here; it is pushed into `prime_caches_progress` so the caller can coalesce
// consecutive reports into one progress notification per loop turn.
425 fn handle_task(&mut self, prime_caches_progress: &mut Vec<PrimeCachesProgress>, task: Task) {
427 Task::Response(response) => self.respond(response),
// Retried requests bypass `on_new_request` (already registered once).
428 Task::Retry(req) => self.on_request(req),
429 Task::Diagnostics(diagnostics_per_file) => {
430 for (file_id, diagnostics) in diagnostics_per_file {
431 self.diagnostics.set_native_diagnostics(file_id, diagnostics)
434 Task::PrimeCaches(progress) => match progress {
435 PrimeCachesProgress::Begin => prime_caches_progress.push(progress),
436 PrimeCachesProgress::Report(_) => {
437 match prime_caches_progress.last_mut() {
438 Some(last @ PrimeCachesProgress::Report(_)) => {
439 // Coalesce subsequent update events.
442 _ => prime_caches_progress.push(progress),
445 PrimeCachesProgress::End { .. } => prime_caches_progress.push(progress),
447 Task::FetchWorkspace(progress) => {
448 let (state, msg) = match progress {
449 ProjectWorkspaceProgress::Begin => (Progress::Begin, None),
450 ProjectWorkspaceProgress::Report(msg) => (Progress::Report, Some(msg)),
451 ProjectWorkspaceProgress::End(workspaces) => {
452 self.fetch_workspaces_queue.op_completed(workspaces);
// `switch_workspaces` replaces `self.workspaces` only on a real change;
// pointer equality on the Arc detects whether it did.
454 let old = Arc::clone(&self.workspaces);
455 self.switch_workspaces("fetched workspace".to_string());
456 let workspaces_updated = !Arc::ptr_eq(&old, &self.workspaces);
458 if self.config.run_build_scripts() && workspaces_updated {
459 self.fetch_build_data_queue.request_op(format!("workspace updated"));
462 (Progress::End, None)
466 self.report_progress("Fetching", state, msg, None);
468 Task::FetchBuildData(progress) => {
469 let (state, msg) = match progress {
470 BuildDataProgress::Begin => (Some(Progress::Begin), None),
471 BuildDataProgress::Report(msg) => (Some(Progress::Report), Some(msg)),
472 BuildDataProgress::End(build_data_result) => {
473 self.fetch_build_data_queue.op_completed(build_data_result);
475 self.switch_workspaces("fetched build data".to_string());
477 (Some(Progress::End), None)
// State is Option here: a variant that reports no progress (elided in this
// view, presumably) yields None and skips the report below.
481 if let Some(state) = state {
482 self.report_progress("Loading", state, msg, None);
// Applies a message from the VFS loader: either newly loaded file contents
// (skipping files that are open in the editor, whose in-memory contents win)
// or a loading-progress update forwarded to the client as "Roots Scanned"
// style progress.
488 fn handle_vfs_msg(&mut self, message: vfs::loader::Message) {
490 vfs::loader::Message::Loaded { files } => {
491 let vfs = &mut self.vfs.write().0;
492 for (path, contents) in files {
493 let path = VfsPath::from(path);
// Open documents are authoritative; don't clobber them with disk contents.
494 if !self.mem_docs.contains(&path) {
495 vfs.set_file_contents(path, contents);
499 vfs::loader::Message::Progress { n_total, n_done, config_version } => {
// Progress must never come from a newer config than we've sent out.
500 always!(config_version <= self.vfs_config_version);
502 self.vfs_progress_config_version = config_version;
503 self.vfs_progress_n_total = n_total;
504 self.vfs_progress_n_done = n_done;
// Begin at 0 done, Report while in flight, End when done == total.
506 let state = if n_done == 0 {
508 } else if n_done < n_total {
511 assert_eq!(n_done, n_total);
514 self.report_progress(
517 Some(format!("{}/{}", n_done, n_total)),
518 Some(Progress::fraction(n_done, n_total)),
// Applies a message from a flycheck (cargo check) runner: either a new
// diagnostic to map into LSP form and store, or a progress transition to
// forward to the client under a per-flycheck title.
524 fn handle_flycheck_msg(&mut self, message: flycheck::Message) {
526 flycheck::Message::AddDiagnostic { id, workspace_root, diagnostic } => {
527 let snap = self.snapshot();
// One rustc diagnostic can map to several LSP diagnostics (e.g. related spans).
528 let diagnostics = crate::diagnostics::to_proto::map_rust_diagnostic_to_lsp(
529 &self.config.diagnostics_map(),
534 for diag in diagnostics {
535 match url_to_file_id(&self.vfs.read().0, &diag.url) {
536 Ok(file_id) => self.diagnostics.add_check_diagnostic(
// A diagnostic for a file the VFS doesn't know about is logged, not fatal.
543 tracing::error!("File with cargo diagnostic not found in VFS: {}", err);
549 flycheck::Message::Progress { id, progress } => {
550 let (state, message) = match progress {
551 flycheck::Progress::DidStart => {
// Starting a fresh check invalidates the previous run's diagnostics.
552 self.diagnostics.clear_check(id);
553 (Progress::Begin, None)
555 flycheck::Progress::DidCheckCrate(target) => (Progress::Report, Some(target)),
556 flycheck::Progress::DidCancel => (Progress::End, None),
557 flycheck::Progress::DidFailToRestart(err) => {
558 self.show_and_log_error(
559 "cargo check failed".to_string(),
560 Some(err.to_string()),
564 flycheck::Progress::DidFinish(result) => {
565 if let Err(err) = result {
566 self.show_and_log_error(
567 "cargo check failed".to_string(),
568 Some(err.to_string()),
571 (Progress::End, None)
575 // When we're running multiple flychecks, we have to include a disambiguator in
576 // the title, or the editor complains. Note that this is a user-facing string.
577 let title = if self.flycheck.len() == 1 {
578 match self.config.flycheck() {
579 Some(config) => format!("{}", config),
580 None => "cargo check".to_string(),
// Multiple flychecks: 1-based index as the disambiguator.
583 format!("cargo check (#{})", id + 1)
585 self.report_progress(&title, state, message, None);
590 /// Registers and handles a request. This should only be called once per incoming request.
// `request_received` is the loop-turn start Instant, used for latency tracking
// by `register_request`; retries go straight to `on_request` instead.
591 fn on_new_request(&mut self, request_received: Instant, req: Request) {
592 self.register_request(&req, request_received);
593 self.on_request(req);
596 /// Handles a request.
// Two early-outs precede dispatch: after a Shutdown request every further
// request is rejected with InvalidRequest, and during the initial load (no
// workspaces, not yet quiescent) requests get ContentModified so the client
// retries later instead of rendering incomplete results.
597 fn on_request(&mut self, req: Request) {
598 if self.shutdown_requested {
599 self.respond(lsp_server::Response::new_err(
601 lsp_server::ErrorCode::InvalidRequest as i32,
602 "Shutdown already requested.".to_owned(),
607 // Avoid flashing a bunch of unresolved references during initial load.
608 if self.workspaces.is_empty() && !self.is_quiescent() {
609 self.respond(lsp_server::Response::new_err(
611 lsp_server::ErrorCode::ContentModified as i32,
612 "waiting for cargo metadata or cargo check",
617 RequestDispatcher { req: Some(req), global_state: self }
// `on_sync_mut` runs on the main thread with &mut state; `on_sync` on the
// main thread read-only; `on` is dispatched to the task pool on a snapshot.
618 .on_sync_mut::<lsp_types::request::Shutdown>(|s, ()| {
619 s.shutdown_requested = true;
622 .on_sync_mut::<lsp_ext::ReloadWorkspace>(handlers::handle_workspace_reload)
623 .on_sync_mut::<lsp_ext::MemoryUsage>(handlers::handle_memory_usage)
624 .on_sync_mut::<lsp_ext::ShuffleCrateGraph>(handlers::handle_shuffle_crate_graph)
625 .on_sync_mut::<lsp_ext::CancelFlycheck>(handlers::handle_cancel_flycheck)
626 .on_sync::<lsp_ext::JoinLines>(handlers::handle_join_lines)
627 .on_sync::<lsp_ext::OnEnter>(handlers::handle_on_enter)
628 .on_sync::<lsp_types::request::SelectionRangeRequest>(handlers::handle_selection_range)
629 .on_sync::<lsp_ext::MatchingBrace>(handlers::handle_matching_brace)
630 .on::<lsp_ext::AnalyzerStatus>(handlers::handle_analyzer_status)
631 .on::<lsp_ext::SyntaxTree>(handlers::handle_syntax_tree)
632 .on::<lsp_ext::ViewHir>(handlers::handle_view_hir)
633 .on::<lsp_ext::ViewFileText>(handlers::handle_view_file_text)
634 .on::<lsp_ext::ViewCrateGraph>(handlers::handle_view_crate_graph)
635 .on::<lsp_ext::ViewItemTree>(handlers::handle_view_item_tree)
636 .on::<lsp_ext::ExpandMacro>(handlers::handle_expand_macro)
637 .on::<lsp_ext::ParentModule>(handlers::handle_parent_module)
638 .on::<lsp_ext::Runnables>(handlers::handle_runnables)
639 .on::<lsp_ext::RelatedTests>(handlers::handle_related_tests)
640 .on::<lsp_ext::CodeActionRequest>(handlers::handle_code_action)
641 .on::<lsp_ext::CodeActionResolveRequest>(handlers::handle_code_action_resolve)
642 .on::<lsp_ext::HoverRequest>(handlers::handle_hover)
643 .on::<lsp_ext::ExternalDocs>(handlers::handle_open_docs)
644 .on::<lsp_ext::OpenCargoToml>(handlers::handle_open_cargo_toml)
645 .on::<lsp_ext::MoveItem>(handlers::handle_move_item)
646 .on::<lsp_ext::WorkspaceSymbol>(handlers::handle_workspace_symbol)
647 .on::<lsp_ext::OnTypeFormatting>(handlers::handle_on_type_formatting)
648 .on::<lsp_types::request::DocumentSymbolRequest>(handlers::handle_document_symbol)
649 .on::<lsp_types::request::GotoDefinition>(handlers::handle_goto_definition)
650 .on::<lsp_types::request::GotoDeclaration>(handlers::handle_goto_declaration)
651 .on::<lsp_types::request::GotoImplementation>(handlers::handle_goto_implementation)
652 .on::<lsp_types::request::GotoTypeDefinition>(handlers::handle_goto_type_definition)
653 .on::<lsp_types::request::InlayHintRequest>(handlers::handle_inlay_hints)
654 .on::<lsp_types::request::InlayHintResolveRequest>(handlers::handle_inlay_hints_resolve)
655 .on::<lsp_types::request::Completion>(handlers::handle_completion)
656 .on::<lsp_types::request::ResolveCompletionItem>(handlers::handle_completion_resolve)
657 .on::<lsp_types::request::CodeLensRequest>(handlers::handle_code_lens)
658 .on::<lsp_types::request::CodeLensResolve>(handlers::handle_code_lens_resolve)
659 .on::<lsp_types::request::FoldingRangeRequest>(handlers::handle_folding_range)
660 .on::<lsp_types::request::SignatureHelpRequest>(handlers::handle_signature_help)
661 .on::<lsp_types::request::PrepareRenameRequest>(handlers::handle_prepare_rename)
662 .on::<lsp_types::request::Rename>(handlers::handle_rename)
663 .on::<lsp_types::request::References>(handlers::handle_references)
664 .on::<lsp_types::request::Formatting>(handlers::handle_formatting)
665 .on::<lsp_types::request::RangeFormatting>(handlers::handle_range_formatting)
666 .on::<lsp_types::request::DocumentHighlightRequest>(handlers::handle_document_highlight)
667 .on::<lsp_types::request::CallHierarchyPrepare>(handlers::handle_call_hierarchy_prepare)
668 .on::<lsp_types::request::CallHierarchyIncomingCalls>(
669 handlers::handle_call_hierarchy_incoming,
671 .on::<lsp_types::request::CallHierarchyOutgoingCalls>(
672 handlers::handle_call_hierarchy_outgoing,
674 .on::<lsp_types::request::SemanticTokensFullRequest>(
675 handlers::handle_semantic_tokens_full,
677 .on::<lsp_types::request::SemanticTokensFullDeltaRequest>(
678 handlers::handle_semantic_tokens_full_delta,
680 .on::<lsp_types::request::SemanticTokensRangeRequest>(
681 handlers::handle_semantic_tokens_range,
683 .on::<lsp_types::request::WillRenameFiles>(handlers::handle_will_rename_files)
684 .on::<lsp_ext::Ssr>(handlers::handle_ssr)
688 /// Handles an incoming notification.
// Keeps `mem_docs` and the VFS in sync with the editor's open documents,
// triggers flychecks/workspace reloads on save, and re-queries configuration
// on DidChangeConfiguration.
// NOTE(review): the `¶ms` sequences below look like mojibake of `&params`
// (and `¬` elsewhere of `&not`) — confirm against the upstream file.
689 fn on_notification(&mut self, not: Notification) -> Result<()> {
690 NotificationDispatcher { not: Some(not), global_state: self }
691 .on::<lsp_types::notification::Cancel>(|this, params| {
// LSP request ids may be numeric or string; normalize both.
692 let id: lsp_server::RequestId = match params.id {
693 lsp_types::NumberOrString::Number(id) => id.into(),
694 lsp_types::NumberOrString::String(id) => id.into(),
699 .on::<lsp_types::notification::WorkDoneProgressCancel>(|_this, _params| {
700 // Just ignore this. It is OK to continue sending progress
701 // notifications for this token, as the client can't know when
702 // we accepted notification.
705 .on::<lsp_types::notification::DidOpenTextDocument>(|this, params| {
706 if let Ok(path) = from_proto::vfs_path(¶ms.text_document.uri) {
// Record the open doc (with its version) and push its text into the VFS;
// a second open for the same path is a client protocol violation.
707 let already_exists = this
709 .insert(path.clone(), DocumentData::new(params.text_document.version))
712 tracing::error!("duplicate DidOpenTextDocument: {}", path)
717 .set_file_contents(path, Some(params.text_document.text.into_bytes()));
721 .on::<lsp_types::notification::DidChangeTextDocument>(|this, params| {
722 if let Ok(path) = from_proto::vfs_path(¶ms.text_document.uri) {
723 match this.mem_docs.get_mut(&path) {
725 // The version passed in DidChangeTextDocument is the version after all edits are applied
726 // so we should apply it before the vfs is notified.
727 doc.version = params.text_document.version;
730 tracing::error!("unexpected DidChangeTextDocument: {}", path);
// Re-materialize the current text from the VFS, apply the incremental
// edits, and write the result back.
735 let vfs = &mut this.vfs.write().0;
736 let file_id = vfs.file_id(&path).unwrap();
737 let mut text = String::from_utf8(vfs.file_contents(file_id).to_vec()).unwrap();
738 apply_document_changes(&mut text, params.content_changes);
740 vfs.set_file_contents(path, Some(text.into_bytes()));
744 .on::<lsp_types::notification::DidCloseTextDocument>(|this, params| {
745 if let Ok(path) = from_proto::vfs_path(¶ms.text_document.uri) {
746 if this.mem_docs.remove(&path).is_err() {
747 tracing::error!("orphan DidCloseTextDocument: {}", path);
// Closing drops the cached semantic tokens and re-reads the file from disk
// (the editor buffer is no longer authoritative).
750 this.semantic_tokens_cache.lock().remove(¶ms.text_document.uri);
752 if let Some(path) = path.as_path() {
753 this.loader.handle.invalidate(path.to_path_buf());
758 .on::<lsp_types::notification::DidSaveTextDocument>(|this, params| {
759 let mut updated = false;
760 if let Ok(vfs_path) = from_proto::vfs_path(¶ms.text_document.uri) {
761 let (vfs, _) = &*this.vfs.read();
763 // Trigger flychecks for all workspaces that depend on the saved file
764 if let Some(file_id) = vfs.file_id(&vfs_path) {
765 let analysis = this.analysis_host.analysis();
766 // Crates containing or depending on the saved file
767 let crate_ids: Vec<_> = analysis
774 .transitive_rev_deps(id)
// Map affected crates to their crate-root file paths for matching against
// workspace target roots below.
780 let crate_root_paths: Vec<_> = crate_ids
782 .filter_map(|&crate_id| {
784 .crate_root(crate_id)
786 vfs.file_path(file_id).as_path().map(ToOwned::to_owned)
790 .collect::<ide::Cancellable<_>>()?;
791 let crate_root_paths: Vec<_> =
792 crate_root_paths.iter().map(Deref::deref).collect();
794 // Find all workspaces that have at least one target containing the saved file
796 this.workspaces.iter().enumerate().filter(|(_, ws)| match ws {
797 project_model::ProjectWorkspace::Cargo { cargo, .. } => {
798 cargo.packages().any(|pkg| {
799 cargo[pkg].targets.iter().any(|&it| {
800 crate_root_paths.contains(&cargo[it].root.as_path())
804 project_model::ProjectWorkspace::Json { project, .. } => project
806 .any(|(c, _)| crate_ids.iter().any(|&crate_id| crate_id == c)),
807 project_model::ProjectWorkspace::DetachedFiles { .. } => false,
810 // Find and trigger corresponding flychecks
811 for flycheck in &this.flycheck {
812 for (id, _) in workspace_ids.clone() {
813 if id == flycheck.id() {
822 // Re-fetch workspaces if a workspace related file has changed
823 if let Some(abs_path) = vfs_path.as_path() {
824 if reload::should_refresh_for_change(&abs_path, ChangeKind::Modify) {
825 this.fetch_workspaces_queue
826 .request_op(format!("DidSaveTextDocument {}", abs_path.display()));
831 // No specific flycheck was triggered, so let's trigger all of them.
833 for flycheck in &this.flycheck {
839 .on::<lsp_types::notification::DidChangeConfiguration>(|this, _params| {
840 // As stated in https://github.com/microsoft/language-server-protocol/issues/676,
841 // this notification's parameters should be ignored and the actual config queried separately.
842 this.send_request::<lsp_types::request::WorkspaceConfiguration>(
843 lsp_types::ConfigurationParams {
844 items: vec![lsp_types::ConfigurationItem {
846 section: Some("rust-analyzer".to_string()),
850 tracing::debug!("config update response: '{:?}", resp);
851 let lsp_server::Response { error, result, .. } = resp;
853 match (error, result) {
855 tracing::error!("failed to fetch the server settings: {:?}", err)
857 (None, Some(mut configs)) => {
858 if let Some(json) = configs.get_mut(0) {
859 // Note that json can be null according to the spec if the client can't
860 // provide a configuration. This is handled in Config::update below.
// Clone-modify-swap: update a copy of the config, then install it.
861 let mut config = Config::clone(&*this.config);
862 if let Err(error) = config.update(json.take()) {
864 lsp_types::MessageType::WARNING,
868 this.update_configuration(config);
871 (None, None) => tracing::error!(
872 "received empty server settings response from the client"
880 .on::<lsp_types::notification::DidChangeWatchedFiles>(|this, params| {
881 for change in params.changes {
882 if let Ok(path) = from_proto::abs_path(&change.uri) {
// Force the loader to re-read files changed outside the editor.
883 this.loader.handle.invalidate(path);
// Recomputes native diagnostics for the subscribed files on the task pool.
// Resolves the subscription paths to FileIds up front (while holding the VFS
// read lock briefly per path), then spawns the actual computation against an
// analysis snapshot; results come back as a Task::Diagnostics event.
// NOTE(review): the tail of this function (the spawn closure's end and the
// method's closing braces) falls outside this view.
892 fn update_diagnostics(&mut self) {
893 let subscriptions = self
// `unwrap`: subscriptions are expected to name files already in the VFS.
896 .map(|path| self.vfs.read().0.file_id(path).unwrap())
897 .collect::<Vec<_>>();
899 tracing::trace!("updating notifications for {:?}", subscriptions);
901 let snapshot = self.snapshot();
902 self.task_pool.handle.spawn(move || {
903 let diagnostics = subscriptions
// `filter_map`: files whose diagnostics can't be computed are skipped.
905 .filter_map(|file_id| {
906 handlers::publish_diagnostics(&snapshot, file_id)
908 .map(|diags| (file_id, diags))
910 .collect::<Vec<_>>();
911 Task::Diagnostics(diagnostics)