1 //! The main loop of `rust-analyzer` responsible for dispatching LSP
2 //! requests/replies and notifications back to the client.
6 time::{Duration, Instant},
9 use always_assert::always;
10 use crossbeam_channel::{select, Receiver};
11 use ide::{FileId, PrimeCachesProgress};
12 use ide_db::base_db::{SourceDatabaseExt, VfsPath};
13 use lsp_server::{Connection, Notification, Request};
14 use lsp_types::notification::Notification as _;
19 dispatch::{NotificationDispatcher, RequestDispatcher},
21 global_state::{file_id_to_url, url_to_file_id, GlobalState},
23 lsp_utils::{apply_document_changes, is_cancelled, notification_is, Progress},
24 mem_docs::DocumentData,
25 reload::{BuildDataProgress, ProjectWorkspaceProgress},
// Entry point of the server proper: wires the initialized LSP `Connection`
// and `Config` into a `GlobalState` and drives the event loop until the
// client disconnects or sends `exit`.
// NOTE(review): this is an elided listing — lines are missing between the
// numbered lines below; only comments are added here.
29 pub fn main_loop(config: Config, connection: Connection) -> Result<()> {
30 log::info!("initial config: {:#?}", config);
32 // Windows scheduler implements priority boosts: if thread waits for an
33 // event (like a condvar), and event fires, priority of the thread is
34 // temporary bumped. This optimization backfires in our case: each time the
35 // `main_loop` schedules a task to run on a threadpool, the worker threads
36 // gets a higher priority, and (on a machine with fewer cores) displaces the
37 // main loop! We work-around this by marking the main loop as a
38 // higher-priority thread.
40 // https://docs.microsoft.com/en-us/windows/win32/procthread/scheduling-priorities
41 // https://docs.microsoft.com/en-us/windows/win32/procthread/priority-boosts
42 // https://github.com/rust-analyzer/rust-analyzer/issues/2835
// NOTE(review): the winapi calls below are presumably wrapped in a
// `#[cfg(windows)]` `unsafe` block elided from this listing — confirm
// against the full file.
45 use winapi::um::processthreadsapi::*;
46 let thread = GetCurrentThread();
// 1 == THREAD_PRIORITY_ABOVE_NORMAL (see the MSDN links above).
47 let thread_priority_above_normal = 1;
48 SetThreadPriority(thread, thread_priority_above_normal);
// Hand each half of the connection to `GlobalState`; `run` blocks until the
// loop terminates.
51 GlobalState::new(connection.sender, config).run(connection.receiver)
// Variants of the `Event` enum the main loop multiplexes over (the enum
// declaration line — and a `Task` variant used elsewhere in this file — are
// elided from this listing).
// A request/response/notification received from the LSP client.
55 Lsp(lsp_server::Message),
// A message from the VFS loader thread (file contents or loading progress).
57 Vfs(vfs::loader::Message),
// A message from a running `cargo check` (flycheck) job.
58 Flycheck(flycheck::Message),
// Results sent back to the main loop from worker tasks on the thread pool.
62 pub(crate) enum Task {
// A finished LSP response, ready to be sent to the client.
63 Response(lsp_server::Response),
// Freshly computed native diagnostics, grouped per file.
64 Diagnostics(Vec<(FileId, Vec<lsp_types::Diagnostic>)>),
// Progress updates from the cache-priming ("Indexing") task.
65 PrimeCaches(PrimeCachesProgress),
// Progress updates from workspace discovery (see `reload.rs`).
66 FetchWorkspace(ProjectWorkspaceProgress),
// Progress updates from fetching build-script output (see `reload.rs`).
67 FetchBuildData(BuildDataProgress),
70 impl fmt::Debug for Event {
71 fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
72 let debug_verbose_not = |not: &Notification, f: &mut fmt::Formatter| {
73 f.debug_struct("Notification").field("method", ¬.method).finish()
77 Event::Lsp(lsp_server::Message::Notification(not)) => {
78 if notification_is::<lsp_types::notification::DidOpenTextDocument>(not)
79 || notification_is::<lsp_types::notification::DidChangeTextDocument>(not)
81 return debug_verbose_not(not, f);
84 Event::Task(Task::Response(resp)) => {
86 .debug_struct("Response")
87 .field("id", &resp.id)
88 .field("error", &resp.error)
94 Event::Lsp(it) => fmt::Debug::fmt(it, f),
95 Event::Task(it) => fmt::Debug::fmt(it, f),
96 Event::Vfs(it) => fmt::Debug::fmt(it, f),
97 Event::Flycheck(it) => fmt::Debug::fmt(it, f),
// Main loop body: performs one-time startup work (workspace-discovery
// warning, dynamic `didSave` registration, initial workspace fetch), then
// pumps events from `next_event` into `handle_event` until the client sends
// `exit`. NOTE(review): elided listing — comments only, code unchanged.
103 fn run(mut self, inbox: Receiver<lsp_server::Message>) -> Result<()> {
// Nothing to analyze and the user wants to know about it: surface an error
// message (the `show_message` call itself is elided from this listing).
104 if self.config.linked_projects().is_empty()
105 && self.config.detached_files().is_empty()
106 && self.config.notifications().cargo_toml_not_found
109 lsp_types::MessageType::Error,
110 "rust-analyzer failed to discover workspace".to_string(),
// Dynamically register for `textDocument/didSave` on the file patterns we
// care about; `include_text: Some(false)` because we re-read from disk/VFS.
114 if self.config.did_save_text_document_dynamic_registration() {
115 let save_registration_options = lsp_types::TextDocumentSaveRegistrationOptions {
116 include_text: Some(false),
117 text_document_registration_options: lsp_types::TextDocumentRegistrationOptions {
118 document_selector: Some(vec![
119 lsp_types::DocumentFilter {
122 pattern: Some("**/*.rs".into()),
124 lsp_types::DocumentFilter {
127 pattern: Some("**/Cargo.toml".into()),
129 lsp_types::DocumentFilter {
132 pattern: Some("**/Cargo.lock".into()),
138 let registration = lsp_types::Registration {
139 id: "textDocument/didSave".to_string(),
140 method: "textDocument/didSave".to_string(),
141 register_options: Some(serde_json::to_value(save_registration_options).unwrap()),
143 self.send_request::<lsp_types::request::RegisterCapability>(
144 lsp_types::RegistrationParams { registrations: vec![registration] },
// Kick off the initial workspace discovery before entering the loop.
149 self.fetch_workspaces_request();
150 self.fetch_workspaces_if_needed();
// Event pump: `next_event` returns `None` when the client channel closes.
// A well-behaved client sends `exit` first; reaching the `Err` below means
// it hung up without the shutdown handshake.
152 while let Some(event) = self.next_event(&inbox) {
153 if let Event::Lsp(lsp_server::Message::Notification(not)) = &event {
154 if not.method == lsp_types::notification::Exit::METHOD {
158 self.handle_event(event)?
161 Err("client exited without proper shutdown sequence")?
// Blocks until the next event arrives on any of the four input channels
// (client inbox, thread-pool results, VFS loader, flycheck). The `select!`
// macro line is elided from this listing.
164 fn next_event(&self, inbox: &Receiver<lsp_server::Message>) -> Option<Event> {
// `msg.ok()` maps a closed client channel to `None`, which ends the loop.
166 recv(inbox) -> msg =>
167 msg.ok().map(Event::Lsp),
// The internal channels' senders are owned by `self`, so a recv error here
// would be a bug — hence `unwrap()`.
169 recv(self.task_pool.receiver) -> task =>
170 Some(Event::Task(task.unwrap())),
172 recv(self.loader.receiver) -> task =>
173 Some(Event::Vfs(task.unwrap())),
175 recv(self.flycheck_receiver) -> task =>
176 Some(Event::Flycheck(task.unwrap())),
// One turn of the main loop: dispatch a single `Event`, then do the
// end-of-turn bookkeeping (apply VFS changes, publish diagnostics, refresh
// client caches, report status, warn on slow turns).
// NOTE(review): elided listing — lines are missing between the numbered
// lines below; only comments are added, code is unchanged.
180 fn handle_event(&mut self, event: Event) -> Result<()> {
181 let loop_start = Instant::now();
182 // NOTE: don't count blocking select! call as a loop-turn time
183 let _p = profile::span("GlobalState::handle_event");
185 log::info!("handle_event({:?})", event);
186 let task_queue_len = self.task_pool.handle.len();
187 if task_queue_len > 0 {
188 log::info!("task queue len: {}", task_queue_len);
// Remember quiescence *before* the event so we can detect the transition
// into quiescence at the end of the turn.
191 let was_quiescent = self.is_quiescent();
193 Event::Lsp(msg) => match msg {
194 lsp_server::Message::Request(req) => self.on_request(loop_start, req)?,
195 lsp_server::Message::Notification(not) => {
196 self.on_notification(not)?;
198 lsp_server::Message::Response(resp) => self.complete_request(resp),
// Worker-task results. The inner loop (elided) drains `try_recv` so many
// queued tasks are processed in a single turn.
200 Event::Task(mut task) => {
201 let _p = profile::span("GlobalState::handle_event/task");
202 let mut prime_caches_progress = Vec::new();
205 Task::Response(response) => self.respond(response),
206 Task::Diagnostics(diagnostics_per_file) => {
207 for (file_id, diagnostics) in diagnostics_per_file {
208 self.diagnostics.set_native_diagnostics(file_id, diagnostics)
// Buffer indexing progress; consecutive `StartedOnCrate` updates are
// coalesced so we only report the latest one.
211 Task::PrimeCaches(progress) => match progress {
212 PrimeCachesProgress::Started => prime_caches_progress.push(progress),
213 PrimeCachesProgress::StartedOnCrate { .. } => {
214 match prime_caches_progress.last_mut() {
215 Some(last @ PrimeCachesProgress::StartedOnCrate { .. }) => {
216 // Coalesce subsequent update events.
219 _ => prime_caches_progress.push(progress),
222 PrimeCachesProgress::Finished => prime_caches_progress.push(progress),
224 Task::FetchWorkspace(progress) => {
225 let (state, msg) = match progress {
226 ProjectWorkspaceProgress::Begin => (Progress::Begin, None),
227 ProjectWorkspaceProgress::Report(msg) => {
228 (Progress::Report, Some(msg))
230 ProjectWorkspaceProgress::End(workspaces) => {
231 self.fetch_workspaces_completed(workspaces);
// `Arc::ptr_eq` cheaply detects whether `switch_workspaces` actually
// installed a new workspace set.
233 let old = Arc::clone(&self.workspaces);
234 self.switch_workspaces();
235 let workspaces_updated = !Arc::ptr_eq(&old, &self.workspaces);
237 if self.config.run_build_scripts() && workspaces_updated {
238 self.fetch_build_data_request()
241 (Progress::End, None)
245 self.report_progress("Fetching", state, msg, None);
247 Task::FetchBuildData(progress) => {
248 let (state, msg) = match progress {
249 BuildDataProgress::Begin => (Some(Progress::Begin), None),
250 BuildDataProgress::Report(msg) => {
251 (Some(Progress::Report), Some(msg))
253 BuildDataProgress::End(build_data_result) => {
254 self.fetch_build_data_completed(build_data_result);
256 self.switch_workspaces();
258 (Some(Progress::End), None)
262 if let Some(state) = state {
263 self.report_progress("Loading", state, msg, None);
268 // Coalesce multiple task events into one loop turn
269 task = match self.task_pool.receiver.try_recv() {
// Flush the buffered (coalesced) indexing progress to the client.
275 for progress in prime_caches_progress {
276 let (state, message, fraction);
278 PrimeCachesProgress::Started => {
279 state = Progress::Begin;
283 PrimeCachesProgress::StartedOnCrate { on_crate, n_done, n_total } => {
284 state = Progress::Report;
285 message = Some(format!("{}/{} ({})", n_done, n_total, on_crate));
286 fraction = Progress::fraction(n_done, n_total);
288 PrimeCachesProgress::Finished => {
289 state = Progress::End;
// Mark the queued priming op done so a new one may be requested later.
293 self.prime_caches_queue.op_completed(());
297 self.report_progress("Indexing", state, message, Some(fraction));
300 Event::Vfs(mut task) => {
301 let _p = profile::span("GlobalState::handle_event/vfs");
304 vfs::loader::Message::Loaded { files } => {
305 let vfs = &mut self.vfs.write().0;
306 for (path, contents) in files {
307 let path = VfsPath::from(path);
// Files open in the editor (`mem_docs`) are owned by the client; don't let
// disk contents clobber unsaved edits.
308 if !self.mem_docs.contains(&path) {
309 vfs.set_file_contents(path, contents);
313 vfs::loader::Message::Progress { n_total, n_done, config_version } => {
314 always!(config_version <= self.vfs_config_version);
316 self.vfs_progress_config_version = config_version;
317 self.vfs_progress_n_total = n_total;
318 self.vfs_progress_n_done = n_done;
320 let state = if n_done == 0 {
322 } else if n_done < n_total {
325 assert_eq!(n_done, n_total);
328 self.report_progress(
331 Some(format!("{}/{}", n_done, n_total)),
332 Some(Progress::fraction(n_done, n_total)),
336 // Coalesce many VFS event into a single loop turn
337 task = match self.loader.receiver.try_recv() {
343 Event::Flycheck(mut task) => {
344 let _p = profile::span("GlobalState::handle_event/flycheck");
347 flycheck::Message::AddDiagnostic { workspace_root, diagnostic } => {
// One cargo diagnostic can map to several LSP diagnostics (e.g. related
// spans), hence the loop below.
349 crate::diagnostics::to_proto::map_rust_diagnostic_to_lsp(
350 &self.config.diagnostics_map(),
354 for diag in diagnostics {
355 match url_to_file_id(&self.vfs.read().0, &diag.url) {
356 Ok(file_id) => self.diagnostics.add_check_diagnostic(
363 "File with cargo diagnostic not found in VFS: {}",
371 flycheck::Message::Progress { id, progress } => {
372 let (state, message) = match progress {
373 flycheck::Progress::DidStart => {
374 self.diagnostics.clear_check();
375 (Progress::Begin, None)
377 flycheck::Progress::DidCheckCrate(target) => {
378 (Progress::Report, Some(target))
380 flycheck::Progress::DidCancel => (Progress::End, None),
381 flycheck::Progress::DidFinish(result) => {
382 if let Err(err) = result {
383 log::error!("cargo check failed: {}", err)
385 (Progress::End, None)
389 // When we're running multiple flychecks, we have to include a disambiguator in
390 // the title, or the editor complains. Note that this is a user-facing string.
391 let title = if self.flycheck.len() == 1 {
392 "cargo check".to_string()
394 format!("cargo check (#{})", id + 1)
396 self.report_progress(&title, state, message, None);
399 // Coalesce many flycheck updates into a single loop turn
400 task = match self.flycheck_receiver.try_recv() {
// --- End-of-turn bookkeeping, runs after every event. ---
408 let state_changed = self.process_changes();
409 let memdocs_added_or_removed = self.mem_docs.take_changes();
411 if self.is_quiescent() {
413 for flycheck in &self.flycheck {
// Just became quiescent (or state changed while quiescent): kick off cache
// priming and ask the client to refresh its cached views.
418 if !was_quiescent || state_changed {
419 // Ensure that only one cache priming task can run at a time
420 self.prime_caches_queue.request_op();
421 if self.prime_caches_queue.should_start_op() {
422 self.task_pool.handle.spawn_with_sender({
423 let snap = self.snapshot();
425 let cb = |progress| {
426 sender.send(Task::PrimeCaches(progress)).unwrap();
// Cancellation of the priming task is expected and benign.
428 match snap.analysis.prime_caches(cb) {
430 Err(_canceled) => (),
436 // Refresh semantic tokens if the client supports it.
437 if self.config.semantic_tokens_refresh() {
438 self.semantic_tokens_cache.lock().clear();
// NOTE: `SemanticTokensRefesh` (missing the second "r") appears to be the
// actual, misspelled name of this request type in the `lsp_types` crate at
// this version — do not "fix" the spelling here without checking the crate.
439 self.send_request::<lsp_types::request::SemanticTokensRefesh>((), |_, _| ());
442 // Refresh code lens if the client supports it.
443 if self.config.code_lens_refresh() {
444 self.send_request::<lsp_types::request::CodeLensRefresh>((), |_, _| ());
448 if !was_quiescent || state_changed || memdocs_added_or_removed {
449 if self.config.publish_diagnostics() {
450 self.update_diagnostics()
455 if let Some(diagnostic_changes) = self.diagnostics.take_changes() {
456 for file_id in diagnostic_changes {
457 let db = self.analysis_host.raw_database();
458 let source_root = db.file_source_root(file_id);
459 if db.source_root(source_root).is_library {
460 // Only publish diagnostics for files in the workspace, not from crates.io deps
462 // While theoretically these should never have errors, we have quite a few false
463 // positives particularly in the stdlib, and those diagnostics would stay around
464 // forever if we emitted them here.
468 let url = file_id_to_url(&self.vfs.read().0, file_id);
469 let diagnostics = self.diagnostics.diagnostics_for(file_id).cloned().collect();
// Version is the in-memory doc's version if the file is open, else default
// (the doc may have been closed since the diagnostic was computed).
470 let version = from_proto::vfs_path(&url)
471 .map(|path| self.mem_docs.get(&path).map(|it| it.version))
472 .unwrap_or_default();
474 self.send_notification::<lsp_types::notification::PublishDiagnostics>(
475 lsp_types::PublishDiagnosticsParams { uri: url, diagnostics, version },
480 if self.config.cargo_autoreload() {
481 self.fetch_workspaces_if_needed();
483 self.fetch_build_data_if_needed();
485 self.report_new_status_if_needed();
// Slow-turn watchdog: warn in the log, and nag visibly when profiling is on.
487 let loop_duration = loop_start.elapsed();
488 if loop_duration > Duration::from_millis(100) {
489 log::warn!("overly long loop turn: {:?}", loop_duration);
490 if env::var("RA_PROFILE").is_ok() {
492 lsp_types::MessageType::Error,
493 format!("overly long loop turn: {:?}", loop_duration),
// Dispatches a single client request. Short-circuits with an error response
// when shutdown was already requested, or while the initial load is still in
// flight; otherwise routes through `RequestDispatcher` — `on_sync` handlers
// run on the main thread, `on` handlers run on the thread pool.
// NOTE(review): elided listing — comments only, code unchanged.
500 fn on_request(&mut self, request_received: Instant, req: Request) -> Result<()> {
// Record receipt time for latency reporting/cancellation bookkeeping.
501 self.register_request(&req, request_received);
// Per the LSP spec, requests after `shutdown` must be answered with
// InvalidRequest.
503 if self.shutdown_requested {
504 self.respond(lsp_server::Response::new_err(
506 lsp_server::ErrorCode::InvalidRequest as i32,
507 "Shutdown already requested.".to_owned(),
513 // Avoid flashing a bunch of unresolved references during initial load.
514 if self.workspaces.is_empty() && !self.is_quiescent() {
515 self.respond(lsp_server::Response::new_err(
517 // FIXME: i32 should impl From<ErrorCode> (from() guarantees lossless conversion)
518 lsp_server::ErrorCode::ContentModified as i32,
519 "waiting for cargo metadata or cargo check",
524 RequestDispatcher { req: Some(req), global_state: self }
525 .on_sync::<lsp_ext::ReloadWorkspace>(|s, ()| {
526 s.fetch_workspaces_request();
527 s.fetch_workspaces_if_needed();
530 .on_sync::<lsp_ext::JoinLines>(|s, p| handlers::handle_join_lines(s.snapshot(), p))?
531 .on_sync::<lsp_ext::OnEnter>(|s, p| handlers::handle_on_enter(s.snapshot(), p))?
532 .on_sync::<lsp_types::request::Shutdown>(|s, ()| {
533 s.shutdown_requested = true;
536 .on_sync::<lsp_types::request::SelectionRangeRequest>(|s, p| {
537 handlers::handle_selection_range(s.snapshot(), p)
539 .on_sync::<lsp_ext::MatchingBrace>(|s, p| {
540 handlers::handle_matching_brace(s.snapshot(), p)
542 .on_sync::<lsp_ext::MemoryUsage>(|s, p| handlers::handle_memory_usage(s, p))?
// Pool-dispatched handlers: rust-analyzer extensions first, then standard
// LSP requests.
543 .on::<lsp_ext::AnalyzerStatus>(handlers::handle_analyzer_status)
544 .on::<lsp_ext::SyntaxTree>(handlers::handle_syntax_tree)
545 .on::<lsp_ext::ViewHir>(handlers::handle_view_hir)
546 .on::<lsp_ext::ViewCrateGraph>(handlers::handle_view_crate_graph)
547 .on::<lsp_ext::ViewItemTree>(handlers::handle_view_item_tree)
548 .on::<lsp_ext::ExpandMacro>(handlers::handle_expand_macro)
549 .on::<lsp_ext::ParentModule>(handlers::handle_parent_module)
550 .on::<lsp_ext::Runnables>(handlers::handle_runnables)
551 .on::<lsp_ext::RelatedTests>(handlers::handle_related_tests)
552 .on::<lsp_ext::InlayHints>(handlers::handle_inlay_hints)
553 .on::<lsp_ext::CodeActionRequest>(handlers::handle_code_action)
554 .on::<lsp_ext::CodeActionResolveRequest>(handlers::handle_code_action_resolve)
555 .on::<lsp_ext::HoverRequest>(handlers::handle_hover)
556 .on::<lsp_ext::ExternalDocs>(handlers::handle_open_docs)
557 .on::<lsp_ext::OpenCargoToml>(handlers::handle_open_cargo_toml)
558 .on::<lsp_ext::MoveItem>(handlers::handle_move_item)
559 .on::<lsp_ext::WorkspaceSymbol>(handlers::handle_workspace_symbol)
560 .on::<lsp_types::request::OnTypeFormatting>(handlers::handle_on_type_formatting)
561 .on::<lsp_types::request::DocumentSymbolRequest>(handlers::handle_document_symbol)
562 .on::<lsp_types::request::GotoDefinition>(handlers::handle_goto_definition)
563 .on::<lsp_types::request::GotoDeclaration>(handlers::handle_goto_declaration)
564 .on::<lsp_types::request::GotoImplementation>(handlers::handle_goto_implementation)
565 .on::<lsp_types::request::GotoTypeDefinition>(handlers::handle_goto_type_definition)
566 .on::<lsp_types::request::Completion>(handlers::handle_completion)
567 .on::<lsp_types::request::ResolveCompletionItem>(handlers::handle_completion_resolve)
568 .on::<lsp_types::request::CodeLensRequest>(handlers::handle_code_lens)
569 .on::<lsp_types::request::CodeLensResolve>(handlers::handle_code_lens_resolve)
570 .on::<lsp_types::request::FoldingRangeRequest>(handlers::handle_folding_range)
571 .on::<lsp_types::request::SignatureHelpRequest>(handlers::handle_signature_help)
572 .on::<lsp_types::request::PrepareRenameRequest>(handlers::handle_prepare_rename)
573 .on::<lsp_types::request::Rename>(handlers::handle_rename)
574 .on::<lsp_types::request::References>(handlers::handle_references)
575 .on::<lsp_types::request::Formatting>(handlers::handle_formatting)
576 .on::<lsp_types::request::RangeFormatting>(handlers::handle_range_formatting)
577 .on::<lsp_types::request::DocumentHighlightRequest>(handlers::handle_document_highlight)
578 .on::<lsp_types::request::CallHierarchyPrepare>(handlers::handle_call_hierarchy_prepare)
579 .on::<lsp_types::request::CallHierarchyIncomingCalls>(
580 handlers::handle_call_hierarchy_incoming,
582 .on::<lsp_types::request::CallHierarchyOutgoingCalls>(
583 handlers::handle_call_hierarchy_outgoing,
585 .on::<lsp_types::request::SemanticTokensFullRequest>(
586 handlers::handle_semantic_tokens_full,
588 .on::<lsp_types::request::SemanticTokensFullDeltaRequest>(
589 handlers::handle_semantic_tokens_full_delta,
591 .on::<lsp_types::request::SemanticTokensRangeRequest>(
592 handlers::handle_semantic_tokens_range,
594 .on::<lsp_types::request::WillRenameFiles>(handlers::handle_will_rename_files)
595 .on::<lsp_ext::Ssr>(handlers::handle_ssr)
599 fn on_notification(&mut self, not: Notification) -> Result<()> {
600 NotificationDispatcher { not: Some(not), global_state: self }
601 .on::<lsp_types::notification::Cancel>(|this, params| {
602 let id: lsp_server::RequestId = match params.id {
603 lsp_types::NumberOrString::Number(id) => id.into(),
604 lsp_types::NumberOrString::String(id) => id.into(),
609 .on::<lsp_types::notification::WorkDoneProgressCancel>(|_this, _params| {
610 // Just ignore this. It is OK to continue sending progress
611 // notifications for this token, as the client can't know when
612 // we accepted notification.
615 .on::<lsp_types::notification::DidOpenTextDocument>(|this, params| {
616 if let Ok(path) = from_proto::vfs_path(¶ms.text_document.uri) {
619 .insert(path.clone(), DocumentData::new(params.text_document.version))
622 log::error!("duplicate DidOpenTextDocument: {}", path)
627 .set_file_contents(path, Some(params.text_document.text.into_bytes()));
631 .on::<lsp_types::notification::DidChangeTextDocument>(|this, params| {
632 if let Ok(path) = from_proto::vfs_path(¶ms.text_document.uri) {
633 match this.mem_docs.get_mut(&path) {
635 // The version passed in DidChangeTextDocument is the version after all edits are applied
636 // so we should apply it before the vfs is notified.
637 doc.version = params.text_document.version;
640 log::error!("expected DidChangeTextDocument: {}", path);
645 let vfs = &mut this.vfs.write().0;
646 let file_id = vfs.file_id(&path).unwrap();
647 let mut text = String::from_utf8(vfs.file_contents(file_id).to_vec()).unwrap();
648 apply_document_changes(&mut text, params.content_changes);
650 vfs.set_file_contents(path, Some(text.into_bytes()));
654 .on::<lsp_types::notification::DidCloseTextDocument>(|this, params| {
655 if let Ok(path) = from_proto::vfs_path(¶ms.text_document.uri) {
656 if this.mem_docs.remove(&path).is_err() {
657 log::error!("orphan DidCloseTextDocument: {}", path);
660 this.semantic_tokens_cache.lock().remove(¶ms.text_document.uri);
662 if let Some(path) = path.as_path() {
663 this.loader.handle.invalidate(path.to_path_buf());
668 .on::<lsp_types::notification::DidSaveTextDocument>(|this, params| {
669 for flycheck in &this.flycheck {
672 if let Ok(abs_path) = from_proto::abs_path(¶ms.text_document.uri) {
673 this.maybe_refresh(&[(abs_path, ChangeKind::Modify)]);
677 .on::<lsp_types::notification::DidChangeConfiguration>(|this, _params| {
678 // As stated in https://github.com/microsoft/language-server-protocol/issues/676,
679 // this notification's parameters should be ignored and the actual config queried separately.
680 this.send_request::<lsp_types::request::WorkspaceConfiguration>(
681 lsp_types::ConfigurationParams {
682 items: vec![lsp_types::ConfigurationItem {
684 section: Some("rust-analyzer".to_string()),
688 log::debug!("config update response: '{:?}", resp);
689 let lsp_server::Response { error, result, .. } = resp;
691 match (error, result) {
693 log::error!("failed to fetch the server settings: {:?}", err)
695 (None, Some(mut configs)) => {
696 if let Some(json) = configs.get_mut(0) {
697 // Note that json can be null according to the spec if the client can't
698 // provide a configuration. This is handled in Config::update below.
699 let mut config = Config::clone(&*this.config);
700 config.update(json.take());
701 this.update_configuration(config);
704 (None, None) => log::error!(
705 "received empty server settings response from the client"
713 .on::<lsp_types::notification::DidChangeWatchedFiles>(|this, params| {
714 for change in params.changes {
715 if let Ok(path) = from_proto::abs_path(&change.uri) {
716 this.loader.handle.invalidate(path);
725 fn update_diagnostics(&mut self) {
726 let subscriptions = self
729 .map(|path| self.vfs.read().0.file_id(path).unwrap())
730 .collect::<Vec<_>>();
732 log::trace!("updating notifications for {:?}", subscriptions);
734 let snapshot = self.snapshot();
735 self.task_pool.handle.spawn(move || {
736 let diagnostics = subscriptions
738 .filter_map(|file_id| {
739 handlers::publish_diagnostics(&snapshot, file_id)
741 if !is_cancelled(&*err) {
742 log::error!("failed to compute diagnostics: {:?}", err);
747 .map(|diags| (file_id, diags))
749 .collect::<Vec<_>>();
750 Task::Diagnostics(diagnostics)