use always_assert::always;
use crossbeam_channel::{select, Receiver};
-use ide::PrimeCachesProgress;
-use ide::{Canceled, FileId};
+use ide::{FileId, PrimeCachesProgress};
use ide_db::base_db::VfsPath;
-use lsp_server::{Connection, Notification, Request, Response};
+use lsp_server::{Connection, Notification, Request};
use lsp_types::notification::Notification as _;
-use project_model::BuildDataCollector;
use vfs::ChangeKind;
use crate::{
config::Config,
dispatch::{NotificationDispatcher, RequestDispatcher},
- document::DocumentData,
from_proto,
- global_state::{file_id_to_url, url_to_file_id, GlobalState, Status},
+ global_state::{file_id_to_url, url_to_file_id, GlobalState},
handlers, lsp_ext,
- lsp_utils::{apply_document_changes, is_canceled, notification_is, Progress},
+ lsp_utils::{apply_document_changes, is_cancelled, notification_is, Progress},
+ mem_docs::DocumentData,
reload::{BuildDataProgress, ProjectWorkspaceProgress},
Result,
};
#[derive(Debug)]
pub(crate) enum Task {
- Response(Response),
+ Response(lsp_server::Response),
Diagnostics(Vec<(FileId, Vec<lsp_types::Diagnostic>)>),
PrimeCaches(PrimeCachesProgress),
FetchWorkspace(ProjectWorkspaceProgress),
impl GlobalState {
fn run(mut self, inbox: Receiver<lsp_server::Message>) -> Result<()> {
if self.config.linked_projects().is_empty()
+ && self.config.detached_files().is_empty()
&& self.config.notifications().cargo_toml_not_found
{
self.show_message(
log::info!("task queue len: {}", task_queue_len);
}
- let mut new_status = self.status;
+ let was_quiescent = self.is_quiescent();
match event {
Event::Lsp(msg) => match msg {
lsp_server::Message::Request(req) => self.on_request(loop_start, req)?,
let workspaces_updated = !Arc::ptr_eq(&old, &self.workspaces);
if self.config.run_build_scripts() && workspaces_updated {
- let mut collector = BuildDataCollector::default();
- for ws in self.workspaces.iter() {
- ws.collect_build_data_configs(&mut collector);
- }
- self.fetch_build_data_request(collector)
+ self.fetch_build_data_request()
}
(Progress::End, None)
}
};
- self.report_progress("fetching", state, msg, None);
+ self.report_progress("Fetching", state, msg, None);
}
Task::FetchBuildData(progress) => {
let (state, msg) = match progress {
};
if let Some(state) = state {
- self.report_progress("loading", state, msg, None);
+ self.report_progress("Loading", state, msg, None);
}
}
}
state = Progress::End;
message = None;
fraction = 1.0;
+
+ self.prime_caches_queue.op_completed(());
}
};
- self.report_progress("indexing", state, message, Some(fraction));
+ self.report_progress("Indexing", state, message, Some(fraction));
}
}
Event::Vfs(mut task) => {
let vfs = &mut self.vfs.write().0;
for (path, contents) in files {
let path = VfsPath::from(path);
- if !self.mem_docs.contains_key(&path) {
+ if !self.mem_docs.contains(&path) {
vfs.set_file_contents(path, contents);
}
}
}
vfs::loader::Message::Progress { n_total, n_done, config_version } => {
+ always!(config_version <= self.vfs_config_version);
+
+ self.vfs_progress_config_version = config_version;
self.vfs_progress_n_total = n_total;
self.vfs_progress_n_done = n_done;
- always!(config_version <= self.vfs_config_version);
+
let state = if n_done == 0 {
Progress::Begin
} else if n_done < n_total {
Progress::End
};
self.report_progress(
- "roots scanned",
+ "Roots Scanned",
state,
Some(format!("{}/{}", n_done, n_total)),
Some(Progress::fraction(n_done, n_total)),
}
let state_changed = self.process_changes();
- let prev_status = self.status;
- if prev_status != new_status {
- self.transition(new_status);
- }
- let is_ready = matches!(self.status, Status::Ready { .. });
- if prev_status == Status::Loading && is_ready {
- for flycheck in &self.flycheck {
- flycheck.update();
+ let memdocs_added_or_removed = self.mem_docs.take_changes();
+
+ if self.is_quiescent() {
+ if !was_quiescent {
+ for flycheck in &self.flycheck {
+ flycheck.update();
+ }
}
- }
- if is_ready && (state_changed || prev_status == Status::Loading) {
- self.update_file_notifications_on_threadpool();
+ if !was_quiescent || state_changed {
+ // Ensure that only one cache priming task can run at a time
+ self.prime_caches_queue.request_op();
+ if self.prime_caches_queue.should_start_op() {
+ self.task_pool.handle.spawn_with_sender({
+ let snap = self.snapshot();
+ move |sender| {
+ let cb = |progress| {
+ sender.send(Task::PrimeCaches(progress)).unwrap();
+ };
+ match snap.analysis.prime_caches(cb) {
+ Ok(()) => (),
+ Err(_canceled) => (),
+ }
+ }
+ });
+ }
- // Refresh semantic tokens if the client supports it.
- if self.config.semantic_tokens_refresh() {
- self.semantic_tokens_cache.lock().clear();
- self.send_request::<lsp_types::request::SemanticTokensRefesh>((), |_, _| ());
+ // Refresh semantic tokens if the client supports it.
+ if self.config.semantic_tokens_refresh() {
+ self.semantic_tokens_cache.lock().clear();
+ self.send_request::<lsp_types::request::SemanticTokensRefesh>((), |_, _| ());
+ }
+
+ // Refresh code lens if the client supports it.
+ if self.config.code_lens_refresh() {
+ self.send_request::<lsp_types::request::CodeLensRefresh>((), |_, _| ());
+ }
}
- // Refresh code lens if the client supports it.
- if self.config.code_lens_refresh() {
- self.send_request::<lsp_types::request::CodeLensRefresh>((), |_, _| ());
+ if !was_quiescent || state_changed || memdocs_added_or_removed {
+ if self.config.publish_diagnostics() {
+ self.update_diagnostics()
+ }
}
}
}
self.fetch_build_data_if_needed();
+ self.report_new_status_if_needed();
+
let loop_duration = loop_start.elapsed();
if loop_duration > Duration::from_millis(100) {
log::warn!("overly long loop turn: {:?}", loop_duration);
self.register_request(&req, request_received);
if self.shutdown_requested {
- self.respond(Response::new_err(
+ self.respond(lsp_server::Response::new_err(
req.id,
lsp_server::ErrorCode::InvalidRequest as i32,
"Shutdown already requested.".to_owned(),
return Ok(());
}
- if self.status == Status::Loading && req.method != "shutdown" {
+ // Avoid flashing a bunch of unresolved references during initial load.
+ if self.workspaces.is_empty() && !self.is_quiescent() {
self.respond(lsp_server::Response::new_err(
req.id,
// FIXME: i32 should impl From<ErrorCode> (from() guarantees lossless conversion)
lsp_server::ErrorCode::ContentModified as i32,
- "Rust Analyzer is still loading...".to_owned(),
+ "waiting for cargo metadata or cargo check".to_owned(),
));
return Ok(());
}
.on::<lsp_ext::AnalyzerStatus>(handlers::handle_analyzer_status)
.on::<lsp_ext::SyntaxTree>(handlers::handle_syntax_tree)
.on::<lsp_ext::ViewHir>(handlers::handle_view_hir)
+ .on::<lsp_ext::ViewCrateGraph>(handlers::handle_view_crate_graph)
+ .on::<lsp_ext::ViewItemTree>(handlers::handle_view_item_tree)
.on::<lsp_ext::ExpandMacro>(handlers::handle_expand_macro)
.on::<lsp_ext::ParentModule>(handlers::handle_parent_module)
.on::<lsp_ext::Runnables>(handlers::handle_runnables)
.on::<lsp_ext::ExternalDocs>(handlers::handle_open_docs)
.on::<lsp_ext::OpenCargoToml>(handlers::handle_open_cargo_toml)
.on::<lsp_ext::MoveItem>(handlers::handle_move_item)
+ .on::<lsp_ext::WorkspaceSymbol>(handlers::handle_workspace_symbol)
.on::<lsp_types::request::OnTypeFormatting>(handlers::handle_on_type_formatting)
.on::<lsp_types::request::DocumentSymbolRequest>(handlers::handle_document_symbol)
- .on::<lsp_types::request::WorkspaceSymbol>(handlers::handle_workspace_symbol)
.on::<lsp_types::request::GotoDefinition>(handlers::handle_goto_definition)
+ .on::<lsp_types::request::GotoDeclaration>(handlers::handle_goto_declaration)
.on::<lsp_types::request::GotoImplementation>(handlers::handle_goto_implementation)
.on::<lsp_types::request::GotoTypeDefinition>(handlers::handle_goto_type_definition)
.on::<lsp_types::request::Completion>(handlers::handle_completion)
.on::<lsp_types::request::Rename>(handlers::handle_rename)
.on::<lsp_types::request::References>(handlers::handle_references)
.on::<lsp_types::request::Formatting>(handlers::handle_formatting)
+ .on::<lsp_types::request::RangeFormatting>(handlers::handle_range_formatting)
.on::<lsp_types::request::DocumentHighlightRequest>(handlers::handle_document_highlight)
.on::<lsp_types::request::CallHierarchyPrepare>(handlers::handle_call_hierarchy_prepare)
.on::<lsp_types::request::CallHierarchyIncomingCalls>(
this.cancel(id);
Ok(())
})?
+ .on::<lsp_types::notification::WorkDoneProgressCancel>(|_this, _params| {
+ // Just ignore this. It is OK to keep sending progress
+ // notifications for this token, as the client can't know when
+ // we accepted the cancellation notification.
+ Ok(())
+ })?
.on::<lsp_types::notification::DidOpenTextDocument>(|this, params| {
if let Ok(path) = from_proto::vfs_path(¶ms.text_document.uri) {
if this
.mem_docs
.insert(path.clone(), DocumentData::new(params.text_document.version))
- .is_some()
+ .is_err()
{
log::error!("duplicate DidOpenTextDocument: {}", path)
}
- let changed = this
- .vfs
+ this.vfs
.write()
.0
.set_file_contents(path, Some(params.text_document.text.into_bytes()));
-
- // If the VFS contents are unchanged, update diagnostics, since `handle_event`
- // won't see any changes. This avoids missing diagnostics when opening a file.
- //
- // If the file *was* changed, `handle_event` will already recompute and send
- // diagnostics. We can't do it here, since the *current* file contents might be
- // unset in salsa, since the VFS change hasn't been applied to the database yet.
- if !changed {
- this.maybe_update_diagnostics();
- }
}
Ok(())
})?
.on::<lsp_types::notification::DidChangeTextDocument>(|this, params| {
if let Ok(path) = from_proto::vfs_path(¶ms.text_document.uri) {
- let doc = match this.mem_docs.get_mut(&path) {
- Some(doc) => doc,
+ match this.mem_docs.get_mut(&path) {
+ Some(doc) => {
+ // The version passed in DidChangeTextDocument is the version after all edits are applied,
+ // so we should apply it before the VFS is notified.
+ doc.version = params.text_document.version;
+ }
None => {
log::error!("expected DidChangeTextDocument: {}", path);
return Ok(());
}
};
+
let vfs = &mut this.vfs.write().0;
let file_id = vfs.file_id(&path).unwrap();
let mut text = String::from_utf8(vfs.file_contents(file_id).to_vec()).unwrap();
apply_document_changes(&mut text, params.content_changes);
- // The version passed in DidChangeTextDocument is the version after all edits are applied
- // so we should apply it before the vfs is notified.
- doc.version = params.text_document.version;
-
vfs.set_file_contents(path.clone(), Some(text.into_bytes()));
}
Ok(())
})?
.on::<lsp_types::notification::DidCloseTextDocument>(|this, params| {
- let mut version = None;
if let Ok(path) = from_proto::vfs_path(¶ms.text_document.uri) {
- match this.mem_docs.remove(&path) {
- Some(doc) => version = Some(doc.version),
- None => log::error!("orphan DidCloseTextDocument: {}", path),
+ if this.mem_docs.remove(&path).is_err() {
+ log::error!("orphan DidCloseTextDocument: {}", path);
}
this.semantic_tokens_cache.lock().remove(¶ms.text_document.uri);
this.loader.handle.invalidate(path.to_path_buf());
}
}
-
- // Clear the diagnostics for the previously known version of the file.
- // This prevents stale "cargo check" diagnostics if the file is
- // closed, "cargo check" is run and then the file is reopened.
- this.send_notification::<lsp_types::notification::PublishDiagnostics>(
- lsp_types::PublishDiagnosticsParams {
- uri: params.text_document.uri,
- diagnostics: Vec::new(),
- version,
- },
- );
Ok(())
})?
.on::<lsp_types::notification::DidSaveTextDocument>(|this, params| {
},
|this, resp| {
log::debug!("config update response: '{:?}", resp);
- let Response { error, result, .. } = resp;
+ let lsp_server::Response { error, result, .. } = resp;
match (error, result) {
(Some(err), _) => {
},
);
- return Ok(());
+ Ok(())
})?
.on::<lsp_types::notification::DidChangeWatchedFiles>(|this, params| {
for change in params.changes {
.finish();
Ok(())
}
- fn update_file_notifications_on_threadpool(&mut self) {
- self.maybe_update_diagnostics();
- self.task_pool.handle.spawn_with_sender({
- let snap = self.snapshot();
- move |sender| {
- snap.analysis
- .prime_caches(|progress| {
- sender.send(Task::PrimeCaches(progress)).unwrap();
- })
- .unwrap_or_else(|_: Canceled| {
- // Pretend that we're done, so that the progress bar is removed. Otherwise
- // the editor may complain about it already existing.
- sender.send(Task::PrimeCaches(PrimeCachesProgress::Finished)).unwrap()
- });
- }
- });
- }
- fn maybe_update_diagnostics(&mut self) {
+
+ fn update_diagnostics(&mut self) {
let subscriptions = self
.mem_docs
- .keys()
- .map(|path| self.vfs.read().0.file_id(&path).unwrap())
+ .iter()
+ .map(|path| self.vfs.read().0.file_id(path).unwrap())
.collect::<Vec<_>>();
log::trace!("updating notifications for {:?}", subscriptions);
- if self.config.publish_diagnostics() {
- let snapshot = self.snapshot();
- self.task_pool.handle.spawn(move || {
- let diagnostics = subscriptions
- .into_iter()
- .filter_map(|file_id| {
- handlers::publish_diagnostics(&snapshot, file_id)
- .map_err(|err| {
- if !is_canceled(&*err) {
- log::error!("failed to compute diagnostics: {:?}", err);
- }
- ()
- })
- .ok()
- .map(|diags| (file_id, diags))
- })
- .collect::<Vec<_>>();
- Task::Diagnostics(diagnostics)
- })
- }
+
+ let snapshot = self.snapshot();
+ self.task_pool.handle.spawn(move || {
+ let diagnostics = subscriptions
+ .into_iter()
+ .filter_map(|file_id| {
+ handlers::publish_diagnostics(&snapshot, file_id)
+ .map_err(|err| {
+ if !is_cancelled(&*err) {
+ log::error!("failed to compute diagnostics: {:?}", err);
+ }
+ ()
+ })
+ .ok()
+ .map(|diags| (file_id, diags))
+ })
+ .collect::<Vec<_>>();
+ Task::Diagnostics(diagnostics)
+ })
}
}