use always_assert::always;
use crossbeam_channel::{select, Receiver};
-use ide::{FileId, PrimeCachesProgress};
use ide_db::base_db::{SourceDatabaseExt, VfsPath};
use lsp_server::{Connection, Notification, Request};
use lsp_types::notification::Notification as _;
-use vfs::ChangeKind;
+use vfs::{ChangeKind, FileId};
use crate::{
config::Config,
handlers, lsp_ext,
lsp_utils::{apply_document_changes, is_cancelled, notification_is, Progress},
mem_docs::DocumentData,
- reload::{BuildDataProgress, ProjectWorkspaceProgress},
+ reload::{self, BuildDataProgress, ProjectWorkspaceProgress},
Result,
};
FetchBuildData(BuildDataProgress),
}
+/// Progress reporting for the cache-priming background task.
+///
+/// Wraps `ide::PrimeCachesProgress` reports between explicit `Begin`/`End`
+/// markers so the main loop can drive an LSP progress notification.
+/// `End { cancelled: true }` signals the run was cancelled mid-way
+/// (e.g. by new edits) and should be re-requested.
+#[derive(Debug)]
+pub(crate) enum PrimeCachesProgress {
+ Begin,
+ Report(ide::PrimeCachesProgress),
+ End { cancelled: bool },
+}
+
impl fmt::Debug for Event {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let debug_verbose_not = |not: &Notification, f: &mut fmt::Formatter| {
&& self.config.notifications().cargo_toml_not_found
{
self.show_message(
- lsp_types::MessageType::Error,
+ lsp_types::MessageType::ERROR,
"rust-analyzer failed to discover workspace".to_string(),
);
};
);
}
- self.fetch_workspaces_request();
- self.fetch_workspaces_if_needed();
+ self.fetch_workspaces_queue.request_op();
+ if self.fetch_workspaces_queue.should_start_op() {
+ self.fetch_workspaces();
+ }
while let Some(event) = self.next_event(&inbox) {
if let Event::Lsp(lsp_server::Message::Notification(not)) = &event {
self.handle_event(event)?
}
- Err("client exited without proper shutdown sequence")?
+ return Err("client exited without proper shutdown sequence".into());
}
fn next_event(&self, inbox: &Receiver<lsp_server::Message>) -> Option<Event> {
}
}
Task::PrimeCaches(progress) => match progress {
- PrimeCachesProgress::Started => prime_caches_progress.push(progress),
- PrimeCachesProgress::StartedOnCrate { .. } => {
+ PrimeCachesProgress::Begin => prime_caches_progress.push(progress),
+ PrimeCachesProgress::Report(_) => {
match prime_caches_progress.last_mut() {
- Some(last @ PrimeCachesProgress::StartedOnCrate { .. }) => {
+ Some(last @ PrimeCachesProgress::Report(_)) => {
// Coalesce subsequent update events.
*last = progress;
}
_ => prime_caches_progress.push(progress),
}
}
- PrimeCachesProgress::Finished => prime_caches_progress.push(progress),
+ PrimeCachesProgress::End { .. } => prime_caches_progress.push(progress),
},
Task::FetchWorkspace(progress) => {
let (state, msg) = match progress {
(Progress::Report, Some(msg))
}
ProjectWorkspaceProgress::End(workspaces) => {
- self.fetch_workspaces_completed(workspaces);
+ self.fetch_workspaces_queue.op_completed(workspaces);
let old = Arc::clone(&self.workspaces);
self.switch_workspaces();
let workspaces_updated = !Arc::ptr_eq(&old, &self.workspaces);
if self.config.run_build_scripts() && workspaces_updated {
- self.fetch_build_data_request()
+ self.fetch_build_data_queue.request_op()
}
(Progress::End, None)
(Some(Progress::Report), Some(msg))
}
BuildDataProgress::End(build_data_result) => {
- self.fetch_build_data_completed(build_data_result);
+ self.fetch_build_data_queue.op_completed(build_data_result);
self.switch_workspaces();
for progress in prime_caches_progress {
let (state, message, fraction);
match progress {
- PrimeCachesProgress::Started => {
+ PrimeCachesProgress::Begin => {
state = Progress::Begin;
message = None;
fraction = 0.0;
}
- PrimeCachesProgress::StartedOnCrate { on_crate, n_done, n_total } => {
+ PrimeCachesProgress::Report(report) => {
state = Progress::Report;
- message = Some(format!("{}/{} ({})", n_done, n_total, on_crate));
- fraction = Progress::fraction(n_done, n_total);
+ message = Some(format!(
+ "{}/{} ({})",
+ report.n_done, report.n_total, report.on_crate
+ ));
+ fraction = Progress::fraction(report.n_done, report.n_total);
}
- PrimeCachesProgress::Finished => {
+ PrimeCachesProgress::End { cancelled } => {
state = Progress::End;
message = None;
fraction = 1.0;
self.prime_caches_queue.op_completed(());
+ if cancelled {
+ self.prime_caches_queue.request_op();
+ }
}
};
flycheck::Progress::DidCancel => (Progress::End, None),
flycheck::Progress::DidFinish(result) => {
if let Err(err) = result {
- tracing::error!("cargo check failed: {}", err)
+ self.show_message(
+ lsp_types::MessageType::ERROR,
+ format!("cargo check failed: {}", err),
+ );
}
(Progress::End, None)
}
// When we're running multiple flychecks, we have to include a disambiguator in
// the title, or the editor complains. Note that this is a user-facing string.
let title = if self.flycheck.len() == 1 {
- "cargo check".to_string()
+ match self.config.flycheck() {
+ Some(config) => format!("{}", config),
+ None => "cargo check".to_string(),
+ }
} else {
format!("cargo check (#{})", id + 1)
};
for flycheck in &self.flycheck {
flycheck.update();
}
+ if self.config.prefill_caches() {
+ self.prime_caches_queue.request_op();
+ }
}
if !was_quiescent || state_changed {
- // Ensure that only one cache priming task can run at a time
- self.prime_caches_queue.request_op();
- if self.prime_caches_queue.should_start_op() {
- self.task_pool.handle.spawn_with_sender({
- let analysis = self.snapshot().analysis;
- move |sender| {
- let cb = |progress| {
- sender.send(Task::PrimeCaches(progress)).unwrap();
- };
- match analysis.prime_caches(cb) {
- Ok(()) => (),
- Err(_canceled) => (),
- }
- }
- });
- }
-
// Refresh semantic tokens if the client supports it.
if self.config.semantic_tokens_refresh() {
self.semantic_tokens_cache.lock().clear();
}
if self.config.cargo_autoreload() {
- self.fetch_workspaces_if_needed();
+ if self.fetch_workspaces_queue.should_start_op() {
+ self.fetch_workspaces();
+ }
+ }
+ if self.fetch_build_data_queue.should_start_op() {
+ self.fetch_build_data();
+ }
+ if self.prime_caches_queue.should_start_op() {
+ self.task_pool.handle.spawn_with_sender({
+ let analysis = self.snapshot().analysis;
+ move |sender| {
+ sender.send(Task::PrimeCaches(PrimeCachesProgress::Begin)).unwrap();
+ let res = analysis.prime_caches(|progress| {
+ let report = PrimeCachesProgress::Report(progress);
+ sender.send(Task::PrimeCaches(report)).unwrap();
+ });
+ sender
+ .send(Task::PrimeCaches(PrimeCachesProgress::End {
+ cancelled: res.is_err(),
+ }))
+ .unwrap();
+ }
+ });
}
- self.fetch_build_data_if_needed();
- self.report_new_status_if_needed();
+ let status = self.current_status();
+ if self.last_reported_status.as_ref() != Some(&status) {
+ self.last_reported_status = Some(status.clone());
+
+ if let (lsp_ext::Health::Error, Some(message)) = (status.health, &status.message) {
+ self.show_message(lsp_types::MessageType::ERROR, message.clone());
+ }
+
+ if self.config.server_status_notification() {
+ self.send_notification::<lsp_ext::ServerStatusNotification>(status);
+ }
+ }
let loop_duration = loop_start.elapsed();
if loop_duration > Duration::from_millis(100) {
RequestDispatcher { req: Some(req), global_state: self }
.on_sync_mut::<lsp_ext::ReloadWorkspace>(|s, ()| {
- s.fetch_workspaces_request();
- s.fetch_workspaces_if_needed();
+ s.fetch_workspaces_queue.request_op();
Ok(())
})?
.on_sync_mut::<lsp_types::request::Shutdown>(|s, ()| {
s.shutdown_requested = true;
Ok(())
})?
- .on_sync_mut::<lsp_ext::MemoryUsage>(|s, p| handlers::handle_memory_usage(s, p))?
+ .on_sync_mut::<lsp_ext::MemoryUsage>(handlers::handle_memory_usage)?
.on_sync::<lsp_ext::JoinLines>(handlers::handle_join_lines)?
.on_sync::<lsp_ext::OnEnter>(handlers::handle_on_enter)?
.on_sync::<lsp_types::request::SelectionRangeRequest>(handlers::handle_selection_range)?
doc.version = params.text_document.version;
}
None => {
- tracing::error!("expected DidChangeTextDocument: {}", path);
+ tracing::error!("unexpected DidChangeTextDocument: {}; send DidOpenTextDocument first", path);
return Ok(());
}
};
flycheck.update();
}
if let Ok(abs_path) = from_proto::abs_path(¶ms.text_document.uri) {
- this.maybe_refresh(&[(abs_path, ChangeKind::Modify)]);
+ if reload::should_refresh_for_change(&abs_path, ChangeKind::Modify) {
+ this.fetch_workspaces_queue.request_op();
+ }
}
Ok(())
})?
if !is_cancelled(&*err) {
tracing::error!("failed to compute diagnostics: {:?}", err);
}
- ()
})
.ok()
.map(|diags| (file_id, diags))