1 //! Project loading & configuration updates
2 use std::{mem, sync::Arc};
4 use flycheck::{FlycheckConfig, FlycheckHandle};
5 use hir::db::DefDatabase;
8 CrateGraph, Env, ProcMacro, ProcMacroExpander, ProcMacroExpansionError, ProcMacroKind,
11 use proc_macro_api::{MacroDylib, ProcMacroServer};
12 use project_model::{ProjectWorkspace, WorkspaceBuildScripts};
13 use vfs::{file_set::FileSetConfig, AbsPath, AbsPathBuf, ChangeKind};
16 config::{Config, FilesWatcher, LinkedProject},
17 global_state::GlobalState,
// Progress states reported by the background workspace-fetch task
// (sent wrapped in `Task::FetchWorkspace`, see `fetch_workspaces` below).
// NOTE(review): listing is elided here — the `Begin`/`Report` variants and the
// closing brace are not visible in this chunk.
23 pub(crate) enum ProjectWorkspaceProgress {
// Terminal state: one load result per linked project / detached-file set.
26     End(Vec<anyhow::Result<ProjectWorkspace>>),
// Progress states reported by the background build-script task
// (sent wrapped in `Task::FetchBuildData`, see `fetch_build_data` below).
// NOTE(review): intermediate variants are elided from this chunk.
30 pub(crate) enum BuildDataProgress {
// Terminal state: the workspaces the scripts were run for, plus one
// build-script result per workspace (same order as the `Vec`).
33     End((Arc<Vec<ProjectWorkspace>>, Vec<anyhow::Result<WorkspaceBuildScripts>>)),
// Returns `true` when no background work is pending: no workspace fetch,
// no build-script run, and the VFS has fully caught up with the latest
// configuration (both the config version and the loaded-file counters).
37     pub(crate) fn is_quiescent(&self) -> bool {
38         !(self.fetch_workspaces_queue.op_in_progress()
39             || self.fetch_build_data_queue.op_in_progress()
// VFS progress lags behind the config we pushed → still loading.
40             || self.vfs_progress_config_version < self.vfs_config_version
41             || self.vfs_progress_n_done < self.vfs_progress_n_total)
// Installs a new `Config` and reacts only to the settings that actually
// changed: LRU capacity, linked projects (triggers a workspace re-fetch),
// or flycheck settings (restarts flycheck).
// NOTE(review): some lines of this method are elided in this chunk.
44     pub(crate) fn update_configuration(&mut self, config: Config) {
45         let _p = profile::span("GlobalState::update_configuration");
// Swap in the new config while keeping the old one for diffing.
46         let old_config = mem::replace(&mut self.config, Arc::new(config));
47         if self.config.lru_capacity() != old_config.lru_capacity() {
48             self.analysis_host.update_lru_capacity(self.config.lru_capacity());
// A changed project set invalidates everything → full workspace reload;
// otherwise a flycheck-only change just restarts the checkers.
50         if self.config.linked_projects() != old_config.linked_projects() {
51             self.fetch_workspaces_queue.request_op()
52         } else if self.config.flycheck() != old_config.flycheck() {
53             self.reload_flycheck();
56         // Apply experimental feature flags.
59             .set_enable_proc_attr_macros(self.config.expand_proc_attr_macros());
// Requests a workspace reload if any of the given file-system changes is
// "interesting" (see `is_interesting` below: Cargo.toml/Cargo.lock edits,
// added/removed target files, etc.). Uninteresting change sets are ignored.
61     pub(crate) fn maybe_refresh(&mut self, changes: &[(AbsPathBuf, ChangeKind)]) {
// Early out: nothing relevant changed.
62         if !changes.iter().any(|(path, kind)| is_interesting(path, *kind)) {
// Log exactly which changes caused the reload, for debuggability.
66             "Requesting workspace reload because of the following changes: {}",
70                 .filter(|(path, kind)| is_interesting(path, *kind))
71                 .map(|(path, kind)| format!("{}: {:?}", path.display(), kind)),
75         self.fetch_workspaces_queue.request_op();
// Decides whether a single file-system change should trigger a workspace
// reload. Cargo manifests/lockfiles always qualify; otherwise only
// creations/deletions of `.rs` files at Cargo's implicit-target locations
// (build.rs, src/main.rs, src/lib.rs, src/bin/, examples/, tests/, benches/).
// NOTE(review): several return statements are elided from this listing —
// verdicts below are inferred from the visible conditions; confirm against
// the full file.
77 fn is_interesting(path: &AbsPath, change_kind: ChangeKind) -> bool {
78     const IMPLICIT_TARGET_FILES: &[&str] = &["build.rs", "src/main.rs", "src/lib.rs"];
79     const IMPLICIT_TARGET_DIRS: &[&str] = &["src/bin", "examples", "tests", "benches"];
80     let file_name = path.file_name().unwrap_or_default();
// Manifest or lockfile changes always matter.
82     if file_name == "Cargo.toml" || file_name == "Cargo.lock" {
// Modifying an existing source file doesn't change the project structure.
85     if change_kind == ChangeKind::Modify {
// Only Rust sources can add/remove targets.
88     if path.extension().unwrap_or_default() != "rs" {
91     if IMPLICIT_TARGET_FILES.iter().any(|it| path.as_ref().ends_with(it)) {
94     let parent = match path.parent() {
// Files directly inside an implicit target directory (e.g. src/bin/foo.rs).
98     if IMPLICIT_TARGET_DIRS.iter().any(|it| parent.as_ref().ends_with(it)) {
// `main.rs` one level deeper (e.g. src/bin/foo/main.rs).
101     if file_name == "main.rs" {
102         let grand_parent = match parent.parent() {
104             None => return false,
106         if IMPLICIT_TARGET_DIRS.iter().any(|it| grand_parent.as_ref().ends_with(it)) {
// Builds the `experimental/serverStatus` payload: starts from Ok/quiescent
// and downgrades health as problems are found. Order matters — later checks
// overwrite earlier ones, so a workspace-load Error trumps a build-data
// Warning.
114     pub(crate) fn current_status(&self) -> lsp_ext::ServerStatusParams {
115         let mut status = lsp_ext::ServerStatusParams {
116             health: lsp_ext::Health::Ok,
117             quiescent: self.is_quiescent(),
// Build-script failures are only a warning: analysis still mostly works.
121         if let Some(error) = self.fetch_build_data_error() {
122             status.health = lsp_ext::Health::Warning;
123             status.message = Some(error)
// With auto-reload off, a requested-but-not-run reload needs user action.
125         if !self.config.cargo_autoreload()
126             && self.is_quiescent()
127             && self.fetch_workspaces_queue.op_requested()
129             status.health = lsp_ext::Health::Warning;
130             status.message = Some("Workspace reload required".to_string())
// A failed workspace load is fatal to analysis → Error.
133         if let Some(error) = self.fetch_workspace_error() {
134             status.health = lsp_ext::Health::Error;
135             status.message = Some(error)
// Kicks off an async task that loads every linked project (Cargo manifest
// or inline rust-project.json) plus any detached files, streaming
// Begin/Report/End progress back to the main loop as `Task::FetchWorkspace`.
// Config values are snapshotted before spawning so the task doesn't race
// with config updates.
140     pub(crate) fn fetch_workspaces(&mut self) {
141         tracing::info!("will fetch workspaces");
143         self.task_pool.handle.spawn_with_sender({
// Snapshot config on the main thread; the closure below runs on the pool.
144             let linked_projects = self.config.linked_projects();
145             let detached_files = self.config.detached_files().to_vec();
146             let cargo_config = self.config.cargo();
150                 let sender = sender.clone();
// Forward per-project progress messages to the main loop.
153                     .send(Task::FetchWorkspace(ProjectWorkspaceProgress::Report(msg)))
158                 sender.send(Task::FetchWorkspace(ProjectWorkspaceProgress::Begin)).unwrap();
160                 let mut workspaces = linked_projects
162                     .map(|project| match project {
// A path to a Cargo.toml / rust-project.json on disk.
163                         LinkedProject::ProjectManifest(manifest) => {
164                             project_model::ProjectWorkspace::load(
// A rust-project.json passed inline through the LSP config.
170                         LinkedProject::InlineJsonProject(it) => {
171                             project_model::ProjectWorkspace::load_inline(
173                                 cargo_config.target.as_deref(),
177                     .collect::<Vec<_>>();
// Standalone files outside any project get their own synthetic workspace.
179                 if !detached_files.is_empty() {
181                         .push(project_model::ProjectWorkspace::load_detached_files(detached_files));
184                 tracing::info!("did fetch workspaces {:?}", workspaces);
186                     .send(Task::FetchWorkspace(ProjectWorkspaceProgress::End(workspaces)))
// Kicks off an async task that runs build scripts for every current
// workspace, reporting Begin/Report/End via `Task::FetchBuildData`.
// The `Arc<Vec<ProjectWorkspace>>` snapshot is sent back with the results
// so the receiver can check (via `Arc::ptr_eq`) that they still match the
// active workspaces.
192     pub(crate) fn fetch_build_data(&mut self) {
193         let workspaces = Arc::clone(&self.workspaces);
194         let config = self.config.cargo();
195         self.task_pool.handle.spawn_with_sender(move |sender| {
196             sender.send(Task::FetchBuildData(BuildDataProgress::Begin)).unwrap();
199                 let sender = sender.clone();
// Stream cargo's progress lines back to the main loop.
201                     sender.send(Task::FetchBuildData(BuildDataProgress::Report(msg))).unwrap()
// One result per workspace, in workspace order.
204             let mut res = Vec::new();
205             for ws in workspaces.iter() {
206                 res.push(ws.run_build_scripts(&config, &progress));
208             sender.send(Task::FetchBuildData(BuildDataProgress::End((workspaces, res)))).unwrap();
// Applies the results of the latest workspace fetch and/or build-script run:
// updates `self.workspaces`, (re)registers file watchers with the client,
// reconfigures the VFS loader, optionally spawns the proc-macro server,
// rebuilds the crate graph, and finally applies the change to the analysis
// host and restarts flycheck.
// NOTE(review): this listing is heavily elided — several branches and
// closing braces are not visible; comments below describe only the visible
// lines.
212     pub(crate) fn switch_workspaces(&mut self) {
213         let _p = profile::span("GlobalState::switch_workspaces");
214         tracing::info!("will switch workspaces");
// Bail out of a broken fetch unless we have no workspace at all yet —
// a partially broken workspace beats none.
216         if let Some(error_message) = self.fetch_workspace_error() {
217             tracing::error!("failed to switch workspaces: {}", error_message);
218             if !self.workspaces.is_empty() {
219                 // It only makes sense to switch to a partially broken workspace
220                 // if we don't have any workspace at all yet.
225         if let Some(error_message) = self.fetch_build_data_error() {
226             tracing::error!("failed to switch build data: {}", error_message);
// Keep only the workspaces that loaded successfully.
229         let workspaces = self
230             .fetch_workspaces_queue
233             .filter_map(|res| res.as_ref().ok().cloned())
234             .collect::<Vec<_>>();
// Structural equality for Cargo workspaces, deliberately ignoring
// build-script data so a build-data-only refresh isn't treated as a
// brand-new workspace.
236         fn eq_ignore_build_data<'a>(
237             left: &'a ProjectWorkspace,
238             right: &'a ProjectWorkspace,
240             let key = |p: &'a ProjectWorkspace| match p {
241                 ProjectWorkspace::Cargo {
249                 } => Some((cargo, sysroot, rustc, rustc_cfg, cfg_overrides)),
252             match (key(left), key(right)) {
253                 (Some(lk), Some(rk)) => lk == rk,
258         let same_workspaces = workspaces.len() == self.workspaces.len()
261             .zip(self.workspaces.iter())
262             .all(|(l, r)| eq_ignore_build_data(l, r));
265             let (workspaces, build_scripts) = self.fetch_build_data_queue.last_op_result();
// Pointer equality proves the build-script results were produced for
// exactly this workspace snapshot.
266             if Arc::ptr_eq(&workspaces, &self.workspaces) {
267                 let workspaces = workspaces
271                     .map(|(mut ws, bs)| {
// Failed build scripts degrade to empty results rather than aborting.
272                         ws.set_build_scripts(bs.as_ref().ok().cloned().unwrap_or_default());
275                     .collect::<Vec<_>>();
277                 // Workspaces are the same, but we've updated build data.
278                 self.workspaces = Arc::new(workspaces);
280                 // Current build scripts do not match the version of the active
281                 // workspace, so there's nothing for us to update.
285             // Here, we completely changed the workspace (Cargo.toml edit), so
286             // we don't care about build-script results, they are stale.
287             self.workspaces = Arc::new(workspaces)
// Client-side watching: dynamically register globs for source files and
// Cargo manifests/lockfiles in every local workspace root.
290         if let FilesWatcher::Client = self.config.files().watcher {
291             if self.config.did_change_watched_files_dynamic_registration() {
292                 let registration_options = lsp_types::DidChangeWatchedFilesRegistrationOptions {
296                     .flat_map(|ws| ws.to_roots())
297                     .filter(|it| it.is_local)
299                         root.include.into_iter().flat_map(|it| {
301                                 format!("{}/**/*.rs", it.display()),
302                                 format!("{}/**/Cargo.toml", it.display()),
303                                 format!("{}/**/Cargo.lock", it.display()),
307                     .map(|glob_pattern| lsp_types::FileSystemWatcher {
313                 let registration = lsp_types::Registration {
314                     id: "workspace/didChangeWatchedFiles".to_string(),
315                     method: "workspace/didChangeWatchedFiles".to_string(),
316                     register_options: Some(serde_json::to_value(registration_options).unwrap()),
318                 self.send_request::<lsp_types::request::RegisterCapability>(
319                     lsp_types::RegistrationParams { registrations: vec![registration] },
325         let mut change = Change::new();
327         let files_config = self.config.files();
328         let project_folders = ProjectFolders::new(&self.workspaces, &files_config.exclude);
// Lazily spawn the proc-macro expansion server the first time it's needed.
330         if self.proc_macro_client.is_none() {
331             self.proc_macro_client = match self.config.proc_macro_srv() {
333                 Some((path, args)) => match ProcMacroServer::spawn(path.clone(), args) {
337                             "Failed to run proc_macro_srv from path {}, error: {:?}",
// With client-side watching we must not also watch from the server.
347         let watch = match files_config.watcher {
348             FilesWatcher::Client => vec![],
349             FilesWatcher::Notify => project_folders.watch,
// Bump the config version so VFS progress reports can be matched to it
// (see `is_quiescent`).
351         self.vfs_config_version += 1;
352         self.loader.handle.set_config(vfs::loader::Config {
353             load: project_folders.load,
355             version: self.vfs_config_version,
358         // Create crate graph from all the workspaces
360         let proc_macro_client = self.proc_macro_client.as_ref();
361         let mut load_proc_macro =
362             move |path: &AbsPath| load_proc_macro(proc_macro_client, path);
364         let vfs = &mut self.vfs.write().0;
365         let loader = &mut self.loader;
366         let mem_docs = &self.mem_docs;
// File loader used by crate-graph construction: prefer in-memory (open)
// documents; otherwise load synchronously from disk into the VFS.
367         let mut load = move |path: &AbsPath| {
368             let _p = profile::span("GlobalState::load");
369             let vfs_path = vfs::VfsPath::from(path.to_path_buf());
370             if !mem_docs.contains(&vfs_path) {
371                 let contents = loader.handle.load_sync(path);
372                 vfs.set_file_contents(vfs_path.clone(), contents);
374             let res = vfs.file_id(&vfs_path);
376                 tracing::warn!("failed to load {}", path.display())
// Merge every workspace's crates into a single graph.
381         let mut crate_graph = CrateGraph::default();
382         for ws in self.workspaces.iter() {
383             crate_graph.extend(ws.to_crate_graph(&mut load_proc_macro, &mut load));
387         change.set_crate_graph(crate_graph);
389         self.source_root_config = project_folders.source_root_config;
// Commit everything to the analysis database, then restart flycheck for
// the new workspace set.
391         self.analysis_host.apply_change(change);
392         self.process_changes();
393         self.reload_flycheck();
394         tracing::info!("did switch workspaces");
// Collects the errors from the last workspace fetch into one message.
// Returns `None` when every workspace loaded successfully (the return paths
// are elided from this listing — confirm in the full file).
397     fn fetch_workspace_error(&self) -> Option<String> {
398         let mut buf = String::new();
400         for ws in self.fetch_workspaces_queue.last_op_result() {
401             if let Err(err) = ws {
// `{:#}` renders anyhow's full cause chain on one line.
402                 stdx::format_to!(buf, "rust-analyzer failed to load workspace: {:#}\n", err);
// Collects build-script failures from the last build-data run into one
// message; `has_errors` distinguishes "nothing to report" from the
// pre-seeded header line. NOTE(review): the branches between the two
// `format_to!` calls and the final return are elided from this listing.
413     fn fetch_build_data_error(&self) -> Option<String> {
414         let mut buf = "rust-analyzer failed to run build scripts:\n".to_string();
415         let mut has_errors = false;
// `.1` is the per-workspace result vec from `BuildDataProgress::End`.
417         for ws in &self.fetch_build_data_queue.last_op_result().1 {
420             if let Some(err) = data.error() {
422                 stdx::format_to!(buf, "{:#}\n", err);
427                 stdx::format_to!(buf, "{:#}\n", err);
// Tears down and re-creates the flycheck (cargo check) handles, one per
// workspace that supports it: Cargo workspaces always, JSON projects only
// when a custom flycheck command is configured, detached files never.
// NOTE(review): the "flycheck disabled" branch and parts of the spawn call
// are elided from this listing.
439     fn reload_flycheck(&mut self) {
440         let _p = profile::span("GlobalState::reload_flycheck");
441         let config = match self.config.flycheck() {
// No flycheck config → drop all existing handles.
444                 self.flycheck = Vec::new();
449         let sender = self.flycheck_sender.clone();
// Pick a (workspace id, root dir) per workspace that gets a checker.
454             .filter_map(|(id, w)| match w {
455                 ProjectWorkspace::Cargo { cargo, .. } => Some((id, cargo.workspace_root())),
456                 ProjectWorkspace::Json { project, .. } => {
457                     // Enable flychecks for json projects if a custom flycheck command was supplied
458                     // in the workspace configuration.
460                         FlycheckConfig::CustomCommand { .. } => Some((id, project.path())),
464                 ProjectWorkspace::DetachedFiles { .. } => None,
467                 let sender = sender.clone();
468                 FlycheckHandle::spawn(
// Messages flow back to the main loop over `flycheck_sender`.
470                     Box::new(move |msg| sender.send(msg).unwrap()),
472                     root.to_path_buf().into(),
// VFS configuration derived from the workspace set: what to load, which of
// those entries to watch, and how loaded files partition into source roots.
480 pub(crate) struct ProjectFolders {
// Directory/file entries the VFS loader should load.
481     pub(crate) load: Vec<vfs::loader::Entry>,
// Indices into `load` that should also be watched for changes.
482     pub(crate) watch: Vec<usize>,
483     pub(crate) source_root_config: SourceRootConfig,
486 impl ProjectFolders {
// Builds `ProjectFolders` from the workspace roots: one VFS directory
// entry per root (restricted to `.rs` files, with per-root and global
// excludes applied), one file set per root, and `local_filesets` marking
// which file sets belong to local (non-library) roots.
// NOTE(review): parts of this function (the watch/local decision, library
// handling, final return) are elided from this listing.
488         workspaces: &[ProjectWorkspace],
489         global_excludes: &[AbsPathBuf],
490     ) -> ProjectFolders {
491         let mut res = ProjectFolders::default();
492         let mut fsc = FileSetConfig::builder();
493         let mut local_filesets = vec![];
495         for root in workspaces.iter().flat_map(|ws| ws.to_roots()) {
496             let file_set_roots: Vec<VfsPath> =
497                 root.include.iter().cloned().map(VfsPath::from).collect();
500             let mut dirs = vfs::loader::Directories::default();
501             dirs.extensions.push("rs".into());
502             dirs.include.extend(root.include);
503             dirs.exclude.extend(root.exclude);
// Apply a global exclude only when it actually intersects this root's
// include set (either direction of prefix containment).
504             for excl in global_excludes {
508                     .any(|incl| incl.starts_with(excl) || excl.starts_with(incl))
510                     dirs.exclude.push(excl.clone());
514                 vfs::loader::Entry::Directories(dirs)
// Record the entry's index as watched before pushing it.
518                 res.watch.push(res.load.len());
520             res.load.push(entry);
// `fsc.len()` is the index the next `add_file_set` will get.
523                 local_filesets.push(fsc.len());
525             fsc.add_file_set(file_set_roots)
528         let fsc = fsc.build();
529         res.source_root_config = SourceRootConfig { fsc, local_filesets };
// How VFS files are partitioned into source roots, plus which of the
// resulting file sets are "local" (workspace code, as opposed to library
// dependencies).
535 #[derive(Default, Debug)]
536 pub(crate) struct SourceRootConfig {
537     pub(crate) fsc: FileSetConfig,
// Indices of file sets that correspond to local workspace roots.
538     pub(crate) local_filesets: Vec<usize>,
541 impl SourceRootConfig {
// Splits the VFS contents into `SourceRoot`s using the file-set config,
// marking each root local or library depending on whether its index is in
// `local_filesets`. NOTE(review): the partition call feeding this map and
// the final collect are elided from this listing.
542     pub(crate) fn partition(&self, vfs: &vfs::Vfs) -> Vec<SourceRoot> {
543         let _p = profile::span("SourceRootConfig::partition");
548             .map(|(idx, file_set)| {
549                 let is_local = self.local_filesets.contains(&idx);
551                     SourceRoot::new_local(file_set)
553                     SourceRoot::new_library(file_set)
560 pub(crate) fn load_proc_macro(client: Option<&ProcMacroServer>, path: &AbsPath) -> Vec<ProcMacro> {
561 let dylib = match MacroDylib::new(path.to_path_buf()) {
564 // FIXME: that's not really right -- we store this error in a
565 // persistent status.
566 tracing::warn!("failed to load proc macro: {}", err);
572 .map(|it| it.load_dylib(dylib))
574 .flat_map(|it| match it {
575 Ok(Ok(macros)) => macros,
577 tracing::error!("proc macro server crashed: {}", err);
581 // FIXME: that's not really right -- we store this error in a
582 // persistent status.
583 tracing::warn!("failed to load proc macro: {}", err);
587 .map(expander_to_proc_macro)
// Wraps a server-side `proc_macro_api::ProcMacro` into the database-level
// `ProcMacro`, translating the macro kind and boxing the expander behind
// the `ProcMacroExpander` trait object (see `Expander` below).
590 fn expander_to_proc_macro(expander: proc_macro_api::ProcMacro) -> ProcMacro {
591     let name = expander.name().into();
// 1:1 kind mapping between the two crates' enums.
592     let kind = match expander.kind() {
593         proc_macro_api::ProcMacroKind::CustomDerive => ProcMacroKind::CustomDerive,
594         proc_macro_api::ProcMacroKind::FuncLike => ProcMacroKind::FuncLike,
595         proc_macro_api::ProcMacroKind::Attr => ProcMacroKind::Attr,
597     let expander = Arc::new(Expander(expander));
598     ProcMacro { name, kind, expander }
602 struct Expander(proc_macro_api::ProcMacro);
// Bridges expansion requests to the proc-macro server, translating the
// nested transport/expansion errors into `ProcMacroExpansionError`.
// NOTE(review): this impl runs past the end of this chunk — the `expand`
// signature's opening lines and closing braces are not visible here.
604 impl ProcMacroExpander for Expander {
607         subtree: &tt::Subtree,
608         attrs: Option<&tt::Subtree>,
610     ) -> Result<tt::Subtree, ProcMacroExpansionError> {
// The server API wants owned (String, String) env pairs.
611         let env = env.iter().map(|(k, v)| (k.to_string(), v.to_string())).collect();
612         match self.0.expand(subtree, attrs, env) {
613             Ok(Ok(subtree)) => Ok(subtree),
// Inner Err = the macro itself panicked; outer Err = transport/server failure.
614             Ok(Err(err)) => Err(ProcMacroExpansionError::Panic(err.0)),
615             Err(err) => Err(ProcMacroExpansionError::System(err.to_string())),