diff --git a/crates/ide/src/prime_caches.rs b/crates/ide/src/prime_caches.rs index ac23ed8bd2..88a9240bfa 100644 --- a/crates/ide/src/prime_caches.rs +++ b/crates/ide/src/prime_caches.rs @@ -8,17 +8,12 @@ use ide_db::base_db::SourceDatabase; use crate::RootDatabase; +/// We started indexing a crate. #[derive(Debug)] -pub enum PrimeCachesProgress { - Started, - /// We started indexing a crate. - StartedOnCrate { - on_crate: String, - n_done: usize, - n_total: usize, - }, - /// We finished indexing all crates. - Finished, +pub struct PrimeCachesProgress { + pub on_crate: String, + pub n_done: usize, + pub n_total: usize, } pub(crate) fn prime_caches(db: &RootDatabase, cb: &(dyn Fn(PrimeCachesProgress) + Sync)) { @@ -26,21 +21,13 @@ pub(crate) fn prime_caches(db: &RootDatabase, cb: &(dyn Fn(PrimeCachesProgress) let graph = db.crate_graph(); let topo = &graph.crates_in_topological_order(); - cb(PrimeCachesProgress::Started); - // Take care to emit the finish signal even when the computation is canceled. - let _d = stdx::defer(|| cb(PrimeCachesProgress::Finished)); - // FIXME: This would be easy to parallelize, since it's in the ideal ordering for that. // Unfortunately rayon prevents panics from propagation out of a `scope`, which breaks // cancellation, so we cannot use rayon. 
for (i, &crate_id) in topo.iter().enumerate() { let crate_name = graph[crate_id].display_name.as_deref().unwrap_or_default().to_string(); - cb(PrimeCachesProgress::StartedOnCrate { - on_crate: crate_name, - n_done: i, - n_total: topo.len(), - }); + cb(PrimeCachesProgress { on_crate: crate_name, n_done: i, n_total: topo.len() }); db.crate_def_map(crate_id); db.import_map(crate_id); } diff --git a/crates/rust-analyzer/src/main_loop.rs b/crates/rust-analyzer/src/main_loop.rs index eaf417117e..ad43e7eca0 100644 --- a/crates/rust-analyzer/src/main_loop.rs +++ b/crates/rust-analyzer/src/main_loop.rs @@ -8,11 +8,10 @@ use std::{ use always_assert::always; use crossbeam_channel::{select, Receiver}; -use ide::{FileId, PrimeCachesProgress}; use ide_db::base_db::{SourceDatabaseExt, VfsPath}; use lsp_server::{Connection, Notification, Request}; use lsp_types::notification::Notification as _; -use vfs::ChangeKind; +use vfs::{ChangeKind, FileId}; use crate::{ config::Config, @@ -67,6 +66,13 @@ pub(crate) enum Task { FetchBuildData(BuildDataProgress), } +#[derive(Debug)] +pub(crate) enum PrimeCachesProgress { + Begin, + Report(ide::PrimeCachesProgress), + End { cancelled: bool }, +} + impl fmt::Debug for Event { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { let debug_verbose_not = |not: &Notification, f: &mut fmt::Formatter| { @@ -146,8 +152,10 @@ impl GlobalState { ); } - self.fetch_workspaces_request(); - self.fetch_workspaces_if_needed(); + self.fetch_workspaces_queue.request_op(); + if self.fetch_workspaces_queue.should_start_op() { + self.fetch_workspaces(); + } while let Some(event) = self.next_event(&inbox) { if let Event::Lsp(lsp_server::Message::Notification(not)) = &event { @@ -209,17 +217,17 @@ impl GlobalState { } } Task::PrimeCaches(progress) => match progress { - PrimeCachesProgress::Started => prime_caches_progress.push(progress), - PrimeCachesProgress::StartedOnCrate { .. 
} => { + PrimeCachesProgress::Begin => prime_caches_progress.push(progress), + PrimeCachesProgress::Report(_) => { match prime_caches_progress.last_mut() { - Some(last @ PrimeCachesProgress::StartedOnCrate { .. }) => { + Some(last @ PrimeCachesProgress::Report(_)) => { // Coalesce subsequent update events. *last = progress; } _ => prime_caches_progress.push(progress), } } - PrimeCachesProgress::Finished => prime_caches_progress.push(progress), + PrimeCachesProgress::End { .. } => prime_caches_progress.push(progress), }, Task::FetchWorkspace(progress) => { let (state, msg) = match progress { @@ -228,14 +236,14 @@ impl GlobalState { (Progress::Report, Some(msg)) } ProjectWorkspaceProgress::End(workspaces) => { - self.fetch_workspaces_completed(workspaces); + self.fetch_workspaces_queue.op_completed(workspaces); let old = Arc::clone(&self.workspaces); self.switch_workspaces(); let workspaces_updated = !Arc::ptr_eq(&old, &self.workspaces); if self.config.run_build_scripts() && workspaces_updated { - self.fetch_build_data_request() + self.fetch_build_data_queue.request_op() } (Progress::End, None) @@ -251,7 +259,7 @@ impl GlobalState { (Some(Progress::Report), Some(msg)) } BuildDataProgress::End(build_data_result) => { - self.fetch_build_data_completed(build_data_result); + self.fetch_build_data_queue.op_completed(build_data_result); self.switch_workspaces(); @@ -275,22 +283,28 @@ impl GlobalState { for progress in prime_caches_progress { let (state, message, fraction); match progress { - PrimeCachesProgress::Started => { + PrimeCachesProgress::Begin => { state = Progress::Begin; message = None; fraction = 0.0; } - PrimeCachesProgress::StartedOnCrate { on_crate, n_done, n_total } => { + PrimeCachesProgress::Report(report) => { state = Progress::Report; - message = Some(format!("{}/{} ({})", n_done, n_total, on_crate)); - fraction = Progress::fraction(n_done, n_total); + message = Some(format!( + "{}/{} ({})", + report.n_done, report.n_total, report.on_crate + )); + 
fraction = Progress::fraction(report.n_done, report.n_total); } - PrimeCachesProgress::Finished => { + PrimeCachesProgress::End { cancelled } => { state = Progress::End; message = None; fraction = 1.0; self.prime_caches_queue.op_completed(()); + if cancelled { + self.prime_caches_queue.request_op(); + } } }; @@ -413,26 +427,10 @@ impl GlobalState { for flycheck in &self.flycheck { flycheck.update(); } + self.prime_caches_queue.request_op(); } if !was_quiescent || state_changed { - // Ensure that only one cache priming task can run at a time - self.prime_caches_queue.request_op(); - if self.prime_caches_queue.should_start_op() { - self.task_pool.handle.spawn_with_sender({ - let analysis = self.snapshot().analysis; - move |sender| { - let cb = |progress| { - sender.send(Task::PrimeCaches(progress)).unwrap(); - }; - match analysis.prime_caches(cb) { - Ok(()) => (), - Err(_canceled) => (), - } - } - }); - } - // Refresh semantic tokens if the client supports it. if self.config.semantic_tokens_refresh() { self.semantic_tokens_cache.lock().clear(); @@ -478,11 +476,43 @@ impl GlobalState { } if self.config.cargo_autoreload() { - self.fetch_workspaces_if_needed(); + if self.fetch_workspaces_queue.should_start_op() { + self.fetch_workspaces(); + } + } + if self.fetch_build_data_queue.should_start_op() { + self.fetch_build_data(); + } + if self.prime_caches_queue.should_start_op() { + self.task_pool.handle.spawn_with_sender({ + let analysis = self.snapshot().analysis; + move |sender| { + sender.send(Task::PrimeCaches(PrimeCachesProgress::Begin)).unwrap(); + let res = analysis.prime_caches(|progress| { + let report = PrimeCachesProgress::Report(progress); + sender.send(Task::PrimeCaches(report)).unwrap(); + }); + sender + .send(Task::PrimeCaches(PrimeCachesProgress::End { + cancelled: res.is_err(), + })) + .unwrap(); + } + }); } - self.fetch_build_data_if_needed(); - self.report_new_status_if_needed(); + let status = self.current_status(); + if 
self.last_reported_status.as_ref() != Some(&status) { + self.last_reported_status = Some(status.clone()); + + if let (lsp_ext::Health::Error, Some(message)) = (status.health, &status.message) { + self.show_message(lsp_types::MessageType::Error, message.clone()); + } + + if self.config.server_status_notification() { + self.send_notification::(status); + } + } let loop_duration = loop_start.elapsed(); if loop_duration > Duration::from_millis(100) { @@ -521,8 +551,7 @@ impl GlobalState { RequestDispatcher { req: Some(req), global_state: self } .on_sync_mut::(|s, ()| { - s.fetch_workspaces_request(); - s.fetch_workspaces_if_needed(); + s.fetch_workspaces_queue.request_op(); Ok(()) })? .on_sync_mut::(|s, ()| { diff --git a/crates/rust-analyzer/src/reload.rs b/crates/rust-analyzer/src/reload.rs index fa9f05bc38..ad4d81ada7 100644 --- a/crates/rust-analyzer/src/reload.rs +++ b/crates/rust-analyzer/src/reload.rs @@ -47,7 +47,7 @@ impl GlobalState { self.analysis_host.update_lru_capacity(self.config.lru_capacity()); } if self.config.linked_projects() != old_config.linked_projects() { - self.fetch_workspaces_request() + self.fetch_workspaces_queue.request_op() } else if self.config.flycheck() != old_config.flycheck() { self.reload_flycheck(); } @@ -71,7 +71,7 @@ impl GlobalState { ", " ) ); - self.fetch_workspaces_request(); + self.fetch_workspaces_queue.request_op(); fn is_interesting(path: &AbsPath, change_kind: ChangeKind) -> bool { const IMPLICIT_TARGET_FILES: &[&str] = &["build.rs", "src/main.rs", "src/lib.rs"]; @@ -109,7 +109,8 @@ impl GlobalState { false } } - pub(crate) fn report_new_status_if_needed(&mut self) { + + pub(crate) fn current_status(&self) -> lsp_ext::ServerStatusParams { let mut status = lsp_ext::ServerStatusParams { health: lsp_ext::Health::Ok, quiescent: self.is_quiescent(), @@ -132,27 +133,10 @@ impl GlobalState { status.health = lsp_ext::Health::Error; status.message = Some(error) } - - if self.last_reported_status.as_ref() != Some(&status) { - 
self.last_reported_status = Some(status.clone()); - - if let (lsp_ext::Health::Error, Some(message)) = (status.health, &status.message) { - self.show_message(lsp_types::MessageType::Error, message.clone()); - } - - if self.config.server_status_notification() { - self.send_notification::(status); - } - } + status } - pub(crate) fn fetch_workspaces_request(&mut self) { - self.fetch_workspaces_queue.request_op() - } - pub(crate) fn fetch_workspaces_if_needed(&mut self) { - if !self.fetch_workspaces_queue.should_start_op() { - return; - } + pub(crate) fn fetch_workspaces(&mut self) { tracing::info!("will fetch workspaces"); self.task_pool.handle.spawn_with_sender({ @@ -203,21 +187,8 @@ impl GlobalState { } }); } - pub(crate) fn fetch_workspaces_completed( - &mut self, - workspaces: Vec>, - ) { - self.fetch_workspaces_queue.op_completed(workspaces) - } - - pub(crate) fn fetch_build_data_request(&mut self) { - self.fetch_build_data_queue.request_op(); - } - pub(crate) fn fetch_build_data_if_needed(&mut self) { - if !self.fetch_build_data_queue.should_start_op() { - return; - } + pub(crate) fn fetch_build_data(&mut self) { let workspaces = Arc::clone(&self.workspaces); let config = self.config.cargo(); self.task_pool.handle.spawn_with_sender(move |sender| { @@ -236,12 +207,6 @@ impl GlobalState { sender.send(Task::FetchBuildData(BuildDataProgress::End((workspaces, res)))).unwrap(); }); } - pub(crate) fn fetch_build_data_completed( - &mut self, - build_data: (Arc>, Vec>), - ) { - self.fetch_build_data_queue.op_completed(build_data) - } pub(crate) fn switch_workspaces(&mut self) { let _p = profile::span("GlobalState::switch_workspaces"); diff --git a/docs/dev/style.md b/docs/dev/style.md index 7954ae8ec6..6131cdcbdc 100644 --- a/docs/dev/style.md +++ b/docs/dev/style.md @@ -257,6 +257,25 @@ if idx >= len { **Rationale:** it's useful to see the invariant relied upon by the rest of the function clearly spelled out. 
+## Control Flow + +As a special case of the previous rule, do not hide control flow inside functions; push it to the caller: + +```rust +// GOOD +if cond { + f() +} + +// BAD +fn f() { + if !cond { + return; + } + ... +} +``` + ## Assertions Assert liberally.