From 6317292cd52de5119f712be5346051d454f508ad Mon Sep 17 00:00:00 2001
From: Aleksey Kladov
Date: Mon, 30 Aug 2021 19:18:48 +0300
Subject: [PATCH 1/4] internal: more obviously correct code for cache priming
 progress

It doesn't make sense for prime_caches itself to send begin/end
events -- the caller knows perfectly well when they happen!
---
 crates/ide/src/prime_caches.rs        | 25 ++++----------
 crates/rust-analyzer/src/main_loop.rs | 48 +++++++++++++++++----------
 2 files changed, 36 insertions(+), 37 deletions(-)

diff --git a/crates/ide/src/prime_caches.rs b/crates/ide/src/prime_caches.rs
index ac23ed8bd2..88a9240bfa 100644
--- a/crates/ide/src/prime_caches.rs
+++ b/crates/ide/src/prime_caches.rs
@@ -8,17 +8,12 @@ use ide_db::base_db::SourceDatabase;
 
 use crate::RootDatabase;
 
+/// We started indexing a crate.
 #[derive(Debug)]
-pub enum PrimeCachesProgress {
-    Started,
-    /// We started indexing a crate.
-    StartedOnCrate {
-        on_crate: String,
-        n_done: usize,
-        n_total: usize,
-    },
-    /// We finished indexing all crates.
-    Finished,
+pub struct PrimeCachesProgress {
+    pub on_crate: String,
+    pub n_done: usize,
+    pub n_total: usize,
 }
 
 pub(crate) fn prime_caches(db: &RootDatabase, cb: &(dyn Fn(PrimeCachesProgress) + Sync)) {
@@ -26,21 +21,13 @@ pub(crate) fn prime_caches(db: &RootDatabase, cb: &(dyn Fn(PrimeCachesProgress)
     let graph = db.crate_graph();
     let topo = &graph.crates_in_topological_order();
 
-    cb(PrimeCachesProgress::Started);
-    // Take care to emit the finish signal even when the computation is canceled.
-    let _d = stdx::defer(|| cb(PrimeCachesProgress::Finished));
-
     // FIXME: This would be easy to parallelize, since it's in the ideal ordering for that.
     // Unfortunately rayon prevents panics from propagation out of a `scope`, which breaks
     // cancellation, so we cannot use rayon.
     for (i, &crate_id) in topo.iter().enumerate() {
         let crate_name = graph[crate_id].display_name.as_deref().unwrap_or_default().to_string();
 
-        cb(PrimeCachesProgress::StartedOnCrate {
-            on_crate: crate_name,
-            n_done: i,
-            n_total: topo.len(),
-        });
+        cb(PrimeCachesProgress { on_crate: crate_name, n_done: i, n_total: topo.len() });
         db.crate_def_map(crate_id);
         db.import_map(crate_id);
     }
diff --git a/crates/rust-analyzer/src/main_loop.rs b/crates/rust-analyzer/src/main_loop.rs
index eaf417117e..38e0e9c65b 100644
--- a/crates/rust-analyzer/src/main_loop.rs
+++ b/crates/rust-analyzer/src/main_loop.rs
@@ -8,11 +8,10 @@ use std::{
 
 use always_assert::always;
 use crossbeam_channel::{select, Receiver};
-use ide::{FileId, PrimeCachesProgress};
 use ide_db::base_db::{SourceDatabaseExt, VfsPath};
 use lsp_server::{Connection, Notification, Request};
 use lsp_types::notification::Notification as _;
-use vfs::ChangeKind;
+use vfs::{ChangeKind, FileId};
 
 use crate::{
     config::Config,
@@ -67,6 +66,13 @@ pub(crate) enum Task {
     FetchBuildData(BuildDataProgress),
 }
 
+#[derive(Debug)]
+pub(crate) enum PrimeCachesProgress {
+    Begin,
+    Report(ide::PrimeCachesProgress),
+    End { cancelled: bool },
+}
+
 impl fmt::Debug for Event {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         let debug_verbose_not = |not: &Notification, f: &mut fmt::Formatter| {
@@ -209,17 +215,17 @@ impl GlobalState {
                     }
                 }
                 Task::PrimeCaches(progress) => match progress {
-                    PrimeCachesProgress::Started => prime_caches_progress.push(progress),
-                    PrimeCachesProgress::StartedOnCrate { .. } => {
+                    PrimeCachesProgress::Begin => prime_caches_progress.push(progress),
+                    PrimeCachesProgress::Report(_) => {
                         match prime_caches_progress.last_mut() {
-                            Some(last @ PrimeCachesProgress::StartedOnCrate { .. }) => {
+                            Some(last @ PrimeCachesProgress::Report(_)) => {
                                 // Coalesce subsequent update events.
                                 *last = progress;
                             }
                             _ => prime_caches_progress.push(progress),
                         }
                     }
-                    PrimeCachesProgress::Finished => prime_caches_progress.push(progress),
+                    PrimeCachesProgress::End { .. } => prime_caches_progress.push(progress),
                 },
                 Task::FetchWorkspace(progress) => {
                     let (state, msg) = match progress {
@@ -275,17 +281,20 @@ impl GlobalState {
             for progress in prime_caches_progress {
                 let (state, message, fraction);
                 match progress {
-                    PrimeCachesProgress::Started => {
+                    PrimeCachesProgress::Begin => {
                         state = Progress::Begin;
                         message = None;
                         fraction = 0.0;
                     }
-                    PrimeCachesProgress::StartedOnCrate { on_crate, n_done, n_total } => {
+                    PrimeCachesProgress::Report(report) => {
                         state = Progress::Report;
-                        message = Some(format!("{}/{} ({})", n_done, n_total, on_crate));
-                        fraction = Progress::fraction(n_done, n_total);
+                        message = Some(format!(
+                            "{}/{} ({})",
+                            report.n_done, report.n_total, report.on_crate
+                        ));
+                        fraction = Progress::fraction(report.n_done, report.n_total);
                     }
-                    PrimeCachesProgress::Finished => {
+                    PrimeCachesProgress::End { cancelled: _ } => {
                         state = Progress::End;
                         message = None;
                         fraction = 1.0;
@@ -422,13 +431,16 @@ impl GlobalState {
             self.task_pool.handle.spawn_with_sender({
                 let analysis = self.snapshot().analysis;
                 move |sender| {
-                    let cb = |progress| {
-                        sender.send(Task::PrimeCaches(progress)).unwrap();
-                    };
-                    match analysis.prime_caches(cb) {
-                        Ok(()) => (),
-                        Err(_canceled) => (),
-                    }
+                    sender.send(Task::PrimeCaches(PrimeCachesProgress::Begin)).unwrap();
+                    let res = analysis.prime_caches(|progress| {
+                        let report = PrimeCachesProgress::Report(progress);
+                        sender.send(Task::PrimeCaches(report)).unwrap();
+                    });
+                    sender
+                        .send(Task::PrimeCaches(PrimeCachesProgress::End {
+                            cancelled: res.is_err(),
+                        }))
+                        .unwrap();
                 }
             });
         }

From a59f344c4ff4f2bd3923a4499eb5868451698009 Mon Sep 17 00:00:00 2001
From: Aleksey Kladov
Date: Mon, 30 Aug 2021 19:24:31 +0300
Subject: [PATCH 2/4] internal: improve consistency

Let's have only one place where we start delayed ops
---
 crates/rust-analyzer/src/main_loop.rs | 35 ++++++++++++++-------------
 1 file changed, 18 insertions(+), 17 deletions(-)

diff --git a/crates/rust-analyzer/src/main_loop.rs b/crates/rust-analyzer/src/main_loop.rs
index 38e0e9c65b..a60056cfbe 100644
--- a/crates/rust-analyzer/src/main_loop.rs
+++ b/crates/rust-analyzer/src/main_loop.rs
@@ -427,23 +427,6 @@ impl GlobalState {
         if !was_quiescent || state_changed {
             // Ensure that only one cache priming task can run at a time
             self.prime_caches_queue.request_op();
-            if self.prime_caches_queue.should_start_op() {
-                self.task_pool.handle.spawn_with_sender({
-                    let analysis = self.snapshot().analysis;
-                    move |sender| {
-                        sender.send(Task::PrimeCaches(PrimeCachesProgress::Begin)).unwrap();
-                        let res = analysis.prime_caches(|progress| {
-                            let report = PrimeCachesProgress::Report(progress);
-                            sender.send(Task::PrimeCaches(report)).unwrap();
-                        });
-                        sender
-                            .send(Task::PrimeCaches(PrimeCachesProgress::End {
-                                cancelled: res.is_err(),
-                            }))
-                            .unwrap();
-                    }
-                });
-            }
 
             // Refresh semantic tokens if the client supports it.
             if self.config.semantic_tokens_refresh() {
@@ -494,6 +477,24 @@ impl GlobalState {
         }
         self.fetch_build_data_if_needed();
 
+        if self.prime_caches_queue.should_start_op() {
+            self.task_pool.handle.spawn_with_sender({
+                let analysis = self.snapshot().analysis;
+                move |sender| {
+                    sender.send(Task::PrimeCaches(PrimeCachesProgress::Begin)).unwrap();
+                    let res = analysis.prime_caches(|progress| {
+                        let report = PrimeCachesProgress::Report(progress);
+                        sender.send(Task::PrimeCaches(report)).unwrap();
+                    });
+                    sender
+                        .send(Task::PrimeCaches(PrimeCachesProgress::End {
+                            cancelled: res.is_err(),
+                        }))
+                        .unwrap();
+                }
+            });
+        }
+
         self.report_new_status_if_needed();
 
         let loop_duration = loop_start.elapsed();

From 9e0203bd69ffd236b9e129235b94807909823eac Mon Sep 17 00:00:00 2001
From: Aleksey Kladov
Date: Mon, 30 Aug 2021 19:35:49 +0300
Subject: [PATCH 3/4] internal: make scheduling control flow more obvious

There should be only one place where we need to check if we want to
start background activities.
---
 crates/rust-analyzer/src/main_loop.rs | 38 ++++++++++++++-------
 crates/rust-analyzer/src/reload.rs    | 49 ++++-----------------------
 docs/dev/style.md                     | 19 +++++++++++
 3 files changed, 52 insertions(+), 54 deletions(-)

diff --git a/crates/rust-analyzer/src/main_loop.rs b/crates/rust-analyzer/src/main_loop.rs
index a60056cfbe..9f11254698 100644
--- a/crates/rust-analyzer/src/main_loop.rs
+++ b/crates/rust-analyzer/src/main_loop.rs
@@ -152,8 +152,10 @@ impl GlobalState {
             );
         }
 
-        self.fetch_workspaces_request();
-        self.fetch_workspaces_if_needed();
+        self.fetch_workspaces_queue.request_op();
+        if self.fetch_workspaces_queue.should_start_op() {
+            self.fetch_workspaces();
+        }
 
         while let Some(event) = self.next_event(&inbox) {
             if let Event::Lsp(lsp_server::Message::Notification(not)) = &event {
@@ -234,14 +236,14 @@ impl GlobalState {
                             (Progress::Report, Some(msg))
                         }
                         ProjectWorkspaceProgress::End(workspaces) => {
-                            self.fetch_workspaces_completed(workspaces);
+                            self.fetch_workspaces_queue.op_completed(workspaces);
 
                             let old = Arc::clone(&self.workspaces);
                             self.switch_workspaces();
                             let workspaces_updated = !Arc::ptr_eq(&old, &self.workspaces);
 
                             if self.config.run_build_scripts() && workspaces_updated {
-                                self.fetch_build_data_request()
+                                self.fetch_build_data_queue.request_op()
                             }
 
                             (Progress::End, None)
@@ -257,7 +259,7 @@ impl GlobalState {
                             (Some(Progress::Report), Some(msg))
                         }
                         BuildDataProgress::End(build_data_result) => {
-                            self.fetch_build_data_completed(build_data_result);
+                            self.fetch_build_data_queue.op_completed(build_data_result);
 
                             self.switch_workspaces();
 
@@ -425,7 +427,6 @@ impl GlobalState {
         }
 
         if !was_quiescent || state_changed {
-            // Ensure that only one cache priming task can run at a time
             self.prime_caches_queue.request_op();
 
             // Refresh semantic tokens if the client supports it.
@@ -473,10 +474,13 @@ impl GlobalState {
         }
 
         if self.config.cargo_autoreload() {
-            self.fetch_workspaces_if_needed();
+            if self.fetch_workspaces_queue.should_start_op() {
+                self.fetch_workspaces();
+            }
+        }
+        if self.fetch_build_data_queue.should_start_op() {
+            self.fetch_build_data();
         }
-        self.fetch_build_data_if_needed();
-
         if self.prime_caches_queue.should_start_op() {
             self.task_pool.handle.spawn_with_sender({
                 let analysis = self.snapshot().analysis;
@@ -495,7 +499,18 @@ impl GlobalState {
             });
         }
 
-        self.report_new_status_if_needed();
+        let status = self.current_status();
+        if self.last_reported_status.as_ref() != Some(&status) {
+            self.last_reported_status = Some(status.clone());
+
+            if let (lsp_ext::Health::Error, Some(message)) = (status.health, &status.message) {
+                self.show_message(lsp_types::MessageType::Error, message.clone());
+            }
+
+            if self.config.server_status_notification() {
+                self.send_notification::<lsp_ext::ServerStatusNotification>(status);
+            }
+        }
 
         let loop_duration = loop_start.elapsed();
         if loop_duration > Duration::from_millis(100) {
@@ -534,8 +549,7 @@ impl GlobalState {
 
         RequestDispatcher { req: Some(req), global_state: self }
             .on_sync_mut::<lsp_ext::ReloadWorkspace>(|s, ()| {
-                s.fetch_workspaces_request();
-                s.fetch_workspaces_if_needed();
+                s.fetch_workspaces_queue.request_op();
                 Ok(())
             })?
            .on_sync_mut::<lsp_ext::MemoryUsage>(|s, ()| {
diff --git a/crates/rust-analyzer/src/reload.rs b/crates/rust-analyzer/src/reload.rs
index fa9f05bc38..ad4d81ada7 100644
--- a/crates/rust-analyzer/src/reload.rs
+++ b/crates/rust-analyzer/src/reload.rs
@@ -47,7 +47,7 @@ impl GlobalState {
             self.analysis_host.update_lru_capacity(self.config.lru_capacity());
         }
         if self.config.linked_projects() != old_config.linked_projects() {
-            self.fetch_workspaces_request()
+            self.fetch_workspaces_queue.request_op()
         } else if self.config.flycheck() != old_config.flycheck() {
             self.reload_flycheck();
         }
@@ -71,7 +71,7 @@ impl GlobalState {
                 ", "
             )
         );
-        self.fetch_workspaces_request();
+        self.fetch_workspaces_queue.request_op();
 
         fn is_interesting(path: &AbsPath, change_kind: ChangeKind) -> bool {
             const IMPLICIT_TARGET_FILES: &[&str] = &["build.rs", "src/main.rs", "src/lib.rs"];
@@ -109,7 +109,8 @@ impl GlobalState {
             false
         }
     }
-    pub(crate) fn report_new_status_if_needed(&mut self) {
+
+    pub(crate) fn current_status(&self) -> lsp_ext::ServerStatusParams {
         let mut status = lsp_ext::ServerStatusParams {
             health: lsp_ext::Health::Ok,
             quiescent: self.is_quiescent(),
@@ -132,27 +133,10 @@ impl GlobalState {
             status.health = lsp_ext::Health::Error;
             status.message = Some(error)
         }
-
-        if self.last_reported_status.as_ref() != Some(&status) {
-            self.last_reported_status = Some(status.clone());
-
-            if let (lsp_ext::Health::Error, Some(message)) = (status.health, &status.message) {
-                self.show_message(lsp_types::MessageType::Error, message.clone());
-            }
-
-            if self.config.server_status_notification() {
-                self.send_notification::<lsp_ext::ServerStatusNotification>(status);
-            }
-        }
+        status
     }
 
-    pub(crate) fn fetch_workspaces_request(&mut self) {
-        self.fetch_workspaces_queue.request_op()
-    }
-    pub(crate) fn fetch_workspaces_if_needed(&mut self) {
-        if !self.fetch_workspaces_queue.should_start_op() {
-            return;
-        }
+    pub(crate) fn fetch_workspaces(&mut self) {
         tracing::info!("will fetch workspaces");
 
         self.task_pool.handle.spawn_with_sender({
@@ -203,21 +187,8 @@ impl GlobalState {
             }
         });
     }
-    pub(crate) fn fetch_workspaces_completed(
-        &mut self,
-        workspaces: Vec<anyhow::Result<ProjectWorkspace>>,
-    ) {
-        self.fetch_workspaces_queue.op_completed(workspaces)
-    }
-
-    pub(crate) fn fetch_build_data_request(&mut self) {
-        self.fetch_build_data_queue.request_op();
-    }
-    pub(crate) fn fetch_build_data_if_needed(&mut self) {
-        if !self.fetch_build_data_queue.should_start_op() {
-            return;
-        }
+    pub(crate) fn fetch_build_data(&mut self) {
         let workspaces = Arc::clone(&self.workspaces);
         let config = self.config.cargo();
         self.task_pool.handle.spawn_with_sender(move |sender| {
@@ -236,12 +207,6 @@ impl GlobalState {
             sender.send(Task::FetchBuildData(BuildDataProgress::End((workspaces, res)))).unwrap();
         });
     }
-    pub(crate) fn fetch_build_data_completed(
-        &mut self,
-        build_data: (Arc<Vec<ProjectWorkspace>>, Vec<anyhow::Result<WorkspaceBuildScripts>>),
-    ) {
-        self.fetch_build_data_queue.op_completed(build_data)
-    }
 
     pub(crate) fn switch_workspaces(&mut self) {
         let _p = profile::span("GlobalState::switch_workspaces");
diff --git a/docs/dev/style.md b/docs/dev/style.md
index 7954ae8ec6..6131cdcbdc 100644
--- a/docs/dev/style.md
+++ b/docs/dev/style.md
@@ -257,6 +257,25 @@ if idx >= len {
 
 **Rationale:** it's useful to see the invariant relied upon by the rest of the function clearly spelled out.
 
+## Control Flow
+
+As a special case of the previous rule, do not hide control flow inside functions, push it to the caller:
+
+```rust
+// GOOD
+if cond {
+    f()
+}
+
+// BAD
+fn f() {
+    if !cond {
+        return;
+    }
+    ...
+}
+```
+
 ## Assertions
 
 Assert liberally.

From 53d20500859fa22fb1c5d96a7c34d72d933ddbea Mon Sep 17 00:00:00 2001
From: Aleksey Kladov
Date: Mon, 30 Aug 2021 19:39:19 +0300
Subject: [PATCH 4/4] feat: improve CPU usage

closes #9922

This turned out to be trivial after the preliminary refactoring. The
intended behavior is that we schedule cache priming once the workspace
becomes quiescent (that is, once we have fully loaded the cargo
project), and we keep rescheduling it until it completes (priming might
get cancelled by the user typing into a file).
---
 crates/rust-analyzer/src/main_loop.rs | 8 +++++---
 1 file changed, 5 insertions(+), 3 deletions(-)

diff --git a/crates/rust-analyzer/src/main_loop.rs b/crates/rust-analyzer/src/main_loop.rs
index 9f11254698..ad43e7eca0 100644
--- a/crates/rust-analyzer/src/main_loop.rs
+++ b/crates/rust-analyzer/src/main_loop.rs
@@ -296,12 +296,15 @@ impl GlobalState {
                         ));
                         fraction = Progress::fraction(report.n_done, report.n_total);
                     }
-                    PrimeCachesProgress::End { cancelled: _ } => {
+                    PrimeCachesProgress::End { cancelled } => {
                         state = Progress::End;
                         message = None;
                         fraction = 1.0;
 
                         self.prime_caches_queue.op_completed(());
+                        if cancelled {
+                            self.prime_caches_queue.request_op();
+                        }
                     }
                 };
 
@@ -424,11 +427,10 @@ impl GlobalState {
             for flycheck in &self.flycheck {
                 flycheck.update();
             }
+            self.prime_caches_queue.request_op();
         }
 
         if !was_quiescent || state_changed {
-            self.prime_caches_queue.request_op();
-
             // Refresh semantic tokens if the client supports it.
             if self.config.semantic_tokens_refresh() {
                 self.semantic_tokens_cache.lock().clear();
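
The four patches above all lean on one coordination primitive: a queue with the `request_op` / `should_start_op` / `op_completed` trio, which coalesces repeated requests and guarantees at most one running instance of a background operation. As a reading aid, here is a minimal sketch of a queue with that interface. It is an illustration of the pattern only, inferred from how the call sites use it -- not rust-analyzer's actual `OpQueue` type, whose fields and signatures may differ:

```rust
/// Coalesces requests for one background operation and ensures that at
/// most one instance of it runs at a time. (Assumed shape; the
/// `last_op_result` slot and field names are hypothetical.)
pub(crate) struct OpQueue<Output> {
    op_requested: bool,
    op_in_progress: bool,
    last_op_result: Option<Output>,
}

impl<Output> Default for OpQueue<Output> {
    fn default() -> Self {
        OpQueue { op_requested: false, op_in_progress: false, last_op_result: None }
    }
}

impl<Output> OpQueue<Output> {
    /// Record that the operation should run; repeated requests coalesce.
    pub(crate) fn request_op(&mut self) {
        self.op_requested = true;
    }

    /// Returns true at most once per request, and never while a previous
    /// run is still in flight. Called from a single dispatch point in the
    /// main loop.
    pub(crate) fn should_start_op(&mut self) -> bool {
        if self.op_in_progress || !self.op_requested {
            return false;
        }
        self.op_requested = false;
        self.op_in_progress = true;
        true
    }

    /// Mark the current run as finished and store its result.
    pub(crate) fn op_completed(&mut self, result: Output) {
        assert!(self.op_in_progress);
        self.op_in_progress = false;
        self.last_op_result = Some(result);
    }
}
```

Under this model, patch 4's behavior falls out directly: the `End { cancelled: true }` handler calls `op_completed(())` and then `request_op()` again, so the next turn of the main loop re-enters the single `should_start_op` check and restarts priming once the user's edits have been processed.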