Mirror of https://github.com/rust-lang/rust-analyzer (synced 2024-12-26 13:03:31 +00:00)

Merge #10088
10088: feat: improve CPU usage r=matklad a=matklad
Co-authored-by: Aleksey Kladov <aleksey.kladov@gmail.com>

Commit 5c704f11d2
4 changed files with 99 additions and 99 deletions
@@ -8,17 +8,12 @@ use ide_db::base_db::SourceDatabase;
 
 use crate::RootDatabase;
 
+/// We started indexing a crate.
 #[derive(Debug)]
-pub enum PrimeCachesProgress {
-    Started,
-    /// We started indexing a crate.
-    StartedOnCrate {
-        on_crate: String,
-        n_done: usize,
-        n_total: usize,
-    },
-    /// We finished indexing all crates.
-    Finished,
+pub struct PrimeCachesProgress {
+    pub on_crate: String,
+    pub n_done: usize,
+    pub n_total: usize,
 }
 
 pub(crate) fn prime_caches(db: &RootDatabase, cb: &(dyn Fn(PrimeCachesProgress) + Sync)) {
@@ -26,21 +21,13 @@ pub(crate) fn prime_caches(db: &RootDatabase, cb: &(dyn Fn(PrimeCachesProgress)
     let graph = db.crate_graph();
     let topo = &graph.crates_in_topological_order();
 
-    cb(PrimeCachesProgress::Started);
-    // Take care to emit the finish signal even when the computation is canceled.
-    let _d = stdx::defer(|| cb(PrimeCachesProgress::Finished));
-
     // FIXME: This would be easy to parallelize, since it's in the ideal ordering for that.
     // Unfortunately rayon prevents panics from propagation out of a `scope`, which breaks
     // cancellation, so we cannot use rayon.
     for (i, &crate_id) in topo.iter().enumerate() {
         let crate_name = graph[crate_id].display_name.as_deref().unwrap_or_default().to_string();
 
-        cb(PrimeCachesProgress::StartedOnCrate {
-            on_crate: crate_name,
-            n_done: i,
-            n_total: topo.len(),
-        });
+        cb(PrimeCachesProgress { on_crate: crate_name, n_done: i, n_total: topo.len() });
         db.crate_def_map(crate_id);
         db.import_map(crate_id);
     }
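For context, here is a minimal, self-contained sketch (not the rust-analyzer code itself) of the callback shape after this change: progress is now a plain struct reported once per crate, and the caller decides how to render or forward it. The demo function name and the crate list below are illustrative only.

```rust
#[derive(Debug)]
pub struct PrimeCachesProgress {
    pub on_crate: String,
    pub n_done: usize,
    pub n_total: usize,
}

fn prime_caches_demo(crates: &[&str], cb: &dyn Fn(PrimeCachesProgress)) {
    for (i, name) in crates.iter().enumerate() {
        // One report per crate, sent before the expensive indexing work.
        cb(PrimeCachesProgress {
            on_crate: name.to_string(),
            n_done: i,
            n_total: crates.len(),
        });
        // ...per-crate indexing would happen here...
    }
}

fn main() {
    prime_caches_demo(&["core", "alloc", "std"], &|p| {
        // The same "done/total (crate)" rendering the server uses for its progress message.
        println!("{}/{} ({})", p.n_done, p.n_total, p.on_crate);
    });
}
```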
@@ -8,11 +8,10 @@ use std::{
 
 use always_assert::always;
 use crossbeam_channel::{select, Receiver};
-use ide::{FileId, PrimeCachesProgress};
 use ide_db::base_db::{SourceDatabaseExt, VfsPath};
 use lsp_server::{Connection, Notification, Request};
 use lsp_types::notification::Notification as _;
-use vfs::ChangeKind;
+use vfs::{ChangeKind, FileId};
 
 use crate::{
     config::Config,
@@ -67,6 +66,13 @@ pub(crate) enum Task {
     FetchBuildData(BuildDataProgress),
 }
 
+#[derive(Debug)]
+pub(crate) enum PrimeCachesProgress {
+    Begin,
+    Report(ide::PrimeCachesProgress),
+    End { cancelled: bool },
+}
+
 impl fmt::Debug for Event {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         let debug_verbose_not = |not: &Notification, f: &mut fmt::Formatter| {
@@ -146,8 +152,10 @@ impl GlobalState {
             );
         }
 
-        self.fetch_workspaces_request();
-        self.fetch_workspaces_if_needed();
+        self.fetch_workspaces_queue.request_op();
+        if self.fetch_workspaces_queue.should_start_op() {
+            self.fetch_workspaces();
+        }
 
         while let Some(event) = self.next_event(&inbox) {
             if let Event::Lsp(lsp_server::Message::Notification(not)) = &event {
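The `request_op` / `should_start_op` / `op_completed` calls introduced throughout this diff follow a request-then-start pattern. Below is a hypothetical minimal sketch of the queue semantics those calls imply; the real `OpQueue` type lives elsewhere in the repository and is not reproduced here.

```rust
// Hypothetical sketch of an operation queue: requesting is cheap and idempotent,
// starting happens at most once per request, and completion re-arms the queue.
#[derive(Default)]
struct OpQueue {
    requested: bool,
    in_progress: bool,
}

impl OpQueue {
    fn request_op(&mut self) {
        self.requested = true;
    }
    fn should_start_op(&mut self) -> bool {
        if self.in_progress || !self.requested {
            return false;
        }
        self.requested = false;
        self.in_progress = true;
        true
    }
    fn op_completed(&mut self) {
        self.in_progress = false;
    }
}

fn main() {
    let mut queue = OpQueue::default();
    queue.request_op();
    assert!(queue.should_start_op());
    assert!(!queue.should_start_op()); // already running: do not start a second op
    queue.op_completed();
    assert!(!queue.should_start_op()); // nothing requested since completion
}
```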
@@ -209,17 +217,17 @@ impl GlobalState {
                         }
                     }
                     Task::PrimeCaches(progress) => match progress {
-                        PrimeCachesProgress::Started => prime_caches_progress.push(progress),
-                        PrimeCachesProgress::StartedOnCrate { .. } => {
+                        PrimeCachesProgress::Begin => prime_caches_progress.push(progress),
+                        PrimeCachesProgress::Report(_) => {
                             match prime_caches_progress.last_mut() {
-                                Some(last @ PrimeCachesProgress::StartedOnCrate { .. }) => {
+                                Some(last @ PrimeCachesProgress::Report(_)) => {
                                     // Coalesce subsequent update events.
                                     *last = progress;
                                 }
                                 _ => prime_caches_progress.push(progress),
                             }
                         }
-                        PrimeCachesProgress::Finished => prime_caches_progress.push(progress),
+                        PrimeCachesProgress::End { .. } => prime_caches_progress.push(progress),
                     },
                     Task::FetchWorkspace(progress) => {
                         let (state, msg) = match progress {
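The coalescing above keeps the pending progress vector from growing with one entry per indexed crate: only the most recent report per main-loop turn is kept. A self-contained sketch of the same idea, with illustrative names rather than the actual rust-analyzer types:

```rust
#[derive(Debug, PartialEq)]
enum CachesProgress {
    Begin,
    Report(u32),
    End,
}

fn push_coalesced(pending: &mut Vec<CachesProgress>, progress: CachesProgress) {
    match progress {
        CachesProgress::Report(_) => match pending.last_mut() {
            // Coalesce subsequent update events: only the latest report matters.
            Some(last @ CachesProgress::Report(_)) => *last = progress,
            _ => pending.push(progress),
        },
        _ => pending.push(progress),
    }
}

fn main() {
    let mut pending = Vec::new();
    for p in [
        CachesProgress::Begin,
        CachesProgress::Report(1),
        CachesProgress::Report(2),
        CachesProgress::End,
    ] {
        push_coalesced(&mut pending, p);
    }
    assert_eq!(
        pending,
        vec![CachesProgress::Begin, CachesProgress::Report(2), CachesProgress::End]
    );
    println!("{:?}", pending);
}
```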
@@ -228,14 +236,14 @@ impl GlobalState {
                                 (Progress::Report, Some(msg))
                             }
                             ProjectWorkspaceProgress::End(workspaces) => {
-                                self.fetch_workspaces_completed(workspaces);
+                                self.fetch_workspaces_queue.op_completed(workspaces);
 
                                 let old = Arc::clone(&self.workspaces);
                                 self.switch_workspaces();
                                 let workspaces_updated = !Arc::ptr_eq(&old, &self.workspaces);
 
                                 if self.config.run_build_scripts() && workspaces_updated {
-                                    self.fetch_build_data_request()
+                                    self.fetch_build_data_queue.request_op()
                                 }
 
                                 (Progress::End, None)
@@ -251,7 +259,7 @@ impl GlobalState {
                                 (Some(Progress::Report), Some(msg))
                             }
                             BuildDataProgress::End(build_data_result) => {
-                                self.fetch_build_data_completed(build_data_result);
+                                self.fetch_build_data_queue.op_completed(build_data_result);
 
                                 self.switch_workspaces();
 
@@ -275,22 +283,28 @@ impl GlobalState {
                 for progress in prime_caches_progress {
                     let (state, message, fraction);
                     match progress {
-                        PrimeCachesProgress::Started => {
+                        PrimeCachesProgress::Begin => {
                             state = Progress::Begin;
                             message = None;
                             fraction = 0.0;
                         }
-                        PrimeCachesProgress::StartedOnCrate { on_crate, n_done, n_total } => {
+                        PrimeCachesProgress::Report(report) => {
                             state = Progress::Report;
-                            message = Some(format!("{}/{} ({})", n_done, n_total, on_crate));
-                            fraction = Progress::fraction(n_done, n_total);
+                            message = Some(format!(
+                                "{}/{} ({})",
+                                report.n_done, report.n_total, report.on_crate
+                            ));
+                            fraction = Progress::fraction(report.n_done, report.n_total);
                         }
-                        PrimeCachesProgress::Finished => {
+                        PrimeCachesProgress::End { cancelled } => {
                             state = Progress::End;
                             message = None;
                             fraction = 1.0;
 
                             self.prime_caches_queue.op_completed(());
+                            if cancelled {
+                                self.prime_caches_queue.request_op();
+                            }
                         }
                     };
 
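`Progress::fraction(report.n_done, report.n_total)` turns the per-crate counter into the 0.0 to 1.0 value that the progress notification expects. A hypothetical stand-in for that helper (the real one lives in the server's LSP utilities and may differ in edge-case handling):

```rust
// Hypothetical fraction helper: done/total as f64, with an explicit empty-input case.
fn fraction(done: usize, total: usize) -> f64 {
    assert!(done <= total);
    if total == 0 {
        1.0
    } else {
        done as f64 / total as f64
    }
}

fn main() {
    assert_eq!(fraction(0, 4), 0.0);
    assert_eq!(fraction(2, 4), 0.5);
    assert_eq!(fraction(0, 0), 1.0);
    println!("{}", fraction(3, 4));
}
```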
@@ -413,26 +427,10 @@ impl GlobalState {
                 for flycheck in &self.flycheck {
                     flycheck.update();
                 }
+                self.prime_caches_queue.request_op();
             }
 
             if !was_quiescent || state_changed {
-                // Ensure that only one cache priming task can run at a time
-                self.prime_caches_queue.request_op();
-                if self.prime_caches_queue.should_start_op() {
-                    self.task_pool.handle.spawn_with_sender({
-                        let analysis = self.snapshot().analysis;
-                        move |sender| {
-                            let cb = |progress| {
-                                sender.send(Task::PrimeCaches(progress)).unwrap();
-                            };
-                            match analysis.prime_caches(cb) {
-                                Ok(()) => (),
-                                Err(_canceled) => (),
-                            }
-                        }
-                    });
-                }
-
                 // Refresh semantic tokens if the client supports it.
                 if self.config.semantic_tokens_refresh() {
                     self.semantic_tokens_cache.lock().clear();
@@ -478,11 +476,43 @@ impl GlobalState {
         }
 
         if self.config.cargo_autoreload() {
-            self.fetch_workspaces_if_needed();
+            if self.fetch_workspaces_queue.should_start_op() {
+                self.fetch_workspaces();
+            }
+        }
+        if self.fetch_build_data_queue.should_start_op() {
+            self.fetch_build_data();
+        }
+        if self.prime_caches_queue.should_start_op() {
+            self.task_pool.handle.spawn_with_sender({
+                let analysis = self.snapshot().analysis;
+                move |sender| {
+                    sender.send(Task::PrimeCaches(PrimeCachesProgress::Begin)).unwrap();
+                    let res = analysis.prime_caches(|progress| {
+                        let report = PrimeCachesProgress::Report(progress);
+                        sender.send(Task::PrimeCaches(report)).unwrap();
+                    });
+                    sender
+                        .send(Task::PrimeCaches(PrimeCachesProgress::End {
+                            cancelled: res.is_err(),
+                        }))
+                        .unwrap();
+                }
+            });
         }
-        self.fetch_build_data_if_needed();
 
-        self.report_new_status_if_needed();
+        let status = self.current_status();
+        if self.last_reported_status.as_ref() != Some(&status) {
+            self.last_reported_status = Some(status.clone());
+
+            if let (lsp_ext::Health::Error, Some(message)) = (status.health, &status.message) {
+                self.show_message(lsp_types::MessageType::Error, message.clone());
+            }
+
+            if self.config.server_status_notification() {
+                self.send_notification::<lsp_ext::ServerStatusNotification>(status);
+            }
+        }
 
         let loop_duration = loop_start.elapsed();
         if loop_duration > Duration::from_millis(100) {
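The spawned task above always brackets the cache-priming work between a `Begin` and an `End { cancelled }` message, so the main loop's progress state stays consistent even when the analysis is cancelled partway through. Here is a self-contained sketch of that bracketing pattern using std threads and channels instead of rust-analyzer's task pool; the names mirror the diff, but the types are simplified stand-ins.

```rust
use std::sync::mpsc;
use std::thread;

#[derive(Debug)]
enum PrimeCachesProgress {
    Begin,
    Report(usize),
    End { cancelled: bool },
}

#[derive(Debug)]
enum Task {
    PrimeCaches(PrimeCachesProgress),
}

fn main() {
    let (sender, receiver) = mpsc::channel();
    let worker = thread::spawn(move || {
        // Begin is sent unconditionally before any work starts.
        sender.send(Task::PrimeCaches(PrimeCachesProgress::Begin)).unwrap();
        // Stand-in for `analysis.prime_caches(..)`; an Err here would mean cancellation.
        let res: Result<(), ()> = (0..3usize).try_for_each(|i| {
            sender.send(Task::PrimeCaches(PrimeCachesProgress::Report(i))).unwrap();
            Ok(())
        });
        // End is sent last, carrying whether the work was cancelled.
        sender
            .send(Task::PrimeCaches(PrimeCachesProgress::End { cancelled: res.is_err() }))
            .unwrap();
    });
    for task in receiver {
        match task {
            Task::PrimeCaches(progress) => println!("{:?}", progress),
        }
    }
    worker.join().unwrap();
}
```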
@@ -521,8 +551,7 @@ impl GlobalState {
 
         RequestDispatcher { req: Some(req), global_state: self }
             .on_sync_mut::<lsp_ext::ReloadWorkspace>(|s, ()| {
-                s.fetch_workspaces_request();
-                s.fetch_workspaces_if_needed();
+                s.fetch_workspaces_queue.request_op();
                 Ok(())
             })?
             .on_sync_mut::<lsp_types::request::Shutdown>(|s, ()| {
@@ -47,7 +47,7 @@ impl GlobalState {
             self.analysis_host.update_lru_capacity(self.config.lru_capacity());
         }
         if self.config.linked_projects() != old_config.linked_projects() {
-            self.fetch_workspaces_request()
+            self.fetch_workspaces_queue.request_op()
         } else if self.config.flycheck() != old_config.flycheck() {
             self.reload_flycheck();
         }
@@ -71,7 +71,7 @@ impl GlobalState {
                     ", "
                 )
             );
-            self.fetch_workspaces_request();
+            self.fetch_workspaces_queue.request_op();
 
             fn is_interesting(path: &AbsPath, change_kind: ChangeKind) -> bool {
                 const IMPLICIT_TARGET_FILES: &[&str] = &["build.rs", "src/main.rs", "src/lib.rs"];
@@ -109,7 +109,8 @@ impl GlobalState {
            false
        }
    }
-    pub(crate) fn report_new_status_if_needed(&mut self) {
+
+    pub(crate) fn current_status(&self) -> lsp_ext::ServerStatusParams {
        let mut status = lsp_ext::ServerStatusParams {
            health: lsp_ext::Health::Ok,
            quiescent: self.is_quiescent(),
@@ -132,27 +133,10 @@ impl GlobalState {
             status.health = lsp_ext::Health::Error;
             status.message = Some(error)
         }
-
-        if self.last_reported_status.as_ref() != Some(&status) {
-            self.last_reported_status = Some(status.clone());
-
-            if let (lsp_ext::Health::Error, Some(message)) = (status.health, &status.message) {
-                self.show_message(lsp_types::MessageType::Error, message.clone());
-            }
-
-            if self.config.server_status_notification() {
-                self.send_notification::<lsp_ext::ServerStatusNotification>(status);
-            }
-        }
+        status
     }
 
-    pub(crate) fn fetch_workspaces_request(&mut self) {
-        self.fetch_workspaces_queue.request_op()
-    }
-    pub(crate) fn fetch_workspaces_if_needed(&mut self) {
-        if !self.fetch_workspaces_queue.should_start_op() {
-            return;
-        }
+    pub(crate) fn fetch_workspaces(&mut self) {
         tracing::info!("will fetch workspaces");
 
         self.task_pool.handle.spawn_with_sender({
@@ -203,21 +187,8 @@ impl GlobalState {
             }
         });
     }
-    pub(crate) fn fetch_workspaces_completed(
-        &mut self,
-        workspaces: Vec<anyhow::Result<ProjectWorkspace>>,
-    ) {
-        self.fetch_workspaces_queue.op_completed(workspaces)
-    }
-
-    pub(crate) fn fetch_build_data_request(&mut self) {
-        self.fetch_build_data_queue.request_op();
-    }
-    pub(crate) fn fetch_build_data_if_needed(&mut self) {
-        if !self.fetch_build_data_queue.should_start_op() {
-            return;
-        }
 
+    pub(crate) fn fetch_build_data(&mut self) {
         let workspaces = Arc::clone(&self.workspaces);
         let config = self.config.cargo();
         self.task_pool.handle.spawn_with_sender(move |sender| {
@@ -236,12 +207,6 @@ impl GlobalState {
             sender.send(Task::FetchBuildData(BuildDataProgress::End((workspaces, res)))).unwrap();
         });
     }
-    pub(crate) fn fetch_build_data_completed(
-        &mut self,
-        build_data: (Arc<Vec<ProjectWorkspace>>, Vec<anyhow::Result<WorkspaceBuildScripts>>),
-    ) {
-        self.fetch_build_data_queue.op_completed(build_data)
-    }
 
     pub(crate) fn switch_workspaces(&mut self) {
         let _p = profile::span("GlobalState::switch_workspaces");
@@ -257,6 +257,25 @@ if idx >= len {
 
 **Rationale:** it's useful to see the invariant relied upon by the rest of the function clearly spelled out.
 
+## Control Flow
+
+As a special case of the previous rule, do not hide control flow inside functions, push it to the caller:
+
+```rust
+// GOOD
+if cond {
+    f()
+}
+
+// BAD
+fn f() {
+    if !cond {
+        return;
+    }
+    ...
+}
+```
+
 ## Assertions
 
 Assert liberally.
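The new Control Flow rule is the same shape of change this PR makes in the server: the `*_if_needed` helpers that hid an early return were removed, and the caller now spells the condition out. A simplified stand-in follows; the `State` type and field names are hypothetical, not the real `GlobalState`.

```rust
struct State {
    fetch_requested: bool,
}

fn fetch_workspaces(state: &mut State) {
    state.fetch_requested = false;
    println!("fetching workspaces");
}

// BAD (old style): the check is hidden inside the callee.
fn fetch_workspaces_if_needed(state: &mut State) {
    if !state.fetch_requested {
        return;
    }
    fetch_workspaces(state);
}

// GOOD (new style): the caller decides whether to run the operation.
fn drive(state: &mut State) {
    if state.fetch_requested {
        fetch_workspaces(state);
    }
}

fn main() {
    let mut state = State { fetch_requested: true };
    drive(&mut state);
    fetch_workspaces_if_needed(&mut state); // kept only for comparison; a no-op here
}
```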