Mirror of https://github.com/rust-lang/rust-analyzer, synced 2024-11-15 09:27:27 +00:00
Merge #6153

6153: Improve prime_caches and display its progress r=matklad a=jonas-schievink

It now computes the `CrateDefMap` of all crates, which is generally a reasonable approximation for "IDE features ready". There is still some delay after this finishes, which I suspect is mostly due to impl collection taking a while, but this should be an improvement already.

For more accurate progress reports, this topologically sorts all crates before starting the operation.

~~Because that is also the ordering in which parallelization makes sense (which was previously attempted in https://github.com/rust-analyzer/rust-analyzer/pull/3529), I decided to throw that into the mix as well. It still doesn't provide *that* much of a performance boost, but it does scale beyond the current single-core architecture, and adding it was very easy.~~

~~Unfortunately, as written, this will not tell the user which crate is actually causing slowdowns, since the displayed crate is the last one that was *started*, not the one we are currently *blocked* on, but that seems fairly difficult to implement unless I'm missing something.~~

(I have removed rayon for now, since it does not work correctly with cancellation.)

Co-authored-by: Jonas Schievink <jonas.schievink@ferrous-systems.com>
Commit 05faeb50f3, 5 changed files with 120 additions and 14 deletions
@@ -221,6 +221,34 @@ impl CrateGraph {
         deps.into_iter()
     }

+    /// Returns all crates in the graph, sorted in topological order (ie. dependencies of a crate
+    /// come before the crate itself).
+    pub fn crates_in_topological_order(&self) -> Vec<CrateId> {
+        let mut res = Vec::new();
+        let mut visited = FxHashSet::default();
+
+        for krate in self.arena.keys().copied() {
+            go(self, &mut visited, &mut res, krate);
+        }
+
+        return res;
+
+        fn go(
+            graph: &CrateGraph,
+            visited: &mut FxHashSet<CrateId>,
+            res: &mut Vec<CrateId>,
+            source: CrateId,
+        ) {
+            if !visited.insert(source) {
+                return;
+            }
+            for dep in graph[source].dependencies.iter() {
+                go(graph, visited, res, dep.crate_id)
+            }
+            res.push(source)
+        }
+    }
+
     // FIXME: this only finds one crate with the given root; we could have multiple
     pub fn crate_id_for_crate_root(&self, file_id: FileId) -> Option<CrateId> {
         let (&crate_id, _) =
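The new `crates_in_topological_order` is a plain post-order depth-first search: a crate is only pushed after all of its dependencies have been pushed. A self-contained sketch of the same idea, using plain integers in place of the real `CrateId`/`CrateGraph` types (the names and types here are illustrative only):

```rust
use std::collections::{HashMap, HashSet};

type CrateId = u32;

/// Post-order DFS: a crate is pushed only after all of its dependencies.
fn crates_in_topological_order(deps: &HashMap<CrateId, Vec<CrateId>>) -> Vec<CrateId> {
    fn go(
        deps: &HashMap<CrateId, Vec<CrateId>>,
        visited: &mut HashSet<CrateId>,
        res: &mut Vec<CrateId>,
        source: CrateId,
    ) {
        if !visited.insert(source) {
            return;
        }
        for &dep in deps.get(&source).into_iter().flatten() {
            go(deps, visited, res, dep);
        }
        res.push(source);
    }

    let mut res = Vec::new();
    let mut visited = HashSet::new();
    for &krate in deps.keys() {
        go(deps, &mut visited, &mut res, krate);
    }
    res
}

fn main() {
    // Crate 2 depends on 1, crate 1 depends on 0.
    let deps: HashMap<CrateId, Vec<CrateId>> =
        HashMap::from([(0, vec![]), (1, vec![0]), (2, vec![1])]);
    let order = crates_in_topological_order(&deps);
    let pos = |id: CrateId| order.iter().position(|&c| c == id).unwrap();
    // Dependencies come before their dependents.
    assert!(pos(0) < pos(1) && pos(1) < pos(2));
    println!("{:?}", order);
}
```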
@@ -77,6 +77,7 @@ pub use crate::{
     hover::{HoverAction, HoverConfig, HoverGotoTypeData, HoverResult},
     inlay_hints::{InlayHint, InlayHintsConfig, InlayKind},
     markup::Markup,
+    prime_caches::PrimeCachesProgress,
     references::{
         Declaration, Reference, ReferenceAccess, ReferenceKind, ReferenceSearchResult, RenameError,
     },
@@ -223,8 +224,11 @@ impl Analysis {
         self.with_db(|db| status::status(&*db, file_id))
     }

-    pub fn prime_caches(&self, files: Vec<FileId>) -> Cancelable<()> {
-        self.with_db(|db| prime_caches::prime_caches(db, files))
+    pub fn prime_caches<F>(&self, cb: F) -> Cancelable<()>
+    where
+        F: Fn(PrimeCachesProgress) + Sync + std::panic::UnwindSafe,
+    {
+        self.with_db(move |db| prime_caches::prime_caches(db, &cb))
     }

     /// Gets the text of the source file.
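For context, a sketch of how a client of the `ide` crate might drive the new callback-based `Analysis::prime_caches`. The helper name is hypothetical, the `Analysis` value is assumed to come from an `AnalysisHost` configured elsewhere, and the logging is purely illustrative; only `prime_caches`, `PrimeCachesProgress`, and the `Cancelable` alias come from the diff above:

```rust
use ide::{Analysis, Cancelable, PrimeCachesProgress};

/// Prime the caches, logging each step; the callback must be `Fn + Sync +
/// UnwindSafe` per the new signature, which a capture-free closure satisfies.
fn prime_with_logging(analysis: &Analysis) -> Cancelable<()> {
    analysis.prime_caches(|progress| match progress {
        PrimeCachesProgress::Started => eprintln!("indexing started"),
        PrimeCachesProgress::StartedOnCrate { on_crate, n_done, n_total } => {
            eprintln!("indexing {}/{}: {}", n_done, n_total, on_crate)
        }
        PrimeCachesProgress::Finished => eprintln!("indexing finished"),
    })
}
```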
@@ -3,10 +3,45 @@
 //! request takes longer to compute. This modules implemented prepopulating of
 //! various caches, it's not really advanced at the moment.

-use crate::{FileId, RootDatabase};
+use base_db::SourceDatabase;
+use hir::db::DefDatabase;

-pub(crate) fn prime_caches(db: &RootDatabase, files: Vec<FileId>) {
-    for file in files {
-        let _ = crate::syntax_highlighting::highlight(db, file, None, false);
-    }
+use crate::RootDatabase;
+
+#[derive(Debug)]
+pub enum PrimeCachesProgress {
+    Started,
+    /// We started indexing a crate.
+    StartedOnCrate {
+        on_crate: String,
+        n_done: usize,
+        n_total: usize,
+    },
+    /// We finished indexing all crates.
+    Finished,
+}
+
+pub(crate) fn prime_caches(db: &RootDatabase, cb: &(dyn Fn(PrimeCachesProgress) + Sync)) {
+    let _p = profile::span("prime_caches");
+    let graph = db.crate_graph();
+    let topo = &graph.crates_in_topological_order();
+
+    cb(PrimeCachesProgress::Started);
+
+    // FIXME: This would be easy to parallelize, since it's in the ideal ordering for that.
+    // Unfortunately rayon prevents panics from propagation out of a `scope`, which breaks
+    // cancellation, so we cannot use rayon.
+    for (i, krate) in topo.iter().enumerate() {
+        let crate_name =
+            graph[*krate].declaration_name.as_ref().map(ToString::to_string).unwrap_or_default();
+
+        cb(PrimeCachesProgress::StartedOnCrate {
+            on_crate: crate_name,
+            n_done: i,
+            n_total: topo.len(),
+        });
+        db.crate_def_map(*krate);
+    }
+
+    cb(PrimeCachesProgress::Finished);
 }
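The FIXME above refers to how rust-analyzer cancels in-flight work: when a new change arrives, queries unwind with a `Canceled` payload, and that unwind is caught again at the API boundary, which the FIXME says a rayon `scope` interferes with. A self-contained sketch of that unwind-and-catch mechanism, with stand-in types rather than the real `Canceled`:

```rust
use std::panic::{self, AssertUnwindSafe};

/// Stand-in for the real `Canceled` marker type.
#[derive(Debug)]
struct Canceled;

/// A computation that bails out by unwinding when cancellation is requested,
/// roughly how cancellation is signalled inside a query.
fn compute(cancel_requested: bool) -> u32 {
    if cancel_requested {
        panic::panic_any(Canceled);
    }
    42
}

/// Catch the unwind at the boundary and turn it back into a `Result`,
/// roughly what happens in the `with_db` wrapper.
fn run(cancel_requested: bool) -> Result<u32, Canceled> {
    panic::catch_unwind(AssertUnwindSafe(|| compute(cancel_requested)))
        .map_err(|payload| *payload.downcast::<Canceled>().expect("unexpected panic kind"))
}

fn main() {
    // Silence the default panic hook so the expected unwind prints nothing.
    panic::set_hook(Box::new(|_| {}));
    assert_eq!(run(false).unwrap(), 42);
    assert!(run(true).is_err());
}
```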
@@ -7,6 +7,7 @@ use std::{

 use base_db::VfsPath;
 use crossbeam_channel::{select, Receiver};
+use ide::PrimeCachesProgress;
 use ide::{Canceled, FileId};
 use lsp_server::{Connection, Notification, Request, Response};
 use lsp_types::notification::Notification as _;
@@ -61,7 +62,7 @@ pub(crate) enum Task {
     Response(Response),
     Diagnostics(Vec<(FileId, Vec<lsp_types::Diagnostic>)>),
     Workspaces(Vec<anyhow::Result<ProjectWorkspace>>),
-    Unit,
+    PrimeCaches(PrimeCachesProgress),
 }

 impl fmt::Debug for Event {
@@ -197,7 +198,28 @@ impl GlobalState {
                         }
                     }
                     Task::Workspaces(workspaces) => self.switch_workspaces(workspaces),
-                    Task::Unit => (),
+                    Task::PrimeCaches(progress) => {
+                        let (state, message, fraction);
+                        match progress {
+                            PrimeCachesProgress::Started => {
+                                state = Progress::Begin;
+                                message = None;
+                                fraction = 0.0;
+                            }
+                            PrimeCachesProgress::StartedOnCrate { on_crate, n_done, n_total } => {
+                                state = Progress::Report;
+                                message = Some(format!("{}/{} ({})", n_done, n_total, on_crate));
+                                fraction = Progress::fraction(n_done, n_total);
+                            }
+                            PrimeCachesProgress::Finished => {
+                                state = Progress::End;
+                                message = None;
+                                fraction = 1.0;
+                            }
+                        };
+
+                        self.report_progress("indexing", state, message, Some(fraction));
+                    }
                 },
                 Event::Vfs(mut task) => {
                     let _p = profile::span("GlobalState::handle_event/vfs");
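`Progress::fraction` above turns the crate counts into the progress fraction reported to the editor. A hypothetical sketch of such a helper (not copied from the real implementation), assuming it simply maps `done/total` into `0.0..=1.0`:

```rust
/// Hypothetical fraction helper: map done/total to 0.0..=1.0, guarding
/// against a zero total.
fn fraction(done: usize, total: usize) -> f64 {
    assert!(done <= total);
    done as f64 / total.max(1) as f64
}

fn main() {
    assert_eq!(fraction(0, 10), 0.0);
    assert_eq!(fraction(5, 10), 0.5);
    assert_eq!(fraction(0, 0), 0.0);
}
```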
@@ -573,12 +595,18 @@
                 Task::Diagnostics(diagnostics)
             })
         }
-        self.task_pool.handle.spawn({
-            let subs = subscriptions;
+        self.task_pool.handle.spawn_with_sender({
             let snap = self.snapshot();
-            move || {
-                snap.analysis.prime_caches(subs).unwrap_or_else(|_: Canceled| ());
-                Task::Unit
+            move |sender| {
+                snap.analysis
+                    .prime_caches(|progress| {
+                        sender.send(Task::PrimeCaches(progress)).unwrap();
+                    })
+                    .unwrap_or_else(|_: Canceled| {
+                        // Pretend that we're done, so that the progress bar is removed. Otherwise
+                        // the editor may complain about it already existing.
+                        sender.send(Task::PrimeCaches(PrimeCachesProgress::Finished)).unwrap()
+                    });
             }
         });
     }
@@ -23,6 +23,17 @@ impl<T> TaskPool<T> {
         })
     }

+    pub(crate) fn spawn_with_sender<F>(&mut self, task: F)
+    where
+        F: FnOnce(Sender<T>) + Send + 'static,
+        T: Send + 'static,
+    {
+        self.inner.execute({
+            let sender = self.sender.clone();
+            move || task(sender)
+        })
+    }
+
     pub(crate) fn len(&self) -> usize {
         self.inner.queued_count()
     }
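`spawn_with_sender` hands the spawned task its own clone of the pool's sender, so a long-running job can stream intermediate messages (here, priming progress) instead of returning a single `Task` at the end. A standalone sketch of the same pattern, assuming the `threadpool` and `crossbeam-channel` crates as dependencies; the real `TaskPool` internals may differ:

```rust
use crossbeam_channel::{unbounded, Receiver, Sender};
use threadpool::ThreadPool;

/// Minimal stand-in for the real TaskPool: a thread pool plus the sending
/// half of a channel that every spawned task reports into.
struct TaskPool<T> {
    sender: Sender<T>,
    inner: ThreadPool,
}

impl<T> TaskPool<T> {
    fn new(sender: Sender<T>) -> TaskPool<T> {
        TaskPool { sender, inner: ThreadPool::new(1) }
    }

    /// Like the `spawn_with_sender` added above: the task receives a clone of
    /// the sender, so it can emit many messages while it runs.
    fn spawn_with_sender<F>(&mut self, task: F)
    where
        F: FnOnce(Sender<T>) + Send + 'static,
        T: Send + 'static,
    {
        self.inner.execute({
            let sender = self.sender.clone();
            move || task(sender)
        })
    }
}

fn main() {
    let (tx, rx): (Sender<String>, Receiver<String>) = unbounded();
    let mut pool = TaskPool::new(tx);

    pool.spawn_with_sender(|sender| {
        for i in 0..3 {
            sender.send(format!("progress {}/3", i + 1)).unwrap();
        }
    });

    // The main thread receives intermediate messages while the task runs.
    for _ in 0..3 {
        println!("{}", rx.recv().unwrap());
    }
}
```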