Fix "progress token is already registered" crash

After we started reporting progress when running cargo check during
loading, it is possible to crash the client with two identical progress
tokens.

This points to a deeper issue: we might be running several cargo checks
concurrently, which doesn't make sense.

This commit linearizes all workspace fetches, making sure no updates are
lost.

As an additional touch, it also normalizes progress and result reporting,
to make sure they stay in sync.
This commit is contained in:
Aleksey Kladov 2021-01-10 18:02:02 +03:00
parent 77362c7173
commit 2ed258ba42
5 changed files with 57 additions and 12 deletions

View file

@ -22,6 +22,7 @@ use crate::{
from_proto,
line_endings::LineEndings,
main_loop::Task,
op_queue::OpQueue,
reload::SourceRootConfig,
request_metrics::{LatestRequests, RequestMetrics},
thread_pool::TaskPool,
@ -78,6 +79,7 @@ pub(crate) struct GlobalState {
pub(crate) source_root_config: SourceRootConfig,
pub(crate) proc_macro_client: Option<ProcMacroClient>,
pub(crate) workspaces: Arc<Vec<ProjectWorkspace>>,
pub(crate) fetch_workspaces_queue: OpQueue,
latest_requests: Arc<RwLock<LatestRequests>>,
}
@ -130,6 +132,7 @@ impl GlobalState {
source_root_config: SourceRootConfig::default(),
proc_macro_client: None,
workspaces: Arc::new(Vec::new()),
fetch_workspaces_queue: OpQueue::default(),
latest_requests: Default::default(),
}
}

View file

@ -35,6 +35,7 @@ mod lsp_utils;
mod thread_pool;
mod document;
mod diff;
mod op_queue;
pub mod lsp_ext;
pub mod config;

View file

@ -11,7 +11,6 @@ use ide::{Canceled, FileId};
use ide_db::base_db::VfsPath;
use lsp_server::{Connection, Notification, Request, Response};
use lsp_types::notification::Notification as _;
use project_model::ProjectWorkspace;
use vfs::ChangeKind;
use crate::{
@ -62,7 +61,6 @@ enum Event {
pub(crate) enum Task {
Response(Response),
Diagnostics(Vec<(FileId, Vec<lsp_types::Diagnostic>)>),
Workspaces(Vec<anyhow::Result<ProjectWorkspace>>),
PrimeCaches(PrimeCachesProgress),
FetchWorkspace(ProjectWorkspaceProgress),
}
@ -143,7 +141,8 @@ impl GlobalState {
|_, _| (),
);
self.fetch_workspaces();
self.fetch_workspaces_request();
self.fetch_workspaces_if_needed();
while let Some(event) = self.next_event(&inbox) {
if let Event::Lsp(lsp_server::Message::Notification(not)) = &event {
@ -204,7 +203,6 @@ impl GlobalState {
self.diagnostics.set_native_diagnostics(file_id, diagnostics)
}
}
Task::Workspaces(workspaces) => self.switch_workspaces(workspaces),
Task::PrimeCaches(progress) => match progress {
PrimeCachesProgress::Started => prime_caches_progress.push(progress),
PrimeCachesProgress::StartedOnCrate { .. } => {
@ -224,7 +222,11 @@ impl GlobalState {
ProjectWorkspaceProgress::Report(msg) => {
(Progress::Report, Some(msg))
}
ProjectWorkspaceProgress::End => (Progress::End, None),
ProjectWorkspaceProgress::End(workspaces) => {
self.fetch_workspaces_completed();
self.switch_workspaces(workspaces);
(Progress::End, None)
}
};
self.report_progress("fetching", state, msg, None);
}
@ -403,6 +405,8 @@ impl GlobalState {
}
}
self.fetch_workspaces_if_needed();
let loop_duration = loop_start.elapsed();
if loop_duration > Duration::from_millis(100) {
log::warn!("overly long loop turn: {:?}", loop_duration);
@ -440,7 +444,7 @@ impl GlobalState {
}
RequestDispatcher { req: Some(req), global_state: self }
.on_sync::<lsp_ext::ReloadWorkspace>(|s, ()| Ok(s.fetch_workspaces()))?
.on_sync::<lsp_ext::ReloadWorkspace>(|s, ()| Ok(s.fetch_workspaces_request()))?
.on_sync::<lsp_ext::JoinLines>(|s, p| handlers::handle_join_lines(s.snapshot(), p))?
.on_sync::<lsp_ext::OnEnter>(|s, p| handlers::handle_on_enter(s.snapshot(), p))?
.on_sync::<lsp_types::request::Shutdown>(|s, ()| {

View file

@ -0,0 +1,25 @@
//! Bookkeeping to make sure only one long-running operation is executed.
/// Tracks a single long-running operation so that at most one instance
/// runs at a time, while remembering whether another run was requested.
///
/// Two independent flags are needed (not an enum) because a new request
/// can legitimately arrive while an operation is already in flight.
#[derive(Default)]
pub(crate) struct OpQueue {
    op_scheduled: bool,
    op_in_progress: bool,
}

impl OpQueue {
    /// Records that the operation should run (again) as soon as possible.
    pub(crate) fn request_op(&mut self) {
        self.op_scheduled = true;
    }

    /// Returns `true` exactly when a pending request may begin now.
    /// On `true`, the queue transitions to the in-progress state and
    /// the pending request is consumed.
    pub(crate) fn should_start_op(&mut self) -> bool {
        let can_start = self.op_scheduled && !self.op_in_progress;
        if can_start {
            self.op_in_progress = true;
            self.op_scheduled = false;
        }
        can_start
    }

    /// Signals that the running operation finished.
    ///
    /// Panics if no operation was in progress — that would indicate a
    /// bookkeeping bug in the caller.
    pub(crate) fn op_completed(&mut self) {
        assert!(self.op_in_progress);
        self.op_in_progress = false;
    }
}

View file

@ -19,7 +19,7 @@ use lsp_ext::StatusParams;
pub(crate) enum ProjectWorkspaceProgress {
Begin,
Report(String),
End,
End(Vec<anyhow::Result<ProjectWorkspace>>),
}
impl GlobalState {
@ -30,7 +30,7 @@ impl GlobalState {
self.analysis_host.update_lru_capacity(self.config.lru_capacity());
}
if self.config.linked_projects() != old_config.linked_projects() {
self.fetch_workspaces()
self.fetch_workspaces_request()
} else if self.config.flycheck() != old_config.flycheck() {
self.reload_flycheck();
}
@ -44,7 +44,7 @@ impl GlobalState {
Status::Ready | Status::Invalid => (),
}
if self.config.cargo_autoreload() {
self.fetch_workspaces();
self.fetch_workspaces_request();
} else {
self.transition(Status::NeedsReload);
}
@ -98,8 +98,15 @@ impl GlobalState {
});
}
}
pub(crate) fn fetch_workspaces(&mut self) {
/// Schedules a workspace fetch via the op queue instead of spawning it
/// directly, so concurrent fetch requests are coalesced; the fetch
/// actually starts when `fetch_workspaces_if_needed` is next called.
pub(crate) fn fetch_workspaces_request(&mut self) {
self.fetch_workspaces_queue.request_op()
}
pub(crate) fn fetch_workspaces_if_needed(&mut self) {
log::info!("will fetch workspaces");
if !self.fetch_workspaces_queue.should_start_op() {
return;
}
self.task_pool.handle.spawn_with_sender({
let linked_projects = self.config.linked_projects();
@ -133,12 +140,17 @@ impl GlobalState {
})
.collect::<Vec<_>>();
sender.send(Task::FetchWorkspace(ProjectWorkspaceProgress::End)).unwrap();
log::info!("did fetch workspaces {:?}", workspaces);
sender.send(Task::Workspaces(workspaces)).unwrap()
sender
.send(Task::FetchWorkspace(ProjectWorkspaceProgress::End(workspaces)))
.unwrap();
}
});
}
/// Marks the in-flight workspace fetch as finished, allowing a queued
/// request (if any) to start on the next `fetch_workspaces_if_needed`.
/// Panics (via the queue's assert) if no fetch was in progress.
pub(crate) fn fetch_workspaces_completed(&mut self) {
self.fetch_workspaces_queue.op_completed()
}
pub(crate) fn switch_workspaces(&mut self, workspaces: Vec<anyhow::Result<ProjectWorkspace>>) {
let _p = profile::span("GlobalState::switch_workspaces");
log::info!("will switch workspaces: {:?}", workspaces);