Mirror of https://github.com/rust-lang/rust-analyzer, synced 2024-11-15 01:17:27 +00:00

Auto merge of #13014 - Veykril:simplify, r=Veykril

minor: Simplify `GlobalState::handle_event`

Commit 306687b640: 1 changed file with 192 additions and 189 deletions
@@ -9,6 +9,7 @@ use std::{
 use always_assert::always;
 use crossbeam_channel::{select, Receiver};
+use flycheck::FlycheckHandle;
 use ide_db::base_db::{SourceDatabase, SourceDatabaseExt, VfsPath};
 use itertools::Itertools;
 use lsp_server::{Connection, Notification, Request};
@@ -205,81 +206,14 @@ impl GlobalState {
                 }
                 lsp_server::Message::Response(resp) => self.complete_request(resp),
             },
-            Event::Task(mut task) => {
+            Event::Task(task) => {
                 let _p = profile::span("GlobalState::handle_event/task");
                 let mut prime_caches_progress = Vec::new();
-                loop {
-                    match task {
-                        Task::Response(response) => self.respond(response),
-                        Task::Retry(req) => self.on_request(req),
-                        Task::Diagnostics(diagnostics_per_file) => {
-                            for (file_id, diagnostics) in diagnostics_per_file {
-                                self.diagnostics.set_native_diagnostics(file_id, diagnostics)
-                            }
-                        }
-                        Task::PrimeCaches(progress) => match progress {
-                            PrimeCachesProgress::Begin => prime_caches_progress.push(progress),
-                            PrimeCachesProgress::Report(_) => {
-                                match prime_caches_progress.last_mut() {
-                                    Some(last @ PrimeCachesProgress::Report(_)) => {
-                                        // Coalesce subsequent update events.
-                                        *last = progress;
-                                    }
-                                    _ => prime_caches_progress.push(progress),
-                                }
-                            }
-                            PrimeCachesProgress::End { .. } => prime_caches_progress.push(progress),
-                        },
-                        Task::FetchWorkspace(progress) => {
-                            let (state, msg) = match progress {
-                                ProjectWorkspaceProgress::Begin => (Progress::Begin, None),
-                                ProjectWorkspaceProgress::Report(msg) => {
-                                    (Progress::Report, Some(msg))
-                                }
-                                ProjectWorkspaceProgress::End(workspaces) => {
-                                    self.fetch_workspaces_queue.op_completed(workspaces);
-
-                                    let old = Arc::clone(&self.workspaces);
-                                    self.switch_workspaces("fetched workspace".to_string());
-                                    let workspaces_updated = !Arc::ptr_eq(&old, &self.workspaces);
-
-                                    if self.config.run_build_scripts() && workspaces_updated {
-                                        self.fetch_build_data_queue
-                                            .request_op(format!("workspace updated"));
-                                    }
-
-                                    (Progress::End, None)
-                                }
-                            };
-
-                            self.report_progress("Fetching", state, msg, None);
-                        }
-                        Task::FetchBuildData(progress) => {
-                            let (state, msg) = match progress {
-                                BuildDataProgress::Begin => (Some(Progress::Begin), None),
-                                BuildDataProgress::Report(msg) => {
-                                    (Some(Progress::Report), Some(msg))
-                                }
-                                BuildDataProgress::End(build_data_result) => {
-                                    self.fetch_build_data_queue.op_completed(build_data_result);
-
-                                    self.switch_workspaces("fetched build data".to_string());
-
-                                    (Some(Progress::End), None)
-                                }
-                            };
-
-                            if let Some(state) = state {
-                                self.report_progress("Loading", state, msg, None);
-                            }
-                        }
-                    }
-
-                    // Coalesce multiple task events into one loop turn
-                    task = match self.task_pool.receiver.try_recv() {
-                        Ok(task) => task,
-                        Err(_) => break,
-                    };
-                }
+                self.handle_task(&mut prime_caches_progress, task);
+                // Coalesce multiple task events into one loop turn
+                while let Ok(task) = self.task_pool.receiver.try_recv() {
+                    self.handle_task(&mut prime_caches_progress, task);
+                }
 
                 for progress in prime_caches_progress {
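The rewritten `Event::Task` arm above handles the event that woke the loop and then drains whatever is already sitting in the channel with `try_recv`, so a burst of task messages is coalesced into a single loop turn. Below is a minimal, self-contained sketch of that drain pattern using crossbeam-channel (the channel crate this file already imports); the `drain_and_handle` helper and the integer events are invented for illustration and are not rust-analyzer code.

// Sketch of the "handle one event, then drain the backlog" pattern shown in
// the diff above. Assumes crossbeam-channel as a dependency.
use crossbeam_channel::{unbounded, Receiver};

fn drain_and_handle<T>(first: T, receiver: &Receiver<T>, mut handle: impl FnMut(T)) {
    // Handle the event that woke us up.
    handle(first);
    // Coalesce everything that is already queued into this same loop turn.
    while let Ok(next) = receiver.try_recv() {
        handle(next);
    }
}

fn main() {
    let (sender, receiver) = unbounded();
    for i in 1..=3 {
        sender.send(i).unwrap();
    }
    let first = receiver.recv().unwrap();
    drain_and_handle(first, &receiver, |event| println!("handled event {event}"));
}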
@@ -326,119 +260,20 @@ impl GlobalState {
                     self.report_progress("Indexing", state, message, Some(fraction));
                 }
             }
-            Event::Vfs(mut task) => {
+            Event::Vfs(message) => {
                 let _p = profile::span("GlobalState::handle_event/vfs");
-                loop {
-                    match task {
-                        vfs::loader::Message::Loaded { files } => {
-                            let vfs = &mut self.vfs.write().0;
-                            for (path, contents) in files {
-                                let path = VfsPath::from(path);
-                                if !self.mem_docs.contains(&path) {
-                                    vfs.set_file_contents(path, contents);
-                                }
-                            }
-                        }
-                        vfs::loader::Message::Progress { n_total, n_done, config_version } => {
-                            always!(config_version <= self.vfs_config_version);
-
-                            self.vfs_progress_config_version = config_version;
-                            self.vfs_progress_n_total = n_total;
-                            self.vfs_progress_n_done = n_done;
-
-                            let state = if n_done == 0 {
-                                Progress::Begin
-                            } else if n_done < n_total {
-                                Progress::Report
-                            } else {
-                                assert_eq!(n_done, n_total);
-                                Progress::End
-                            };
-                            self.report_progress(
-                                "Roots Scanned",
-                                state,
-                                Some(format!("{}/{}", n_done, n_total)),
-                                Some(Progress::fraction(n_done, n_total)),
-                            )
-                        }
-                    }
-                    // Coalesce many VFS event into a single loop turn
-                    task = match self.loader.receiver.try_recv() {
-                        Ok(task) => task,
-                        Err(_) => break,
-                    }
-                }
-            }
-            Event::Flycheck(mut task) => {
+                self.handle_vfs_msg(message);
+                // Coalesce many VFS event into a single loop turn
+                while let Ok(message) = self.loader.receiver.try_recv() {
+                    self.handle_vfs_msg(message);
+                }
+            }
+            Event::Flycheck(message) => {
                 let _p = profile::span("GlobalState::handle_event/flycheck");
-                loop {
-                    match task {
-                        flycheck::Message::AddDiagnostic { id, workspace_root, diagnostic } => {
-                            let snap = self.snapshot();
-                            let diagnostics =
-                                crate::diagnostics::to_proto::map_rust_diagnostic_to_lsp(
-                                    &self.config.diagnostics_map(),
-                                    &diagnostic,
-                                    &workspace_root,
-                                    &snap,
-                                );
-                            for diag in diagnostics {
-                                match url_to_file_id(&self.vfs.read().0, &diag.url) {
-                                    Ok(file_id) => self.diagnostics.add_check_diagnostic(
-                                        id,
-                                        file_id,
-                                        diag.diagnostic,
-                                        diag.fix,
-                                    ),
-                                    Err(err) => {
-                                        tracing::error!(
-                                            "File with cargo diagnostic not found in VFS: {}",
-                                            err
-                                        );
-                                    }
-                                };
-                            }
-                        }
-
-                        flycheck::Message::Progress { id, progress } => {
-                            let (state, message) = match progress {
-                                flycheck::Progress::DidStart => {
-                                    self.diagnostics.clear_check(id);
-                                    (Progress::Begin, None)
-                                }
-                                flycheck::Progress::DidCheckCrate(target) => {
-                                    (Progress::Report, Some(target))
-                                }
-                                flycheck::Progress::DidCancel => (Progress::End, None),
-                                flycheck::Progress::DidFinish(result) => {
-                                    if let Err(err) = result {
-                                        self.show_and_log_error(
-                                            "cargo check failed".to_string(),
-                                            Some(err.to_string()),
-                                        );
-                                    }
-                                    (Progress::End, None)
-                                }
-                            };
-
-                            // When we're running multiple flychecks, we have to include a disambiguator in
-                            // the title, or the editor complains. Note that this is a user-facing string.
-                            let title = if self.flycheck.len() == 1 {
-                                match self.config.flycheck() {
-                                    Some(config) => format!("{}", config),
-                                    None => "cargo check".to_string(),
-                                }
-                            } else {
-                                format!("cargo check (#{})", id + 1)
-                            };
-                            self.report_progress(&title, state, message, None);
-                        }
-                    }
-                    // Coalesce many flycheck updates into a single loop turn
-                    task = match self.flycheck_receiver.try_recv() {
-                        Ok(task) => task,
-                        Err(_) => break,
-                    }
-                }
+                self.handle_flycheck_msg(message);
+                // Coalesce many flycheck updates into a single loop turn
+                while let Ok(message) = self.flycheck_receiver.try_recv() {
+                    self.handle_flycheck_msg(message);
+                }
             }
         }
@@ -447,13 +282,13 @@ impl GlobalState {
         let memdocs_added_or_removed = self.mem_docs.take_changes();
 
         if self.is_quiescent() {
-            if !was_quiescent
-                && !self.fetch_workspaces_queue.op_requested()
-                && !self.fetch_build_data_queue.op_requested()
-            {
-                for flycheck in &self.flycheck {
-                    flycheck.update();
-                }
+            let became_quiescent = !(was_quiescent
+                || self.fetch_workspaces_queue.op_requested()
+                || self.fetch_build_data_queue.op_requested());
+
+            if became_quiescent {
+                // Project has loaded properly, kick off initial flycheck
+                self.flycheck.iter().for_each(FlycheckHandle::update);
                 if self.config.prefill_caches() {
                     self.prime_caches_queue.request_op("became quiescent".to_string());
                 }
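The hunk above does not change the behavior of the quiescence check, it only names it: by De Morgan's laws, the old `!was_quiescent && !self.fetch_workspaces_queue.op_requested() && !self.fetch_build_data_queue.op_requested()` is the same predicate as the new `!(was_quiescent || ... || ...)` bound to `became_quiescent`. A tiny standalone check of that equivalence over every input combination (the variable names below are placeholders, not the real fields):

// Exhaustively verify that !a && !b && !c equals !(a || b || c) for all inputs.
fn main() {
    for bits in 0..8u8 {
        let (a, b, c) = (bits & 1 != 0, bits & 2 != 0, bits & 4 != 0);
        let old_form = !a && !b && !c;
        let new_form = !(a || b || c);
        assert_eq!(old_form, new_form, "mismatch at a={a} b={b} c={c}");
    }
    println!("old and new conditions agree on all 8 combinations");
}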
@@ -495,8 +330,9 @@ impl GlobalState {
             let url = file_id_to_url(&self.vfs.read().0, file_id);
             let mut diagnostics =
                 self.diagnostics.diagnostics_for(file_id).cloned().collect::<Vec<_>>();
-            // https://github.com/rust-lang/rust-analyzer/issues/11404
             for d in &mut diagnostics {
+                // https://github.com/rust-lang/rust-analyzer/issues/11404
+                // FIXME: We should move this workaround into the client code
                 if d.message.is_empty() {
                     d.message = " ".to_string();
                 }
@@ -575,11 +411,171 @@ impl GlobalState {
         Ok(())
     }
 
+    fn handle_task(&mut self, prime_caches_progress: &mut Vec<PrimeCachesProgress>, task: Task) {
+        match task {
+            Task::Response(response) => self.respond(response),
+            Task::Retry(req) => self.on_request(req),
+            Task::Diagnostics(diagnostics_per_file) => {
+                for (file_id, diagnostics) in diagnostics_per_file {
+                    self.diagnostics.set_native_diagnostics(file_id, diagnostics)
+                }
+            }
+            Task::PrimeCaches(progress) => match progress {
+                PrimeCachesProgress::Begin => prime_caches_progress.push(progress),
+                PrimeCachesProgress::Report(_) => {
+                    match prime_caches_progress.last_mut() {
+                        Some(last @ PrimeCachesProgress::Report(_)) => {
+                            // Coalesce subsequent update events.
+                            *last = progress;
+                        }
+                        _ => prime_caches_progress.push(progress),
+                    }
+                }
+                PrimeCachesProgress::End { .. } => prime_caches_progress.push(progress),
+            },
+            Task::FetchWorkspace(progress) => {
+                let (state, msg) = match progress {
+                    ProjectWorkspaceProgress::Begin => (Progress::Begin, None),
+                    ProjectWorkspaceProgress::Report(msg) => (Progress::Report, Some(msg)),
+                    ProjectWorkspaceProgress::End(workspaces) => {
+                        self.fetch_workspaces_queue.op_completed(workspaces);
+
+                        let old = Arc::clone(&self.workspaces);
+                        self.switch_workspaces("fetched workspace".to_string());
+                        let workspaces_updated = !Arc::ptr_eq(&old, &self.workspaces);
+
+                        if self.config.run_build_scripts() && workspaces_updated {
+                            self.fetch_build_data_queue.request_op(format!("workspace updated"));
+                        }
+
+                        (Progress::End, None)
+                    }
+                };
+
+                self.report_progress("Fetching", state, msg, None);
+            }
+            Task::FetchBuildData(progress) => {
+                let (state, msg) = match progress {
+                    BuildDataProgress::Begin => (Some(Progress::Begin), None),
+                    BuildDataProgress::Report(msg) => (Some(Progress::Report), Some(msg)),
+                    BuildDataProgress::End(build_data_result) => {
+                        self.fetch_build_data_queue.op_completed(build_data_result);
+
+                        self.switch_workspaces("fetched build data".to_string());
+
+                        (Some(Progress::End), None)
+                    }
+                };
+
+                if let Some(state) = state {
+                    self.report_progress("Loading", state, msg, None);
+                }
+            }
+        }
+    }
+
+    fn handle_vfs_msg(&mut self, message: vfs::loader::Message) {
+        match message {
+            vfs::loader::Message::Loaded { files } => {
+                let vfs = &mut self.vfs.write().0;
+                for (path, contents) in files {
+                    let path = VfsPath::from(path);
+                    if !self.mem_docs.contains(&path) {
+                        vfs.set_file_contents(path, contents);
+                    }
+                }
+            }
+            vfs::loader::Message::Progress { n_total, n_done, config_version } => {
+                always!(config_version <= self.vfs_config_version);
+
+                self.vfs_progress_config_version = config_version;
+                self.vfs_progress_n_total = n_total;
+                self.vfs_progress_n_done = n_done;
+
+                let state = if n_done == 0 {
+                    Progress::Begin
+                } else if n_done < n_total {
+                    Progress::Report
+                } else {
+                    assert_eq!(n_done, n_total);
+                    Progress::End
+                };
+                self.report_progress(
+                    "Roots Scanned",
+                    state,
+                    Some(format!("{}/{}", n_done, n_total)),
+                    Some(Progress::fraction(n_done, n_total)),
+                )
+            }
+        }
+    }
+
+    fn handle_flycheck_msg(&mut self, message: flycheck::Message) {
+        match message {
+            flycheck::Message::AddDiagnostic { id, workspace_root, diagnostic } => {
+                let snap = self.snapshot();
+                let diagnostics = crate::diagnostics::to_proto::map_rust_diagnostic_to_lsp(
+                    &self.config.diagnostics_map(),
+                    &diagnostic,
+                    &workspace_root,
+                    &snap,
+                );
+                for diag in diagnostics {
+                    match url_to_file_id(&self.vfs.read().0, &diag.url) {
+                        Ok(file_id) => self.diagnostics.add_check_diagnostic(
+                            id,
+                            file_id,
+                            diag.diagnostic,
+                            diag.fix,
+                        ),
+                        Err(err) => {
+                            tracing::error!("File with cargo diagnostic not found in VFS: {}", err);
+                        }
+                    };
+                }
+            }
+
+            flycheck::Message::Progress { id, progress } => {
+                let (state, message) = match progress {
+                    flycheck::Progress::DidStart => {
+                        self.diagnostics.clear_check(id);
+                        (Progress::Begin, None)
+                    }
+                    flycheck::Progress::DidCheckCrate(target) => (Progress::Report, Some(target)),
+                    flycheck::Progress::DidCancel => (Progress::End, None),
+                    flycheck::Progress::DidFinish(result) => {
+                        if let Err(err) = result {
+                            self.show_and_log_error(
+                                "cargo check failed".to_string(),
+                                Some(err.to_string()),
+                            );
+                        }
+                        (Progress::End, None)
+                    }
+                };
+
+                // When we're running multiple flychecks, we have to include a disambiguator in
+                // the title, or the editor complains. Note that this is a user-facing string.
+                let title = if self.flycheck.len() == 1 {
+                    match self.config.flycheck() {
+                        Some(config) => format!("{}", config),
+                        None => "cargo check".to_string(),
+                    }
+                } else {
+                    format!("cargo check (#{})", id + 1)
+                };
+                self.report_progress(&title, state, message, None);
+            }
+        }
+    }
+
+    /// Registers and handles a request. This should only be called once per incoming request.
     fn on_new_request(&mut self, request_received: Instant, req: Request) {
         self.register_request(&req, request_received);
         self.on_request(req);
     }
 
+    /// Handles a request.
     fn on_request(&mut self, req: Request) {
         if self.shutdown_requested {
             self.respond(lsp_server::Response::new_err(
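Taken together, the hunk above is an extract-method refactor: `handle_event` keeps only dispatch and coalescing, and each event source gets its own small handler (`handle_task`, `handle_vfs_msg`, `handle_flycheck_msg`). A toy sketch of the resulting shape; the `Event` variants, `State`, and handler bodies below are invented for illustration and are much simpler than the real types:

// Dispatcher-plus-handlers shape left behind by the refactor (toy example).
enum Event {
    Task(u32),
    Vfs(String),
}

struct State;

impl State {
    fn handle_event(&mut self, event: Event) {
        // The dispatcher stays shallow; per-source logic lives in the handlers.
        match event {
            Event::Task(task) => self.handle_task(task),
            Event::Vfs(message) => self.handle_vfs_msg(message),
        }
    }

    fn handle_task(&mut self, task: u32) {
        println!("task {task}");
    }

    fn handle_vfs_msg(&mut self, message: String) {
        println!("vfs message: {message}");
    }
}

fn main() {
    let mut state = State;
    state.handle_event(Event::Task(1));
    state.handle_event(Event::Vfs("file loaded".to_string()));
}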
@@ -670,6 +666,7 @@ impl GlobalState {
             .finish();
     }
 
+    /// Handles an incoming notification.
     fn on_notification(&mut self, not: Notification) -> Result<()> {
         NotificationDispatcher { not: Some(not), global_state: self }
             .on::<lsp_types::notification::Cancel>(|this, params| {
@@ -743,6 +740,8 @@ impl GlobalState {
         let mut updated = false;
         if let Ok(vfs_path) = from_proto::vfs_path(&params.text_document.uri) {
             let (vfs, _) = &*this.vfs.read();
+
+            // Trigger flychecks for all workspaces that depend on the saved file
             if let Some(file_id) = vfs.file_id(&vfs_path) {
                 let analysis = this.analysis_host.analysis();
                 // Crates containing or depending on the saved file
@@ -800,6 +799,8 @@ impl GlobalState {
                     }
                 }
             }
+
+            // Re-fetch workspaces if a workspace related file has changed
             if let Some(abs_path) = vfs_path.as_path() {
                 if reload::should_refresh_for_change(&abs_path, ChangeKind::Modify) {
                     this.fetch_workspaces_queue
@@ -807,6 +808,8 @@ impl GlobalState {
                 }
             }
         }
+
+        // No specific flycheck was triggered, so let's trigger all of them.
         if !updated {
            for flycheck in &this.flycheck {
                flycheck.update();