make it a config

This commit is contained in:
Jake Heinz 2022-01-15 02:47:47 +00:00
parent bcc99091f3
commit 25f67b6939
6 changed files with 38 additions and 8 deletions

View file

@ -108,7 +108,8 @@ pub(crate) fn parallel_prime_caches(
} }
// recv_timeout is somewhat a hack, we need a way to from this thread check to see if the current salsa revision // recv_timeout is somewhat a hack, we need a way to from this thread check to see if the current salsa revision
// is cancelled. // is cancelled on a regular basis. workers will only exit if they are processing a task that is cancelled, or
// if this thread exits, and closes the work channel.
let worker_progress = match progress_receiver.recv_timeout(Duration::from_millis(10)) { let worker_progress = match progress_receiver.recv_timeout(Duration::from_millis(10)) {
Ok(p) => p, Ok(p) => p,
Err(crossbeam_channel::RecvTimeoutError::Timeout) => { Err(crossbeam_channel::RecvTimeoutError::Timeout) => {

View file

@ -1,3 +1,4 @@
//! helper data structure to schedule work for parallel prime caches.
use std::{collections::VecDeque, hash::Hash}; use std::{collections::VecDeque, hash::Hash};
use rustc_hash::FxHashMap; use rustc_hash::FxHashMap;

View file

@ -298,6 +298,9 @@ config_data! {
/// Whether to show `can't find Cargo.toml` error message. /// Whether to show `can't find Cargo.toml` error message.
notifications_cargoTomlNotFound: bool = "true", notifications_cargoTomlNotFound: bool = "true",
/// How many worker threads to handle priming caches. The default `0` means to pick automatically.
primeCaches_numThreads: ParallelPrimeCachesNumThreads = "0",
/// Enable support for procedural macros, implies `#rust-analyzer.cargo.runBuildScripts#`. /// Enable support for procedural macros, implies `#rust-analyzer.cargo.runBuildScripts#`.
procMacro_enable: bool = "true", procMacro_enable: bool = "true",
/// Internal config, path to proc-macro server executable (typically, /// Internal config, path to proc-macro server executable (typically,
@ -1016,6 +1019,13 @@ impl Config {
yield_points: self.data.highlightRelated_yieldPoints, yield_points: self.data.highlightRelated_yieldPoints,
} }
} }
pub fn prime_caches_num_threads(&self) -> u8 {
    // A configured value of 0 means "pick automatically": fall back to the
    // machine's physical core count, clamped into u8 range.
    let configured = self.data.primeCaches_numThreads;
    if configured == 0 {
        num_cpus::get_physical().try_into().unwrap_or(u8::MAX)
    } else {
        configured
    }
}
} }
#[derive(Deserialize, Debug, Clone, Copy)] #[derive(Deserialize, Debug, Clone, Copy)]
@ -1130,6 +1140,8 @@ enum WorkspaceSymbolSearchKindDef {
AllSymbols, AllSymbols,
} }
type ParallelPrimeCachesNumThreads = u8;
macro_rules! _config_data { macro_rules! _config_data {
(struct $name:ident { (struct $name:ident {
$( $(
@ -1351,6 +1363,11 @@ fn field_props(field: &str, ty: &str, doc: &[&str], default: &str) -> serde_json
"Search for all symbols kinds" "Search for all symbols kinds"
], ],
}, },
"ParallelPrimeCachesNumThreads" => set! {
"type": "number",
"minimum": 0,
"maximum": 255
},
_ => panic!("{}: {}", ty, default), _ => panic!("{}: {}", ty, default),
} }

View file

@ -505,17 +505,16 @@ impl GlobalState {
self.fetch_build_data(); self.fetch_build_data();
} }
if self.prime_caches_queue.should_start_op() { if self.prime_caches_queue.should_start_op() {
let num_worker_threads = self.config.prime_caches_num_threads();
self.task_pool.handle.spawn_with_sender({ self.task_pool.handle.spawn_with_sender({
let analysis = self.snapshot().analysis; let analysis = self.snapshot().analysis;
move |sender| { move |sender| {
sender.send(Task::PrimeCaches(PrimeCachesProgress::Begin)).unwrap(); sender.send(Task::PrimeCaches(PrimeCachesProgress::Begin)).unwrap();
let res = analysis.parallel_prime_caches( let res = analysis.parallel_prime_caches(num_worker_threads, |progress| {
num_cpus::get_physical().try_into().unwrap_or(u8::MAX),
|progress| {
let report = PrimeCachesProgress::Report(progress); let report = PrimeCachesProgress::Report(progress);
sender.send(Task::PrimeCaches(report)).unwrap(); sender.send(Task::PrimeCaches(report)).unwrap();
}, });
);
sender sender
.send(Task::PrimeCaches(PrimeCachesProgress::End { .send(Task::PrimeCaches(PrimeCachesProgress::End {
cancelled: res.is_err(), cancelled: res.is_err(),

View file

@ -454,6 +454,11 @@ Number of syntax trees rust-analyzer keeps in memory. Defaults to 128.
-- --
Whether to show `can't find Cargo.toml` error message. Whether to show `can't find Cargo.toml` error message.
-- --
[[rust-analyzer.primeCaches.numThreads]]rust-analyzer.primeCaches.numThreads (default: `0`)::
+
--
How many worker threads to handle priming caches. The default `0` means to pick automatically.
--
[[rust-analyzer.procMacro.enable]]rust-analyzer.procMacro.enable (default: `true`):: [[rust-analyzer.procMacro.enable]]rust-analyzer.procMacro.enable (default: `true`)::
+ +
-- --

View file

@ -880,6 +880,13 @@
"default": true, "default": true,
"type": "boolean" "type": "boolean"
}, },
"rust-analyzer.primeCaches.numThreads": {
"markdownDescription": "How many worker threads to handle priming caches. The default `0` means to pick automatically.", "markdownDescription": "How many worker threads to handle priming caches. The default `0` means to pick automatically.",
"default": 0,
"type": "number",
"minimum": 0,
"maximum": 255
},
"rust-analyzer.procMacro.enable": { "rust-analyzer.procMacro.enable": {
"markdownDescription": "Enable support for procedural macros, implies `#rust-analyzer.cargo.runBuildScripts#`.", "markdownDescription": "Enable support for procedural macros, implies `#rust-analyzer.cargo.runBuildScripts#`.",
"default": true, "default": true,