diff --git a/crates/ide/src/prime_caches.rs b/crates/ide/src/prime_caches.rs
index 5da2995556..892b34c7d9 100644
--- a/crates/ide/src/prime_caches.rs
+++ b/crates/ide/src/prime_caches.rs
@@ -108,7 +108,8 @@ pub(crate) fn parallel_prime_caches(
         }
 
         // recv_timeout is somewhat a hack, we need a way to from this thread check to see if the current salsa revision
-        // is cancelled.
+        // is cancelled on a regular basis. Workers will only exit if they are processing a task that is cancelled, or
+        // if this thread exits and closes the work channel.
         let worker_progress = match progress_receiver.recv_timeout(Duration::from_millis(10)) {
             Ok(p) => p,
             Err(crossbeam_channel::RecvTimeoutError::Timeout) => {
diff --git a/crates/ide/src/prime_caches/topologic_sort.rs b/crates/ide/src/prime_caches/topologic_sort.rs
index f54d2c1908..b04087fa7b 100644
--- a/crates/ide/src/prime_caches/topologic_sort.rs
+++ b/crates/ide/src/prime_caches/topologic_sort.rs
@@ -1,3 +1,4 @@
+//! Helper data structure to schedule work for parallel prime caches.
 use std::{collections::VecDeque, hash::Hash};
 
 use rustc_hash::FxHashMap;
diff --git a/crates/rust-analyzer/src/config.rs b/crates/rust-analyzer/src/config.rs
index 1df19ffe78..76b7270797 100644
--- a/crates/rust-analyzer/src/config.rs
+++ b/crates/rust-analyzer/src/config.rs
@@ -298,6 +298,9 @@ config_data! {
         /// Whether to show `can't find Cargo.toml` error message.
         notifications_cargoTomlNotFound: bool = "true",
 
+        /// How many worker threads to handle priming caches. The default `0` means to pick automatically.
+        primeCaches_numThreads: ParallelPrimeCachesNumThreads = "0",
+
         /// Enable support for procedural macros, implies `#rust-analyzer.cargo.runBuildScripts#`.
         procMacro_enable: bool = "true",
         /// Internal config, path to proc-macro server executable (typically,
@@ -1016,6 +1019,13 @@ impl Config {
             yield_points: self.data.highlightRelated_yieldPoints,
         }
     }
+
+    pub fn prime_caches_num_threads(&self) -> u8 {
+        match self.data.primeCaches_numThreads {
+            0 => num_cpus::get_physical().try_into().unwrap_or(u8::MAX),
+            n => n,
+        }
+    }
 }
 
 #[derive(Deserialize, Debug, Clone, Copy)]
@@ -1130,6 +1140,8 @@ enum WorkspaceSymbolSearchKindDef {
     AllSymbols,
 }
 
+type ParallelPrimeCachesNumThreads = u8;
+
 macro_rules! _config_data {
     (struct $name:ident {
         $(
@@ -1351,6 +1363,11 @@ fn field_props(field: &str, ty: &str, doc: &[&str], default: &str) -> serde_json
                 "Search for all symbols kinds"
             ],
         },
+        "ParallelPrimeCachesNumThreads" => set! {
+            "type": "number",
+            "minimum": 0,
+            "maximum": 255
+        },
         _ => panic!("{}: {}", ty, default),
     }
 
diff --git a/crates/rust-analyzer/src/main_loop.rs b/crates/rust-analyzer/src/main_loop.rs
index 3d17ee4513..45d7c3e5ed 100644
--- a/crates/rust-analyzer/src/main_loop.rs
+++ b/crates/rust-analyzer/src/main_loop.rs
@@ -505,17 +505,16 @@ impl GlobalState {
             self.fetch_build_data();
         }
         if self.prime_caches_queue.should_start_op() {
+            let num_worker_threads = self.config.prime_caches_num_threads();
+
             self.task_pool.handle.spawn_with_sender({
                 let analysis = self.snapshot().analysis;
                 move |sender| {
                     sender.send(Task::PrimeCaches(PrimeCachesProgress::Begin)).unwrap();
-                    let res = analysis.parallel_prime_caches(
-                        num_cpus::get_physical().try_into().unwrap_or(u8::MAX),
-                        |progress| {
-                            let report = PrimeCachesProgress::Report(progress);
-                            sender.send(Task::PrimeCaches(report)).unwrap();
-                        },
-                    );
+                    let res = analysis.parallel_prime_caches(num_worker_threads, |progress| {
+                        let report = PrimeCachesProgress::Report(progress);
+                        sender.send(Task::PrimeCaches(report)).unwrap();
+                    });
                     sender
                         .send(Task::PrimeCaches(PrimeCachesProgress::End {
                             cancelled: res.is_err(),
diff --git a/docs/user/generated_config.adoc b/docs/user/generated_config.adoc
index f7a533c7c2..b10b0d3552 100644
--- a/docs/user/generated_config.adoc
+++ b/docs/user/generated_config.adoc
@@ -454,6 +454,11 @@ Number of syntax trees rust-analyzer keeps in memory. Defaults to 128.
 --
 Whether to show `can't find Cargo.toml` error message.
 --
+[[rust-analyzer.primeCaches.numThreads]]rust-analyzer.primeCaches.numThreads (default: `0`)::
++
+--
+How many worker threads to handle priming caches. The default `0` means to pick automatically.
+--
 [[rust-analyzer.procMacro.enable]]rust-analyzer.procMacro.enable (default: `true`)::
 +
 --
diff --git a/editors/code/package.json b/editors/code/package.json
index 2c7d6c3773..21a59745bf 100644
--- a/editors/code/package.json
+++ b/editors/code/package.json
@@ -880,6 +880,13 @@
                 "default": true,
                 "type": "boolean"
             },
+            "rust-analyzer.primeCaches.numThreads": {
+                "markdownDescription": "How many worker threads to handle priming caches. The default `0` means to pick automatically.",
+                "default": 0,
+                "type": "number",
+                "minimum": 0,
+                "maximum": 255
+            },
             "rust-analyzer.procMacro.enable": {
                 "markdownDescription": "Enable support for procedural macros, implies `#rust-analyzer.cargo.runBuildScripts#`.",
                 "default": true,
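Example usage (illustrative only, not part of the patch; the value 4 is an arbitrary choice): with the schema registered in package.json above, a user can pin the worker count in VS Code's settings.json:

    {
        "rust-analyzer.primeCaches.numThreads": 4
    }

Leaving the setting at its default of 0 keeps the automatic behaviour: the number of physical cores reported by num_cpus::get_physical(), capped at 255.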