Reduce maximum LRU size to u16::MAX entries, reducing the memory footprint of LRU entries

Lukas Wirth 2024-07-19 17:47:38 +02:00
parent 4691ca97f1
commit 8e3133f118
9 changed files with 50 additions and 49 deletions

View file

@@ -42,9 +42,9 @@ pub trait Upcast<T: ?Sized> {
     fn upcast(&self) -> &T;
 }
 
-pub const DEFAULT_FILE_TEXT_LRU_CAP: usize = 16;
-pub const DEFAULT_PARSE_LRU_CAP: usize = 128;
-pub const DEFAULT_BORROWCK_LRU_CAP: usize = 2024;
+pub const DEFAULT_FILE_TEXT_LRU_CAP: u16 = 16;
+pub const DEFAULT_PARSE_LRU_CAP: u16 = 128;
+pub const DEFAULT_BORROWCK_LRU_CAP: u16 = 2024;
 
 pub trait FileLoader {
     /// Text of the file.
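
Note (not part of the diff): with the constants narrowed, every LRU capacity is now bounded by u16::MAX (65 535). A minimal sketch of the explicit narrowing a usize-sourced value now needs; the helper name is hypothetical:

    // Hypothetical helper, not in this commit: narrow a usize-sourced
    // capacity to the new u16 range, clamping at the 65_535 maximum.
    fn to_lru_cap(requested: usize) -> u16 {
        u16::try_from(requested).unwrap_or(u16::MAX)
    }

    fn main() {
        assert_eq!(to_lru_cap(128), 128);
        assert_eq!(to_lru_cap(1 << 20), u16::MAX); // oversized requests clamp
    }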

View file

@@ -145,7 +145,7 @@ impl Default for RootDatabase {
 }
 
 impl RootDatabase {
-    pub fn new(lru_capacity: Option<usize>) -> RootDatabase {
+    pub fn new(lru_capacity: Option<u16>) -> RootDatabase {
         let mut db = RootDatabase { storage: ManuallyDrop::new(salsa::Storage::default()) };
         db.set_crate_graph_with_durability(Default::default(), Durability::HIGH);
         db.set_proc_macros_with_durability(Default::default(), Durability::HIGH);
@@ -161,7 +161,7 @@ impl RootDatabase {
         self.set_expand_proc_attr_macros_with_durability(true, Durability::HIGH);
     }
 
-    pub fn update_base_query_lru_capacities(&mut self, lru_capacity: Option<usize>) {
+    pub fn update_base_query_lru_capacities(&mut self, lru_capacity: Option<u16>) {
         let lru_capacity = lru_capacity.unwrap_or(base_db::DEFAULT_PARSE_LRU_CAP);
         base_db::FileTextQuery.in_db_mut(self).set_lru_capacity(DEFAULT_FILE_TEXT_LRU_CAP);
         base_db::ParseQuery.in_db_mut(self).set_lru_capacity(lru_capacity);
@@ -170,7 +170,7 @@ impl RootDatabase {
         hir::db::BorrowckQuery.in_db_mut(self).set_lru_capacity(base_db::DEFAULT_BORROWCK_LRU_CAP);
     }
 
-    pub fn update_lru_capacities(&mut self, lru_capacities: &FxHashMap<Box<str>, usize>) {
+    pub fn update_lru_capacities(&mut self, lru_capacities: &FxHashMap<Box<str>, u16>) {
         use hir::db as hir_db;
 
         base_db::FileTextQuery.in_db_mut(self).set_lru_capacity(DEFAULT_FILE_TEXT_LRU_CAP);
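
Note (not part of the diff): a sketch of building the remapped FxHashMap<Box<str>, u16> that update_lru_capacities now takes; the query-name keys are illustrative:

    use rustc_hash::FxHashMap;

    // Sketch only: a per-query capacity map matching the new signature.
    fn example_capacities() -> FxHashMap<Box<str>, u16> {
        let mut caps: FxHashMap<Box<str>, u16> = FxHashMap::default();
        caps.insert(Box::from("Parse"), 512); // illustrative query name
        caps.insert(Box::from("Borrowck"), 2024); // same value as DEFAULT_BORROWCK_LRU_CAP
        caps
    }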

View file

@@ -161,7 +161,7 @@ pub struct AnalysisHost {
 }
 
 impl AnalysisHost {
-    pub fn new(lru_capacity: Option<usize>) -> AnalysisHost {
+    pub fn new(lru_capacity: Option<u16>) -> AnalysisHost {
         AnalysisHost { db: RootDatabase::new(lru_capacity) }
     }
@@ -169,11 +169,11 @@ impl AnalysisHost {
         AnalysisHost { db }
     }
 
-    pub fn update_lru_capacity(&mut self, lru_capacity: Option<usize>) {
+    pub fn update_lru_capacity(&mut self, lru_capacity: Option<u16>) {
         self.db.update_base_query_lru_capacities(lru_capacity);
     }
 
-    pub fn update_lru_capacities(&mut self, lru_capacities: &FxHashMap<Box<str>, usize>) {
+    pub fn update_lru_capacities(&mut self, lru_capacities: &FxHashMap<Box<str>, u16>) {
         self.db.update_lru_capacities(lru_capacities);
     }

View file

@@ -379,7 +379,7 @@ fn load_crate_graph(
 ) -> RootDatabase {
     let ProjectWorkspace { toolchain, target_layout, .. } = ws;
 
-    let lru_cap = std::env::var("RA_LRU_CAP").ok().and_then(|it| it.parse::<usize>().ok());
+    let lru_cap = std::env::var("RA_LRU_CAP").ok().and_then(|it| it.parse::<u16>().ok());
     let mut db = RootDatabase::new(lru_cap);
     let mut analysis_change = ChangeWithProcMacros::new();
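
Note (not part of the diff): parse::<u16>() rejects values above 65 535, so an oversized RA_LRU_CAP now falls back to the default capacity rather than being honored. A standalone sketch:

    fn main() {
        // Values above u16::MAX no longer parse; .ok() turns the error
        // into None, which RootDatabase::new treats as "use the default".
        assert_eq!("4096".parse::<u16>().ok(), Some(4096));
        assert_eq!("100000".parse::<u16>().ok(), None);
    }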

View file

@@ -283,9 +283,9 @@ config_data! {
         linkedProjects: Vec<ManifestOrProjectJson> = vec![],
 
         /// Number of syntax trees rust-analyzer keeps in memory. Defaults to 128.
-        lru_capacity: Option<usize> = None,
+        lru_capacity: Option<u16> = None,
         /// Sets the LRU capacity of the specified queries.
-        lru_query_capacities: FxHashMap<Box<str>, usize> = FxHashMap::default(),
+        lru_query_capacities: FxHashMap<Box<str>, u16> = FxHashMap::default(),
 
         /// These proc-macros will be ignored when trying to expand them.
         ///
@@ -1606,11 +1606,11 @@ impl Config {
         extra_env
     }
 
-    pub fn lru_parse_query_capacity(&self) -> Option<usize> {
+    pub fn lru_parse_query_capacity(&self) -> Option<u16> {
         self.lru_capacity().to_owned()
     }
 
-    pub fn lru_query_capacities_config(&self) -> Option<&FxHashMap<Box<str>, usize>> {
+    pub fn lru_query_capacities_config(&self) -> Option<&FxHashMap<Box<str>, u16>> {
         self.lru_query_capacities().is_empty().not().then(|| self.lru_query_capacities())
     }
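
Note (not part of the diff): lru_query_capacities_config maps an empty table to None so callers can distinguish "unset" from "configured". The same pattern in a minimal standalone sketch:

    use std::ops::Not;

    use rustc_hash::FxHashMap;

    fn non_empty(caps: &FxHashMap<Box<str>, u16>) -> Option<&FxHashMap<Box<str>, u16>> {
        // An empty map means the user configured nothing.
        caps.is_empty().not().then(|| caps)
    }

    fn main() {
        let mut caps: FxHashMap<Box<str>, u16> = FxHashMap::default();
        assert!(non_empty(&caps).is_none());
        caps.insert(Box::from("Parse"), 256);
        assert!(non_empty(&caps).is_some());
    }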

View file

@@ -203,7 +203,7 @@ where
     Q: QueryFunction,
     MP: MemoizationPolicy<Q>,
 {
-    fn set_lru_capacity(&self, new_capacity: usize) {
+    fn set_lru_capacity(&self, new_capacity: u16) {
         self.lru_list.set_lru_capacity(new_capacity);
     }
 }

View file

@@ -577,7 +577,7 @@
     /// cost of potential extra recalculations of evicted values.
     ///
     /// If `cap` is zero, all values are preserved, this is the default.
-    pub fn set_lru_capacity(&self, cap: usize)
+    pub fn set_lru_capacity(&self, cap: u16)
     where
         Q::Storage: plumbing::LruQueryStorageOps,
     {

View file

@@ -1,7 +1,7 @@
 use oorandom::Rand64;
 use parking_lot::Mutex;
 use std::fmt::Debug;
-use std::sync::atomic::AtomicUsize;
+use std::sync::atomic::AtomicU16;
 use std::sync::atomic::Ordering;
 use triomphe::Arc;
@@ -20,15 +20,15 @@ pub(crate) struct Lru<Node>
 where
     Node: LruNode,
 {
-    green_zone: AtomicUsize,
+    green_zone: AtomicU16,
     data: Mutex<LruData<Node>>,
 }
 
 #[derive(Debug)]
 struct LruData<Node> {
-    end_red_zone: usize,
-    end_yellow_zone: usize,
-    end_green_zone: usize,
+    end_red_zone: u16,
+    end_yellow_zone: u16,
+    end_green_zone: u16,
     rng: Rand64,
     entries: Vec<Arc<Node>>,
 }
@@ -39,9 +39,9 @@ pub(crate) trait LruNode: Sized + Debug {
 
 #[derive(Debug)]
 pub(crate) struct LruIndex {
-    /// Index in the appropriate LRU list, or std::usize::MAX if not a
+    /// Index in the appropriate LRU list, or std::u16::MAX if not a
     /// member.
-    index: AtomicUsize,
+    index: AtomicU16,
 }
 
 impl<Node> Default for Lru<Node>
@@ -68,12 +68,12 @@
     #[cfg_attr(not(test), allow(dead_code))]
     fn with_seed(seed: &str) -> Self {
-        Lru { green_zone: AtomicUsize::new(0), data: Mutex::new(LruData::with_seed(seed)) }
+        Lru { green_zone: AtomicU16::new(0), data: Mutex::new(LruData::with_seed(seed)) }
     }
 
     /// Adjust the total number of nodes permitted to have a value at
     /// once. If `len` is zero, this disables LRU caching completely.
-    pub(crate) fn set_lru_capacity(&self, len: usize) {
+    pub(crate) fn set_lru_capacity(&self, len: u16) {
         let mut data = self.data.lock();
 
         // We require each zone to have at least 1 slot. Therefore,
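
Note (not part of the diff): entries is partitioned by index into green (hottest), yellow, and red zones, and eviction victims are drawn only from the red zone. A sketch of the end-offset arithmetic that resize performs below, using an assumed 10/20/70 split (illustrative, not the crate's actual proportions):

    // Sketch of the zone arithmetic (mirrors LruData::resize). Note that
    // end_red_zone equals the total capacity, which is why u16 offsets
    // suffice once capacity is capped at u16::MAX.
    fn zone_ends(len_green: u16, len_yellow: u16, len_red: u16) -> (u16, u16, u16) {
        let end_green_zone = len_green;
        let end_yellow_zone = end_green_zone + len_yellow;
        let end_red_zone = end_yellow_zone + len_red;
        (end_green_zone, end_yellow_zone, end_red_zone)
    }

    fn main() {
        // Assumed 10/20/70 split of a 100-entry capacity:
        let (g, y, r) = zone_ends(10, 20, 70);
        assert_eq!((0..g), (0..10));   // green zone
        assert_eq!((g..y), (10..30));  // yellow zone
        assert_eq!((y..r), (30..100)); // red zone: eviction candidates
    }
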
@@ -143,23 +143,24 @@
         LruData { end_yellow_zone: 0, end_green_zone: 0, end_red_zone: 0, entries: Vec::new(), rng }
     }
 
-    fn green_zone(&self) -> std::ops::Range<usize> {
+    fn green_zone(&self) -> std::ops::Range<u16> {
         0..self.end_green_zone
     }
 
-    fn yellow_zone(&self) -> std::ops::Range<usize> {
+    fn yellow_zone(&self) -> std::ops::Range<u16> {
         self.end_green_zone..self.end_yellow_zone
     }
 
-    fn red_zone(&self) -> std::ops::Range<usize> {
+    fn red_zone(&self) -> std::ops::Range<u16> {
         self.end_yellow_zone..self.end_red_zone
     }
 
-    fn resize(&mut self, len_green_zone: usize, len_yellow_zone: usize, len_red_zone: usize) {
+    fn resize(&mut self, len_green_zone: u16, len_yellow_zone: u16, len_red_zone: u16) {
         self.end_green_zone = len_green_zone;
         self.end_yellow_zone = self.end_green_zone + len_yellow_zone;
         self.end_red_zone = self.end_yellow_zone + len_red_zone;
-        let entries = std::mem::replace(&mut self.entries, Vec::with_capacity(self.end_red_zone));
+        let entries =
+            std::mem::replace(&mut self.entries, Vec::with_capacity(self.end_red_zone as usize));
 
         tracing::debug!("green_zone = {:?}", self.green_zone());
         tracing::debug!("yellow_zone = {:?}", self.yellow_zone());
@@ -207,7 +208,7 @@
         // Easy case: we still have capacity. Push it, and then promote
         // it up to the appropriate zone.
-        let len = self.entries.len();
+        let len = self.entries.len() as u16;
         if len < self.end_red_zone {
             self.entries.push(node.clone());
             node.lru_index().store(len);
@@ -218,7 +219,7 @@
         // Harder case: no capacity. Create some by evicting somebody from red
         // zone and then promoting.
         let victim_index = self.pick_index(self.red_zone());
-        let victim_node = std::mem::replace(&mut self.entries[victim_index], node.clone());
+        let victim_node = std::mem::replace(&mut self.entries[victim_index as usize], node.clone());
         tracing::debug!("evicting red node {:?} from {}", victim_node, victim_index);
         victim_node.lru_index().clear();
         self.promote_red_to_green(node, victim_index);
@@ -231,7 +232,7 @@
     ///
     /// NB: It is not required that `node.lru_index()` is up-to-date
     /// when entering this method.
-    fn promote_red_to_green(&mut self, node: &Arc<Node>, red_index: usize) {
+    fn promote_red_to_green(&mut self, node: &Arc<Node>, red_index: u16) {
         debug_assert!(self.red_zone().contains(&red_index));
 
         // Pick a yellow at random and switch places with it.
@@ -242,12 +243,12 @@
         let yellow_index = self.pick_index(self.yellow_zone());
         tracing::debug!(
             "demoting yellow node {:?} from {} to red at {}",
-            self.entries[yellow_index],
+            self.entries[yellow_index as usize],
             yellow_index,
             red_index,
         );
-        self.entries.swap(yellow_index, red_index);
-        self.entries[red_index].lru_index().store(red_index);
+        self.entries.swap(yellow_index as usize, red_index as usize);
+        self.entries[red_index as usize].lru_index().store(red_index);
 
         // Now move ourselves up into the green zone.
         self.promote_yellow_to_green(node, yellow_index);
@@ -259,51 +260,51 @@
     ///
     /// NB: It is not required that `node.lru_index()` is up-to-date
     /// when entering this method.
-    fn promote_yellow_to_green(&mut self, node: &Arc<Node>, yellow_index: usize) {
+    fn promote_yellow_to_green(&mut self, node: &Arc<Node>, yellow_index: u16) {
         debug_assert!(self.yellow_zone().contains(&yellow_index));
 
         // Pick a yellow at random and switch places with it.
         let green_index = self.pick_index(self.green_zone());
         tracing::debug!(
             "demoting green node {:?} from {} to yellow at {}",
-            self.entries[green_index],
+            self.entries[green_index as usize],
             green_index,
             yellow_index
         );
-        self.entries.swap(green_index, yellow_index);
-        self.entries[yellow_index].lru_index().store(yellow_index);
+        self.entries.swap(green_index as usize, yellow_index as usize);
+        self.entries[yellow_index as usize].lru_index().store(yellow_index);
 
         node.lru_index().store(green_index);
         tracing::debug!("promoted {:?} to green index {}", node, green_index);
     }
 
-    fn pick_index(&mut self, zone: std::ops::Range<usize>) -> usize {
-        let end_index = std::cmp::min(zone.end, self.entries.len());
-        self.rng.rand_range(zone.start as u64..end_index as u64) as usize
+    fn pick_index(&mut self, zone: std::ops::Range<u16>) -> u16 {
+        let end_index = std::cmp::min(zone.end, self.entries.len() as u16);
+        self.rng.rand_range(zone.start as u64..end_index as u64) as u16
     }
 }
 
 impl Default for LruIndex {
     fn default() -> Self {
-        Self { index: AtomicUsize::new(usize::MAX) }
+        Self { index: AtomicU16::new(u16::MAX) }
     }
 }
 
 impl LruIndex {
-    fn load(&self) -> usize {
+    fn load(&self) -> u16 {
         self.index.load(Ordering::Acquire) // see note on ordering below
     }
 
-    fn store(&self, value: usize) {
+    fn store(&self, value: u16) {
         self.index.store(value, Ordering::Release) // see note on ordering below
     }
 
     fn clear(&self) {
-        self.store(usize::MAX);
+        self.store(u16::MAX);
     }
 
     fn is_in_lru(&self) -> bool {
-        self.load() != usize::MAX
+        self.load() != u16::MAX
     }
 }
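
Note (not part of the diff): the sentinel reserves the index value u16::MAX for "not a member", so valid indices run 0..u16::MAX and the list holds at most 65 535 live entries. The same pattern as a self-contained sketch with stand-in types:

    use std::sync::atomic::{AtomicU16, Ordering};

    // Illustrative stand-in for LruIndex: u16::MAX marks "not in the LRU".
    struct SlotIndex {
        index: AtomicU16,
    }

    impl SlotIndex {
        fn new() -> SlotIndex {
            SlotIndex { index: AtomicU16::new(u16::MAX) } // starts outside the list
        }
        fn is_in_lru(&self) -> bool {
            self.index.load(Ordering::Acquire) != u16::MAX
        }
    }

    fn main() {
        let slot = SlotIndex::new();
        assert!(!slot.is_in_lru());
        slot.index.store(3, Ordering::Release); // placed at list index 3
        assert!(slot.is_in_lru());
    }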

View file

@@ -228,7 +228,7 @@
 /// that is, storage whose value is not derived from other storage but
 /// is set independently.
 pub trait LruQueryStorageOps {
-    fn set_lru_capacity(&self, new_capacity: usize);
+    fn set_lru_capacity(&self, new_capacity: u16);
 }
 
 pub trait DerivedQueryStorageOps<Q>
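
Note (not part of the diff): a sketch of what an implementor of the narrowed trait looks like; CountingStorage is a made-up type for illustration, not a salsa storage:

    use std::sync::atomic::{AtomicU16, Ordering};

    pub trait LruQueryStorageOps {
        fn set_lru_capacity(&self, new_capacity: u16);
    }

    // Made-up storage type: records the capacity, much as DerivedStorage
    // forwards it to its lru_list.
    struct CountingStorage {
        capacity: AtomicU16,
    }

    impl LruQueryStorageOps for CountingStorage {
        fn set_lru_capacity(&self, new_capacity: u16) {
            self.capacity.store(new_capacity, Ordering::Release);
        }
    }

    fn main() {
        let storage = CountingStorage { capacity: AtomicU16::new(0) };
        storage.set_lru_capacity(128);
        assert_eq!(storage.capacity.load(Ordering::Acquire), 128);
    }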