
Traded some mem + cpu for more smoothness while toggling.

ClementTsang 2020-01-08 00:28:11 -05:00
parent 0f2b4a7ba5
commit 6cf8f0d98f
4 changed files with 123 additions and 60 deletions

View file

@@ -32,7 +32,8 @@ pub struct Data {
     pub swap: Vec<mem::MemData>,
     pub list_of_temperature_sensor: Vec<temperature::TempData>,
     pub network: Vec<network::NetworkData>,
-    pub list_of_processes: Vec<processes::ProcessData>, // Only need to keep a list of processes...
+    pub list_of_processes: Vec<processes::ProcessData>,
+    pub grouped_list_of_processes: Option<Vec<processes::ProcessData>>,
     pub list_of_disks: Vec<disks::DiskData>, // Only need to keep a list of disks and their data
 }

View file

@@ -54,6 +54,7 @@ pub struct CanvasData {
     pub disk_data: Vec<Vec<String>>,
     pub temp_sensor_data: Vec<Vec<String>>,
     pub process_data: Vec<ConvertedProcessData>,
+    pub grouped_process_data: Vec<ConvertedProcessData>,
     pub memory_labels: Vec<(u64, u64)>,
     pub mem_data: Vec<(f64, f64)>,
     pub swap_data: Vec<(f64, f64)>,
@@ -747,7 +748,11 @@ fn draw_disk_table<B: backend::Backend>(
 fn draw_processes_table<B: backend::Backend>(
     f: &mut Frame<B>, app_state: &mut app::App, draw_loc: Rect,
 ) {
-    let process_data: &[ConvertedProcessData] = &(app_state.canvas_data.process_data);
+    let process_data: &[ConvertedProcessData] = if app_state.is_grouped() {
+        &app_state.canvas_data.grouped_process_data
+    } else {
+        &app_state.canvas_data.process_data
+    };
     // Admittedly this is kinda a hack... but we need to:
     // * Scroll
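
The branch above is the heart of the commit: draw_processes_table just borrows whichever cached vector matches the current mode, so toggling grouping never rebuilds anything mid-frame. A minimal, self-contained sketch of that select-a-cached-slice pattern (names invented for illustration; only is_grouped() appears in the diff):

fn pick<'a>(grouped: bool, per_pid: &'a [u32], by_name: &'a [u32]) -> &'a [u32] {
    // Stand-in for the `if app_state.is_grouped() { ... } else { ... }` above:
    // both slices already exist, so this is a reference swap, not a rebuild.
    if grouped { by_name } else { per_pid }
}

fn main() {
    let per_pid = vec![1, 2, 3];
    let by_name = vec![10, 20];
    assert_eq!(pick(true, &per_pid, &by_name), &[10, 20][..]);
    assert_eq!(pick(false, &per_pid, &by_name), &[1, 2, 3][..]);
}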

View file

@@ -136,11 +136,13 @@ pub fn update_disk_row(app_data: &data_collection::Data) -> Vec<Vec<String>> {
     disk_vector
 }
 
-pub fn update_process_row(app_data: &data_collection::Data) -> Vec<ConvertedProcessData> {
-    let mut process_vector: Vec<ConvertedProcessData> = Vec::new();
-
-    for process in &app_data.list_of_processes {
-        process_vector.push(ConvertedProcessData {
+pub fn update_process_row(
+    app_data: &data_collection::Data,
+) -> (Vec<ConvertedProcessData>, Vec<ConvertedProcessData>) {
+    let process_vector: Vec<ConvertedProcessData> = app_data
+        .list_of_processes
+        .iter()
+        .map(|process| ConvertedProcessData {
             pid: process.pid,
             name: process.command.to_string(),
             cpu_usage: format!("{:.1}%", process.cpu_usage_percent),
@@ -163,10 +165,41 @@ pub fn update_process_row(app_data: &data_collection::Data) -> Vec<ConvertedProcessData> {
             } else {
                 0
             },
-        });
-    }
+        })
+        .collect::<Vec<_>>();
+
+    let mut grouped_process_vector: Vec<ConvertedProcessData> = Vec::new();
+    if let Some(grouped_list_of_processes) = &app_data.grouped_list_of_processes {
+        grouped_process_vector = grouped_list_of_processes
+            .iter()
+            .map(|process| ConvertedProcessData {
+                pid: process.pid,
+                name: process.command.to_string(),
+                cpu_usage: format!("{:.1}%", process.cpu_usage_percent),
+                mem_usage: format!(
+                    "{:.1}%",
+                    if let Some(mem_usage) = process.mem_usage_percent {
+                        mem_usage
+                    } else if let Some(mem_usage_kb) = process.mem_usage_kb {
+                        if let Some(mem_data) = app_data.memory.last() {
+                            (mem_usage_kb / 1000) as f64 / mem_data.mem_total_in_mb as f64 * 100_f64
+                        } else {
+                            0_f64
+                        }
+                    } else {
+                        0_f64
+                    }
+                ),
+                group_count: if let Some(pid_vec) = &process.pid_vec {
+                    pid_vec.len() as u32
+                } else {
+                    0
+                },
+            })
+            .collect::<Vec<_>>();
+    }
 
-    process_vector
+    (process_vector, grouped_process_vector)
 }
 
 pub fn update_cpu_data_points(
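
One detail in the fallback above deserves a sanity check: when a grouped entry only carries a raw KB figure, the percentage is computed as (KB / 1000) MB against mem_total_in_mb, with integer division happening before the float cast. A quick worked example (numbers invented for illustration):

fn main() {
    let mem_usage_kb: u64 = 2_048_000; // ~2 GB resident
    let mem_total_in_mb: u64 = 16_384; // 16 GB of RAM
    // Same expression as in the diff: u64 division truncates to whole MB first.
    let pct = (mem_usage_kb / 1000) as f64 / mem_total_in_mb as f64 * 100_f64;
    println!("{:.1}%", pct); // prints "12.5%"
}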

View file

@@ -260,12 +260,7 @@ fn main() -> error::Result<()> {
                 }
                 if app.to_be_resorted {
-                    data_collection::processes::sort_processes(
-                        &mut app.data.list_of_processes,
-                        &app.process_sorting_type,
-                        app.process_sorting_reverse,
-                    );
-                    app.canvas_data.process_data = update_process_row(&app.data);
+                    handle_process_sorting(&mut app);
                     app.to_be_resorted = false;
                 }
             }
@@ -280,50 +275,7 @@ fn main() -> error::Result<()> {
                 if !app.is_frozen {
                     app.data = *data;
 
-                    if app.is_grouped() {
-                        // Handle combining multi-pid processes to form one entry in table.
-                        // This was done this way to save time and avoid code
-                        // duplication... sorry future me. Really.
-
-                        // First, convert this all into a BTreeMap. The key is by name. This
-                        // pulls double duty by allowing us to combine entries AND it sorts!
-
-                        // Fields for tuple: CPU%, MEM%, PID_VEC
-                        let mut process_map: BTreeMap<String, (f64, f64, Vec<u32>)> =
-                            BTreeMap::new();
-                        for process in &app.data.list_of_processes {
-                            if let Some(mem_usage) = process.mem_usage_percent {
-                                let entry_val = process_map
-                                    .entry(process.command.clone())
-                                    .or_insert((0.0, 0.0, vec![]));
-                                entry_val.0 += process.cpu_usage_percent;
-                                entry_val.1 += mem_usage;
-                                entry_val.2.push(process.pid);
-                            }
-                        }
-
-                        // Now... turn this back into the exact same vector... but now with merged processes!
-                        app.data.list_of_processes = process_map
-                            .iter()
-                            .map(|(name, data)| {
-                                ProcessData {
-                                    pid: 0, // Irrelevant
-                                    cpu_usage_percent: data.0,
-                                    mem_usage_percent: Some(data.1),
-                                    mem_usage_kb: None,
-                                    command: name.clone(),
-                                    pid_vec: Some(data.2.clone()),
-                                }
-                            })
-                            .collect::<Vec<_>>();
-                    }
-
-                    data_collection::processes::sort_processes(
-                        &mut app.data.list_of_processes,
-                        &app.process_sorting_type,
-                        app.process_sorting_reverse,
-                    );
+                    handle_process_sorting(&mut app);
 
                     // Convert all data into tui components
                     let network_data = update_network_data_points(&app.data);
@@ -336,12 +288,12 @@ fn main() -> error::Result<()> {
                     app.canvas_data.disk_data = update_disk_row(&app.data);
                     app.canvas_data.temp_sensor_data =
                         update_temp_row(&app.data, &app.temperature_type);
-                    app.canvas_data.process_data = update_process_row(&app.data);
                     app.canvas_data.mem_data = update_mem_data_points(&app.data);
                     app.canvas_data.memory_labels = update_mem_data_values(&app.data);
                     app.canvas_data.swap_data = update_swap_data_points(&app.data);
                     app.canvas_data.cpu_data =
                         update_cpu_data_points(app.show_average_cpu, &app.data);
 
                     //debug!("Update event complete.");
}
@@ -359,6 +311,78 @@ fn main() -> error::Result<()> {
     Ok(())
 }
 
+type TempProcess = (f64, Option<f64>, Option<u64>, Vec<u32>);
+
+fn handle_process_sorting(app: &mut app::App) {
+    // Handle combining multi-pid processes to form one entry in table.
+    // This was done this way to save time and avoid code
+    // duplication... sorry future me. Really.
+
+    // First, convert this all into a BTreeMap. The key is by name. This
+    // pulls double duty by allowing us to combine entries AND it sorts!
+
+    // Fields for tuple: CPU%, MEM%, MEM_KB, PID_VEC
+    let mut process_map: BTreeMap<String, TempProcess> = BTreeMap::new();
+    for process in &app.data.list_of_processes {
+        let entry_val = process_map
+            .entry(process.command.clone())
+            .or_insert((0.0, None, None, vec![]));
+        if let Some(mem_usage) = process.mem_usage_percent {
+            entry_val.0 += process.cpu_usage_percent;
+            if let Some(m) = &mut entry_val.1 {
+                *m += mem_usage;
+            } else {
+                // Without this arm the accumulator would stay `None` forever,
+                // since entries start as (0.0, None, None, vec![]).
+                entry_val.1 = Some(mem_usage);
+            }
+            entry_val.3.push(process.pid);
+        } else if let Some(mem_usage_kb) = process.mem_usage_kb {
+            entry_val.0 += process.cpu_usage_percent;
+            if let Some(m) = &mut entry_val.2 {
+                *m += mem_usage_kb;
+            } else {
+                entry_val.2 = Some(mem_usage_kb);
+            }
+            entry_val.3.push(process.pid);
+        }
+    }
+
+    // Now... turn this back into the exact same vector... but now with merged processes!
+    app.data.grouped_list_of_processes = Some(
+        process_map
+            .iter()
+            .map(|(name, data)| {
+                ProcessData {
+                    pid: 0, // Irrelevant
+                    cpu_usage_percent: data.0,
+                    mem_usage_percent: data.1,
+                    mem_usage_kb: data.2,
+                    command: name.clone(),
+                    pid_vec: Some(data.3.clone()),
+                }
+            })
+            .collect::<Vec<_>>(),
+    );
+
+    if let Some(grouped_list_of_processes) = &mut app.data.grouped_list_of_processes {
+        data_collection::processes::sort_processes(
+            grouped_list_of_processes,
+            if let data_collection::processes::ProcessSorting::PID = &app.process_sorting_type {
+                &data_collection::processes::ProcessSorting::CPU // Go back to default; PID is meaningless for merged groups
+            } else {
+                &app.process_sorting_type
+            },
+            app.process_sorting_reverse,
+        );
+    }
+
+    data_collection::processes::sort_processes(
+        &mut app.data.list_of_processes,
+        &app.process_sorting_type,
+        app.process_sorting_reverse,
+    );
+
+    let tuple_results = update_process_row(&app.data);
+    app.canvas_data.process_data = tuple_results.0;
+    app.canvas_data.grouped_process_data = tuple_results.1;
+}
+
 fn cleanup(
     terminal: &mut tui::terminal::Terminal<tui::backend::CrosstermBackend<std::io::Stdout>>,
 ) -> error::Result<()> {
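
For reference, the BTreeMap trick that handle_process_sorting's comments describe really does pull double duty: the entry API merges rows that share a command name, and iteration comes back sorted by that name. A stripped-down, runnable sketch of the idea (fields simplified; the real TempProcess also tracks MEM% and MEM_KB, and the real code then re-sorts by the user's chosen column, falling back from PID to CPU because merged rows all carry pid 0):

use std::collections::BTreeMap;

struct Proc {
    pid: u32,
    command: String,
    cpu: f64,
}

// Merge per-PID rows into one row per command name; BTreeMap iteration
// order yields the name-sorted result for free.
fn group_by_name(procs: &[Proc]) -> Vec<(String, f64, Vec<u32>)> {
    let mut map: BTreeMap<String, (f64, Vec<u32>)> = BTreeMap::new();
    for p in procs {
        let entry = map.entry(p.command.clone()).or_insert((0.0, vec![]));
        entry.0 += p.cpu; // accumulate CPU across all PIDs sharing a name
        entry.1.push(p.pid); // remember which PIDs were merged (group_count)
    }
    map.into_iter().map(|(name, (cpu, pids))| (name, cpu, pids)).collect()
}

fn main() {
    let procs = vec![
        Proc { pid: 10, command: "firefox".into(), cpu: 3.0 },
        Proc { pid: 11, command: "firefox".into(), cpu: 2.5 },
        Proc { pid: 42, command: "bash".into(), cpu: 0.1 },
    ];
    let grouped = group_by_name(&procs);
    assert_eq!(grouped[0].0, "bash"); // sorted by command name
    assert_eq!(grouped[1].2, vec![10, 11]); // both firefox PIDs merged
}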