Base building blocks of grouped process functionality

This commit is contained in:
ClementTsang 2020-01-07 23:39:52 -05:00
parent fbadc7d9c5
commit 5a32404ed4
7 changed files with 124 additions and 27 deletions

1
.gitignore vendored
View file

@@ -10,3 +10,4 @@ Cargo.lock
**/*.rs.bk **/*.rs.bk
*.log *.log
arch

View file

@@ -55,6 +55,7 @@ pub struct App {
pub use_current_cpu_total: bool, pub use_current_cpu_total: bool,
last_key_press: Instant, last_key_press: Instant,
pub canvas_data: canvas::CanvasData, pub canvas_data: canvas::CanvasData,
enable_grouping: bool,
} }
impl App { impl App {
@@ -92,6 +93,7 @@ impl App {
use_current_cpu_total, use_current_cpu_total,
last_key_press: Instant::now(), last_key_press: Instant::now(),
canvas_data: canvas::CanvasData::default(), canvas_data: canvas::CanvasData::default(),
enable_grouping: false,
} }
} }
@@ -112,6 +114,22 @@ impl App {
self.show_help || self.show_dd self.show_help || self.show_dd
} }
/// Flips process-grouping mode on or off.
///
/// Toggling is deliberately restricted: it is a no-op while any dialog is
/// open, and it only applies when the process widget is the active position.
pub fn toggle_grouping(&mut self) {
    // Guard: grouping may not be changed from inside a dialog.
    if self.is_in_dialog() {
        return;
    }
    // Guard: grouping is only meaningful in the process widget.
    if let ApplicationPosition::Process = self.current_application_position {
        self.enable_grouping = !self.enable_grouping;
    }
    // TODO: Note that we have to handle this in a way such that it will only update
    // with the correct formatted vectors... that is, only update the canvas after...?
}
/// Reports whether process-grouping mode is currently enabled.
pub fn is_grouped(&self) -> bool {
    self.enable_grouping
}
/// One of two functions allowed to run while in a dialog... /// One of two functions allowed to run while in a dialog...
pub fn on_enter(&mut self) { pub fn on_enter(&mut self) {
if self.show_dd { if self.show_dd {
@@ -191,15 +209,18 @@ impl App {
self.currently_selected_process_position = 0; self.currently_selected_process_position = 0;
} }
'p' => { 'p' => {
match self.process_sorting_type { // Disable if grouping
processes::ProcessSorting::PID => self.process_sorting_reverse = !self.process_sorting_reverse, if !self.enable_grouping {
_ => { match self.process_sorting_type {
self.process_sorting_type = processes::ProcessSorting::PID; processes::ProcessSorting::PID => self.process_sorting_reverse = !self.process_sorting_reverse,
self.process_sorting_reverse = false; _ => {
self.process_sorting_type = processes::ProcessSorting::PID;
self.process_sorting_reverse = false;
}
} }
self.to_be_resorted = true;
self.currently_selected_process_position = 0;
} }
self.to_be_resorted = true;
self.currently_selected_process_position = 0;
} }
'n' => { 'n' => {
match self.process_sorting_type { match self.process_sorting_type {
@@ -227,7 +248,9 @@
pub fn kill_highlighted_process(&mut self) -> Result<()> { pub fn kill_highlighted_process(&mut self) -> Result<()> {
// Technically unnecessary but this is a good check... // Technically unnecessary but this is a good check...
if let ApplicationPosition::Process = self.current_application_position { if let ApplicationPosition::Process = self.current_application_position {
if let Some(current_selected_process) = &(self.to_delete_process) { if self.enable_grouping {
// TODO: Enable grouping pid deletion
} else if let Some(current_selected_process) = &(self.to_delete_process) {
process_killer::kill_process_given_pid(current_selected_process.pid)?; process_killer::kill_process_given_pid(current_selected_process.pid)?;
} }
self.to_delete_process = None; self.to_delete_process = None;

View file

@@ -33,7 +33,7 @@ pub struct Data {
pub list_of_temperature_sensor: Vec<temperature::TempData>, pub list_of_temperature_sensor: Vec<temperature::TempData>,
pub network: Vec<network::NetworkData>, pub network: Vec<network::NetworkData>,
pub list_of_processes: Vec<processes::ProcessData>, // Only need to keep a list of processes... pub list_of_processes: Vec<processes::ProcessData>, // Only need to keep a list of processes...
pub list_of_disks: Vec<disks::DiskData>, // Only need to keep a list of disks and their data pub list_of_disks: Vec<disks::DiskData>, // Only need to keep a list of disks and their data
} }
pub struct DataState { pub struct DataState {
@@ -105,7 +105,10 @@ impl DataState {
.await, .await,
&mut self.data.network, &mut self.data.network,
); );
push_if_valid(&cpu::get_cpu_data_list(&self.sys), &mut self.data.list_of_cpu_packages); push_if_valid(
&cpu::get_cpu_data_list(&self.sys),
&mut self.data.list_of_cpu_packages,
);
push_if_valid(&mem::get_mem_data_list().await, &mut self.data.memory); push_if_valid(&mem::get_mem_data_list().await, &mut self.data.memory);
push_if_valid(&mem::get_swap_data_list().await, &mut self.data.swap); push_if_valid(&mem::get_swap_data_list().await, &mut self.data.swap);
@@ -120,8 +123,14 @@ impl DataState {
&mut self.data.list_of_processes, &mut self.data.list_of_processes,
); );
set_if_valid(&disks::get_disk_usage_list().await, &mut self.data.list_of_disks); set_if_valid(
push_if_valid(&disks::get_io_usage_list(false).await, &mut self.data.list_of_io); &disks::get_disk_usage_list().await,
&mut self.data.list_of_disks,
);
push_if_valid(
&disks::get_io_usage_list(false).await,
&mut self.data.list_of_io,
);
set_if_valid( set_if_valid(
&temperature::get_temperature_data(&self.sys, &self.temperature_type).await, &temperature::get_temperature_data(&self.sys, &self.temperature_type).await,
&mut self.data.list_of_temperature_sensor, &mut self.data.list_of_temperature_sensor,
@@ -139,7 +148,9 @@ impl DataState {
let stale_list: Vec<_> = self let stale_list: Vec<_> = self
.prev_pid_stats .prev_pid_stats
.iter() .iter()
.filter(|&(_, &v)| current_instant.duration_since(v.1).as_secs() > self.stale_max_seconds) .filter(|&(_, &v)| {
current_instant.duration_since(v.1).as_secs() > self.stale_max_seconds
})
.map(|(k, _)| k.clone()) .map(|(k, _)| k.clone())
.collect(); .collect();
for stale in stale_list { for stale in stale_list {
@@ -151,7 +162,10 @@ impl DataState {
.list_of_cpu_packages .list_of_cpu_packages
.iter() .iter()
.cloned() .cloned()
.filter(|entry| current_instant.duration_since(entry.instant).as_secs() <= self.stale_max_seconds) .filter(|entry| {
current_instant.duration_since(entry.instant).as_secs()
<= self.stale_max_seconds
})
.collect::<Vec<_>>(); .collect::<Vec<_>>();
self.data.memory = self self.data.memory = self
@@ -159,7 +173,10 @@ impl DataState {
.memory .memory
.iter() .iter()
.cloned() .cloned()
.filter(|entry| current_instant.duration_since(entry.instant).as_secs() <= self.stale_max_seconds) .filter(|entry| {
current_instant.duration_since(entry.instant).as_secs()
<= self.stale_max_seconds
})
.collect::<Vec<_>>(); .collect::<Vec<_>>();
self.data.swap = self self.data.swap = self
@@ -167,7 +184,10 @@ impl DataState {
.swap .swap
.iter() .iter()
.cloned() .cloned()
.filter(|entry| current_instant.duration_since(entry.instant).as_secs() <= self.stale_max_seconds) .filter(|entry| {
current_instant.duration_since(entry.instant).as_secs()
<= self.stale_max_seconds
})
.collect::<Vec<_>>(); .collect::<Vec<_>>();
self.data.network = self self.data.network = self
@@ -175,7 +195,10 @@ impl DataState {
.network .network
.iter() .iter()
.cloned() .cloned()
.filter(|entry| current_instant.duration_since(entry.instant).as_secs() <= self.stale_max_seconds) .filter(|entry| {
current_instant.duration_since(entry.instant).as_secs()
<= self.stale_max_seconds
})
.collect::<Vec<_>>(); .collect::<Vec<_>>();
self.data.list_of_io = self self.data.list_of_io = self
@@ -183,7 +206,10 @@ impl DataState {
.list_of_io .list_of_io
.iter() .iter()
.cloned() .cloned()
.filter(|entry| current_instant.duration_since(entry.instant).as_secs() <= self.stale_max_seconds) .filter(|entry| {
current_instant.duration_since(entry.instant).as_secs()
<= self.stale_max_seconds
})
.collect::<Vec<_>>(); .collect::<Vec<_>>();
self.last_clean = current_instant; self.last_clean = current_instant;

View file

@@ -25,6 +25,7 @@ pub struct ProcessData {
pub mem_usage_percent: Option<f64>, pub mem_usage_percent: Option<f64>,
pub mem_usage_kb: Option<u64>, pub mem_usage_kb: Option<u64>,
pub command: String, pub command: String,
pub pid_vec: Option<Vec<u32>>, // Note that this is literally never used unless we are in grouping mode. This is to save rewriting time.
} }
fn cpu_usage_calculation(prev_idle: &mut f64, prev_non_idle: &mut f64) -> error::Result<(f64, f64)> { fn cpu_usage_calculation(prev_idle: &mut f64, prev_non_idle: &mut f64) -> error::Result<(f64, f64)> {
@@ -174,6 +175,7 @@ fn convert_ps(
mem_usage_percent: None, mem_usage_percent: None,
mem_usage_kb: None, mem_usage_kb: None,
cpu_usage_percent: 0_f64, cpu_usage_percent: 0_f64,
pid_vec: None,
}); });
} }
@@ -187,6 +189,7 @@ fn convert_ps(
mem_usage_percent, mem_usage_percent,
mem_usage_kb: None, mem_usage_kb: None,
cpu_usage_percent: linux_cpu_usage(pid, cpu_usage, cpu_percentage, prev_pid_stats, use_current_cpu_total)?, cpu_usage_percent: linux_cpu_usage(pid, cpu_usage, cpu_percentage, prev_pid_stats, use_current_cpu_total)?,
pid_vec: None,
}) })
} }
@@ -248,6 +251,7 @@ pub fn get_sorted_processes_list(
mem_usage_percent: None, mem_usage_percent: None,
mem_usage_kb: Some(process_val.memory()), mem_usage_kb: Some(process_val.memory()),
cpu_usage_percent: f64::from(process_val.cpu_usage()), cpu_usage_percent: f64::from(process_val.cpu_usage()),
pid_vec: None,
}); });
} }
} }
@@ -256,18 +260,17 @@ pub fn get_sorted_processes_list(
} }
pub fn sort_processes(process_vector: &mut Vec<ProcessData>, sorting_method: &ProcessSorting, reverse_order: bool) { pub fn sort_processes(process_vector: &mut Vec<ProcessData>, sorting_method: &ProcessSorting, reverse_order: bool) {
// Always sort alphabetically first!
process_vector.sort_by(|a, b| get_ordering(&a.command, &b.command, false));
match sorting_method { match sorting_method {
// Always sort alphabetically first!
ProcessSorting::CPU => { ProcessSorting::CPU => {
process_vector.sort_by(|a, b| get_ordering(&a.command, &b.command, false));
process_vector.sort_by(|a, b| get_ordering(a.cpu_usage_percent, b.cpu_usage_percent, reverse_order)); process_vector.sort_by(|a, b| get_ordering(a.cpu_usage_percent, b.cpu_usage_percent, reverse_order));
} }
ProcessSorting::MEM => { ProcessSorting::MEM => {
process_vector.sort_by(|a, b| get_ordering(&a.command, &b.command, false));
process_vector.sort_by(|a, b| get_ordering(a.mem_usage_percent, b.mem_usage_percent, reverse_order)); process_vector.sort_by(|a, b| get_ordering(a.mem_usage_percent, b.mem_usage_percent, reverse_order));
} }
ProcessSorting::PID => { ProcessSorting::PID => {
process_vector.sort_by(|a, b| get_ordering(&a.command, &b.command, false));
process_vector.sort_by(|a, b| get_ordering(a.pid, b.pid, reverse_order)); process_vector.sort_by(|a, b| get_ordering(a.pid, b.pid, reverse_order));
} }
ProcessSorting::NAME => process_vector.sort_by(|a, b| get_ordering(&a.command, &b.command, reverse_order)), ProcessSorting::NAME => process_vector.sort_by(|a, b| get_ordering(&a.command, &b.command, reverse_order)),

View file

@@ -648,7 +648,11 @@ fn draw_processes_table<B: backend::Backend>(f: &mut Frame<B>, app_state: &mut a
let process_rows = sliced_vec.iter().map(|process| { let process_rows = sliced_vec.iter().map(|process| {
let stringified_process_vec: Vec<String> = vec![ let stringified_process_vec: Vec<String> = vec![
process.pid.to_string(), if app_state.is_grouped() {
process.group_count.to_string()
} else {
process.pid.to_string()
},
process.name.clone(), process.name.clone(),
process.cpu_usage.clone(), process.cpu_usage.clone(),
process.mem_usage.clone(), process.mem_usage.clone(),
@@ -674,7 +678,7 @@ fn draw_processes_table<B: backend::Backend>(f: &mut Frame<B>, app_state: &mut a
{ {
use app::data_collection::processes::ProcessSorting; use app::data_collection::processes::ProcessSorting;
let mut pid = "PID(p)".to_string(); let mut pid_or_name = if app_state.is_grouped() { "Count" } else { "PID(p)" }.to_string();
let mut name = "Name(n)".to_string(); let mut name = "Name(n)".to_string();
let mut cpu = "CPU%(c)".to_string(); let mut cpu = "CPU%(c)".to_string();
let mut mem = "Mem%(m)".to_string(); let mut mem = "Mem%(m)".to_string();
@@ -688,11 +692,11 @@ fn draw_processes_table<B: backend::Backend>(f: &mut Frame<B>, app_state: &mut a
match app_state.process_sorting_type { match app_state.process_sorting_type {
ProcessSorting::CPU => cpu += &direction_val, ProcessSorting::CPU => cpu += &direction_val,
ProcessSorting::MEM => mem += &direction_val, ProcessSorting::MEM => mem += &direction_val,
ProcessSorting::PID => pid += &direction_val, ProcessSorting::PID => pid_or_name += &direction_val,
ProcessSorting::NAME => name += &direction_val, ProcessSorting::NAME => name += &direction_val,
}; };
Table::new([pid, name, cpu, mem].iter(), process_rows) Table::new([pid_or_name, name, cpu, mem].iter(), process_rows)
.block( .block(
Block::default() Block::default()
.title("Processes") .title("Processes")

View file

@@ -21,6 +21,7 @@ pub struct ConvertedProcessData {
pub name: String, pub name: String,
pub cpu_usage: String, pub cpu_usage: String,
pub mem_usage: String, pub mem_usage: String,
pub group_count: u32,
} }
#[derive(Clone, Default, Debug)] #[derive(Clone, Default, Debug)]
@@ -141,6 +142,7 @@ pub fn update_process_row(app_data: &data_collection::Data) -> Vec<ConvertedProc
0_f64 0_f64
} }
), ),
group_count: if let Some(pid_vec) = &process.pid_vec { pid_vec.len() as u32 } else { 0 },
}); });
} }

View file

@@ -33,8 +33,10 @@ mod constants;
mod data_conversion; mod data_conversion;
use app::data_collection; use app::data_collection;
use app::data_collection::processes::ProcessData;
use constants::TICK_RATE_IN_MILLISECONDS; use constants::TICK_RATE_IN_MILLISECONDS;
use data_conversion::*; use data_conversion::*;
use std::collections::BTreeMap;
use utils::error::{self, BottomError}; use utils::error::{self, BottomError};
enum Event<I, J> { enum Event<I, J> {
@@ -213,7 +215,7 @@ fn main() -> error::Result<()> {
KeyCode::Char(uncaught_char) => app.on_char_key(uncaught_char), KeyCode::Char(uncaught_char) => app.on_char_key(uncaught_char),
KeyCode::Esc => app.reset(), KeyCode::Esc => app.reset(),
KeyCode::Enter => app.on_enter(), KeyCode::Enter => app.on_enter(),
KeyCode::Tab => {} KeyCode::Tab => app.toggle_grouping(),
_ => {} _ => {}
} }
} else { } else {
@@ -274,6 +276,42 @@ fn main() -> error::Result<()> {
if !app.is_frozen { if !app.is_frozen {
app.data = *data; app.data = *data;
if app.is_grouped() {
// Handle combining multi-pid processes to form one entry in table.
// This was done this way to save time and avoid code
// duplication... sorry future me. Really.
// First, convert this all into a BTreeMap. The key is by name. This
// pulls double duty by allowing us to combine entries AND it sorts!
// Fields for tuple: CPU%, MEM%, PID_VEC
let mut process_map: BTreeMap<String, (f64, f64, Vec<u32>)> = BTreeMap::new();
for process in &app.data.list_of_processes {
if let Some(mem_usage) = process.mem_usage_percent {
let entry_val = process_map.entry(process.command.clone()).or_insert((0.0, 0.0, vec![]));
entry_val.0 += process.cpu_usage_percent;
entry_val.1 += mem_usage;
entry_val.2.push(process.pid);
}
}
// Now... turn this back into the exact same vector... but now with merged processes!
app.data.list_of_processes = process_map
.iter()
.map(|(name, data)| {
ProcessData {
pid: 0, // Irrelevant
cpu_usage_percent: data.0,
mem_usage_percent: Some(data.1),
mem_usage_kb: None,
command: name.clone(),
pid_vec: Some(data.2.clone()),
}
})
.collect::<Vec<_>>();
}
data_collection::processes::sort_processes( data_collection::processes::sort_processes(
&mut app.data.list_of_processes, &mut app.data.list_of_processes,
&app.process_sorting_type, &app.process_sorting_type,