Mirror of https://github.com/ClementTsang/bottom (synced 2024-11-10 14:44:18 +00:00)

Merge pull request #7 from ClementTsang/optimization_and_refactoring_branch
Optimization and refactoring branch

Commit 35f78a7e91
20 changed files with 1291 additions and 1236 deletions
5  .gitignore (vendored)

@@ -10,4 +10,7 @@ Cargo.lock
**/*.rs.bk
*.log
.vscode
.vscode
rust-unmangle
*.svg
*.data
10  Cargo.toml

@@ -24,17 +24,17 @@ chrono = "0.4.10"
clap = "2.33.0"
crossterm = "0.14"
failure = "0.1.6"
fern = "0.5"
fern = "0.5.9"
futures-timer = "2.0.2"
futures = "0.3.1"
heim = "0.0.9"
log = "0.4"
log = "0.4.8"
regex = "1.3.3"
sysinfo = "0.9" #0.9 seems to be the last working version for my Ryzen PC...
sysinfo = "0.9.6" #0.9 seems to be the last working version for my Ryzen PC...
tokio = "0.2.9"
winapi = "0.3"
winapi = "0.3.8"
tui = {version = "0.8", features = ["crossterm"], default-features = false }
lazy_static = "1.4"
lazy_static = "1.4.0"

[dev-dependencies]
assert_cmd = "0.12"
@@ -80,7 +80,9 @@ Run using `btm`.

- `-g`, `--group` will group together processes with the same name by default (equivalent to pressing `Tab`).

- `-s`, `--simple_search` will default the search to simple search rather than regex search.
- `-i`, `--case_insensitive` will default to not matching case when searching processes.

### Keybindings

@@ -126,7 +128,7 @@ Run using `btm`.

- `Ctrl-p` or `Ctrl-n` to switch between searching for PID and name respectively.

- `Ctrl-s` to toggle between a simple search and a regex search.
- `Tab` to toggle whether to ignore case.

- `Ctrl-a` and `Ctrl-e` to jump to the start and end of the search bar respectively.
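As a usage note for the new launch flags above (`-g`, `-s`, `-i`): assuming they combine as described, launching with `btm -g -i` would start with processes grouped by name and with case-insensitive process search enabled by default.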
342  src/app.rs

@@ -1,13 +1,16 @@
|
|||
pub mod data_collection;
|
||||
use data_collection::{processes, temperature};
|
||||
pub mod data_harvester;
|
||||
use data_harvester::{processes, temperature};
|
||||
use std::time::Instant;
|
||||
|
||||
use crate::{canvas, constants, data_conversion::ConvertedProcessData, utils::error::Result};
|
||||
pub mod data_farmer;
|
||||
use data_farmer::*;
|
||||
|
||||
use crate::{canvas, constants, utils::error::Result};
|
||||
|
||||
mod process_killer;
|
||||
|
||||
#[derive(Clone, Copy)]
|
||||
pub enum ApplicationPosition {
|
||||
#[derive(Debug, Clone, Copy)]
|
||||
pub enum WidgetPosition {
|
||||
Cpu,
|
||||
Mem,
|
||||
Disk,
|
||||
|
@ -30,6 +33,23 @@ lazy_static! {
|
|||
regex::Regex::new(".*");
|
||||
}
|
||||
|
||||
/// AppConfigFields is meant to cover basic fields that would normally be set
|
||||
/// by config files or launch options. Don't need to be mutable (set and forget).
|
||||
pub struct AppConfigFields {
|
||||
pub update_rate_in_milliseconds: u64,
|
||||
pub temperature_type: temperature::TemperatureType,
|
||||
pub use_dot: bool,
|
||||
}
|
||||
|
||||
/// AppScrollWidgetState deals with fields for a scrollable app's current state.
|
||||
pub struct AppScrollWidgetState {
|
||||
pub widget_scroll_position: i64,
|
||||
}
|
||||
|
||||
/// AppSearchState only deals with the search's state.
|
||||
pub struct AppSearchState {}
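As a rough, hypothetical illustration (not part of this diff), the new set-and-forget `AppConfigFields` struct above would presumably be filled in once from parsed launch options; the values below are placeholders chosen only for the example:

```rust
// Hypothetical construction of AppConfigFields; none of these values are the
// application's actual defaults, they only show the shape of the struct.
let config = AppConfigFields {
    update_rate_in_milliseconds: 1000,
    temperature_type: temperature::TemperatureType::Celsius,
    use_dot: false,
};
```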
|
||||
|
||||
// TODO: [OPT] Group like fields together... this is kinda gross to step through
|
||||
pub struct App {
|
||||
// Sorting
|
||||
pub process_sorting_type: processes::ProcessSorting,
|
||||
|
@ -48,27 +68,28 @@ pub struct App {
|
|||
pub temperature_type: temperature::TemperatureType,
|
||||
pub update_rate_in_milliseconds: u64,
|
||||
pub show_average_cpu: bool,
|
||||
pub current_application_position: ApplicationPosition,
|
||||
pub data: data_collection::Data,
|
||||
pub current_widget_selected: WidgetPosition,
|
||||
pub data: data_harvester::Data,
|
||||
awaiting_second_char: bool,
|
||||
second_char: char,
|
||||
pub use_dot: bool,
|
||||
pub show_help: bool,
|
||||
pub show_dd: bool,
|
||||
pub dd_err: Option<String>,
|
||||
to_delete_process_list: Option<Vec<ConvertedProcessData>>,
|
||||
to_delete_process_list: Option<(String, Vec<u32>)>,
|
||||
pub is_frozen: bool,
|
||||
pub left_legend: bool,
|
||||
pub use_current_cpu_total: bool,
|
||||
last_key_press: Instant,
|
||||
pub canvas_data: canvas::CanvasData,
|
||||
pub canvas_data: canvas::DisplayableData,
|
||||
enable_grouping: bool,
|
||||
enable_searching: bool, // TODO: [OPT] group together?
|
||||
enable_searching: bool,
|
||||
current_search_query: String,
|
||||
searching_pid: bool,
|
||||
pub use_simple: bool,
|
||||
pub ignore_case: bool,
|
||||
current_regex: std::result::Result<regex::Regex, regex::Error>,
|
||||
current_cursor_position: usize,
|
||||
pub data_collection: DataCollection,
|
||||
}
|
||||
|
||||
impl App {
|
||||
|
@ -84,7 +105,7 @@ impl App {
|
|||
temperature_type,
|
||||
update_rate_in_milliseconds,
|
||||
show_average_cpu,
|
||||
current_application_position: ApplicationPosition::Process,
|
||||
current_widget_selected: WidgetPosition::Process,
|
||||
scroll_direction: ScrollDirection::DOWN,
|
||||
currently_selected_process_position: 0,
|
||||
currently_selected_disk_position: 0,
|
||||
|
@ -94,7 +115,7 @@ impl App {
|
|||
previous_disk_position: 0,
|
||||
previous_temp_position: 0,
|
||||
previous_cpu_table_position: 0,
|
||||
data: data_collection::Data::default(),
|
||||
data: data_harvester::Data::default(),
|
||||
awaiting_second_char: false,
|
||||
second_char: ' ',
|
||||
use_dot,
|
||||
|
@ -106,14 +127,15 @@ impl App {
|
|||
left_legend,
|
||||
use_current_cpu_total,
|
||||
last_key_press: Instant::now(),
|
||||
canvas_data: canvas::CanvasData::default(),
|
||||
canvas_data: canvas::DisplayableData::default(),
|
||||
enable_grouping: false,
|
||||
enable_searching: false,
|
||||
current_search_query: String::default(),
|
||||
searching_pid: false,
|
||||
use_simple: false,
|
||||
ignore_case: false,
|
||||
current_regex: BASE_REGEX.clone(), //TODO: [OPT] seems like a thing we can switch to lifetimes to avoid cloning
|
||||
current_cursor_position: 0,
|
||||
data_collection: DataCollection::default(),
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -122,7 +144,7 @@ impl App {
|
|||
self.show_help = false;
|
||||
self.show_dd = false;
|
||||
if self.enable_searching {
|
||||
self.current_application_position = ApplicationPosition::Process;
|
||||
self.current_widget_selected = WidgetPosition::Process;
|
||||
self.enable_searching = false;
|
||||
}
|
||||
self.current_search_query = String::new();
|
||||
|
@ -139,7 +161,7 @@ impl App {
|
|||
self.to_delete_process_list = None;
|
||||
self.dd_err = None;
|
||||
} else if self.enable_searching {
|
||||
self.current_application_position = ApplicationPosition::Process;
|
||||
self.current_widget_selected = WidgetPosition::Process;
|
||||
self.enable_searching = false;
|
||||
}
|
||||
}
|
||||
|
@ -156,16 +178,18 @@ impl App {
|
|||
pub fn toggle_grouping(&mut self) {
|
||||
// Disallow usage whilst in a dialog and only in processes
|
||||
if !self.is_in_dialog() {
|
||||
if let ApplicationPosition::Process = self.current_application_position {
|
||||
if let WidgetPosition::Process = self.current_widget_selected {
|
||||
self.enable_grouping = !(self.enable_grouping);
|
||||
self.update_process_gui = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub fn on_tab(&mut self) {
|
||||
match self.current_application_position {
|
||||
ApplicationPosition::Process => self.toggle_grouping(),
|
||||
ApplicationPosition::Disk => {}
|
||||
match self.current_widget_selected {
|
||||
WidgetPosition::Process => self.toggle_grouping(),
|
||||
WidgetPosition::Disk => {}
|
||||
WidgetPosition::ProcessSearch => self.toggle_ignore_case(),
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
|
@ -174,19 +198,13 @@ impl App {
|
|||
self.enable_grouping
|
||||
}
|
||||
|
||||
pub fn toggle_searching(&mut self) {
|
||||
pub fn enable_searching(&mut self) {
|
||||
if !self.is_in_dialog() {
|
||||
match self.current_application_position {
|
||||
ApplicationPosition::Process | ApplicationPosition::ProcessSearch => {
|
||||
if self.enable_searching {
|
||||
// Toggle off
|
||||
self.enable_searching = false;
|
||||
self.current_application_position = ApplicationPosition::Process;
|
||||
} else {
|
||||
// Toggle on
|
||||
self.enable_searching = true;
|
||||
self.current_application_position = ApplicationPosition::ProcessSearch;
|
||||
}
|
||||
match self.current_widget_selected {
|
||||
WidgetPosition::Process | WidgetPosition::ProcessSearch => {
|
||||
// Toggle on
|
||||
self.enable_searching = true;
|
||||
self.current_widget_selected = WidgetPosition::ProcessSearch;
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
|
@ -198,7 +216,7 @@ impl App {
|
|||
}
|
||||
|
||||
pub fn is_in_search_widget(&self) -> bool {
|
||||
if let ApplicationPosition::ProcessSearch = self.current_application_position {
|
||||
if let WidgetPosition::ProcessSearch = self.current_widget_selected {
|
||||
true
|
||||
} else {
|
||||
false
|
||||
|
@ -207,7 +225,7 @@ impl App {
|
|||
|
||||
pub fn search_with_pid(&mut self) {
|
||||
if !self.is_in_dialog() && self.is_searching() {
|
||||
if let ApplicationPosition::ProcessSearch = self.current_application_position {
|
||||
if let WidgetPosition::ProcessSearch = self.current_widget_selected {
|
||||
self.searching_pid = true;
|
||||
}
|
||||
}
|
||||
|
@ -215,7 +233,7 @@ impl App {
|
|||
|
||||
pub fn search_with_name(&mut self) {
|
||||
if !self.is_in_dialog() && self.is_searching() {
|
||||
if let ApplicationPosition::ProcessSearch = self.current_application_position {
|
||||
if let WidgetPosition::ProcessSearch = self.current_widget_selected {
|
||||
self.searching_pid = false;
|
||||
}
|
||||
}
|
||||
|
@ -229,15 +247,28 @@ impl App {
|
|||
&self.current_search_query
|
||||
}
|
||||
|
||||
pub fn toggle_simple_search(&mut self) {
|
||||
pub fn toggle_ignore_case(&mut self) {
|
||||
if !self.is_in_dialog() && self.is_searching() {
|
||||
if let ApplicationPosition::ProcessSearch = self.current_application_position {
|
||||
self.use_simple = !self.use_simple;
|
||||
if let WidgetPosition::ProcessSearch = self.current_widget_selected {
|
||||
self.ignore_case = !self.ignore_case;
|
||||
self.update_regex();
|
||||
self.update_process_gui = true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn update_regex(&mut self) {
|
||||
self.current_regex = if self.current_search_query.is_empty() {
|
||||
BASE_REGEX.clone()
|
||||
} else if self.ignore_case {
|
||||
regex::Regex::new(&(format!("(?i){}", self.current_search_query)))
|
||||
} else {
|
||||
regex::Regex::new(&(self.current_search_query))
|
||||
};
|
||||
self.previous_process_position = 0;
|
||||
self.currently_selected_process_position = 0;
|
||||
}
|
||||
|
||||
pub fn get_cursor_position(&self) -> usize {
|
||||
self.current_cursor_position
|
||||
}
|
||||
|
@ -260,18 +291,13 @@ impl App {
|
|||
}
|
||||
|
||||
pub fn on_backspace(&mut self) {
|
||||
if let ApplicationPosition::ProcessSearch = self.current_application_position {
|
||||
if let WidgetPosition::ProcessSearch = self.current_widget_selected {
|
||||
if self.current_cursor_position > 0 {
|
||||
self.current_cursor_position -= 1;
|
||||
self.current_search_query
|
||||
.remove(self.current_cursor_position);
|
||||
|
||||
// TODO: [OPT] this runs even while in simple... consider making this only run if they toggle back to regex!
|
||||
self.current_regex = if self.current_search_query.is_empty() {
|
||||
BASE_REGEX.clone()
|
||||
} else {
|
||||
regex::Regex::new(&(self.current_search_query))
|
||||
};
|
||||
self.update_regex();
|
||||
self.update_process_gui = true;
|
||||
}
|
||||
}
|
||||
|
@ -283,7 +309,7 @@ impl App {
|
|||
|
||||
pub fn on_up_key(&mut self) {
|
||||
if !self.is_in_dialog() {
|
||||
if let ApplicationPosition::ProcessSearch = self.current_application_position {
|
||||
if let WidgetPosition::ProcessSearch = self.current_widget_selected {
|
||||
} else {
|
||||
self.decrement_position_count();
|
||||
}
|
||||
|
@ -292,7 +318,7 @@ impl App {
|
|||
|
||||
pub fn on_down_key(&mut self) {
|
||||
if !self.is_in_dialog() {
|
||||
if let ApplicationPosition::ProcessSearch = self.current_application_position {
|
||||
if let WidgetPosition::ProcessSearch = self.current_widget_selected {
|
||||
} else {
|
||||
self.increment_position_count();
|
||||
}
|
||||
|
@ -301,7 +327,7 @@ impl App {
|
|||
|
||||
pub fn on_left_key(&mut self) {
|
||||
if !self.is_in_dialog() {
|
||||
if let ApplicationPosition::ProcessSearch = self.current_application_position {
|
||||
if let WidgetPosition::ProcessSearch = self.current_widget_selected {
|
||||
if self.current_cursor_position > 0 {
|
||||
self.current_cursor_position -= 1;
|
||||
}
|
||||
|
@ -311,7 +337,7 @@ impl App {
|
|||
|
||||
pub fn on_right_key(&mut self) {
|
||||
if !self.is_in_dialog() {
|
||||
if let ApplicationPosition::ProcessSearch = self.current_application_position {
|
||||
if let WidgetPosition::ProcessSearch = self.current_widget_selected {
|
||||
if self.current_cursor_position < self.current_search_query.len() {
|
||||
self.current_cursor_position += 1;
|
||||
}
|
||||
|
@ -321,7 +347,7 @@ impl App {
|
|||
|
||||
pub fn skip_cursor_beginning(&mut self) {
|
||||
if !self.is_in_dialog() {
|
||||
if let ApplicationPosition::ProcessSearch = self.current_application_position {
|
||||
if let WidgetPosition::ProcessSearch = self.current_widget_selected {
|
||||
self.current_cursor_position = 0;
|
||||
}
|
||||
}
|
||||
|
@ -329,7 +355,7 @@ impl App {
|
|||
|
||||
pub fn skip_cursor_end(&mut self) {
|
||||
if !self.is_in_dialog() {
|
||||
if let ApplicationPosition::ProcessSearch = self.current_application_position {
|
||||
if let WidgetPosition::ProcessSearch = self.current_widget_selected {
|
||||
self.current_cursor_position = self.current_search_query.len();
|
||||
}
|
||||
}
|
||||
|
@ -347,51 +373,55 @@ impl App {
|
|||
}
|
||||
self.last_key_press = current_key_press_inst;
|
||||
|
||||
if let ApplicationPosition::ProcessSearch = self.current_application_position {
|
||||
if let WidgetPosition::ProcessSearch = self.current_widget_selected {
|
||||
self.current_search_query
|
||||
.insert(self.current_cursor_position, caught_char);
|
||||
self.current_cursor_position += 1;
|
||||
|
||||
// TODO: [OPT] this runs even while in simple... consider making this only run if they toggle back to regex!
|
||||
self.current_regex = if self.current_search_query.is_empty() {
|
||||
BASE_REGEX.clone()
|
||||
} else {
|
||||
regex::Regex::new(&(self.current_search_query))
|
||||
};
|
||||
self.update_regex();
|
||||
|
||||
self.update_process_gui = true;
|
||||
} else {
|
||||
match caught_char {
|
||||
'/' => {
|
||||
self.toggle_searching();
|
||||
self.enable_searching();
|
||||
}
|
||||
'd' => {
|
||||
if let ApplicationPosition::Process = self.current_application_position {
|
||||
if let WidgetPosition::Process = self.current_widget_selected {
|
||||
if self.awaiting_second_char && self.second_char == 'd' {
|
||||
self.awaiting_second_char = false;
|
||||
self.second_char = ' ';
|
||||
let current_process = if self.is_grouped() {
|
||||
let mut res: Vec<ConvertedProcessData> = Vec::new();
|
||||
for pid in &self.canvas_data.grouped_process_data
|
||||
[self.currently_selected_process_position as usize]
|
||||
.group
|
||||
{
|
||||
let result = self
|
||||
.canvas_data
|
||||
.process_data
|
||||
.iter()
|
||||
.find(|p| p.pid == *pid);
|
||||
if let Some(process) = result {
|
||||
res.push((*process).clone());
|
||||
|
||||
if self.currently_selected_process_position
|
||||
< self.canvas_data.finalized_process_data.len() as i64
|
||||
{
|
||||
let current_process = if self.is_grouped() {
|
||||
let group_pids = &self.canvas_data.finalized_process_data
|
||||
[self.currently_selected_process_position as usize]
|
||||
.group_pids;
|
||||
|
||||
let mut ret = ("".to_string(), group_pids.clone());
|
||||
|
||||
for pid in group_pids {
|
||||
if let Some(process) =
|
||||
self.canvas_data.process_data.get(&pid)
|
||||
{
|
||||
ret.0 = process.name.clone();
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
res
|
||||
} else {
|
||||
vec![self.canvas_data.process_data
|
||||
[self.currently_selected_process_position as usize]
|
||||
.clone()]
|
||||
};
|
||||
self.to_delete_process_list = Some(current_process);
|
||||
self.show_dd = true;
|
||||
ret
|
||||
} else {
|
||||
let process = self.canvas_data.finalized_process_data
|
||||
[self.currently_selected_process_position as usize]
|
||||
.clone();
|
||||
(process.name.clone(), vec![process.pid])
|
||||
};
|
||||
|
||||
self.to_delete_process_list = Some(current_process);
|
||||
self.show_dd = true;
|
||||
}
|
||||
|
||||
self.reset_multi_tap_keys();
|
||||
} else {
|
||||
self.awaiting_second_char = true;
|
||||
|
@ -484,10 +514,10 @@ impl App {
|
|||
|
||||
pub fn kill_highlighted_process(&mut self) -> Result<()> {
|
||||
// Technically unnecessary but this is a good check...
|
||||
if let ApplicationPosition::Process = self.current_application_position {
|
||||
if let WidgetPosition::Process = self.current_widget_selected {
|
||||
if let Some(current_selected_processes) = &(self.to_delete_process_list) {
|
||||
for current_selected_process in current_selected_processes {
|
||||
process_killer::kill_process_given_pid(current_selected_process.pid)?;
|
||||
for pid in &current_selected_processes.1 {
|
||||
process_killer::kill_process_given_pid(*pid)?;
|
||||
}
|
||||
}
|
||||
self.to_delete_process_list = None;
|
||||
|
@ -495,7 +525,7 @@ impl App {
|
|||
Ok(())
|
||||
}
|
||||
|
||||
pub fn get_current_highlighted_process_list(&self) -> Option<Vec<ConvertedProcessData>> {
|
||||
pub fn get_to_delete_processes(&self) -> Option<(String, Vec<u32>)> {
|
||||
self.to_delete_process_list.clone()
|
||||
}
|
||||
|
||||
|
@ -511,12 +541,12 @@ impl App {
|
|||
// PROC_SEARCH -(up)> Disk, -(down)> PROC, -(left)> Network
|
||||
pub fn move_left(&mut self) {
|
||||
if !self.is_in_dialog() {
|
||||
self.current_application_position = match self.current_application_position {
|
||||
ApplicationPosition::Process => ApplicationPosition::Network,
|
||||
ApplicationPosition::ProcessSearch => ApplicationPosition::Network,
|
||||
ApplicationPosition::Disk => ApplicationPosition::Mem,
|
||||
ApplicationPosition::Temp => ApplicationPosition::Mem,
|
||||
_ => self.current_application_position,
|
||||
self.current_widget_selected = match self.current_widget_selected {
|
||||
WidgetPosition::Process => WidgetPosition::Network,
|
||||
WidgetPosition::ProcessSearch => WidgetPosition::Network,
|
||||
WidgetPosition::Disk => WidgetPosition::Mem,
|
||||
WidgetPosition::Temp => WidgetPosition::Mem,
|
||||
_ => self.current_widget_selected,
|
||||
};
|
||||
self.reset_multi_tap_keys();
|
||||
}
|
||||
|
@ -524,10 +554,10 @@ impl App {
|
|||
|
||||
pub fn move_right(&mut self) {
|
||||
if !self.is_in_dialog() {
|
||||
self.current_application_position = match self.current_application_position {
|
||||
ApplicationPosition::Mem => ApplicationPosition::Temp,
|
||||
ApplicationPosition::Network => ApplicationPosition::Process,
|
||||
_ => self.current_application_position,
|
||||
self.current_widget_selected = match self.current_widget_selected {
|
||||
WidgetPosition::Mem => WidgetPosition::Temp,
|
||||
WidgetPosition::Network => WidgetPosition::Process,
|
||||
_ => self.current_widget_selected,
|
||||
};
|
||||
self.reset_multi_tap_keys();
|
||||
}
|
||||
|
@ -535,20 +565,20 @@ impl App {
|
|||
|
||||
pub fn move_up(&mut self) {
|
||||
if !self.is_in_dialog() {
|
||||
self.current_application_position = match self.current_application_position {
|
||||
ApplicationPosition::Mem => ApplicationPosition::Cpu,
|
||||
ApplicationPosition::Network => ApplicationPosition::Mem,
|
||||
ApplicationPosition::Process => {
|
||||
self.current_widget_selected = match self.current_widget_selected {
|
||||
WidgetPosition::Mem => WidgetPosition::Cpu,
|
||||
WidgetPosition::Network => WidgetPosition::Mem,
|
||||
WidgetPosition::Process => {
|
||||
if self.is_searching() {
|
||||
ApplicationPosition::ProcessSearch
|
||||
WidgetPosition::ProcessSearch
|
||||
} else {
|
||||
ApplicationPosition::Disk
|
||||
WidgetPosition::Disk
|
||||
}
|
||||
}
|
||||
ApplicationPosition::ProcessSearch => ApplicationPosition::Disk,
|
||||
ApplicationPosition::Temp => ApplicationPosition::Cpu,
|
||||
ApplicationPosition::Disk => ApplicationPosition::Temp,
|
||||
_ => self.current_application_position,
|
||||
WidgetPosition::ProcessSearch => WidgetPosition::Disk,
|
||||
WidgetPosition::Temp => WidgetPosition::Cpu,
|
||||
WidgetPosition::Disk => WidgetPosition::Temp,
|
||||
_ => self.current_widget_selected,
|
||||
};
|
||||
self.reset_multi_tap_keys();
|
||||
}
|
||||
|
@ -556,19 +586,19 @@ impl App {
|
|||
|
||||
pub fn move_down(&mut self) {
|
||||
if !self.is_in_dialog() {
|
||||
self.current_application_position = match self.current_application_position {
|
||||
ApplicationPosition::Cpu => ApplicationPosition::Mem,
|
||||
ApplicationPosition::Mem => ApplicationPosition::Network,
|
||||
ApplicationPosition::Temp => ApplicationPosition::Disk,
|
||||
ApplicationPosition::Disk => {
|
||||
self.current_widget_selected = match self.current_widget_selected {
|
||||
WidgetPosition::Cpu => WidgetPosition::Mem,
|
||||
WidgetPosition::Mem => WidgetPosition::Network,
|
||||
WidgetPosition::Temp => WidgetPosition::Disk,
|
||||
WidgetPosition::Disk => {
|
||||
if self.is_searching() {
|
||||
ApplicationPosition::ProcessSearch
|
||||
WidgetPosition::ProcessSearch
|
||||
} else {
|
||||
ApplicationPosition::Process
|
||||
WidgetPosition::Process
|
||||
}
|
||||
}
|
||||
ApplicationPosition::ProcessSearch => ApplicationPosition::Process,
|
||||
_ => self.current_application_position,
|
||||
WidgetPosition::ProcessSearch => WidgetPosition::Process,
|
||||
_ => self.current_widget_selected,
|
||||
};
|
||||
self.reset_multi_tap_keys();
|
||||
}
|
||||
|
@ -576,11 +606,11 @@ impl App {
|
|||
|
||||
pub fn skip_to_first(&mut self) {
|
||||
if !self.is_in_dialog() {
|
||||
match self.current_application_position {
|
||||
ApplicationPosition::Process => self.currently_selected_process_position = 0,
|
||||
ApplicationPosition::Temp => self.currently_selected_temperature_position = 0,
|
||||
ApplicationPosition::Disk => self.currently_selected_disk_position = 0,
|
||||
ApplicationPosition::Cpu => self.currently_selected_cpu_table_position = 0,
|
||||
match self.current_widget_selected {
|
||||
WidgetPosition::Process => self.currently_selected_process_position = 0,
|
||||
WidgetPosition::Temp => self.currently_selected_temperature_position = 0,
|
||||
WidgetPosition::Disk => self.currently_selected_disk_position = 0,
|
||||
WidgetPosition::Cpu => self.currently_selected_cpu_table_position = 0,
|
||||
|
||||
_ => {}
|
||||
}
|
||||
|
@ -591,28 +621,22 @@ impl App {
|
|||
|
||||
pub fn skip_to_last(&mut self) {
|
||||
if !self.is_in_dialog() {
|
||||
match self.current_application_position {
|
||||
ApplicationPosition::Process => {
|
||||
match self.current_widget_selected {
|
||||
WidgetPosition::Process => {
|
||||
self.currently_selected_process_position =
|
||||
self.data.list_of_processes.len() as i64 - 1
|
||||
self.canvas_data.finalized_process_data.len() as i64 - 1
|
||||
}
|
||||
ApplicationPosition::Temp => {
|
||||
WidgetPosition::Temp => {
|
||||
self.currently_selected_temperature_position =
|
||||
self.data.list_of_temperature_sensor.len() as i64 - 1
|
||||
self.canvas_data.temp_sensor_data.len() as i64 - 1
|
||||
}
|
||||
ApplicationPosition::Disk => {
|
||||
self.currently_selected_disk_position = self.data.list_of_disks.len() as i64 - 1
|
||||
WidgetPosition::Disk => {
|
||||
self.currently_selected_disk_position =
|
||||
self.canvas_data.disk_data.len() as i64 - 1
|
||||
}
|
||||
ApplicationPosition::Cpu => {
|
||||
if let Some(cpu_package) = self.data.list_of_cpu_packages.last() {
|
||||
if self.show_average_cpu {
|
||||
self.currently_selected_cpu_table_position =
|
||||
cpu_package.cpu_vec.len() as i64;
|
||||
} else {
|
||||
self.currently_selected_cpu_table_position =
|
||||
cpu_package.cpu_vec.len() as i64 - 1;
|
||||
}
|
||||
}
|
||||
WidgetPosition::Cpu => {
|
||||
self.currently_selected_cpu_table_position =
|
||||
self.canvas_data.cpu_data.len() as i64 - 1;
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
|
@ -623,11 +647,11 @@ impl App {
|
|||
|
||||
pub fn decrement_position_count(&mut self) {
|
||||
if !self.is_in_dialog() {
|
||||
match self.current_application_position {
|
||||
ApplicationPosition::Process => self.change_process_position(-1),
|
||||
ApplicationPosition::Temp => self.change_temp_position(-1),
|
||||
ApplicationPosition::Disk => self.change_disk_position(-1),
|
||||
ApplicationPosition::Cpu => self.change_cpu_table_position(-1), // TODO: Temporary, may change if we add scaling
|
||||
match self.current_widget_selected {
|
||||
WidgetPosition::Process => self.change_process_position(-1),
|
||||
WidgetPosition::Temp => self.change_temp_position(-1),
|
||||
WidgetPosition::Disk => self.change_disk_position(-1),
|
||||
WidgetPosition::Cpu => self.change_cpu_table_position(-1), // TODO: Temporary, may change if we add scaling
|
||||
_ => {}
|
||||
}
|
||||
self.scroll_direction = ScrollDirection::UP;
|
||||
|
@ -637,11 +661,11 @@ impl App {
|
|||
|
||||
pub fn increment_position_count(&mut self) {
|
||||
if !self.is_in_dialog() {
|
||||
match self.current_application_position {
|
||||
ApplicationPosition::Process => self.change_process_position(1),
|
||||
ApplicationPosition::Temp => self.change_temp_position(1),
|
||||
ApplicationPosition::Disk => self.change_disk_position(1),
|
||||
ApplicationPosition::Cpu => self.change_cpu_table_position(1), // TODO: Temporary, may change if we add scaling
|
||||
match self.current_widget_selected {
|
||||
WidgetPosition::Process => self.change_process_position(1),
|
||||
WidgetPosition::Temp => self.change_temp_position(1),
|
||||
WidgetPosition::Disk => self.change_disk_position(1),
|
||||
WidgetPosition::Cpu => self.change_cpu_table_position(1), // TODO: Temporary, may change if we add scaling
|
||||
_ => {}
|
||||
}
|
||||
self.scroll_direction = ScrollDirection::DOWN;
|
||||
|
@ -650,24 +674,18 @@ impl App {
|
|||
}
|
||||
|
||||
fn change_cpu_table_position(&mut self, num_to_change_by: i64) {
|
||||
if let Some(cpu_package) = self.data.list_of_cpu_packages.last() {
|
||||
if self.currently_selected_cpu_table_position + num_to_change_by >= 0
|
||||
&& self.currently_selected_cpu_table_position + num_to_change_by
|
||||
< if self.show_average_cpu {
|
||||
cpu_package.cpu_vec.len()
|
||||
} else {
|
||||
cpu_package.cpu_vec.len() - 1
|
||||
} as i64
|
||||
{
|
||||
self.currently_selected_cpu_table_position += num_to_change_by;
|
||||
}
|
||||
if self.currently_selected_cpu_table_position + num_to_change_by >= 0
|
||||
&& self.currently_selected_cpu_table_position + num_to_change_by
|
||||
< self.canvas_data.cpu_data.len() as i64
|
||||
{
|
||||
self.currently_selected_cpu_table_position += num_to_change_by;
|
||||
}
|
||||
}
|
||||
|
||||
fn change_process_position(&mut self, num_to_change_by: i64) {
|
||||
if self.currently_selected_process_position + num_to_change_by >= 0
|
||||
&& self.currently_selected_process_position + num_to_change_by
|
||||
< self.data.list_of_processes.len() as i64
|
||||
< self.canvas_data.finalized_process_data.len() as i64
|
||||
{
|
||||
self.currently_selected_process_position += num_to_change_by;
|
||||
}
|
||||
|
@ -676,7 +694,7 @@ impl App {
|
|||
fn change_temp_position(&mut self, num_to_change_by: i64) {
|
||||
if self.currently_selected_temperature_position + num_to_change_by >= 0
|
||||
&& self.currently_selected_temperature_position + num_to_change_by
|
||||
< self.data.list_of_temperature_sensor.len() as i64
|
||||
< self.canvas_data.temp_sensor_data.len() as i64
|
||||
{
|
||||
self.currently_selected_temperature_position += num_to_change_by;
|
||||
}
|
||||
|
@ -685,7 +703,7 @@ impl App {
|
|||
fn change_disk_position(&mut self, num_to_change_by: i64) {
|
||||
if self.currently_selected_disk_position + num_to_change_by >= 0
|
||||
&& self.currently_selected_disk_position + num_to_change_by
|
||||
< self.data.list_of_disks.len() as i64
|
||||
< self.canvas_data.disk_data.len() as i64
|
||||
{
|
||||
self.currently_selected_disk_position += num_to_change_by;
|
||||
}
|
||||
|
|
|
@ -1,223 +0,0 @@
|
|||
//! This is the main file to house data collection functions.
|
||||
|
||||
use crate::{constants, utils::error::Result};
|
||||
use std::{collections::HashMap, time::Instant};
|
||||
use sysinfo::{System, SystemExt};
|
||||
|
||||
pub mod cpu;
|
||||
pub mod disks;
|
||||
pub mod mem;
|
||||
pub mod network;
|
||||
pub mod processes;
|
||||
pub mod temperature;
|
||||
|
||||
fn set_if_valid<T: std::clone::Clone>(result: &Result<T>, value_to_set: &mut T) {
|
||||
if let Ok(result) = result {
|
||||
*value_to_set = (*result).clone();
|
||||
}
|
||||
}
|
||||
|
||||
fn push_if_valid<T: std::clone::Clone>(result: &Result<T>, vector_to_push: &mut Vec<T>) {
|
||||
if let Ok(result) = result {
|
||||
vector_to_push.push(result.clone());
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, Clone)]
|
||||
pub struct Data {
|
||||
pub list_of_cpu_packages: Vec<cpu::CPUPackage>,
|
||||
pub list_of_io: Vec<disks::IOPackage>,
|
||||
pub list_of_physical_io: Vec<disks::IOPackage>,
|
||||
pub memory: Vec<mem::MemData>,
|
||||
pub swap: Vec<mem::MemData>,
|
||||
pub list_of_temperature_sensor: Vec<temperature::TempData>,
|
||||
pub network: Vec<network::NetworkData>,
|
||||
pub list_of_processes: Vec<processes::ProcessData>,
|
||||
pub grouped_list_of_processes: Option<Vec<processes::ProcessData>>,
|
||||
pub list_of_disks: Vec<disks::DiskData>, // Only need to keep a list of disks and their data
|
||||
}
|
||||
|
||||
pub struct DataState {
|
||||
pub data: Data,
|
||||
first_run: bool,
|
||||
sys: System,
|
||||
stale_max_seconds: u64,
|
||||
prev_pid_stats: HashMap<String, (f64, Instant)>,
|
||||
prev_idle: f64,
|
||||
prev_non_idle: f64,
|
||||
prev_net_rx_bytes: u64,
|
||||
prev_net_tx_bytes: u64,
|
||||
prev_net_access_time: Instant,
|
||||
temperature_type: temperature::TemperatureType,
|
||||
last_clean: Instant, // Last time stale data was cleared
|
||||
use_current_cpu_total: bool,
|
||||
}
|
||||
|
||||
impl Default for DataState {
|
||||
fn default() -> Self {
|
||||
DataState {
|
||||
data: Data::default(),
|
||||
first_run: true,
|
||||
sys: System::new(),
|
||||
stale_max_seconds: constants::STALE_MAX_MILLISECONDS / 1000,
|
||||
prev_pid_stats: HashMap::new(),
|
||||
prev_idle: 0_f64,
|
||||
prev_non_idle: 0_f64,
|
||||
prev_net_rx_bytes: 0,
|
||||
prev_net_tx_bytes: 0,
|
||||
prev_net_access_time: Instant::now(),
|
||||
temperature_type: temperature::TemperatureType::Celsius,
|
||||
last_clean: Instant::now(),
|
||||
use_current_cpu_total: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl DataState {
|
||||
pub fn set_temperature_type(&mut self, temperature_type: temperature::TemperatureType) {
|
||||
self.temperature_type = temperature_type;
|
||||
}
|
||||
|
||||
pub fn set_use_current_cpu_total(&mut self, use_current_cpu_total: bool) {
|
||||
self.use_current_cpu_total = use_current_cpu_total;
|
||||
}
|
||||
|
||||
pub fn init(&mut self) {
|
||||
self.sys.refresh_all();
|
||||
}
|
||||
|
||||
pub async fn update_data(&mut self) {
|
||||
self.sys.refresh_system();
|
||||
|
||||
if !cfg!(target_os = "linux") {
|
||||
// For now, might be just windows tbh
|
||||
self.sys.refresh_processes();
|
||||
self.sys.refresh_network();
|
||||
}
|
||||
|
||||
let current_instant = std::time::Instant::now();
|
||||
|
||||
// What we want to do: For timed data, if there is an error, just do not add. For other data, just don't update!
|
||||
push_if_valid(
|
||||
&network::get_network_data(
|
||||
&self.sys,
|
||||
&mut self.prev_net_rx_bytes,
|
||||
&mut self.prev_net_tx_bytes,
|
||||
&mut self.prev_net_access_time,
|
||||
&current_instant,
|
||||
)
|
||||
.await,
|
||||
&mut self.data.network,
|
||||
);
|
||||
push_if_valid(
|
||||
&cpu::get_cpu_data_list(&self.sys, &current_instant),
|
||||
&mut self.data.list_of_cpu_packages,
|
||||
);
|
||||
|
||||
push_if_valid(
|
||||
&mem::get_mem_data_list(&current_instant).await,
|
||||
&mut self.data.memory,
|
||||
);
|
||||
push_if_valid(
|
||||
&mem::get_swap_data_list(&current_instant).await,
|
||||
&mut self.data.swap,
|
||||
);
|
||||
set_if_valid(
|
||||
&processes::get_sorted_processes_list(
|
||||
&self.sys,
|
||||
&mut self.prev_idle,
|
||||
&mut self.prev_non_idle,
|
||||
&mut self.prev_pid_stats,
|
||||
self.use_current_cpu_total,
|
||||
&current_instant,
|
||||
),
|
||||
&mut self.data.list_of_processes,
|
||||
);
|
||||
|
||||
set_if_valid(
|
||||
&disks::get_disk_usage_list().await,
|
||||
&mut self.data.list_of_disks,
|
||||
);
|
||||
push_if_valid(
|
||||
&disks::get_io_usage_list(false).await,
|
||||
&mut self.data.list_of_io,
|
||||
);
|
||||
set_if_valid(
|
||||
&temperature::get_temperature_data(&self.sys, &self.temperature_type).await,
|
||||
&mut self.data.list_of_temperature_sensor,
|
||||
);
|
||||
|
||||
if self.first_run {
|
||||
self.data = Data::default();
|
||||
self.first_run = false;
|
||||
}
|
||||
|
||||
// Filter out stale timed entries
|
||||
let clean_instant = Instant::now();
|
||||
if clean_instant.duration_since(self.last_clean).as_secs() > self.stale_max_seconds {
|
||||
let stale_list: Vec<_> = self
|
||||
.prev_pid_stats
|
||||
.iter()
|
||||
.filter(|&(_, &v)| {
|
||||
clean_instant.duration_since(v.1).as_secs() > self.stale_max_seconds
|
||||
})
|
||||
.map(|(k, _)| k.clone())
|
||||
.collect();
|
||||
for stale in stale_list {
|
||||
self.prev_pid_stats.remove(&stale);
|
||||
}
|
||||
|
||||
self.data.list_of_cpu_packages = self
|
||||
.data
|
||||
.list_of_cpu_packages
|
||||
.iter()
|
||||
.cloned()
|
||||
.filter(|entry| {
|
||||
clean_instant.duration_since(entry.instant).as_secs() <= self.stale_max_seconds
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
self.data.memory = self
|
||||
.data
|
||||
.memory
|
||||
.iter()
|
||||
.cloned()
|
||||
.filter(|entry| {
|
||||
clean_instant.duration_since(entry.instant).as_secs() <= self.stale_max_seconds
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
self.data.swap = self
|
||||
.data
|
||||
.swap
|
||||
.iter()
|
||||
.cloned()
|
||||
.filter(|entry| {
|
||||
clean_instant.duration_since(entry.instant).as_secs() <= self.stale_max_seconds
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
self.data.network = self
|
||||
.data
|
||||
.network
|
||||
.iter()
|
||||
.cloned()
|
||||
.filter(|entry| {
|
||||
clean_instant.duration_since(entry.instant).as_secs() <= self.stale_max_seconds
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
self.data.list_of_io = self
|
||||
.data
|
||||
.list_of_io
|
||||
.iter()
|
||||
.cloned()
|
||||
.filter(|entry| {
|
||||
clean_instant.duration_since(entry.instant).as_secs() <= self.stale_max_seconds
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
self.last_clean = clean_instant;
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,33 +0,0 @@
|
|||
use std::time::Instant;
|
||||
use sysinfo::{ProcessorExt, System, SystemExt};
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct CPUData {
|
||||
pub cpu_name: Box<str>,
|
||||
pub cpu_usage: f64,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct CPUPackage {
|
||||
pub cpu_vec: Vec<CPUData>,
|
||||
pub instant: Instant,
|
||||
}
|
||||
|
||||
pub fn get_cpu_data_list(
|
||||
sys: &System, curr_time: &Instant,
|
||||
) -> crate::utils::error::Result<CPUPackage> {
|
||||
let cpu_data = sys.get_processor_list();
|
||||
let mut cpu_vec = Vec::new();
|
||||
|
||||
for cpu in cpu_data {
|
||||
cpu_vec.push(CPUData {
|
||||
cpu_name: Box::from(cpu.get_name()),
|
||||
cpu_usage: f64::from(cpu.get_cpu_usage()) * 100_f64,
|
||||
});
|
||||
}
|
||||
|
||||
Ok(CPUPackage {
|
||||
cpu_vec,
|
||||
instant: *curr_time,
|
||||
})
|
||||
}
|
|
@ -1,60 +0,0 @@
|
|||
use futures::StreamExt;
|
||||
use heim::net;
|
||||
use heim::units::information::byte;
|
||||
use std::time::Instant;
|
||||
use sysinfo::{NetworkExt, System, SystemExt};
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
/// Note all values are in bytes...
|
||||
pub struct NetworkData {
|
||||
pub rx: u64,
|
||||
pub tx: u64,
|
||||
pub total_rx: u64,
|
||||
pub total_tx: u64,
|
||||
pub instant: Instant,
|
||||
}
|
||||
|
||||
pub async fn get_network_data(
|
||||
sys: &System, prev_net_rx_bytes: &mut u64, prev_net_tx_bytes: &mut u64,
|
||||
prev_net_access_time: &mut Instant, curr_time: &Instant,
|
||||
) -> crate::utils::error::Result<NetworkData> {
|
||||
if cfg!(target_os = "windows") {
|
||||
let network_data = sys.get_network();
|
||||
|
||||
*prev_net_access_time = *curr_time;
|
||||
Ok(NetworkData {
|
||||
rx: network_data.get_income(),
|
||||
tx: network_data.get_outcome(),
|
||||
total_rx: 0,
|
||||
total_tx: 0,
|
||||
instant: *prev_net_access_time,
|
||||
})
|
||||
} else {
|
||||
let mut io_data = net::io_counters();
|
||||
let mut net_rx: u64 = 0;
|
||||
let mut net_tx: u64 = 0;
|
||||
|
||||
while let Some(io) = io_data.next().await {
|
||||
if let Ok(io) = io {
|
||||
net_rx += io.bytes_recv().get::<byte>();
|
||||
net_tx += io.bytes_sent().get::<byte>();
|
||||
}
|
||||
}
|
||||
let cur_time = Instant::now();
|
||||
let elapsed_time = cur_time.duration_since(*prev_net_access_time).as_secs_f64();
|
||||
|
||||
let rx = ((net_rx - *prev_net_rx_bytes) as f64 / elapsed_time) as u64;
|
||||
let tx = ((net_tx - *prev_net_tx_bytes) as f64 / elapsed_time) as u64;
|
||||
|
||||
*prev_net_rx_bytes = net_rx;
|
||||
*prev_net_tx_bytes = net_tx;
|
||||
*prev_net_access_time = cur_time;
|
||||
Ok(NetworkData {
|
||||
rx,
|
||||
tx,
|
||||
total_rx: *prev_net_rx_bytes,
|
||||
total_tx: *prev_net_tx_bytes,
|
||||
instant: *prev_net_access_time,
|
||||
})
|
||||
}
|
||||
}
|
293  src/app/data_farmer.rs (new file)

@@ -0,0 +1,293 @@
use crate::data_harvester::{cpu, disks, mem, network, processes, temperature, Data};
/// In charge of cleaning, processing, and managing data. I couldn't think of
/// a better name for the file. Since I called data collection "harvesting",
/// then this is the farmer I guess.
///
/// Essentially the main goal is to shift the initial calculation and distribution
/// of joiner points and data to one central location that will only do it
/// *once* upon receiving the data --- as opposed to doing it on canvas draw,
/// which will be a costly process.
///
/// This will also handle the *cleaning* of stale data. That should be done
/// in some manner (timer on another thread, some loop) that will occasionally
/// call the purging function. Failure to do so *will* result in a growing
/// memory usage and higher CPU usage - you will be trying to process more and
/// more points as this is used!
use std::time::Instant;
use std::vec::Vec;
|
||||
pub type TimeOffset = f64;
|
||||
pub type Value = f64;
|
||||
pub type JoinedDataPoints = (Value, Vec<(TimeOffset, Value)>);
|
||||
|
||||
#[derive(Debug, Default)]
|
||||
pub struct TimedData {
|
||||
pub rx_data: JoinedDataPoints,
|
||||
pub tx_data: JoinedDataPoints,
|
||||
pub cpu_data: Vec<JoinedDataPoints>,
|
||||
pub mem_data: JoinedDataPoints,
|
||||
pub swap_data: JoinedDataPoints,
|
||||
// Unused for now
|
||||
// pub io_data : JoinedDataPoints
|
||||
// pub temp_data: JoinedDataPoints,
|
||||
}
|
||||
|
||||
/// AppCollection represents the pooled data stored within the main app
|
||||
/// thread. Basically stores a (occasionally cleaned) record of the data
|
||||
/// collected, and what is needed to convert into a displayable form.
|
||||
///
|
||||
/// If the app is *frozen* - that is, we do not want to *display* any changing
|
||||
/// data, keep updating this, don't convert to canvas displayable data!
|
||||
///
|
||||
/// Note that with this method, the *app* thread is responsible for cleaning -
|
||||
/// not the data collector.
|
||||
#[derive(Debug)]
|
||||
pub struct DataCollection {
|
||||
pub current_instant: Instant,
|
||||
pub timed_data_vec: Vec<(Instant, TimedData)>,
|
||||
pub network_harvest: network::NetworkHarvest,
|
||||
pub memory_harvest: mem::MemHarvest,
|
||||
pub swap_harvest: mem::MemHarvest,
|
||||
pub cpu_harvest: cpu::CPUHarvest,
|
||||
pub process_harvest: Vec<processes::ProcessHarvest>,
|
||||
pub disk_harvest: Vec<disks::DiskHarvest>,
|
||||
pub io_harvest: disks::IOHarvest,
|
||||
pub io_labels: Vec<(u64, u64)>,
|
||||
io_prev: Vec<(u64, u64)>,
|
||||
pub temp_harvest: Vec<temperature::TempHarvest>,
|
||||
}
|
||||
|
||||
impl Default for DataCollection {
|
||||
fn default() -> Self {
|
||||
DataCollection {
|
||||
current_instant: Instant::now(),
|
||||
timed_data_vec: Vec::default(),
|
||||
network_harvest: network::NetworkHarvest::default(),
|
||||
memory_harvest: mem::MemHarvest::default(),
|
||||
swap_harvest: mem::MemHarvest::default(),
|
||||
cpu_harvest: cpu::CPUHarvest::default(),
|
||||
process_harvest: Vec::default(),
|
||||
disk_harvest: Vec::default(),
|
||||
io_harvest: disks::IOHarvest::default(),
|
||||
io_labels: Vec::default(),
|
||||
io_prev: Vec::default(),
|
||||
temp_harvest: Vec::default(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl DataCollection {
|
||||
pub fn clean_data(&mut self, max_time_millis: u128) {
|
||||
let current_time = Instant::now();
|
||||
|
||||
let mut remove_index = 0;
|
||||
for entry in &self.timed_data_vec {
|
||||
if current_time.duration_since(entry.0).as_millis() >= max_time_millis {
|
||||
remove_index += 1;
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
self.timed_data_vec.drain(0..remove_index);
|
||||
}
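As a sketch of the occasional purging described in the module comment above (this loop is not in the diff; `last_clean` and the 60-second retention window are assumptions made for illustration):

```rust
// Hypothetical caller-side purge: every so often, drop timed entries older
// than KEEP_MILLIS so timed_data_vec does not grow without bound.
const KEEP_MILLIS: u128 = 60_000; // assumed retention window, not taken from the PR

if last_clean.elapsed().as_millis() >= KEEP_MILLIS {
    app.data_collection.clean_data(KEEP_MILLIS);
    last_clean = std::time::Instant::now();
}
```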
|
||||
|
||||
pub fn eat_data(&mut self, harvested_data: &Data) {
|
||||
let harvested_time = harvested_data.last_collection_time;
|
||||
let mut new_entry = TimedData::default();
|
||||
|
||||
// Network
|
||||
self.eat_network(&harvested_data, &harvested_time, &mut new_entry);
|
||||
|
||||
// Memory and Swap
|
||||
self.eat_memory_and_swap(&harvested_data, &harvested_time, &mut new_entry);
|
||||
|
||||
// CPU
|
||||
self.eat_cpu(&harvested_data, &harvested_time, &mut new_entry);
|
||||
|
||||
// Temp
|
||||
self.eat_temp(&harvested_data);
|
||||
|
||||
// Disks
|
||||
self.eat_disks(&harvested_data, &harvested_time);
|
||||
|
||||
// Processes
|
||||
self.eat_proc(&harvested_data);
|
||||
|
||||
// And we're done eating. Update time and push the new entry!
|
||||
self.current_instant = harvested_time;
|
||||
self.timed_data_vec.push((harvested_time, new_entry));
|
||||
}
|
||||
|
||||
fn eat_memory_and_swap(
|
||||
&mut self, harvested_data: &Data, harvested_time: &Instant, new_entry: &mut TimedData,
|
||||
) {
|
||||
// Memory
|
||||
let mem_percent = harvested_data.memory.mem_used_in_mb as f64
|
||||
/ harvested_data.memory.mem_total_in_mb as f64
|
||||
* 100.0;
|
||||
let mem_joining_pts = if let Some((time, last_pt)) = self.timed_data_vec.last() {
|
||||
generate_joining_points(&time, last_pt.mem_data.0, &harvested_time, mem_percent)
|
||||
} else {
|
||||
Vec::new()
|
||||
};
|
||||
let mem_pt = (mem_percent, mem_joining_pts);
|
||||
new_entry.mem_data = mem_pt;
|
||||
|
||||
// Swap
|
||||
if harvested_data.swap.mem_total_in_mb > 0 {
|
||||
let swap_percent = harvested_data.swap.mem_used_in_mb as f64
|
||||
/ harvested_data.swap.mem_total_in_mb as f64
|
||||
* 100.0;
|
||||
let swap_joining_pt = if let Some((time, last_pt)) = self.timed_data_vec.last() {
|
||||
generate_joining_points(&time, last_pt.swap_data.0, &harvested_time, swap_percent)
|
||||
} else {
|
||||
Vec::new()
|
||||
};
|
||||
let swap_pt = (swap_percent, swap_joining_pt);
|
||||
new_entry.swap_data = swap_pt;
|
||||
}
|
||||
|
||||
// In addition copy over latest data for easy reference
|
||||
self.memory_harvest = harvested_data.memory.clone();
|
||||
self.swap_harvest = harvested_data.swap.clone();
|
||||
}
|
||||
|
||||
fn eat_network(
|
||||
&mut self, harvested_data: &Data, harvested_time: &Instant, new_entry: &mut TimedData,
|
||||
) {
|
||||
// RX
|
||||
let rx_joining_pts = if let Some((time, last_pt)) = self.timed_data_vec.last() {
|
||||
generate_joining_points(
|
||||
&time,
|
||||
last_pt.rx_data.0,
|
||||
&harvested_time,
|
||||
harvested_data.network.rx as f64,
|
||||
)
|
||||
} else {
|
||||
Vec::new()
|
||||
};
|
||||
let rx_pt = (harvested_data.network.rx as f64, rx_joining_pts);
|
||||
new_entry.rx_data = rx_pt;
|
||||
|
||||
// TX
|
||||
let tx_joining_pts = if let Some((time, last_pt)) = self.timed_data_vec.last() {
|
||||
generate_joining_points(
|
||||
&time,
|
||||
last_pt.tx_data.0,
|
||||
&harvested_time,
|
||||
harvested_data.network.tx as f64,
|
||||
)
|
||||
} else {
|
||||
Vec::new()
|
||||
};
|
||||
let tx_pt = (harvested_data.network.tx as f64, tx_joining_pts);
|
||||
new_entry.tx_data = tx_pt;
|
||||
|
||||
// In addition copy over latest data for easy reference
|
||||
self.network_harvest = harvested_data.network.clone();
|
||||
}
|
||||
|
||||
fn eat_cpu(
|
||||
&mut self, harvested_data: &Data, harvested_time: &Instant, new_entry: &mut TimedData,
|
||||
) {
|
||||
// Note this only pre-calculates the data points - the names will be
|
||||
// within the local copy of cpu_harvest. Since it's all sequential
|
||||
// it probably doesn't matter anyways.
|
||||
for (itx, cpu) in harvested_data.cpu.iter().enumerate() {
|
||||
let cpu_joining_pts = if let Some((time, last_pt)) = self.timed_data_vec.last() {
|
||||
generate_joining_points(
|
||||
&time,
|
||||
last_pt.cpu_data[itx].0,
|
||||
&harvested_time,
|
||||
cpu.cpu_usage,
|
||||
)
|
||||
} else {
|
||||
Vec::new()
|
||||
};
|
||||
|
||||
let cpu_pt = (cpu.cpu_usage, cpu_joining_pts);
|
||||
new_entry.cpu_data.push(cpu_pt);
|
||||
}
|
||||
|
||||
self.cpu_harvest = harvested_data.cpu.clone();
|
||||
}
|
||||
|
||||
fn eat_temp(&mut self, harvested_data: &Data) {
|
||||
// TODO: [PO] To implement
|
||||
self.temp_harvest = harvested_data.temperature_sensors.clone();
|
||||
}
|
||||
|
||||
fn eat_disks(&mut self, harvested_data: &Data, harvested_time: &Instant) {
|
||||
// TODO: [PO] To implement
|
||||
|
||||
let time_since_last_harvest = harvested_time
|
||||
.duration_since(self.current_instant)
|
||||
.as_secs_f64();
|
||||
|
||||
for (itx, device) in harvested_data.disks.iter().enumerate() {
|
||||
if let Some(trim) = device.name.split('/').last() {
|
||||
let io_device = harvested_data.io.get(trim);
|
||||
if let Some(io) = io_device {
|
||||
let io_r_pt = io.read_bytes;
|
||||
let io_w_pt = io.write_bytes;
|
||||
|
||||
if self.io_labels.len() <= itx {
|
||||
self.io_prev.push((io_r_pt, io_w_pt));
|
||||
self.io_labels.push((0, 0));
|
||||
} else {
|
||||
let r_rate = ((io_r_pt - self.io_prev[itx].0) as f64
|
||||
/ time_since_last_harvest)
|
||||
.round() as u64;
|
||||
let w_rate = ((io_w_pt - self.io_prev[itx].1) as f64
|
||||
/ time_since_last_harvest)
|
||||
.round() as u64;
|
||||
|
||||
self.io_labels[itx] = (r_rate, w_rate);
|
||||
self.io_prev[itx] = (io_r_pt, io_w_pt);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
self.disk_harvest = harvested_data.disks.clone();
|
||||
self.io_harvest = harvested_data.io.clone();
|
||||
}
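Concretely, the I/O label above is just the byte delta divided by elapsed time: for example, if a device's cumulative read counter grows by 4,096 bytes over a 2-second harvest interval, the stored read rate works out to 2,048 bytes per second (rounded).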
|
||||
|
||||
fn eat_proc(&mut self, harvested_data: &Data) {
|
||||
self.process_harvest = harvested_data.list_of_processes.clone();
|
||||
}
|
||||
}
|
||||
|
||||
pub fn generate_joining_points(
|
||||
start_x: &Instant, start_y: f64, end_x: &Instant, end_y: f64,
|
||||
) -> Vec<(TimeOffset, Value)> {
|
||||
let mut points: Vec<(TimeOffset, Value)> = Vec::new();
|
||||
|
||||
// Convert time floats first:
|
||||
let tmp_time_diff = (*end_x).duration_since(*start_x).as_millis() as f64;
|
||||
let time_difference = if tmp_time_diff == 0.0 {
|
||||
0.001
|
||||
} else {
|
||||
tmp_time_diff
|
||||
};
|
||||
let value_difference = end_y - start_y;
|
||||
|
||||
// Let's generate... about this many points!
|
||||
let num_points = std::cmp::min(
|
||||
std::cmp::max(
|
||||
(value_difference.abs() / time_difference * 500.0) as u64,
|
||||
100,
|
||||
),
|
||||
500,
|
||||
);
|
||||
|
||||
for itx in 0..num_points {
|
||||
points.push((
|
||||
time_difference - (itx as f64 / num_points as f64 * time_difference),
|
||||
start_y + (itx as f64 / num_points as f64 * value_difference),
|
||||
));
|
||||
}
|
||||
|
||||
points
|
||||
}
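A minimal sketch of how the helper above might be exercised, assuming it is called from within this module; the concrete readings are made up for illustration:

```rust
use std::time::{Duration, Instant};

// Two hypothetical CPU readings taken 500 ms apart: 20% then 80%.
let earlier = Instant::now();
std::thread::sleep(Duration::from_millis(500));
let later = Instant::now();

// Returns (time-offset, value) pairs that sweep from the old sample toward the
// new one, so the canvas can draw a smooth segment instead of a vertical jump.
let points = generate_joining_points(&earlier, 20.0, &later, 80.0);
assert!(points.len() >= 100 && points.len() <= 500); // clamped by the min/max above
```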
|
170  src/app/data_harvester.rs (new file)

@@ -0,0 +1,170 @@
|
|||
//! This is the main file to house data collection functions.
|
||||
|
||||
use crate::utils::error::Result;
|
||||
use std::{collections::HashMap, time::Instant};
|
||||
use sysinfo::{System, SystemExt};
|
||||
|
||||
pub mod cpu;
|
||||
pub mod disks;
|
||||
pub mod mem;
|
||||
pub mod network;
|
||||
pub mod processes;
|
||||
pub mod temperature;
|
||||
|
||||
fn set_if_valid<T: std::clone::Clone>(result: &Result<T>, value_to_set: &mut T) {
|
||||
if let Ok(result) = result {
|
||||
*value_to_set = (*result).clone();
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct Data {
|
||||
pub cpu: cpu::CPUHarvest,
|
||||
pub memory: mem::MemHarvest,
|
||||
pub swap: mem::MemHarvest,
|
||||
pub temperature_sensors: Vec<temperature::TempHarvest>,
|
||||
pub network: network::NetworkHarvest,
|
||||
pub list_of_processes: Vec<processes::ProcessHarvest>,
|
||||
pub disks: Vec<disks::DiskHarvest>,
|
||||
pub io: disks::IOHarvest,
|
||||
pub last_collection_time: Instant,
|
||||
}
|
||||
|
||||
impl Default for Data {
|
||||
fn default() -> Self {
|
||||
Data {
|
||||
cpu: cpu::CPUHarvest::default(),
|
||||
memory: mem::MemHarvest::default(),
|
||||
swap: mem::MemHarvest::default(),
|
||||
temperature_sensors: Vec::default(),
|
||||
list_of_processes: Vec::default(),
|
||||
disks: Vec::default(),
|
||||
io: disks::IOHarvest::default(),
|
||||
network: network::NetworkHarvest::default(),
|
||||
last_collection_time: Instant::now(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Data {
|
||||
pub fn first_run_cleanup(&mut self) {
|
||||
self.io = disks::IOHarvest::default();
|
||||
self.temperature_sensors = Vec::new();
|
||||
self.list_of_processes = Vec::new();
|
||||
self.disks = Vec::new();
|
||||
|
||||
self.network.first_run_cleanup();
|
||||
self.memory = mem::MemHarvest::default();
|
||||
self.swap = mem::MemHarvest::default();
|
||||
self.cpu = cpu::CPUHarvest::default();
|
||||
}
|
||||
}
|
||||
|
||||
pub struct DataState {
|
||||
pub data: Data,
|
||||
sys: System,
|
||||
prev_pid_stats: HashMap<String, (f64, Instant)>,
|
||||
prev_idle: f64,
|
||||
prev_non_idle: f64,
|
||||
mem_total_kb: u64,
|
||||
temperature_type: temperature::TemperatureType,
|
||||
use_current_cpu_total: bool,
|
||||
}
|
||||
|
||||
impl Default for DataState {
|
||||
fn default() -> Self {
|
||||
DataState {
|
||||
data: Data::default(),
|
||||
sys: System::new(),
|
||||
prev_pid_stats: HashMap::new(),
|
||||
prev_idle: 0_f64,
|
||||
prev_non_idle: 0_f64,
|
||||
mem_total_kb: 0,
|
||||
temperature_type: temperature::TemperatureType::Celsius,
|
||||
use_current_cpu_total: false,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl DataState {
|
||||
pub fn set_temperature_type(&mut self, temperature_type: temperature::TemperatureType) {
|
||||
self.temperature_type = temperature_type;
|
||||
}
|
||||
|
||||
pub fn set_use_current_cpu_total(&mut self, use_current_cpu_total: bool) {
|
||||
self.use_current_cpu_total = use_current_cpu_total;
|
||||
}
|
||||
|
||||
pub fn init(&mut self) {
|
||||
self.sys.refresh_all();
|
||||
self.mem_total_kb = self.sys.get_total_memory();
|
||||
futures::executor::block_on(self.update_data());
|
||||
std::thread::sleep(std::time::Duration::from_millis(250));
|
||||
self.data.first_run_cleanup();
|
||||
}
|
||||
|
||||
pub async fn update_data(&mut self) {
|
||||
self.sys.refresh_system();
|
||||
|
||||
if !cfg!(target_os = "linux") {
|
||||
// For now, might be just windows tbh
|
||||
self.sys.refresh_processes();
|
||||
self.sys.refresh_network();
|
||||
}
|
||||
|
||||
let current_instant = std::time::Instant::now();
|
||||
|
||||
// Network
|
||||
self.data.network = network::get_network_data(
|
||||
&self.sys,
|
||||
&self.data.last_collection_time,
|
||||
&mut self.data.network.total_rx,
|
||||
&mut self.data.network.total_tx,
|
||||
&current_instant,
|
||||
)
|
||||
.await;
|
||||
|
||||
// Mem and swap
|
||||
if let Ok(memory) = mem::get_mem_data_list().await {
|
||||
self.data.memory = memory;
|
||||
}
|
||||
|
||||
if let Ok(swap) = mem::get_swap_data_list().await {
|
||||
self.data.swap = swap;
|
||||
}
|
||||
|
||||
// CPU
|
||||
self.data.cpu = cpu::get_cpu_data_list(&self.sys);
|
||||
|
||||
// Disks
|
||||
if let Ok(disks) = disks::get_disk_usage_list().await {
|
||||
self.data.disks = disks;
|
||||
}
|
||||
if let Ok(io) = disks::get_io_usage_list(false).await {
|
||||
self.data.io = io;
|
||||
}
|
||||
|
||||
// Temp
|
||||
if let Ok(temp) = temperature::get_temperature_data(&self.sys, &self.temperature_type).await
|
||||
{
|
||||
self.data.temperature_sensors = temp;
|
||||
}
|
||||
|
||||
// What we want to do: For timed data, if there is an error, just do not add. For other data, just don't update!
|
||||
set_if_valid(
|
||||
&processes::get_sorted_processes_list(
|
||||
&self.sys,
|
||||
&mut self.prev_idle,
|
||||
&mut self.prev_non_idle,
|
||||
&mut self.prev_pid_stats,
|
||||
self.use_current_cpu_total,
|
||||
self.mem_total_kb,
|
||||
&current_instant,
|
||||
),
|
||||
&mut self.data.list_of_processes,
|
||||
);
|
||||
|
||||
// Update time
|
||||
self.data.last_collection_time = current_instant;
|
||||
}
|
||||
}
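Putting the two new modules together, a plausible reading of the intended flow (not code from this diff; the blocking executor and one-second cadence are assumptions) is:

```rust
// Hypothetical driver loop: the harvester (DataState) collects raw data, and
// the farmer (DataCollection) eats each harvest so it is processed only once.
let mut harvester = DataState::default();
let mut farmer = crate::app::data_farmer::DataCollection::default();

harvester.init();
loop {
    futures::executor::block_on(harvester.update_data());
    farmer.eat_data(&harvester.data);
    std::thread::sleep(std::time::Duration::from_millis(1000)); // assumed update rate
}
```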
|
23  src/app/data_harvester/cpu.rs (new file)

@@ -0,0 +1,23 @@
|
|||
use sysinfo::{ProcessorExt, System, SystemExt};
|
||||
|
||||
#[derive(Default, Debug, Clone)]
|
||||
pub struct CPUData {
|
||||
pub cpu_name: String,
|
||||
pub cpu_usage: f64,
|
||||
}
|
||||
|
||||
pub type CPUHarvest = Vec<CPUData>;
|
||||
|
||||
pub fn get_cpu_data_list(sys: &System) -> CPUHarvest {
|
||||
let cpu_data = sys.get_processor_list();
|
||||
let mut cpu_vec = Vec::new();
|
||||
|
||||
for cpu in cpu_data {
|
||||
cpu_vec.push(CPUData {
|
||||
cpu_name: cpu.get_name().to_string(),
|
||||
cpu_usage: f64::from(cpu.get_cpu_usage()) * 100_f64,
|
||||
});
|
||||
}
|
||||
|
||||
cpu_vec
|
||||
}
|
|
@ -1,11 +1,10 @@
|
|||
use futures::stream::StreamExt;
|
||||
use heim::units::information;
|
||||
use std::time::Instant;
|
||||
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct DiskData {
|
||||
pub name: Box<str>,
|
||||
pub mount_point: Box<str>,
|
||||
pub struct DiskHarvest {
|
||||
pub name: String,
|
||||
pub mount_point: String,
|
||||
pub free_space: u64,
|
||||
pub used_space: u64,
|
||||
pub total_space: u64,
|
||||
|
@ -13,18 +12,13 @@ pub struct DiskData {
|
|||
|
||||
#[derive(Clone, Debug)]
|
||||
pub struct IOData {
|
||||
pub mount_point: Box<str>,
|
||||
pub read_bytes: u64,
|
||||
pub write_bytes: u64,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct IOPackage {
|
||||
pub io_hash: std::collections::HashMap<String, IOData>,
|
||||
pub instant: Instant,
|
||||
}
|
||||
pub type IOHarvest = std::collections::HashMap<String, IOData>;
|
||||
|
||||
pub async fn get_io_usage_list(get_physical: bool) -> crate::utils::error::Result<IOPackage> {
|
||||
pub async fn get_io_usage_list(get_physical: bool) -> crate::utils::error::Result<IOHarvest> {
|
||||
let mut io_hash: std::collections::HashMap<String, IOData> = std::collections::HashMap::new();
|
||||
if get_physical {
|
||||
let mut physical_counter_stream = heim::disk::io_counters_physical();
|
||||
|
@ -34,7 +28,6 @@ pub async fn get_io_usage_list(get_physical: bool) -> crate::utils::error::Resul
|
|||
io_hash.insert(
|
||||
mount_point.to_string(),
|
||||
IOData {
|
||||
mount_point: Box::from(mount_point),
|
||||
read_bytes: io.read_bytes().get::<information::megabyte>(),
|
||||
write_bytes: io.write_bytes().get::<information::megabyte>(),
|
||||
},
|
||||
|
@ -48,7 +41,6 @@ pub async fn get_io_usage_list(get_physical: bool) -> crate::utils::error::Resul
|
|||
io_hash.insert(
|
||||
mount_point.to_string(),
|
||||
IOData {
|
||||
mount_point: Box::from(mount_point),
|
||||
read_bytes: io.read_bytes().get::<information::byte>(),
|
||||
write_bytes: io.write_bytes().get::<information::byte>(),
|
||||
},
|
||||
|
@ -56,14 +48,11 @@ pub async fn get_io_usage_list(get_physical: bool) -> crate::utils::error::Resul
|
|||
}
|
||||
}
|
||||
|
||||
Ok(IOPackage {
|
||||
io_hash,
|
||||
instant: Instant::now(),
|
||||
})
|
||||
Ok(io_hash)
|
||||
}
|
||||
|
||||
pub async fn get_disk_usage_list() -> crate::utils::error::Result<Vec<DiskData>> {
|
||||
let mut vec_disks: Vec<DiskData> = Vec::new();
|
||||
pub async fn get_disk_usage_list() -> crate::utils::error::Result<Vec<DiskHarvest>> {
|
||||
let mut vec_disks: Vec<DiskHarvest> = Vec::new();
|
||||
let mut partitions_stream = heim::disk::partitions_physical();
|
||||
|
||||
while let Some(part) = partitions_stream.next().await {
|
||||
|
@ -71,23 +60,21 @@ pub async fn get_disk_usage_list() -> crate::utils::error::Result<Vec<DiskData>>
|
|||
let partition = part;
|
||||
let usage = heim::disk::usage(partition.mount_point().to_path_buf()).await?;
|
||||
|
||||
vec_disks.push(DiskData {
|
||||
vec_disks.push(DiskHarvest {
|
||||
free_space: usage.free().get::<information::byte>(),
|
||||
used_space: usage.used().get::<information::byte>(),
|
||||
total_space: usage.total().get::<information::byte>(),
|
||||
mount_point: Box::from(
|
||||
partition
|
||||
.mount_point()
|
||||
.to_str()
|
||||
.unwrap_or("Name Unavailable"),
|
||||
),
|
||||
name: Box::from(
|
||||
partition
|
||||
.device()
|
||||
.unwrap_or_else(|| std::ffi::OsStr::new("Name Unavailable"))
|
||||
.to_str()
|
||||
.unwrap_or("Name Unavailable"),
|
||||
),
|
||||
mount_point: (partition
|
||||
.mount_point()
|
||||
.to_str()
|
||||
.unwrap_or("Name Unavailable"))
|
||||
.to_string(),
|
||||
name: (partition
|
||||
.device()
|
||||
.unwrap_or_else(|| std::ffi::OsStr::new("Name Unavailable"))
|
||||
.to_str()
|
||||
.unwrap_or("Name Unavailable"))
|
||||
.to_string(),
|
||||
});
|
||||
}
|
||||
}
|
|
@ -1,30 +1,35 @@
|
|||
use heim::units::information;
|
||||
use std::time::Instant;
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct MemData {
|
||||
pub struct MemHarvest {
|
||||
pub mem_total_in_mb: u64,
|
||||
pub mem_used_in_mb: u64,
|
||||
pub instant: Instant,
|
||||
}
|
||||
|
||||
pub async fn get_mem_data_list(curr_time: &Instant) -> crate::utils::error::Result<MemData> {
|
||||
impl Default for MemHarvest {
|
||||
fn default() -> Self {
|
||||
MemHarvest {
|
||||
mem_total_in_mb: 0,
|
||||
mem_used_in_mb: 0,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn get_mem_data_list() -> crate::utils::error::Result<MemHarvest> {
|
||||
let memory = heim::memory::memory().await?;
|
||||
|
||||
Ok(MemData {
|
||||
Ok(MemHarvest {
|
||||
mem_total_in_mb: memory.total().get::<information::megabyte>(),
|
||||
mem_used_in_mb: memory.total().get::<information::megabyte>()
|
||||
- memory.available().get::<information::megabyte>(),
|
||||
instant: *curr_time,
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn get_swap_data_list(curr_time: &Instant) -> crate::utils::error::Result<MemData> {
|
||||
pub async fn get_swap_data_list() -> crate::utils::error::Result<MemHarvest> {
|
||||
let memory = heim::memory::swap().await?;
|
||||
|
||||
Ok(MemData {
|
||||
Ok(MemHarvest {
|
||||
mem_total_in_mb: memory.total().get::<information::megabyte>(),
|
||||
mem_used_in_mb: memory.used().get::<information::megabyte>(),
|
||||
instant: *curr_time,
|
||||
})
|
||||
}
|
62
src/app/data_harvester/network.rs
Normal file
62
src/app/data_harvester/network.rs
Normal file
|
@ -0,0 +1,62 @@
|
|||
use futures::StreamExt;
|
||||
use heim::net;
|
||||
use heim::units::information::byte;
|
||||
use std::time::Instant;
|
||||
use sysinfo::{NetworkExt, System, SystemExt};
|
||||
|
||||
#[derive(Default, Clone, Debug)]
|
||||
pub struct NetworkHarvest {
|
||||
pub rx: u64,
|
||||
pub tx: u64,
|
||||
pub total_rx: u64,
|
||||
pub total_tx: u64,
|
||||
}
|
||||
|
||||
impl NetworkHarvest {
|
||||
pub fn first_run_cleanup(&mut self) {
|
||||
self.rx = 0;
|
||||
self.tx = 0;
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn get_network_data(
|
||||
sys: &System, prev_net_access_time: &Instant, prev_net_rx: &mut u64, prev_net_tx: &mut u64,
|
||||
curr_time: &Instant,
|
||||
) -> NetworkHarvest {
|
||||
// FIXME: [WIN] Track current total bytes... also is this accurate?
|
||||
if cfg!(target_os = "windows") {
|
||||
let network_data = sys.get_network();
|
||||
NetworkHarvest {
|
||||
rx: network_data.get_income(),
|
||||
tx: network_data.get_outcome(),
|
||||
total_rx: 0,
|
||||
total_tx: 0,
|
||||
}
|
||||
} else {
|
||||
let mut io_data = net::io_counters();
|
||||
let mut total_rx: u64 = 0;
|
||||
let mut total_tx: u64 = 0;
|
||||
|
||||
while let Some(io) = io_data.next().await {
|
||||
if let Ok(io) = io {
|
||||
total_rx += io.bytes_recv().get::<byte>();
|
||||
total_tx += io.bytes_sent().get::<byte>();
|
||||
}
|
||||
}
|
||||
let elapsed_time = curr_time
|
||||
.duration_since(*prev_net_access_time)
|
||||
.as_secs_f64();
|
||||
|
||||
let rx = ((total_rx - *prev_net_rx) as f64 / elapsed_time) as u64;
|
||||
let tx = ((total_tx - *prev_net_tx) as f64 / elapsed_time) as u64;
|
||||
|
||||
*prev_net_rx = total_rx;
|
||||
*prev_net_tx = total_tx;
|
||||
NetworkHarvest {
|
||||
rx,
|
||||
tx,
|
||||
total_rx,
|
||||
total_tx,
|
||||
}
|
||||
}
|
||||
}
|
|
@ -1,5 +1,4 @@
|
|||
use crate::utils::error;
|
||||
use std::cmp::Ordering;
|
||||
use std::{collections::HashMap, process::Command, time::Instant};
|
||||
use sysinfo::{ProcessExt, System, SystemExt};
|
||||
|
||||
|
@ -18,13 +17,11 @@ impl Default for ProcessSorting {
|
|||
}
|
||||
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct ProcessData {
|
||||
pub struct ProcessHarvest {
|
||||
pub pid: u32,
|
||||
pub cpu_usage_percent: f64,
|
||||
pub mem_usage_percent: Option<f64>,
|
||||
pub mem_usage_kb: Option<u64>,
|
||||
pub mem_usage_percent: f64,
|
||||
pub name: String,
|
||||
pub pid_vec: Option<Vec<u32>>,
|
||||
}
|
||||
|
||||
fn cpu_usage_calculation(
|
||||
|
@ -101,31 +98,6 @@ fn cpu_usage_calculation(
|
|||
Ok((result, cpu_percentage))
|
||||
}
|
||||
|
||||
fn get_ordering<T: std::cmp::PartialOrd>(
|
||||
a_val: T, b_val: T, reverse_order: bool,
|
||||
) -> std::cmp::Ordering {
|
||||
match a_val.partial_cmp(&b_val) {
|
||||
Some(x) => match x {
|
||||
Ordering::Greater => {
|
||||
if reverse_order {
|
||||
std::cmp::Ordering::Less
|
||||
} else {
|
||||
std::cmp::Ordering::Greater
|
||||
}
|
||||
}
|
||||
Ordering::Less => {
|
||||
if reverse_order {
|
||||
std::cmp::Ordering::Greater
|
||||
} else {
|
||||
std::cmp::Ordering::Less
|
||||
}
|
||||
}
|
||||
Ordering::Equal => Ordering::Equal,
|
||||
},
|
||||
None => Ordering::Equal,
|
||||
}
|
||||
}
|
||||
|
||||
fn get_process_cpu_stats(pid: u32) -> std::io::Result<f64> {
|
||||
let mut path = std::path::PathBuf::new();
|
||||
path.push("/proc");
|
||||
|
@ -145,12 +117,13 @@ fn get_process_cpu_stats(pid: u32) -> std::io::Result<f64> {
|
|||
/// Note that cpu_percentage should be represented WITHOUT the \times 100 factor!
|
||||
fn linux_cpu_usage(
|
||||
pid: u32, cpu_usage: f64, cpu_percentage: f64,
|
||||
previous_pid_stats: &mut HashMap<String, (f64, Instant)>, use_current_cpu_total: bool,
|
||||
prev_pid_stats: &HashMap<String, (f64, Instant)>,
|
||||
new_pid_stats: &mut HashMap<String, (f64, Instant)>, use_current_cpu_total: bool,
|
||||
curr_time: &Instant,
|
||||
) -> std::io::Result<f64> {
|
||||
// Based heavily on https://stackoverflow.com/a/23376195 and https://stackoverflow.com/a/1424556
|
||||
let before_proc_val: f64 = if previous_pid_stats.contains_key(&pid.to_string()) {
|
||||
previous_pid_stats
|
||||
let before_proc_val: f64 = if prev_pid_stats.contains_key(&pid.to_string()) {
|
||||
prev_pid_stats
|
||||
.get(&pid.to_string())
|
||||
.unwrap_or(&(0_f64, *curr_time))
|
||||
.0
|
||||
|
@ -168,10 +141,7 @@ fn linux_cpu_usage(
|
|||
(after_proc_val - before_proc_val) / cpu_usage * 100_f64
|
||||
);*/
|
||||
|
||||
let entry = previous_pid_stats
|
||||
.entry(pid.to_string())
|
||||
.or_insert((after_proc_val, *curr_time));
|
||||
*entry = (after_proc_val, *curr_time);
|
||||
new_pid_stats.insert(pid.to_string(), (after_proc_val, *curr_time));
|
||||
if use_current_cpu_total {
|
||||
Ok((after_proc_val - before_proc_val) / cpu_usage * 100_f64)
|
||||
} else {
|
||||
|
@ -181,17 +151,16 @@ fn linux_cpu_usage(
|
|||
|
||||
fn convert_ps(
|
||||
process: &str, cpu_usage: f64, cpu_percentage: f64,
|
||||
prev_pid_stats: &mut HashMap<String, (f64, Instant)>, use_current_cpu_total: bool,
|
||||
prev_pid_stats: &HashMap<String, (f64, Instant)>,
|
||||
new_pid_stats: &mut HashMap<String, (f64, Instant)>, use_current_cpu_total: bool,
|
||||
curr_time: &Instant,
|
||||
) -> std::io::Result<ProcessData> {
|
||||
) -> std::io::Result<ProcessHarvest> {
|
||||
if process.trim().to_string().is_empty() {
|
||||
return Ok(ProcessData {
|
||||
return Ok(ProcessHarvest {
|
||||
pid: 0,
|
||||
name: "".to_string(),
|
||||
mem_usage_percent: None,
|
||||
mem_usage_kb: None,
|
||||
cpu_usage_percent: 0_f64,
|
||||
pid_vec: None,
|
||||
mem_usage_percent: 0.0,
|
||||
cpu_usage_percent: 0.0,
|
||||
});
|
||||
}
|
||||
|
||||
|
@ -201,37 +170,34 @@ fn convert_ps(
|
|||
.parse::<u32>()
|
||||
.unwrap_or(0);
|
||||
let name = (&process[11..61]).trim().to_string();
|
||||
let mem_usage_percent = Some(
|
||||
(&process[62..])
|
||||
.trim()
|
||||
.to_string()
|
||||
.parse::<f64>()
|
||||
.unwrap_or(0_f64),
|
||||
);
|
||||
let mem_usage_percent = (&process[62..])
|
||||
.trim()
|
||||
.to_string()
|
||||
.parse::<f64>()
|
||||
.unwrap_or(0_f64);
|
||||
|
||||
Ok(ProcessData {
|
||||
Ok(ProcessHarvest {
|
||||
pid,
|
||||
name,
|
||||
mem_usage_percent,
|
||||
mem_usage_kb: None,
|
||||
cpu_usage_percent: linux_cpu_usage(
|
||||
pid,
|
||||
cpu_usage,
|
||||
cpu_percentage,
|
||||
prev_pid_stats,
|
||||
new_pid_stats,
|
||||
use_current_cpu_total,
|
||||
curr_time,
|
||||
)?,
|
||||
pid_vec: None,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn get_sorted_processes_list(
|
||||
sys: &System, prev_idle: &mut f64, prev_non_idle: &mut f64,
|
||||
prev_pid_stats: &mut std::collections::HashMap<String, (f64, Instant)>,
|
||||
use_current_cpu_total: bool, curr_time: &Instant,
|
||||
) -> crate::utils::error::Result<Vec<ProcessData>> {
|
||||
let mut process_vector: Vec<ProcessData> = Vec::new();
|
||||
prev_pid_stats: &mut HashMap<String, (f64, Instant)>, use_current_cpu_total: bool,
|
||||
mem_total_kb: u64, curr_time: &Instant,
|
||||
) -> crate::utils::error::Result<Vec<ProcessHarvest>> {
|
||||
let mut process_vector: Vec<ProcessHarvest> = Vec::new();
|
||||
|
||||
if cfg!(target_os = "linux") {
|
||||
// Linux specific - this is a massive pain... ugh.
|
||||
|
@ -241,17 +207,19 @@ pub fn get_sorted_processes_list(
|
|||
.output()?;
|
||||
let ps_stdout = String::from_utf8_lossy(&ps_result.stdout);
|
||||
let split_string = ps_stdout.split('\n');
|
||||
//debug!("{:?}", split_string);
|
||||
let cpu_calc = cpu_usage_calculation(prev_idle, prev_non_idle);
|
||||
if let Ok((cpu_usage, cpu_percentage)) = cpu_calc {
|
||||
let process_stream = split_string.collect::<Vec<&str>>();
|
||||
|
||||
let mut new_pid_stats: HashMap<String, (f64, Instant)> = HashMap::new();
|
||||
|
||||
for process in process_stream {
|
||||
if let Ok(process_object) = convert_ps(
|
||||
process,
|
||||
cpu_usage,
|
||||
cpu_percentage,
|
||||
prev_pid_stats,
|
||||
&prev_pid_stats,
|
||||
&mut new_pid_stats,
|
||||
use_current_cpu_total,
|
||||
curr_time,
|
||||
) {
|
||||
|
@ -260,6 +228,8 @@ pub fn get_sorted_processes_list(
|
|||
}
|
||||
}
|
||||
}
|
||||
|
||||
*prev_pid_stats = new_pid_stats;
|
||||
} else {
|
||||
error!("Unable to properly parse CPU data in Linux.");
|
||||
error!("Result: {:?}", cpu_calc.err());
|
||||
|
@ -288,42 +258,14 @@ pub fn get_sorted_processes_list(
|
|||
process_val.name().to_string()
|
||||
};
|
||||
|
||||
process_vector.push(ProcessData {
|
||||
process_vector.push(ProcessHarvest {
|
||||
pid: process_val.pid() as u32,
|
||||
name,
|
||||
mem_usage_percent: None,
|
||||
mem_usage_kb: Some(process_val.memory()),
|
||||
mem_usage_percent: process_val.memory() as f64 * 100.0 / mem_total_kb as f64,
|
||||
cpu_usage_percent: f64::from(process_val.cpu_usage()),
|
||||
pid_vec: None,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
Ok(process_vector)
|
||||
}
|
||||
|
||||
pub fn sort_processes(
|
||||
process_vector: &mut Vec<ProcessData>, sorting_method: &ProcessSorting, reverse_order: bool,
|
||||
) {
|
||||
// Always sort alphabetically first!
|
||||
process_vector.sort_by(|a, b| get_ordering(&a.name, &b.name, false));
|
||||
|
||||
match sorting_method {
|
||||
ProcessSorting::CPU => {
|
||||
process_vector.sort_by(|a, b| {
|
||||
get_ordering(a.cpu_usage_percent, b.cpu_usage_percent, reverse_order)
|
||||
});
|
||||
}
|
||||
ProcessSorting::MEM => {
|
||||
process_vector.sort_by(|a, b| {
|
||||
get_ordering(a.mem_usage_percent, b.mem_usage_percent, reverse_order)
|
||||
});
|
||||
}
|
||||
ProcessSorting::PID => {
|
||||
process_vector.sort_by(|a, b| get_ordering(a.pid, b.pid, reverse_order));
|
||||
}
|
||||
ProcessSorting::NAME => {
|
||||
process_vector.sort_by(|a, b| get_ordering(&a.name, &b.name, reverse_order))
|
||||
}
|
||||
}
|
||||
}
|
|
@ -3,9 +3,9 @@ use heim::units::thermodynamic_temperature;
|
|||
use std::cmp::Ordering;
|
||||
use sysinfo::{ComponentExt, System, SystemExt};
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct TempData {
|
||||
pub component_name: Box<str>,
|
||||
#[derive(Default, Debug, Clone)]
|
||||
pub struct TempHarvest {
|
||||
pub component_name: String,
|
||||
pub temperature: f32,
|
||||
}
|
||||
|
||||
|
@ -24,15 +24,15 @@ impl Default for TemperatureType {
|
|||
|
||||
pub async fn get_temperature_data(
|
||||
sys: &System, temp_type: &TemperatureType,
|
||||
) -> crate::utils::error::Result<Vec<TempData>> {
|
||||
let mut temperature_vec: Vec<TempData> = Vec::new();
|
||||
) -> crate::utils::error::Result<Vec<TempHarvest>> {
|
||||
let mut temperature_vec: Vec<TempHarvest> = Vec::new();
|
||||
|
||||
if cfg!(target_os = "linux") {
|
||||
let mut sensor_data = heim::sensors::temperatures();
|
||||
while let Some(sensor) = sensor_data.next().await {
|
||||
if let Ok(sensor) = sensor {
|
||||
temperature_vec.push(TempData {
|
||||
component_name: Box::from(sensor.unit()),
|
||||
temperature_vec.push(TempHarvest {
|
||||
component_name: sensor.unit().to_string(),
|
||||
temperature: match temp_type {
|
||||
TemperatureType::Celsius => sensor
|
||||
.current()
|
||||
|
@ -52,8 +52,8 @@ pub async fn get_temperature_data(
|
|||
} else {
|
||||
let sensor_data = sys.get_components_list();
|
||||
for component in sensor_data {
|
||||
temperature_vec.push(TempData {
|
||||
component_name: Box::from(component.get_label()),
|
||||
temperature_vec.push(TempHarvest {
|
||||
component_name: component.get_label().to_string(),
|
||||
temperature: match temp_type {
|
||||
TemperatureType::Celsius => component.get_temperature(),
|
||||
TemperatureType::Kelvin => component.get_temperature() + 273.15,
|
233
src/canvas.rs
233
src/canvas.rs
|
@ -1,9 +1,11 @@
|
|||
use crate::{
|
||||
app, constants,
|
||||
app::{self, data_harvester::processes::ProcessHarvest},
|
||||
constants,
|
||||
data_conversion::{ConvertedCpuData, ConvertedProcessData},
|
||||
utils::{error, gen_util::*},
|
||||
};
|
||||
use std::cmp::max;
|
||||
use std::collections::HashMap;
|
||||
use tui::{
|
||||
backend,
|
||||
layout::{Alignment, Constraint, Direction, Layout, Rect},
|
||||
|
@ -51,7 +53,7 @@ lazy_static! {
|
|||
Text::raw("Ctrl-f to toggle searching for a process. / to just open it.\n"),
|
||||
Text::raw("Use Ctrl-p and Ctrl-n to toggle between searching for PID and name.\n"),
|
||||
Text::raw("Use Ctrl-a and Ctrl-e to set the cursor to the start and end of the bar respectively.\n"),
|
||||
Text::raw("Use Ctrl-s to toggle between simple and regex search.\n"),
|
||||
Text::raw("Use Tab to toggle whether to ignore case.\n"),
|
||||
Text::raw("\nFor startup flags, type in \"btm -h\".")
|
||||
];
|
||||
static ref COLOUR_LIST: Vec<Color> = gen_n_colours(constants::NUM_COLOURS);
|
||||
|
@ -85,7 +87,7 @@ lazy_static! {
|
|||
}
|
||||
|
||||
#[derive(Default)]
|
||||
pub struct CanvasData {
|
||||
pub struct DisplayableData {
|
||||
pub rx_display: String,
|
||||
pub tx_display: String,
|
||||
pub total_rx_display: String,
|
||||
|
@ -94,9 +96,11 @@ pub struct CanvasData {
|
|||
pub network_data_tx: Vec<(f64, f64)>,
|
||||
pub disk_data: Vec<Vec<String>>,
|
||||
pub temp_sensor_data: Vec<Vec<String>>,
|
||||
pub process_data: Vec<ConvertedProcessData>,
|
||||
pub grouped_process_data: Vec<ConvertedProcessData>,
|
||||
pub memory_labels: Vec<(u64, u64)>,
|
||||
pub process_data: HashMap<u32, ProcessHarvest>, // Not final
|
||||
pub grouped_process_data: Vec<ConvertedProcessData>, // Not final
|
||||
pub finalized_process_data: Vec<ConvertedProcessData>, // What's actually displayed
|
||||
pub mem_label: String,
|
||||
pub swap_label: String,
|
||||
pub mem_data: Vec<(f64, f64)>,
|
||||
pub swap_data: Vec<(f64, f64)>,
|
||||
pub cpu_data: Vec<ConvertedCpuData>,
|
||||
|
@ -131,11 +135,11 @@ fn gen_n_colours(num_to_gen: i32) -> Vec<Color> {
|
|||
|
||||
// Generate colours
|
||||
let mut colour_vec: Vec<Color> = vec![
|
||||
Color::LightCyan,
|
||||
Color::LightYellow,
|
||||
Color::Red,
|
||||
Color::Green,
|
||||
Color::LightYellow,
|
||||
Color::LightMagenta,
|
||||
Color::LightCyan,
|
||||
Color::Green,
|
||||
];
|
||||
|
||||
let mut h: f32 = 0.4; // We don't need random colours... right?
|
||||
|
@ -235,23 +239,23 @@ pub fn draw_data<B: backend::Backend>(
|
|||
.alignment(Alignment::Center)
|
||||
.wrap(true)
|
||||
.render(&mut f, middle_dialog_chunk[1]);
|
||||
} else if let Some(process_list) = app_state.get_current_highlighted_process_list() {
|
||||
if let Some(process) = process_list.first() {
|
||||
} else if let Some(to_kill_processes) = app_state.get_to_delete_processes() {
|
||||
if let Some(first_pid) = to_kill_processes.1.first() {
|
||||
let dd_text = [
|
||||
if app_state.is_grouped() {
|
||||
Text::raw(format!(
|
||||
"\nAre you sure you want to kill {} process(es) with name {}?",
|
||||
process_list.len(), process.name
|
||||
))
|
||||
} else {
|
||||
Text::raw(format!(
|
||||
"\nAre you sure you want to kill process {} with PID {}?",
|
||||
process.name, process.pid
|
||||
))
|
||||
},
|
||||
Text::raw("\n\nPress ENTER to proceed, ESC to exit."),
|
||||
Text::raw("\nNote that if bottom is frozen, it must be unfrozen for changes to be shown."),
|
||||
];
|
||||
if app_state.is_grouped() {
|
||||
Text::raw(format!(
|
||||
"\nAre you sure you want to kill {} process(es) with name {}?",
|
||||
to_kill_processes.1.len(), to_kill_processes.0
|
||||
))
|
||||
} else {
|
||||
Text::raw(format!(
|
||||
"\nAre you sure you want to kill process {} with PID {}?",
|
||||
to_kill_processes.0, first_pid
|
||||
))
|
||||
},
|
||||
Text::raw("\n\nPress ENTER to proceed, ESC to exit."),
|
||||
Text::raw("\nNote that if bottom is frozen, it must be unfrozen for changes to be shown."),
|
||||
];
|
||||
|
||||
Paragraph::new(dd_text.iter())
|
||||
.block(
|
||||
|
@ -264,6 +268,7 @@ pub fn draw_data<B: backend::Backend>(
|
|||
.wrap(true)
|
||||
.render(&mut f, middle_dialog_chunk[1]);
|
||||
} else {
|
||||
// This is a bit nasty, but it works well... I guess.
|
||||
app_state.show_dd = false;
|
||||
}
|
||||
} else {
|
||||
|
@ -276,8 +281,8 @@ pub fn draw_data<B: backend::Backend>(
|
|||
.margin(1)
|
||||
.constraints(
|
||||
[
|
||||
Constraint::Percentage(33),
|
||||
Constraint::Percentage(34),
|
||||
Constraint::Percentage(30),
|
||||
Constraint::Percentage(36),
|
||||
Constraint::Percentage(34),
|
||||
]
|
||||
.as_ref(),
|
||||
|
@ -334,7 +339,6 @@ pub fn draw_data<B: backend::Backend>(
|
|||
} else {
|
||||
5
|
||||
};
|
||||
debug!("Req: {}", required);
|
||||
let remaining = bottom_chunks[0].height - required;
|
||||
[Constraint::Length(remaining), Constraint::Length(required)]
|
||||
}
|
||||
|
@ -391,7 +395,7 @@ fn draw_cpu_graph<B: backend::Backend>(f: &mut Frame<B>, app_state: &app::App, d
|
|||
// CPU usage graph
|
||||
let x_axis: Axis<String> = Axis::default()
|
||||
.style(Style::default().fg(GRAPH_COLOUR))
|
||||
.bounds([0.0, constants::TIME_STARTS_FROM as f64 * 10.0]);
|
||||
.bounds([0.0, constants::TIME_STARTS_FROM as f64]);
|
||||
let y_axis = Axis::default()
|
||||
.style(Style::default().fg(GRAPH_COLOUR))
|
||||
.bounds([-0.5, 100.5])
|
||||
|
@ -401,18 +405,8 @@ fn draw_cpu_graph<B: backend::Backend>(f: &mut Frame<B>, app_state: &app::App, d
|
|||
let mut cpu_entries_vec: Vec<(Style, Vec<(f64, f64)>)> = Vec::new();
|
||||
|
||||
for (i, cpu) in cpu_data.iter().enumerate() {
|
||||
let mut avg_cpu_exist_offset = 0;
|
||||
if app_state.show_average_cpu {
|
||||
if i == 0 {
|
||||
// Skip, we want to render the average cpu last!
|
||||
continue;
|
||||
} else {
|
||||
avg_cpu_exist_offset = 1;
|
||||
}
|
||||
}
|
||||
|
||||
cpu_entries_vec.push((
|
||||
Style::default().fg(COLOUR_LIST[(i - avg_cpu_exist_offset) % COLOUR_LIST.len()]),
|
||||
Style::default().fg(COLOUR_LIST[(i) % COLOUR_LIST.len()]),
|
||||
cpu.cpu_data
|
||||
.iter()
|
||||
.map(<(f64, f64)>::from)
|
||||
|
@ -423,7 +417,7 @@ fn draw_cpu_graph<B: backend::Backend>(f: &mut Frame<B>, app_state: &app::App, d
|
|||
if app_state.show_average_cpu {
|
||||
if let Some(avg_cpu_entry) = cpu_data.first() {
|
||||
cpu_entries_vec.push((
|
||||
Style::default().fg(COLOUR_LIST[(cpu_data.len() - 1) % COLOUR_LIST.len()]),
|
||||
Style::default().fg(COLOUR_LIST[0]),
|
||||
avg_cpu_entry
|
||||
.cpu_data
|
||||
.iter()
|
||||
|
@ -451,8 +445,8 @@ fn draw_cpu_graph<B: backend::Backend>(f: &mut Frame<B>, app_state: &app::App, d
|
|||
Block::default()
|
||||
.title("CPU")
|
||||
.borders(Borders::ALL)
|
||||
.border_style(match app_state.current_application_position {
|
||||
app::ApplicationPosition::Cpu => *CANVAS_HIGHLIGHTED_BORDER_STYLE,
|
||||
.border_style(match app_state.current_widget_selected {
|
||||
app::WidgetPosition::Cpu => *CANVAS_HIGHLIGHTED_BORDER_STYLE,
|
||||
_ => *CANVAS_BORDER_STYLE,
|
||||
}),
|
||||
)
|
||||
|
@ -495,8 +489,8 @@ fn draw_cpu_legend<B: backend::Backend>(
|
|||
.map(|(itx, cpu_string_row)| {
|
||||
Row::StyledData(
|
||||
cpu_string_row.iter(),
|
||||
match app_state.current_application_position {
|
||||
app::ApplicationPosition::Cpu => {
|
||||
match app_state.current_widget_selected {
|
||||
app::WidgetPosition::Cpu => {
|
||||
if cpu_row_counter
|
||||
== app_state.currently_selected_cpu_table_position - start_position
|
||||
{
|
||||
|
@ -525,8 +519,8 @@ fn draw_cpu_legend<B: backend::Backend>(
|
|||
// Draw
|
||||
Table::new(CPU_LEGEND_HEADER.iter(), cpu_rows)
|
||||
.block(Block::default().borders(Borders::ALL).border_style(
|
||||
match app_state.current_application_position {
|
||||
app::ApplicationPosition::Cpu => *CANVAS_HIGHLIGHTED_BORDER_STYLE,
|
||||
match app_state.current_widget_selected {
|
||||
app::WidgetPosition::Cpu => *CANVAS_HIGHLIGHTED_BORDER_STYLE,
|
||||
_ => *CANVAS_BORDER_STYLE,
|
||||
},
|
||||
))
|
||||
|
@ -564,8 +558,8 @@ fn draw_memory_table<B: backend::Backend>(
|
|||
// Draw
|
||||
Table::new(MEM_HEADERS.iter(), mapped_mem_rows)
|
||||
.block(Block::default().borders(Borders::ALL).border_style(
|
||||
match app_state.current_application_position {
|
||||
app::ApplicationPosition::Mem => *CANVAS_HIGHLIGHTED_BORDER_STYLE,
|
||||
match app_state.current_widget_selected {
|
||||
app::WidgetPosition::Mem => *CANVAS_HIGHLIGHTED_BORDER_STYLE,
|
||||
_ => *CANVAS_BORDER_STYLE,
|
||||
},
|
||||
))
|
||||
|
@ -582,29 +576,19 @@ fn draw_memory_table<B: backend::Backend>(
|
|||
fn draw_memory_graph<B: backend::Backend>(f: &mut Frame<B>, app_state: &app::App, draw_loc: Rect) {
|
||||
let mem_data: &[(f64, f64)] = &(app_state.canvas_data.mem_data);
|
||||
let swap_data: &[(f64, f64)] = &(app_state.canvas_data.swap_data);
|
||||
let memory_labels: &[(u64, u64)] = &(app_state.canvas_data.memory_labels);
|
||||
|
||||
let x_axis: Axis<String> = Axis::default()
|
||||
.style(Style::default().fg(GRAPH_COLOUR))
|
||||
.bounds([0.0, constants::TIME_STARTS_FROM as f64 * 10.0]);
|
||||
let y_axis = Axis::default()
|
||||
.bounds([0.0, constants::TIME_STARTS_FROM as f64]);
|
||||
|
||||
// Offset as the zero value isn't drawn otherwise...
|
||||
let y_axis: Axis<&str> = Axis::default()
|
||||
.style(Style::default().fg(GRAPH_COLOUR))
|
||||
.bounds([-0.5, 100.5]) // Offset as the zero value isn't drawn otherwise...
|
||||
.bounds([-0.5, 100.5])
|
||||
.labels(&["0%", "100%"]);
|
||||
|
||||
let mem_name = "RAM:".to_string()
|
||||
+ &format!(
|
||||
"{:3}%",
|
||||
(mem_data.last().unwrap_or(&(0_f64, 0_f64)).1.round() as u64)
|
||||
) + &format!(
|
||||
" {:.1}GB/{:.1}GB",
|
||||
memory_labels.first().unwrap_or(&(0, 0)).0 as f64 / 1024.0,
|
||||
memory_labels.first().unwrap_or(&(0, 0)).1 as f64 / 1024.0
|
||||
);
|
||||
let swap_name: String;
|
||||
|
||||
let mut mem_canvas_vec: Vec<Dataset> = vec![Dataset::default()
|
||||
.name(&mem_name)
|
||||
.name(&app_state.canvas_data.mem_label)
|
||||
.marker(if app_state.use_dot {
|
||||
Marker::Dot
|
||||
} else {
|
||||
|
@ -614,42 +598,26 @@ fn draw_memory_graph<B: backend::Backend>(f: &mut Frame<B>, app_state: &app::App
|
|||
.data(&mem_data)];
|
||||
|
||||
if !(&swap_data).is_empty() {
|
||||
if let Some(last_canvas_result) = (&swap_data).last() {
|
||||
if last_canvas_result.1 >= 0.0 {
|
||||
swap_name = "SWP:".to_string()
|
||||
+ &format!(
|
||||
"{:3}%",
|
||||
(swap_data.last().unwrap_or(&(0_f64, 0_f64)).1.round() as u64)
|
||||
) + &format!(
|
||||
" {:.1}GB/{:.1}GB",
|
||||
memory_labels[1].0 as f64 / 1024.0,
|
||||
memory_labels[1].1 as f64 / 1024.0
|
||||
);
|
||||
mem_canvas_vec.push(
|
||||
Dataset::default()
|
||||
.name(&swap_name)
|
||||
.marker(if app_state.use_dot {
|
||||
Marker::Dot
|
||||
} else {
|
||||
Marker::Braille
|
||||
})
|
||||
.style(Style::default().fg(COLOUR_LIST[1]))
|
||||
.data(&swap_data),
|
||||
);
|
||||
}
|
||||
}
|
||||
mem_canvas_vec.push(
|
||||
Dataset::default()
|
||||
.name(&app_state.canvas_data.swap_label)
|
||||
.marker(if app_state.use_dot {
|
||||
Marker::Dot
|
||||
} else {
|
||||
Marker::Braille
|
||||
})
|
||||
.style(Style::default().fg(COLOUR_LIST[1]))
|
||||
.data(&swap_data),
|
||||
);
|
||||
}
|
||||
|
||||
// Memory usage table
|
||||
// draw_memory_table(f, &app_state, mem_labels, swap_labels, label_loc);
|
||||
|
||||
Chart::default()
|
||||
.block(
|
||||
Block::default()
|
||||
.title("Memory")
|
||||
.borders(Borders::ALL)
|
||||
.border_style(match app_state.current_application_position {
|
||||
app::ApplicationPosition::Mem => *CANVAS_HIGHLIGHTED_BORDER_STYLE,
|
||||
.border_style(match app_state.current_widget_selected {
|
||||
app::WidgetPosition::Mem => *CANVAS_HIGHLIGHTED_BORDER_STYLE,
|
||||
_ => *CANVAS_BORDER_STYLE,
|
||||
}),
|
||||
)
|
||||
|
@ -665,7 +633,7 @@ fn draw_network_graph<B: backend::Backend>(f: &mut Frame<B>, app_state: &app::Ap
|
|||
|
||||
let x_axis: Axis<String> = Axis::default()
|
||||
.style(Style::default().fg(GRAPH_COLOUR))
|
||||
.bounds([0.0, 600_000.0]);
|
||||
.bounds([0.0, 60_000.0]);
|
||||
let y_axis = Axis::default()
|
||||
.style(Style::default().fg(GRAPH_COLOUR))
|
||||
.bounds([-0.5, 30_f64])
|
||||
|
@ -675,8 +643,8 @@ fn draw_network_graph<B: backend::Backend>(f: &mut Frame<B>, app_state: &app::Ap
|
|||
Block::default()
|
||||
.title("Network")
|
||||
.borders(Borders::ALL)
|
||||
.border_style(match app_state.current_application_position {
|
||||
app::ApplicationPosition::Network => *CANVAS_HIGHLIGHTED_BORDER_STYLE,
|
||||
.border_style(match app_state.current_widget_selected {
|
||||
app::WidgetPosition::Network => *CANVAS_HIGHLIGHTED_BORDER_STYLE,
|
||||
_ => *CANVAS_BORDER_STYLE,
|
||||
}),
|
||||
)
|
||||
|
@ -753,8 +721,8 @@ fn draw_network_labels<B: backend::Backend>(
|
|||
mapped_network,
|
||||
)
|
||||
.block(Block::default().borders(Borders::ALL).border_style(
|
||||
match app_state.current_application_position {
|
||||
app::ApplicationPosition::Network => *CANVAS_HIGHLIGHTED_BORDER_STYLE,
|
||||
match app_state.current_widget_selected {
|
||||
app::WidgetPosition::Network => *CANVAS_HIGHLIGHTED_BORDER_STYLE,
|
||||
_ => *CANVAS_BORDER_STYLE,
|
||||
},
|
||||
))
|
||||
|
@ -787,8 +755,8 @@ fn draw_temp_table<B: backend::Backend>(
|
|||
let temperature_rows = sliced_vec.iter().map(|temp_row| {
|
||||
Row::StyledData(
|
||||
temp_row.iter(),
|
||||
match app_state.current_application_position {
|
||||
app::ApplicationPosition::Temp => {
|
||||
match app_state.current_widget_selected {
|
||||
app::WidgetPosition::Temp => {
|
||||
if temp_row_counter
|
||||
== app_state.currently_selected_temperature_position - start_position
|
||||
{
|
||||
|
@ -820,8 +788,8 @@ fn draw_temp_table<B: backend::Backend>(
|
|||
Block::default()
|
||||
.title("Temperatures")
|
||||
.borders(Borders::ALL)
|
||||
.border_style(match app_state.current_application_position {
|
||||
app::ApplicationPosition::Temp => *CANVAS_HIGHLIGHTED_BORDER_STYLE,
|
||||
.border_style(match app_state.current_widget_selected {
|
||||
app::WidgetPosition::Temp => *CANVAS_HIGHLIGHTED_BORDER_STYLE,
|
||||
_ => *CANVAS_BORDER_STYLE,
|
||||
}),
|
||||
)
|
||||
|
@ -853,8 +821,8 @@ fn draw_disk_table<B: backend::Backend>(
|
|||
let disk_rows = sliced_vec.iter().map(|disk| {
|
||||
Row::StyledData(
|
||||
disk.iter(),
|
||||
match app_state.current_application_position {
|
||||
app::ApplicationPosition::Disk => {
|
||||
match app_state.current_widget_selected {
|
||||
app::WidgetPosition::Disk => {
|
||||
if disk_counter == app_state.currently_selected_disk_position - start_position {
|
||||
disk_counter = -1;
|
||||
Style::default().fg(Color::Black).bg(Color::Cyan)
|
||||
|
@ -885,8 +853,8 @@ fn draw_disk_table<B: backend::Backend>(
|
|||
Block::default()
|
||||
.title("Disk")
|
||||
.borders(Borders::ALL)
|
||||
.border_style(match app_state.current_application_position {
|
||||
app::ApplicationPosition::Disk => *CANVAS_HIGHLIGHTED_BORDER_STYLE,
|
||||
.border_style(match app_state.current_widget_selected {
|
||||
app::WidgetPosition::Disk => *CANVAS_HIGHLIGHTED_BORDER_STYLE,
|
||||
_ => *CANVAS_BORDER_STYLE,
|
||||
}),
|
||||
)
|
||||
|
@ -919,8 +887,7 @@ fn draw_search_field<B: backend::Backend>(
|
|||
.chars()
|
||||
.enumerate()
|
||||
.map(|(itx, c)| {
|
||||
if let app::ApplicationPosition::ProcessSearch = app_state.current_application_position
|
||||
{
|
||||
if let app::WidgetPosition::ProcessSearch = app_state.current_widget_selected {
|
||||
if itx == cursor_position {
|
||||
return Text::styled(
|
||||
c.to_string(),
|
||||
|
@ -931,7 +898,7 @@ fn draw_search_field<B: backend::Backend>(
|
|||
Text::styled(c.to_string(), Style::default().fg(TEXT_COLOUR))
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
if let app::ApplicationPosition::ProcessSearch = app_state.current_application_position {
|
||||
if let app::WidgetPosition::ProcessSearch = app_state.current_widget_selected {
|
||||
if cursor_position >= query.len() {
|
||||
query_with_cursor.push(Text::styled(
|
||||
" ".to_string(),
|
||||
|
@ -946,10 +913,10 @@ fn draw_search_field<B: backend::Backend>(
|
|||
} else {
|
||||
Text::styled("\nName", Style::default().fg(TABLE_HEADER_COLOUR))
|
||||
},
|
||||
if app_state.use_simple {
|
||||
Text::styled(" (Simple): ", Style::default().fg(TABLE_HEADER_COLOUR))
|
||||
if app_state.ignore_case {
|
||||
Text::styled(" (Ignore Case): ", Style::default().fg(TABLE_HEADER_COLOUR))
|
||||
} else {
|
||||
Text::styled(" (Regex): ", Style::default().fg(TABLE_HEADER_COLOUR))
|
||||
Text::styled(": ", Style::default().fg(TABLE_HEADER_COLOUR))
|
||||
},
|
||||
];
|
||||
|
||||
|
@ -964,8 +931,8 @@ fn draw_search_field<B: backend::Backend>(
|
|||
.border_style(if app_state.get_current_regex_matcher().is_err() {
|
||||
Style::default().fg(Color::Red)
|
||||
} else {
|
||||
match app_state.current_application_position {
|
||||
app::ApplicationPosition::ProcessSearch => *CANVAS_HIGHLIGHTED_BORDER_STYLE,
|
||||
match app_state.current_widget_selected {
|
||||
app::WidgetPosition::ProcessSearch => *CANVAS_HIGHLIGHTED_BORDER_STYLE,
|
||||
_ => *CANVAS_BORDER_STYLE,
|
||||
}
|
||||
}),
|
||||
|
@ -979,26 +946,32 @@ fn draw_search_field<B: backend::Backend>(
|
|||
fn draw_processes_table<B: backend::Backend>(
|
||||
f: &mut Frame<B>, app_state: &mut app::App, draw_loc: Rect,
|
||||
) {
|
||||
let process_data: &[ConvertedProcessData] = if app_state.is_grouped() {
|
||||
&app_state.canvas_data.grouped_process_data
|
||||
} else {
|
||||
&app_state.canvas_data.process_data
|
||||
};
|
||||
let process_data: &[ConvertedProcessData] = &app_state.canvas_data.finalized_process_data;
|
||||
|
||||
// Admittedly this is kinda a hack... but we need to:
|
||||
// * Scroll
|
||||
// * Show/hide elements based on scroll position
|
||||
// As such, we use a process_counter to know when we've hit the process we've currently scrolled to. We also need to move the list - we can
|
||||
//
|
||||
// As such, we use a process_counter to know when we've
|
||||
// hit the process we've currently scrolled to.
|
||||
// We also need to move the list - we can
|
||||
// do so by hiding some elements!
|
||||
let num_rows = i64::from(draw_loc.height) - 5;
|
||||
|
||||
let start_position = get_start_position(
|
||||
let position = get_start_position(
|
||||
num_rows,
|
||||
&(app_state.scroll_direction),
|
||||
&mut app_state.previous_process_position,
|
||||
app_state.currently_selected_process_position,
|
||||
);
|
||||
|
||||
// Sanity check
|
||||
let start_position = if position >= process_data.len() as i64 {
|
||||
std::cmp::max(0, process_data.len() as i64 - 1)
|
||||
} else {
|
||||
position
|
||||
};
|
||||
|
||||
let sliced_vec: Vec<ConvertedProcessData> = (&process_data[start_position as usize..]).to_vec();
|
||||
let mut process_counter = 0;
|
||||
|
||||
|
@ -1006,18 +979,18 @@ fn draw_processes_table<B: backend::Backend>(
|
|||
let process_rows = sliced_vec.iter().map(|process| {
|
||||
let stringified_process_vec: Vec<String> = vec![
|
||||
if app_state.is_grouped() {
|
||||
process.group.len().to_string()
|
||||
process.group_pids.len().to_string()
|
||||
} else {
|
||||
process.pid.to_string()
|
||||
},
|
||||
process.name.clone(),
|
||||
process.cpu_usage.clone(),
|
||||
process.mem_usage.clone(),
|
||||
format!("{:.1}%", process.cpu_usage),
|
||||
format!("{:.1}%", process.mem_usage),
|
||||
];
|
||||
Row::StyledData(
|
||||
stringified_process_vec.into_iter(),
|
||||
match app_state.current_application_position {
|
||||
app::ApplicationPosition::Process => {
|
||||
match app_state.current_widget_selected {
|
||||
app::WidgetPosition::Process => {
|
||||
if process_counter
|
||||
== app_state.currently_selected_process_position - start_position
|
||||
{
|
||||
|
@ -1035,7 +1008,7 @@ fn draw_processes_table<B: backend::Backend>(
|
|||
)
|
||||
});
|
||||
|
||||
use app::data_collection::processes::ProcessSorting;
|
||||
use app::data_harvester::processes::ProcessSorting;
|
||||
let mut pid_or_name = if app_state.is_grouped() {
|
||||
"Count"
|
||||
} else {
|
||||
|
@ -1079,8 +1052,8 @@ fn draw_processes_table<B: backend::Backend>(
|
|||
Block::default()
|
||||
.title("Processes")
|
||||
.borders(Borders::ALL)
|
||||
.border_style(match app_state.current_application_position {
|
||||
app::ApplicationPosition::Process => *CANVAS_HIGHLIGHTED_BORDER_STYLE,
|
||||
.border_style(match app_state.current_widget_selected {
|
||||
app::WidgetPosition::Process => *CANVAS_HIGHLIGHTED_BORDER_STYLE,
|
||||
_ => *CANVAS_BORDER_STYLE,
|
||||
}),
|
||||
)
|
||||
|
|
|
@ -1,7 +1,6 @@
|
|||
// TODO: Store like three minutes of data, then change how much is shown based on scaling!
|
||||
pub const STALE_MAX_MILLISECONDS: u64 = 180 * 1000; // We wish to store at most 60 seconds worth of data. This may change in the future, or be configurable.
|
||||
pub const STALE_MAX_MILLISECONDS: u128 = 60 * 1000; // How long to store data
|
||||
pub const TIME_STARTS_FROM: u64 = 60 * 1000;
|
||||
pub const TICK_RATE_IN_MILLISECONDS: u64 = 200; // We use this as it's a good value to work with.
|
||||
pub const TICK_RATE_IN_MILLISECONDS: u64 = 200; // How fast the screen refreshes
|
||||
pub const DEFAULT_REFRESH_RATE_IN_MILLISECONDS: u128 = 1000;
|
||||
pub const MAX_KEY_TIMEOUT_IN_MILLISECONDS: u128 = 1000;
|
||||
pub const NUM_COLOURS: i32 = 256;
|
||||
|
|
|
@ -2,12 +2,16 @@
|
|||
//! can actually handle.
|
||||
|
||||
use crate::{
|
||||
app::data_collection,
|
||||
app::{
|
||||
data_farmer,
|
||||
data_harvester::{self, processes::ProcessHarvest},
|
||||
App,
|
||||
},
|
||||
constants,
|
||||
utils::gen_util::{get_exact_byte_values, get_simple_byte_values},
|
||||
};
|
||||
use constants::*;
|
||||
use regex::Regex;
|
||||
use std::collections::HashMap;
|
||||
|
||||
#[derive(Default, Debug)]
|
||||
pub struct ConvertedNetworkData {
|
||||
|
@ -23,9 +27,9 @@ pub struct ConvertedNetworkData {
|
|||
pub struct ConvertedProcessData {
|
||||
pub pid: u32,
|
||||
pub name: String,
|
||||
pub cpu_usage: String,
|
||||
pub mem_usage: String,
|
||||
pub group: Vec<u32>,
|
||||
pub cpu_usage: f64,
|
||||
pub mem_usage: f64,
|
||||
pub group_pids: Vec<u32>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Default, Debug)]
|
||||
|
@ -54,22 +58,23 @@ impl From<&CpuPoint> for (f64, f64) {
|
|||
}
|
||||
}
|
||||
|
||||
pub fn update_temp_row(
|
||||
app_data: &data_collection::Data, temp_type: &data_collection::temperature::TemperatureType,
|
||||
) -> Vec<Vec<String>> {
|
||||
pub fn update_temp_row(app: &App) -> Vec<Vec<String>> {
|
||||
let mut sensor_vector: Vec<Vec<String>> = Vec::new();
|
||||
|
||||
if (&app_data.list_of_temperature_sensor).is_empty() {
|
||||
let current_data = &app.data_collection;
|
||||
let temp_type = &app.temperature_type;
|
||||
|
||||
if current_data.temp_harvest.is_empty() {
|
||||
sensor_vector.push(vec!["No Sensors Found".to_string(), "".to_string()])
|
||||
} else {
|
||||
for sensor in &app_data.list_of_temperature_sensor {
|
||||
for sensor in ¤t_data.temp_harvest {
|
||||
sensor_vector.push(vec![
|
||||
sensor.component_name.to_string(),
|
||||
(sensor.temperature.ceil() as u64).to_string()
|
||||
+ match temp_type {
|
||||
data_collection::temperature::TemperatureType::Celsius => "C",
|
||||
data_collection::temperature::TemperatureType::Kelvin => "K",
|
||||
data_collection::temperature::TemperatureType::Fahrenheit => "F",
|
||||
data_harvester::temperature::TemperatureType::Celsius => "C",
|
||||
data_harvester::temperature::TemperatureType::Kelvin => "K",
|
||||
data_harvester::temperature::TemperatureType::Fahrenheit => "F",
|
||||
},
|
||||
]);
|
||||
}
|
||||
|
@ -78,44 +83,18 @@ pub fn update_temp_row(
|
|||
sensor_vector
|
||||
}
|
||||
|
||||
pub fn update_disk_row(app_data: &data_collection::Data) -> Vec<Vec<String>> {
|
||||
pub fn update_disk_row(current_data: &data_farmer::DataCollection) -> Vec<Vec<String>> {
|
||||
let mut disk_vector: Vec<Vec<String>> = Vec::new();
|
||||
for disk in &app_data.list_of_disks {
|
||||
let io_activity = {
|
||||
let mut final_result = ("0B/s".to_string(), "0B/s".to_string());
|
||||
if app_data.list_of_io.len() > 2 {
|
||||
if let Some(io_package) = &app_data.list_of_io.last() {
|
||||
if let Some(trimmed_mount) = disk.name.to_string().split('/').last() {
|
||||
let prev_io_package = &app_data.list_of_io[app_data.list_of_io.len() - 2];
|
||||
|
||||
let io_hashmap = &io_package.io_hash;
|
||||
let prev_io_hashmap = &prev_io_package.io_hash;
|
||||
let time_difference = io_package
|
||||
.instant
|
||||
.duration_since(prev_io_package.instant)
|
||||
.as_secs_f64();
|
||||
if io_hashmap.contains_key(trimmed_mount)
|
||||
&& prev_io_hashmap.contains_key(trimmed_mount)
|
||||
{
|
||||
// Ideally change this...
|
||||
let ele = &io_hashmap[trimmed_mount];
|
||||
let prev = &prev_io_hashmap[trimmed_mount];
|
||||
let read_bytes_per_sec = ((ele.read_bytes - prev.read_bytes) as f64
|
||||
/ time_difference) as u64;
|
||||
let write_bytes_per_sec = ((ele.write_bytes - prev.write_bytes) as f64
|
||||
/ time_difference) as u64;
|
||||
let converted_read = get_simple_byte_values(read_bytes_per_sec, false);
|
||||
let converted_write =
|
||||
get_simple_byte_values(write_bytes_per_sec, false);
|
||||
final_result = (
|
||||
format!("{:.*}{}/s", 0, converted_read.0, converted_read.1),
|
||||
format!("{:.*}{}/s", 0, converted_write.0, converted_write.1),
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
final_result
|
||||
for (itx, disk) in current_data.disk_harvest.iter().enumerate() {
|
||||
let io_activity = if current_data.io_labels.len() > itx {
|
||||
let converted_read = get_simple_byte_values(current_data.io_labels[itx].0, false);
|
||||
let converted_write = get_simple_byte_values(current_data.io_labels[itx].1, false);
|
||||
(
|
||||
format!("{:.*}{}/s", 0, converted_read.0, converted_read.1),
|
||||
format!("{:.*}{}/s", 0, converted_write.0, converted_write.1),
|
||||
)
|
||||
} else {
|
||||
("0B/s".to_string(), "0B/s".to_string())
|
||||
};
|
||||
|
||||
let converted_free_space = get_simple_byte_values(disk.free_space, false);
|
||||
|
@ -140,325 +119,184 @@ pub fn update_disk_row(app_data: &data_collection::Data) -> Vec<Vec<String>> {
|
|||
disk_vector
|
||||
}
|
||||
|
||||
pub fn simple_update_process_row(
|
||||
app_data: &data_collection::Data, matching_string: &str, use_pid: bool,
|
||||
) -> (Vec<ConvertedProcessData>, Vec<ConvertedProcessData>) {
|
||||
let process_vector: Vec<ConvertedProcessData> = app_data
|
||||
.list_of_processes
|
||||
.iter()
|
||||
.filter(|process| {
|
||||
if use_pid {
|
||||
process
|
||||
.pid
|
||||
.to_string()
|
||||
.to_ascii_lowercase()
|
||||
.contains(matching_string)
|
||||
} else {
|
||||
process.name.to_ascii_lowercase().contains(matching_string)
|
||||
}
|
||||
})
|
||||
.map(|process| return_mapped_process(process, app_data))
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let mut grouped_process_vector: Vec<ConvertedProcessData> = Vec::new();
|
||||
if let Some(grouped_list_of_processes) = &app_data.grouped_list_of_processes {
|
||||
grouped_process_vector = grouped_list_of_processes
|
||||
.iter()
|
||||
.filter(|process| {
|
||||
if use_pid {
|
||||
process
|
||||
.pid
|
||||
.to_string()
|
||||
.to_ascii_lowercase()
|
||||
.contains(matching_string)
|
||||
} else {
|
||||
process.name.to_ascii_lowercase().contains(matching_string)
|
||||
}
|
||||
})
|
||||
.map(|process| return_mapped_process(process, app_data))
|
||||
.collect::<Vec<_>>();
|
||||
}
|
||||
|
||||
(process_vector, grouped_process_vector)
|
||||
}
|
||||
|
||||
pub fn regex_update_process_row(
|
||||
app_data: &data_collection::Data, regex_matcher: &std::result::Result<Regex, regex::Error>,
|
||||
use_pid: bool,
|
||||
) -> (Vec<ConvertedProcessData>, Vec<ConvertedProcessData>) {
|
||||
let process_vector: Vec<ConvertedProcessData> = app_data
|
||||
.list_of_processes
|
||||
.iter()
|
||||
.filter(|process| {
|
||||
if let Ok(matcher) = regex_matcher {
|
||||
if use_pid {
|
||||
matcher.is_match(&process.pid.to_string())
|
||||
} else {
|
||||
matcher.is_match(&process.name)
|
||||
}
|
||||
} else {
|
||||
true
|
||||
}
|
||||
})
|
||||
.map(|process| return_mapped_process(process, app_data))
|
||||
.collect::<Vec<_>>();
|
||||
|
||||
let mut grouped_process_vector: Vec<ConvertedProcessData> = Vec::new();
|
||||
if let Some(grouped_list_of_processes) = &app_data.grouped_list_of_processes {
|
||||
grouped_process_vector = grouped_list_of_processes
|
||||
.iter()
|
||||
.filter(|process| {
|
||||
if let Ok(matcher) = regex_matcher {
|
||||
if use_pid {
|
||||
matcher.is_match(&process.pid.to_string())
|
||||
} else {
|
||||
matcher.is_match(&process.name)
|
||||
}
|
||||
} else {
|
||||
true
|
||||
}
|
||||
})
|
||||
.map(|process| return_mapped_process(process, app_data))
|
||||
.collect::<Vec<_>>();
|
||||
}
|
||||
|
||||
(process_vector, grouped_process_vector)
|
||||
}
|
||||
|
||||
fn return_mapped_process(
|
||||
process: &data_collection::processes::ProcessData, app_data: &data_collection::Data,
|
||||
) -> ConvertedProcessData {
|
||||
ConvertedProcessData {
|
||||
pid: process.pid,
|
||||
name: process.name.to_string(),
|
||||
cpu_usage: format!("{:.1}%", process.cpu_usage_percent),
|
||||
mem_usage: format!(
|
||||
"{:.1}%",
|
||||
if let Some(mem_usage) = process.mem_usage_percent {
|
||||
mem_usage
|
||||
} else if let Some(mem_usage_kb) = process.mem_usage_kb {
|
||||
if let Some(mem_data) = app_data.memory.last() {
|
||||
(mem_usage_kb / 1000) as f64 / mem_data.mem_total_in_mb as f64 * 100_f64 // TODO: [OPT] Get rid of this
|
||||
} else {
|
||||
0_f64
|
||||
}
|
||||
} else {
|
||||
0_f64
|
||||
}
|
||||
),
|
||||
group: vec![],
|
||||
}
|
||||
}
|
||||
|
||||
pub fn update_cpu_data_points(
|
||||
show_avg_cpu: bool, app_data: &data_collection::Data,
|
||||
show_avg_cpu: bool, current_data: &data_farmer::DataCollection,
|
||||
) -> Vec<ConvertedCpuData> {
|
||||
let mut cpu_data_vector: Vec<ConvertedCpuData> = Vec::new();
|
||||
let mut cpu_collection: Vec<Vec<CpuPoint>> = Vec::new();
|
||||
let current_time = current_data.current_instant;
|
||||
let cpu_listing_offset = if show_avg_cpu { 0 } else { 1 };
|
||||
|
||||
if !app_data.list_of_cpu_packages.is_empty() {
|
||||
// I'm sorry for the following if statement but I couldn't be bothered here...
|
||||
for cpu_num in (if show_avg_cpu { 0 } else { 1 })
|
||||
..app_data.list_of_cpu_packages.last().unwrap().cpu_vec.len()
|
||||
{
|
||||
let mut this_cpu_data: Vec<CpuPoint> = Vec::new();
|
||||
for (time, data) in ¤t_data.timed_data_vec {
|
||||
let time_from_start: f64 = (TIME_STARTS_FROM as f64
|
||||
- current_time.duration_since(*time).as_millis() as f64)
|
||||
.floor();
|
||||
|
||||
for data in &app_data.list_of_cpu_packages {
|
||||
let current_time = std::time::Instant::now();
|
||||
let current_cpu_usage = data.cpu_vec[cpu_num].cpu_usage;
|
||||
|
||||
let new_entry = CpuPoint {
|
||||
time: ((TIME_STARTS_FROM as f64
|
||||
- current_time.duration_since(data.instant).as_millis() as f64)
|
||||
* 10_f64)
|
||||
.floor(),
|
||||
usage: current_cpu_usage,
|
||||
};
|
||||
|
||||
// Now, inject our joining points...
|
||||
if let Some(previous_element_data) = this_cpu_data.last().cloned() {
|
||||
for idx in 0..50 {
|
||||
this_cpu_data.push(CpuPoint {
|
||||
time: previous_element_data.time
|
||||
+ ((new_entry.time - previous_element_data.time) / 50.0
|
||||
* f64::from(idx)),
|
||||
usage: previous_element_data.usage
|
||||
+ ((new_entry.usage - previous_element_data.usage) / 50.0
|
||||
* f64::from(idx)),
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
this_cpu_data.push(new_entry);
|
||||
for (itx, cpu) in data.cpu_data.iter().enumerate() {
|
||||
if !show_avg_cpu && itx == 0 {
|
||||
continue;
|
||||
}
|
||||
|
||||
cpu_collection.push(this_cpu_data);
|
||||
}
|
||||
// Check if the vector exists yet
|
||||
let itx_offset = itx - cpu_listing_offset;
|
||||
if cpu_data_vector.len() <= itx_offset {
|
||||
cpu_data_vector.push(ConvertedCpuData::default());
|
||||
cpu_data_vector[itx_offset].cpu_name = if show_avg_cpu && itx_offset == 0 {
|
||||
"AVG".to_string()
|
||||
} else {
|
||||
current_data.cpu_harvest[itx].cpu_name.to_uppercase()
|
||||
};
|
||||
}
|
||||
|
||||
// Finally, add it all onto the end
|
||||
for (i, data) in cpu_collection.iter().enumerate() {
|
||||
if !app_data.list_of_cpu_packages.is_empty() {
|
||||
// Commented out: this version includes the percentage in the label...
|
||||
// cpu_data_vector.push((
|
||||
// // + 1 to skip total CPU if show_avg_cpu is false
|
||||
// format!(
|
||||
// "{:4}: ",
|
||||
// &*(app_data.list_of_cpu_packages.last().unwrap().cpu_vec[i + if show_avg_cpu { 0 } else { 1 }].cpu_name)
|
||||
// )
|
||||
// .to_uppercase() + &format!("{:3}%", (data.last().unwrap_or(&(0_f64, 0_f64)).1.round() as u64)),
|
||||
// data.clone(),
|
||||
// ))
|
||||
cpu_data_vector.push(ConvertedCpuData {
|
||||
cpu_name: format!(
|
||||
"{} ",
|
||||
if show_avg_cpu && i == 0 {
|
||||
"AVG"
|
||||
} else {
|
||||
&*(app_data.list_of_cpu_packages.last().unwrap().cpu_vec
|
||||
[i + if show_avg_cpu { 0 } else { 1 }]
|
||||
.cpu_name)
|
||||
}
|
||||
)
|
||||
.to_uppercase(),
|
||||
cpu_data: data.clone(),
|
||||
//Insert joiner points
|
||||
for &(joiner_offset, joiner_val) in &cpu.1 {
|
||||
let offset_time = time_from_start - joiner_offset as f64;
|
||||
cpu_data_vector[itx_offset].cpu_data.push(CpuPoint {
|
||||
time: offset_time,
|
||||
usage: joiner_val,
|
||||
});
|
||||
}
|
||||
|
||||
cpu_data_vector[itx_offset].cpu_data.push(CpuPoint {
|
||||
time: time_from_start,
|
||||
usage: cpu.0,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
cpu_data_vector
|
||||
}
|
||||
|
||||
pub fn update_mem_data_points(app_data: &data_collection::Data) -> Vec<(f64, f64)> {
|
||||
convert_mem_data(&app_data.memory)
|
||||
}
|
||||
|
||||
pub fn update_swap_data_points(app_data: &data_collection::Data) -> Vec<(f64, f64)> {
|
||||
convert_mem_data(&app_data.swap)
|
||||
}
|
||||
|
||||
pub fn update_mem_data_values(app_data: &data_collection::Data) -> Vec<(u64, u64)> {
|
||||
let mut result: Vec<(u64, u64)> = Vec::new();
|
||||
result.push(get_most_recent_mem_values(&app_data.memory));
|
||||
result.push(get_most_recent_mem_values(&app_data.swap));
|
||||
|
||||
result
|
||||
}
|
||||
|
||||
fn get_most_recent_mem_values(mem_data: &[data_collection::mem::MemData]) -> (u64, u64) {
|
||||
let mut result: (u64, u64) = (0, 0);
|
||||
|
||||
if !mem_data.is_empty() {
|
||||
if let Some(most_recent) = mem_data.last() {
|
||||
result.0 = most_recent.mem_used_in_mb;
|
||||
result.1 = most_recent.mem_total_in_mb;
|
||||
}
|
||||
}
|
||||
|
||||
result
|
||||
}
|
||||
|
||||
fn convert_mem_data(mem_data: &[data_collection::mem::MemData]) -> Vec<(f64, f64)> {
|
||||
pub fn update_mem_data_points(current_data: &data_farmer::DataCollection) -> Vec<(f64, f64)> {
|
||||
let mut result: Vec<(f64, f64)> = Vec::new();
|
||||
let current_time = current_data.current_instant;
|
||||
|
||||
for data in mem_data {
|
||||
let current_time = std::time::Instant::now();
|
||||
let new_entry = (
|
||||
((TIME_STARTS_FROM as f64
|
||||
- current_time.duration_since(data.instant).as_millis() as f64)
|
||||
* 10_f64)
|
||||
.floor(),
|
||||
if data.mem_total_in_mb == 0 {
|
||||
-1000.0
|
||||
} else {
|
||||
(data.mem_used_in_mb as f64 * 100_f64) / data.mem_total_in_mb as f64
|
||||
},
|
||||
);
|
||||
for (time, data) in ¤t_data.timed_data_vec {
|
||||
let time_from_start: f64 = (TIME_STARTS_FROM as f64
|
||||
- current_time.duration_since(*time).as_millis() as f64)
|
||||
.floor();
|
||||
|
||||
// Now, inject our joining points...
|
||||
if !result.is_empty() {
|
||||
let previous_element_data = *(result.last().unwrap());
|
||||
for idx in 0..50 {
|
||||
result.push((
|
||||
previous_element_data.0
|
||||
+ ((new_entry.0 - previous_element_data.0) / 50.0 * f64::from(idx)),
|
||||
previous_element_data.1
|
||||
+ ((new_entry.1 - previous_element_data.1) / 50.0 * f64::from(idx)),
|
||||
));
|
||||
}
|
||||
//Insert joiner points
|
||||
for &(joiner_offset, joiner_val) in &data.mem_data.1 {
|
||||
let offset_time = time_from_start - joiner_offset as f64;
|
||||
result.push((offset_time, joiner_val));
|
||||
}
|
||||
|
||||
result.push(new_entry);
|
||||
result.push((time_from_start, data.mem_data.0));
|
||||
}
|
||||
|
||||
result
|
||||
}
|
||||
|
||||
pub fn update_network_data_points(app_data: &data_collection::Data) -> ConvertedNetworkData {
|
||||
convert_network_data_points(&app_data.network)
|
||||
pub fn update_swap_data_points(current_data: &data_farmer::DataCollection) -> Vec<(f64, f64)> {
|
||||
let mut result: Vec<(f64, f64)> = Vec::new();
|
||||
let current_time = current_data.current_instant;
|
||||
|
||||
for (time, data) in ¤t_data.timed_data_vec {
|
||||
let time_from_start: f64 = (TIME_STARTS_FROM as f64
|
||||
- current_time.duration_since(*time).as_millis() as f64)
|
||||
.floor();
|
||||
|
||||
//Insert joiner points
|
||||
for &(joiner_offset, joiner_val) in &data.swap_data.1 {
|
||||
let offset_time = time_from_start - joiner_offset as f64;
|
||||
result.push((offset_time, joiner_val));
|
||||
}
|
||||
|
||||
result.push((time_from_start, data.swap_data.0));
|
||||
}
|
||||
|
||||
result
|
||||
}
|
||||
|
||||
pub fn update_mem_labels(current_data: &data_farmer::DataCollection) -> (String, String) {
|
||||
let mem_label = if current_data.memory_harvest.mem_total_in_mb == 0 {
|
||||
"".to_string()
|
||||
} else {
|
||||
"RAM:".to_string()
|
||||
+ &format!(
|
||||
"{:3.0}%",
|
||||
(current_data.memory_harvest.mem_used_in_mb as f64 * 100.0
|
||||
/ current_data.memory_harvest.mem_total_in_mb as f64)
|
||||
.round()
|
||||
) + &format!(
|
||||
" {:.1}GB/{:.1}GB",
|
||||
current_data.memory_harvest.mem_used_in_mb as f64 / 1024.0,
|
||||
current_data.memory_harvest.mem_total_in_mb as f64 / 1024.0
|
||||
)
|
||||
};
|
||||
|
||||
let swap_label = if current_data.swap_harvest.mem_total_in_mb == 0 {
|
||||
"".to_string()
|
||||
} else {
|
||||
"SWP:".to_string()
|
||||
+ &format!(
|
||||
"{:3.0}%",
|
||||
(current_data.swap_harvest.mem_used_in_mb as f64 * 100.0
|
||||
/ current_data.swap_harvest.mem_total_in_mb as f64)
|
||||
.round()
|
||||
) + &format!(
|
||||
" {:.1}GB/{:.1}GB",
|
||||
current_data.swap_harvest.mem_used_in_mb as f64 / 1024.0,
|
||||
current_data.swap_harvest.mem_total_in_mb as f64 / 1024.0
|
||||
)
|
||||
};
|
||||
|
||||
(mem_label, swap_label)
|
}

pub fn convert_network_data_points(
network_data: &[data_collection::network::NetworkData],
current_data: &data_farmer::DataCollection,
) -> ConvertedNetworkData {
let mut rx: Vec<(f64, f64)> = Vec::new();
let mut tx: Vec<(f64, f64)> = Vec::new();

for data in network_data {
let current_time = std::time::Instant::now();
let rx_data = (
((TIME_STARTS_FROM as f64
- current_time.duration_since(data.instant).as_millis() as f64)
* 10_f64)
.floor(),
if data.rx > 0 {
(data.rx as f64).log(2.0)
let current_time = current_data.current_instant;
for (time, data) in &current_data.timed_data_vec {
let time_from_start: f64 = (TIME_STARTS_FROM as f64
- current_time.duration_since(*time).as_millis() as f64)
.floor();

//Insert joiner points
for &(joiner_offset, joiner_val) in &data.rx_data.1 {
let offset_time = time_from_start - joiner_offset as f64;
rx.push((
offset_time,
if joiner_val > 0.0 {
(joiner_val).log(2.0)
} else {
0.0
},
));
}

for &(joiner_offset, joiner_val) in &data.tx_data.1 {
let offset_time = time_from_start - joiner_offset as f64;
tx.push((
offset_time,
if joiner_val > 0.0 {
(joiner_val).log(2.0)
} else {
0.0
},
));
}

rx.push((
time_from_start,
if data.rx_data.0 > 0.0 {
(data.rx_data.0).log(2.0)
} else {
0.0
},
);
let tx_data = (
((TIME_STARTS_FROM as f64
- current_time.duration_since(data.instant).as_millis() as f64)
* 10_f64)
.floor(),
if data.tx > 0 {
(data.tx as f64).log(2.0)
));
tx.push((
time_from_start,
if data.rx_data.0 > 0.0 {
(data.rx_data.0).log(2.0)
} else {
0.0
},
);

//debug!("Plotting: {:?} bytes rx, {:?} bytes tx", rx_data, tx_data);

// Now, inject our joining points...
if !rx.is_empty() {
let previous_element_data = *(rx.last().unwrap());
for idx in 0..50 {
rx.push((
previous_element_data.0
+ ((rx_data.0 - previous_element_data.0) / 50.0 * f64::from(idx)),
previous_element_data.1
+ ((rx_data.1 - previous_element_data.1) / 50.0 * f64::from(idx)),
));
}
}

// Now, inject our joining points...
if !tx.is_empty() {
let previous_element_data = *(tx.last().unwrap());
for idx in 0..50 {
tx.push((
previous_element_data.0
+ ((tx_data.0 - previous_element_data.0) / 50.0 * f64::from(idx)),
previous_element_data.1
+ ((tx_data.1 - previous_element_data.1) / 50.0 * f64::from(idx)),
));
}
}

rx.push(rx_data);
tx.push(tx_data);
));
}

let total_rx_converted_result: (f64, String);

@@ -466,13 +304,8 @@ pub fn convert_network_data_points(
let total_tx_converted_result: (f64, String);
let tx_converted_result: (f64, String);

if let Some(last_num_bytes_entry) = network_data.last() {
rx_converted_result = get_exact_byte_values(last_num_bytes_entry.rx, false);
total_rx_converted_result = get_exact_byte_values(last_num_bytes_entry.total_rx, false)
} else {
rx_converted_result = get_exact_byte_values(0, false);
total_rx_converted_result = get_exact_byte_values(0, false);
}
rx_converted_result = get_exact_byte_values(current_data.network_harvest.rx, false);
total_rx_converted_result = get_exact_byte_values(current_data.network_harvest.total_rx, false);
let rx_display = format!("{:.*}{}", 1, rx_converted_result.0, rx_converted_result.1);
let total_rx_display = if cfg!(not(target_os = "windows")) {
format!(

@@ -483,13 +316,8 @@ pub fn convert_network_data_points(
"N/A".to_string()
};

if let Some(last_num_bytes_entry) = network_data.last() {
tx_converted_result = get_exact_byte_values(last_num_bytes_entry.tx, false);
total_tx_converted_result = get_exact_byte_values(last_num_bytes_entry.total_tx, false);
} else {
tx_converted_result = get_exact_byte_values(0, false);
total_tx_converted_result = get_exact_byte_values(0, false);
}
tx_converted_result = get_exact_byte_values(current_data.network_harvest.tx, false);
total_tx_converted_result = get_exact_byte_values(current_data.network_harvest.total_tx, false);
let tx_display = format!("{:.*}{}", 1, tx_converted_result.0, tx_converted_result.1);
let total_tx_display = if cfg!(not(target_os = "windows")) {
format!(

@@ -509,3 +337,45 @@ pub fn convert_network_data_points(
total_tx_display,
}
}
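
Two ideas drive the conversion above: values are plotted as log2(bytes) so idle and saturated periods fit on one axis, and extra "joiner" points are interpolated between consecutive samples so the plotted line stays continuous. Below is a minimal, self-contained sketch of that transform; the sample tuple, time scale, and the 50-point joiner count are illustrative stand-ins, not bottom's actual data structures.

```rust
/// Convert (time_offset, bytes) samples into chart points: log2-scaled values
/// with 50 interpolated "joiner" points between consecutive samples.
fn to_chart_points(samples: &[(f64, u64)]) -> Vec<(f64, f64)> {
    let mut points: Vec<(f64, f64)> = Vec::new();
    for &(time_offset, bytes) in samples {
        let value = if bytes > 0 { (bytes as f64).log2() } else { 0.0 };
        // Interpolate between the previously plotted sample and this one.
        if let Some((prev_x, prev_y)) = points.last().copied() {
            for idx in 0..50 {
                let t = f64::from(idx) / 50.0;
                points.push((
                    prev_x + (time_offset - prev_x) * t,
                    prev_y + (value - prev_y) * t,
                ));
            }
        }
        points.push((time_offset, value));
    }
    points
}

fn main() {
    let samples: [(f64, u64); 3] = [(0.0, 1024), (1000.0, 4096), (2000.0, 0)];
    let points = to_chart_points(&samples);
    println!("{} chart points from {} samples", points.len(), samples.len());
}
```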

pub fn convert_process_data(
current_data: &data_farmer::DataCollection,
) -> (HashMap<u32, ProcessHarvest>, Vec<ConvertedProcessData>) {
let mut single_list = HashMap::new();

// cpu, mem, pids
let mut grouped_hashmap: HashMap<String, (u32, f64, f64, Vec<u32>)> =
std::collections::HashMap::new();

// Go through every single process in the list... and build a hashmap + single list
for process in &(current_data).process_harvest {
let entry = grouped_hashmap.entry(process.name.clone()).or_insert((
process.pid,
0.0,
0.0,
Vec::new(),
));

(*entry).1 += process.cpu_usage_percent;
(*entry).2 += process.mem_usage_percent;
(*entry).3.push(process.pid);

single_list.insert(process.pid, process.clone());
}

let grouped_list: Vec<ConvertedProcessData> = grouped_hashmap
.iter()
.map(|(name, process_details)| {
let p = process_details.clone();
ConvertedProcessData {
pid: p.0,
name: name.to_string(),
cpu_usage: p.1,
mem_usage: p.2,
group_pids: p.3,
}
})
.collect::<Vec<_>>();

(single_list, grouped_list)
}
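
convert_process_data builds a per-PID map alongside a per-name rollup so the grouped view (`-g` / `Tab`) can show one row per name with summed CPU and memory and every member PID. A stripped-down sketch of the same grouping step, with a hypothetical Proc struct standing in for the real harvest type:

```rust
use std::collections::HashMap;

struct Proc {
    pid: u32,
    name: String,
    cpu: f64,
    mem: f64,
}

/// Fold processes sharing a name into (total cpu, total mem, member pids).
fn group_by_name(procs: &[Proc]) -> HashMap<String, (f64, f64, Vec<u32>)> {
    let mut grouped: HashMap<String, (f64, f64, Vec<u32>)> = HashMap::new();
    for p in procs {
        let entry = grouped.entry(p.name.clone()).or_insert((0.0, 0.0, Vec::new()));
        entry.0 += p.cpu;
        entry.1 += p.mem;
        entry.2.push(p.pid);
    }
    grouped
}

fn main() {
    let procs = vec![
        Proc { pid: 1, name: "firefox".to_string(), cpu: 2.0, mem: 1.5 },
        Proc { pid: 2, name: "firefox".to_string(), cpu: 3.0, mem: 2.5 },
        Proc { pid: 3, name: "btm".to_string(), cpu: 0.5, mem: 0.1 },
    ];
    let grouped = group_by_name(&procs);
    assert_eq!(grouped["firefox"].2, vec![1, 2]);
    println!("{} grouped rows from {} processes", grouped.len(), procs.len());
}
```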

262
src/main.rs
262
src/main.rs

@@ -35,17 +35,16 @@ mod canvas;
mod constants;
mod data_conversion;

use app::data_collection;
use app::data_collection::processes::ProcessData;
use app::data_harvester::{self, processes::ProcessSorting};
use constants::TICK_RATE_IN_MILLISECONDS;
use data_conversion::*;
use std::collections::BTreeMap;
use utils::error::{self, BottomError};

enum Event<I, J> {
KeyInput(I),
MouseInput(J),
Update(Box<data_collection::Data>),
Update(Box<data_harvester::Data>),
Clean,
}

enum ResetEvent {

@@ -71,9 +70,9 @@ fn main() -> error::Result<()> {
(@arg LEFT_LEGEND: -l --left_legend "Puts external chart legends on the left side rather than the default right side.")
(@arg USE_CURR_USAGE: -u --current_usage "Within Linux, sets a process' CPU usage to be based on the total current CPU usage, rather than assuming 100% usage.")
//(@arg CONFIG_LOCATION: -co --config +takes_value "Sets the location of the config file. Expects a config file in the JSON format.")
(@arg BASIC_MODE: -b --basic "Sets bottom to basic mode, not showing graphs and only showing basic tables.")
//(@arg BASIC_MODE: -b --basic "Sets bottom to basic mode, not showing graphs and only showing basic tables.")
(@arg GROUP_PROCESSES: -g --group "Groups processes with the same name together on launch.")
(@arg SEARCH_DEFAULT_USE_SIMPLE: -s --simple_search "Uses a simple case-insensitive string comparison to search processes by default.")
(@arg CASE_INSENSITIVE_DEFAULT: -i --case_insensitive "Do not match case when searching processes by default.")
)
.get_matches();

@@ -104,11 +103,11 @@ fn main() -> error::Result<()> {

// Set other settings
let temperature_type = if matches.is_present("FAHRENHEIT") {
data_collection::temperature::TemperatureType::Fahrenheit
data_harvester::temperature::TemperatureType::Fahrenheit
} else if matches.is_present("KELVIN") {
data_collection::temperature::TemperatureType::Kelvin
data_harvester::temperature::TemperatureType::Kelvin
} else {
data_collection::temperature::TemperatureType::Celsius
data_harvester::temperature::TemperatureType::Celsius
};
let show_average_cpu = matches.is_present("AVG_CPU");
let use_dot = matches.is_present("DOT_MARKER");

@@ -131,8 +130,8 @@ fn main() -> error::Result<()> {
}

// Set default search method
if matches.is_present("SEARCH_DEFAULT_USE_SIMPLE") {
app.use_simple = true;
if matches.is_present("CASE_INSENSITIVE_DEFAULT") {
app.ignore_case = true;
}

// Set up up tui and crossterm

@@ -175,15 +174,24 @@ fn main() -> error::Result<()> {
});
}

// Cleaning loop
{
let tx = tx.clone();
thread::spawn(move || loop {
thread::sleep(Duration::from_millis(
constants::STALE_MAX_MILLISECONDS as u64 + 5000,
));
tx.send(Event::Clean).unwrap();
});
}
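
The cleaning loop is deliberately simple: a dedicated thread sleeps past the staleness window, then nudges the main event loop over the same mpsc channel used for data updates, so the pruning itself happens on the thread that owns the data. A stripped-down sketch of that pattern; the Event enum, interval, and handling here are illustrative, not bottom's actual definitions:

```rust
use std::sync::mpsc;
use std::thread;
use std::time::Duration;

enum Event {
    Clean,
}

fn main() {
    let (tx, rx) = mpsc::channel();
    {
        // Clone the sender so other producers (input, data updates) could share it.
        let tx = tx.clone();
        thread::spawn(move || loop {
            thread::sleep(Duration::from_millis(100));
            if tx.send(Event::Clean).is_err() {
                break; // Receiver dropped; stop the cleaner thread.
            }
        });
    }

    // Event loop: handle a few clean requests, then exit.
    for _ in 0..3 {
        match rx.recv() {
            Ok(Event::Clean) => println!("pruning stale data..."),
            Err(_) => break,
        }
    }
}
```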
// Event loop
let (rtx, rrx) = mpsc::channel();
{
let tx = tx;
let mut first_run = true;
let temp_type = app.temperature_type.clone();
thread::spawn(move || {
let tx = tx.clone();
let mut data_state = data_collection::DataState::default();
let mut data_state = data_harvester::DataState::default();
data_state.init();
data_state.set_temperature_type(temp_type);
data_state.set_use_current_cpu_total(use_current_cpu_total);

@@ -191,35 +199,27 @@ fn main() -> error::Result<()> {
if let Ok(message) = rrx.try_recv() {
match message {
ResetEvent::Reset => {
//debug!("Received reset message");
first_run = true;
data_state.data = app::data_collection::Data::default();
data_state.data.first_run_cleanup();
}
}
}
futures::executor::block_on(data_state.update_data());
tx.send(Event::Update(Box::from(data_state.data.clone())))
.unwrap();

if first_run {
// Fix for if you set a really long time for update periods (and just gives a faster first value)
thread::sleep(Duration::from_millis(250));
first_run = false;
} else {
thread::sleep(Duration::from_millis(update_rate_in_milliseconds as u64));
}
.unwrap(); // TODO: [UNWRAP] Might be required, it's in a closure and idk how to deal with it
thread::sleep(Duration::from_millis(update_rate_in_milliseconds as u64));
}
});
}

loop {
// TODO: [OPT] this should not block... let's properly use tick rates and non-blocking, okay?
if let Ok(recv) = rx.recv_timeout(Duration::from_millis(TICK_RATE_IN_MILLISECONDS)) {
match recv {
Event::KeyInput(event) => {
if event.modifiers.is_empty() {
// If only a code, and no modifiers, don't bother...

// Required to catch for while typing
// Required catch for searching - otherwise you couldn't search with q.
if event.code == KeyCode::Char('q') && !app.is_in_search_widget() {
break;
}

@@ -243,7 +243,7 @@ fn main() -> error::Result<()> {
if let KeyModifiers::CONTROL = event.modifiers {
match event.code {
KeyCode::Char('c') => break,
KeyCode::Char('f') => app.toggle_searching(), // Note that this is fine for now, assuming '/' does not do anything other than search.
KeyCode::Char('f') => app.enable_searching(),
KeyCode::Left | KeyCode::Char('h') => app.move_left(),
KeyCode::Right | KeyCode::Char('l') => app.move_right(),
KeyCode::Up | KeyCode::Char('k') => app.move_up(),

@@ -255,7 +255,7 @@ fn main() -> error::Result<()> {
app.reset();
}
}
KeyCode::Char('s') => app.toggle_simple_search(),
// TODO: [SEARCH] Rename "simple" search to just... search without cases...
KeyCode::Char('a') => app.skip_cursor_beginning(),
KeyCode::Char('e') => app.skip_cursor_end(),
_ => {}

@@ -264,7 +264,7 @@ fn main() -> error::Result<()> {
}

if app.update_process_gui {
handle_process_sorting(&mut app);
update_final_process_list(&mut app);
app.update_process_gui = false;
}
}

@@ -274,38 +274,54 @@ fn main() -> error::Result<()> {
_ => {}
},
Event::Update(data) => {
// NOTE TO SELF - data is refreshed into app state HERE! That means, if it is
// frozen, then, app.data is never refreshed, until unfrozen!
if !app.is_frozen {
app.data = *data;
app.data_collection.eat_data(&data);

handle_process_sorting(&mut app);
// Convert all data into tui-compliant components

// Convert all data into tui components
let network_data = update_network_data_points(&app.data);
// Network
let network_data = convert_network_data_points(&app.data_collection);
app.canvas_data.network_data_rx = network_data.rx;
app.canvas_data.network_data_tx = network_data.tx;
app.canvas_data.rx_display = network_data.rx_display;
app.canvas_data.tx_display = network_data.tx_display;
app.canvas_data.total_rx_display = network_data.total_rx_display;
app.canvas_data.total_tx_display = network_data.total_tx_display;
app.canvas_data.disk_data = update_disk_row(&app.data);
app.canvas_data.temp_sensor_data =
update_temp_row(&app.data, &app.temperature_type);
app.canvas_data.mem_data = update_mem_data_points(&app.data);
app.canvas_data.memory_labels = update_mem_data_values(&app.data);
app.canvas_data.swap_data = update_swap_data_points(&app.data);

// Disk
app.canvas_data.disk_data = update_disk_row(&app.data_collection);

// Temperatures
app.canvas_data.temp_sensor_data = update_temp_row(&app);
// Memory
app.canvas_data.mem_data = update_mem_data_points(&app.data_collection);
app.canvas_data.swap_data = update_swap_data_points(&app.data_collection);
let memory_and_swap_labels = update_mem_labels(&app.data_collection);
app.canvas_data.mem_label = memory_and_swap_labels.0;
app.canvas_data.swap_label = memory_and_swap_labels.1;

// CPU
app.canvas_data.cpu_data =
update_cpu_data_points(app.show_average_cpu, &app.data);
update_cpu_data_points(app.show_average_cpu, &app.data_collection);

// Processes
let (single, grouped) = convert_process_data(&app.data_collection);
app.canvas_data.process_data = single;
app.canvas_data.grouped_process_data = grouped;
update_final_process_list(&mut app);
}
}
Event::Clean => {
app.data_collection
.clean_data(constants::STALE_MAX_MILLISECONDS);
}
}
}

// Quick fix for tab updating the table headers
if let data_collection::processes::ProcessSorting::PID = &app.process_sorting_type {
if let data_harvester::processes::ProcessSorting::PID = &app.process_sorting_type {
if app.is_grouped() {
app.process_sorting_type = data_collection::processes::ProcessSorting::CPU; // Go back to default, negate PID for group
app.process_sorting_type = data_harvester::processes::ProcessSorting::CPU; // Go back to default, negate PID for group
app.process_sorting_reverse = true;
}
}

@@ -322,94 +338,6 @@ fn main() -> error::Result<()> {
Ok(())
}

type TempProcess = (f64, Option<f64>, Option<u64>, Vec<u32>);

fn handle_process_sorting(app: &mut app::App) {
// Handle combining multi-pid processes to form one entry in table.
// This was done this way to save time and avoid code
// duplication... sorry future me. Really.

// First, convert this all into a BTreeMap. The key is by name. This
// pulls double duty by allowing us to combine entries AND it sorts!

// Fields for tuple: CPU%, MEM%, MEM_KB, PID_VEC
let mut process_map: BTreeMap<String, TempProcess> = BTreeMap::new();
for process in &app.data.list_of_processes {
let entry_val =
process_map
.entry(process.name.clone())
.or_insert((0.0, None, None, vec![]));
if let Some(mem_usage) = process.mem_usage_percent {
entry_val.0 += process.cpu_usage_percent;
if let Some(m) = &mut entry_val.1 {
*m += mem_usage;
}
entry_val.3.push(process.pid);
} else if let Some(mem_usage_kb) = process.mem_usage_kb {
entry_val.0 += process.cpu_usage_percent;
if let Some(m) = &mut entry_val.2 {
*m += mem_usage_kb;
}
entry_val.3.push(process.pid);
}
}

// Now... turn this back into the exact same vector... but now with merged processes!
app.data.grouped_list_of_processes = Some(
process_map
.iter()
.map(|(name, data)| {
ProcessData {
pid: 0, // Irrelevant
cpu_usage_percent: data.0,
mem_usage_percent: data.1,
mem_usage_kb: data.2,
name: name.clone(),
pid_vec: Some(data.3.clone()),
}
})
.collect::<Vec<_>>(),
);

if let Some(grouped_list_of_processes) = &mut app.data.grouped_list_of_processes {
if let data_collection::processes::ProcessSorting::PID = &app.process_sorting_type {
data_collection::processes::sort_processes(
grouped_list_of_processes,
&data_collection::processes::ProcessSorting::CPU, // Go back to default, negate PID for group
true,
);
} else {
data_collection::processes::sort_processes(
grouped_list_of_processes,
&app.process_sorting_type,
app.process_sorting_reverse,
);
}
}

data_collection::processes::sort_processes(
&mut app.data.list_of_processes,
&app.process_sorting_type,
app.process_sorting_reverse,
);

let tuple_results = if app.use_simple {
simple_update_process_row(
&app.data,
&(app.get_current_search_query().to_ascii_lowercase()),
app.is_searching_with_pid(),
)
} else {
regex_update_process_row(
&app.data,
app.get_current_regex_matcher(),
app.is_searching_with_pid(),
)
};
app.canvas_data.process_data = tuple_results.0;
app.canvas_data.grouped_process_data = tuple_results.1;
}

fn cleanup(
terminal: &mut tui::terminal::Terminal<tui::backend::CrosstermBackend<std::io::Stdout>>,
) -> error::Result<()> {

@@ -420,3 +348,73 @@ fn cleanup(

Ok(())
}

fn update_final_process_list(app: &mut app::App) {
let mut filtered_process_data: Vec<ConvertedProcessData> = if app.is_grouped() {
app.canvas_data
.grouped_process_data
.clone()
.into_iter()
.filter(|process| {
if let Ok(matcher) = app.get_current_regex_matcher() {
matcher.is_match(&process.name)
} else {
true
}
})
.collect::<Vec<ConvertedProcessData>>()
} else {
app.canvas_data
.process_data
.iter()
.filter(|(_pid, process)| {
if let Ok(matcher) = app.get_current_regex_matcher() {
if app.is_searching_with_pid() {
matcher.is_match(&process.pid.to_string())
} else {
matcher.is_match(&process.name)
}
} else {
true
}
})
.map(|(_pid, process)| ConvertedProcessData {
pid: process.pid,
name: process.name.clone(),
cpu_usage: process.cpu_usage_percent,
mem_usage: process.mem_usage_percent,
group_pids: vec![process.pid],
})
.collect::<Vec<ConvertedProcessData>>()
};

sort_process_data(&mut filtered_process_data, app);
app.canvas_data.finalized_process_data = filtered_process_data;
}
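
The filtering step above keeps a row whenever the current matcher accepts its name (or PID, when searching by PID), and keeps everything if the pattern fails to compile. A small sketch of that idea using the regex crate the project already depends on; the helper, name list, and ignore-case flag here are hypothetical, echoing the new `-i`/`--case_insensitive` option:

```rust
use regex::RegexBuilder;

/// Keep the names matched by `query`, optionally ignoring case.
/// An invalid pattern filters nothing, mirroring the `else { true }` branch above.
fn filter_names<'a>(names: &[&'a str], query: &str, ignore_case: bool) -> Vec<&'a str> {
    match RegexBuilder::new(query).case_insensitive(ignore_case).build() {
        Ok(matcher) => names
            .iter()
            .copied()
            .filter(|name| matcher.is_match(name))
            .collect(),
        Err(_) => names.to_vec(),
    }
}

fn main() {
    let names = ["Firefox", "btm", "firefox-bin"];
    assert_eq!(filter_names(&names, "firefox", true).len(), 2);
    assert_eq!(filter_names(&names, "firefox", false).len(), 1);
}
```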

fn sort_process_data(to_sort_vec: &mut Vec<ConvertedProcessData>, app: &app::App) {
to_sort_vec.sort_by(|a, b| utils::gen_util::get_ordering(&a.name, &b.name, false));

match app.process_sorting_type {
ProcessSorting::CPU => {
to_sort_vec.sort_by(|a, b| {
utils::gen_util::get_ordering(a.cpu_usage, b.cpu_usage, app.process_sorting_reverse)
});
}
ProcessSorting::MEM => {
to_sort_vec.sort_by(|a, b| {
utils::gen_util::get_ordering(a.mem_usage, b.mem_usage, app.process_sorting_reverse)
});
}
ProcessSorting::NAME => to_sort_vec.sort_by(|a, b| {
utils::gen_util::get_ordering(&a.name, &b.name, app.process_sorting_reverse)
}),
ProcessSorting::PID => {
if !app.is_grouped() {
to_sort_vec.sort_by(|a, b| {
utils::gen_util::get_ordering(a.pid, b.pid, app.process_sorting_reverse)
});
}
}
}
}
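
sort_process_data leans on `sort_by` being a stable sort: it first orders rows by name unconditionally, then by the selected column, so rows that tie on that column stay alphabetized. A small sketch of that two-pass idea with plain tuples standing in for process rows:

```rust
use std::cmp::Ordering;

fn main() {
    // (name, cpu%) rows; two rows tie on CPU.
    let mut rows = vec![("zsh", 1.0), ("bash", 1.0), ("btm", 3.0)];

    // Pass 1: alphabetical order acts as the tie-breaker.
    rows.sort_by(|a, b| a.0.cmp(&b.0));
    // Pass 2: stable sort by CPU descending; equal CPUs keep the name order.
    rows.sort_by(|a, b| b.1.partial_cmp(&a.1).unwrap_or(Ordering::Equal));

    assert_eq!(rows, vec![("btm", 3.0), ("bash", 1.0), ("zsh", 1.0)]);
    println!("{:?}", rows);
}
```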

@@ -59,3 +59,29 @@ pub fn get_simple_byte_values(bytes: u64, spacing: bool) -> (f64, String) {
_ => (bytes as f64 / 1_000_000_000_000.0, "TB".to_string()),
}
}

/// Gotta get partial ordering? No problem, here's something to deal with it~
pub fn get_ordering<T: std::cmp::PartialOrd>(
a_val: T, b_val: T, reverse_order: bool,
) -> std::cmp::Ordering {
match a_val.partial_cmp(&b_val) {
Some(x) => match x {
Ordering::Greater => {
if reverse_order {
std::cmp::Ordering::Less
} else {
std::cmp::Ordering::Greater
}
}
Ordering::Less => {
if reverse_order {
std::cmp::Ordering::Greater
} else {
std::cmp::Ordering::Less
}
}
Ordering::Equal => Ordering::Equal,
},
None => Ordering::Equal,
}
}
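
Because the CPU and memory columns are f64 (which is only PartialOrd), `sort_by` needs a comparator that resolves the "no ordering" case (NaN) to Equal instead of panicking, which is what the helper above provides. A short usage sketch of the same idea; this is a simplified f64-only stand-in, not the project's generic helper:

```rust
use std::cmp::Ordering;

fn ordering_of(a: f64, b: f64, reverse: bool) -> Ordering {
    // NaN compares as Equal instead of aborting the sort.
    let ord = a.partial_cmp(&b).unwrap_or(Ordering::Equal);
    if reverse { ord.reverse() } else { ord }
}

fn main() {
    let mut cpu_usage = vec![3.5, f64::NAN, 0.2, 12.0];
    // Descending order, as a CPU% column typically is.
    cpu_usage.sort_by(|a, b| ordering_of(*a, *b, true));
    println!("{:?}", cpu_usage);
}
```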