Mirror of https://github.com/ClementTsang/bottom (synced 2024-11-22 20:23:12 +00:00)
Moved temp and drives over...
This commit is contained in:
parent 57aa15978c
commit abe8a4bb86
11 changed files with 217 additions and 197 deletions
src/app.rs (36 changed lines)
@@ -2,10 +2,10 @@ pub mod data_harvester;
 use data_harvester::{processes, temperature};
 use std::time::Instant;

-pub mod data_janitor;
-use data_janitor::*;
+pub mod data_farmer;
+use data_farmer::*;

-use crate::{canvas, constants, data_conversion::ConvertedProcessData, utils::error::Result};
+use crate::{canvas, constants, data_conversion::ConvertedProcessHarvest, utils::error::Result};

 mod process_killer;

@@ -76,7 +76,7 @@ pub struct App {
 pub show_help: bool,
 pub show_dd: bool,
 pub dd_err: Option<String>,
-to_delete_process_list: Option<Vec<ConvertedProcessData>>,
+to_delete_process_list: Option<Vec<ConvertedProcessHarvest>>,
 pub is_frozen: bool,
 pub left_legend: bool,
 pub use_current_cpu_total: bool,
@@ -196,19 +196,13 @@ impl App {
 self.enable_grouping
 }

-pub fn toggle_searching(&mut self) {
+pub fn enable_searching(&mut self) {
 if !self.is_in_dialog() {
 match self.current_application_position {
 ApplicationPosition::Process | ApplicationPosition::ProcessSearch => {
-if self.enable_searching {
-// Toggle off
-self.enable_searching = false;
-self.current_application_position = ApplicationPosition::Process;
-} else {
-// Toggle on
-self.enable_searching = true;
-self.current_application_position = ApplicationPosition::ProcessSearch;
-}
+// Toggle on
+self.enable_searching = true;
+self.current_application_position = ApplicationPosition::ProcessSearch;
 }
 _ => {}
 }
@@ -397,7 +391,7 @@ impl App {
 } else {
 match caught_char {
 '/' => {
-self.toggle_searching();
+self.enable_searching();
 }
 'd' => {
 if let ApplicationPosition::Process = self.current_application_position {
@@ -405,7 +399,7 @@ impl App {
 self.awaiting_second_char = false;
 self.second_char = ' ';
 let current_process = if self.is_grouped() {
-let mut res: Vec<ConvertedProcessData> = Vec::new();
+let mut res: Vec<ConvertedProcessHarvest> = Vec::new();
 for pid in &self.canvas_data.grouped_process_data
 [self.currently_selected_process_position as usize]
 .group
@@ -530,7 +524,7 @@ impl App {
 Ok(())
 }

-pub fn get_current_highlighted_process_list(&self) -> Option<Vec<ConvertedProcessData>> {
+pub fn get_current_highlighted_process_list(&self) -> Option<Vec<ConvertedProcessHarvest>> {
 self.to_delete_process_list.clone()
 }

@@ -633,10 +627,10 @@ impl App {
 }
 ApplicationPosition::Temp => {
 self.currently_selected_temperature_position =
-self.data.list_of_temperature_sensor.len() as i64 - 1
+self.data.temperature_sensors.len() as i64 - 1
 }
 ApplicationPosition::Disk => {
-self.currently_selected_disk_position = self.data.list_of_disks.len() as i64 - 1
+self.currently_selected_disk_position = self.data.disks.len() as i64 - 1
 }
 ApplicationPosition::Cpu => {
 self.currently_selected_cpu_table_position =
@@ -698,7 +692,7 @@ impl App {
 fn change_temp_position(&mut self, num_to_change_by: i64) {
 if self.currently_selected_temperature_position + num_to_change_by >= 0
 && self.currently_selected_temperature_position + num_to_change_by
-< self.data.list_of_temperature_sensor.len() as i64
+< self.data.temperature_sensors.len() as i64
 {
 self.currently_selected_temperature_position += num_to_change_by;
 }
@@ -707,7 +701,7 @@ impl App {
 fn change_disk_position(&mut self, num_to_change_by: i64) {
 if self.currently_selected_disk_position + num_to_change_by >= 0
 && self.currently_selected_disk_position + num_to_change_by
-< self.data.list_of_disks.len() as i64
+< self.data.disks.len() as i64
 {
 self.currently_selected_disk_position += num_to_change_by;
 }
@@ -1,6 +1,18 @@
-use crate::data_harvester::{cpu, mem, network, Data};
-/// In charge of cleaning and managing data. I couldn't think of a better
-/// name for the file.
+use crate::data_harvester::{cpu, disks, mem, network, processes, temperature, Data};
+/// In charge of cleaning, processing, and managing data. I couldn't think of
+/// a better name for the file. Since I called data collection "harvesting",
+/// then this is the farmer I guess.
+///
+/// Essentially the main goal is to shift the initial calculation and distribution
+/// of joiner points and data to one central location that will only do it
+/// *once* upon receiving the data --- as opposed to doing it on canvas draw,
+/// which will be a costly process.
+///
+/// This will also handle the *cleaning* of stale data. That should be done
+/// in some manner (timer on another thread, some loop) that will occasionally
+/// call the purging function. Failure to do so *will* result in a growing
+/// memory usage and higher CPU usage - you will be trying to process more and
+/// more points as this is used!
 use std::time::Instant;
 use std::vec::Vec;
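The doc comment above is the heart of this change: the "joiner points" that smooth graph lines between samples are now computed once, when data arrives, rather than on every canvas draw. As a rough illustration of the idea only (the real generate_joining_points shown further down has a different signature and storage layout), linear interpolation between the previously stored value and the freshly harvested one looks roughly like this:

```rust
use std::time::Instant;

/// Illustrative only: produce `num_points` evenly spaced intermediate values
/// between the value recorded at `prev_time` and the newly harvested
/// `new_value`, returned as (ms offset from prev_time, value) pairs.
fn join_points(
    prev_time: Instant, prev_value: f64, new_time: Instant, new_value: f64, num_points: usize,
) -> Vec<(f64, f64)> {
    let span_ms = new_time.duration_since(prev_time).as_millis() as f64;
    (1..=num_points)
        .map(|i| {
            let frac = i as f64 / num_points as f64;
            (span_ms * frac, prev_value + (new_value - prev_value) * frac)
        })
        .collect()
}
```

Doing this once per harvest, as the farmer does, keeps the per-frame draw path cheap; the comment's warning about stale data is what clean_data (still a TODO below) is meant to address.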
@@ -15,8 +27,9 @@ pub struct TimedData {
 pub cpu_data: Vec<JoinedDataPoints>,
 pub mem_data: JoinedDataPoints,
 pub swap_data: JoinedDataPoints,
-pub temp_data: JoinedDataPoints,
-pub io_data: JoinedDataPoints,
+// Unused for now
+// pub io_data : JoinedDataPoints
+// pub temp_data: JoinedDataPoints,
 }

 /// AppCollection represents the pooled data stored within the main app
@@ -36,6 +49,12 @@ pub struct DataCollection {
 pub memory_harvest: mem::MemHarvest,
 pub swap_harvest: mem::MemHarvest,
 pub cpu_harvest: cpu::CPUHarvest,
+pub process_harvest: processes::ProcessHarvest,
+pub disk_harvest: Vec<disks::DiskHarvest>,
+pub io_harvest: disks::IOHarvest,
+pub io_labels: Vec<(u64, u64)>,
+io_prev: Vec<(u64, u64)>,
+pub temp_harvest: Vec<temperature::TempHarvest>,
 }

 impl Default for DataCollection {
@@ -47,12 +66,20 @@ impl Default for DataCollection {
 memory_harvest: mem::MemHarvest::default(),
 swap_harvest: mem::MemHarvest::default(),
 cpu_harvest: cpu::CPUHarvest::default(),
+process_harvest: processes::ProcessHarvest::default(),
+disk_harvest: Vec::default(),
+io_harvest: disks::IOHarvest::default(),
+io_labels: Vec::default(),
+io_prev: Vec::default(),
+temp_harvest: Vec::default(),
 }
 }
 }

 impl DataCollection {
-pub fn clean_data(&mut self) {}
+pub fn clean_data(&mut self) {
+// TODO: [OPT] To implement to clean
+}

 pub fn eat_data(&mut self, harvested_data: &Data) {
 let harvested_time = harvested_data.last_collection_time;
@@ -67,6 +94,14 @@ impl DataCollection {
 // CPU
 self.eat_cpu(&harvested_data, &harvested_time, &mut new_entry);

+// Temp
+self.eat_temp(&harvested_data, &harvested_time, &mut new_entry);
+
+// Disks
+self.eat_disks(&harvested_data, &harvested_time, &mut new_entry);
+
+// Processes
+
 // And we're done eating.
 self.current_instant = harvested_time;
 self.timed_data_vec.push((harvested_time, new_entry));
@@ -147,7 +182,7 @@ impl DataCollection {
 // Note this only pre-calculates the data points - the names will be
 // within the local copy of cpu_harvest. Since it's all sequential
 // it probably doesn't matter anyways.
-for (itx, cpu) in harvested_data.cpu.cpu_vec.iter().enumerate() {
+for (itx, cpu) in harvested_data.cpu.iter().enumerate() {
 let cpu_joining_pts = if let Some((time, last_pt)) = self.timed_data_vec.last() {
 generate_joining_points(
 &time,
@@ -165,6 +200,51 @@ impl DataCollection {

 self.cpu_harvest = harvested_data.cpu.clone();
 }
+
+fn eat_temp(
+&mut self, harvested_data: &Data, _harvested_time: &Instant, _new_entry: &mut TimedData,
+) {
+// TODO: [PO] To implement
+self.temp_harvest = harvested_data.temperature_sensors.clone();
+}
+
+fn eat_disks(
+&mut self, harvested_data: &Data, harvested_time: &Instant, _new_entry: &mut TimedData,
+) {
+// TODO: [PO] To implement
+
+let time_since_last_harvest = harvested_time
+.duration_since(self.current_instant)
+.as_secs_f64();
+
+for (itx, device) in harvested_data.disks.iter().enumerate() {
+if let Some(trim) = device.name.split('/').last() {
+let io_device = harvested_data.io.get(trim);
+if let Some(io) = io_device {
+let io_r_pt = io.read_bytes;
+let io_w_pt = io.write_bytes;
+
+if self.io_labels.len() <= itx {
+self.io_prev.push((io_r_pt, io_w_pt));
+self.io_labels.push((0, 0));
+} else {
+let r_rate = ((io_r_pt - self.io_prev[itx].0) as f64
+/ time_since_last_harvest)
+.round() as u64;
+let w_rate = ((io_w_pt - self.io_prev[itx].1) as f64
+/ time_since_last_harvest)
+.round() as u64;
+
+self.io_labels[itx] = (r_rate, w_rate);
+self.io_prev[itx] = (io_r_pt, io_w_pt);
+}
+}
+}
+}
+
+self.disk_harvest = harvested_data.disks.clone();
+self.io_harvest = harvested_data.io.clone();
+}
 }

 pub fn generate_joining_points(
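The new eat_disks body boils the I/O numbers down to a delta-over-time calculation: subtract the previous byte counters from the current ones and divide by the seconds since the last harvest. A standalone sketch of just that step (the function name and the saturating_sub guard are illustrative additions, not part of the diff):

```rust
/// Turn two successive (read_bytes, write_bytes) counter samples into
/// per-second rates, given the elapsed time between harvests.
/// `saturating_sub` is an extra guard against counter resets; the diff
/// itself uses a plain subtraction.
fn io_rate(prev: (u64, u64), curr: (u64, u64), elapsed_secs: f64) -> (u64, u64) {
    let read_rate = (curr.0.saturating_sub(prev.0) as f64 / elapsed_secs).round() as u64;
    let write_rate = (curr.1.saturating_sub(prev.1) as f64 / elapsed_secs).round() as u64;
    (read_rate, write_rate)
}
```

Fed with the previously stored counters and the fresh ones, this yields the (read, write) pair that eat_disks stores in io_labels[itx] for the drawing code to format later.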
@@ -17,23 +17,17 @@ fn set_if_valid<T: std::clone::Clone>(result: &Result<T>, value_to_set: &mut T)
 }
 }

-fn push_if_valid<T: std::clone::Clone>(result: &Result<T>, vector_to_push: &mut Vec<T>) {
-if let Ok(result) = result {
-vector_to_push.push(result.clone());
-}
-}
-
 #[derive(Clone, Debug)]
 pub struct Data {
 pub cpu: cpu::CPUHarvest,
-pub list_of_io: Vec<disks::IOPackage>,
 pub memory: mem::MemHarvest,
 pub swap: mem::MemHarvest,
-pub list_of_temperature_sensor: Vec<temperature::TempData>,
+pub temperature_sensors: Vec<temperature::TempHarvest>,
 pub network: network::NetworkHarvest,
-pub list_of_processes: Vec<processes::ProcessData>,
-pub grouped_list_of_processes: Option<Vec<processes::ProcessData>>,
-pub list_of_disks: Vec<disks::DiskData>,
+pub list_of_processes: Vec<processes::ProcessHarvest>,
+pub grouped_list_of_processes: Option<Vec<processes::ProcessHarvest>>,
+pub disks: Vec<disks::DiskHarvest>,
+pub io: disks::IOHarvest,
 pub last_collection_time: Instant,
 }

@@ -41,13 +35,13 @@ impl Default for Data {
 fn default() -> Self {
 Data {
 cpu: cpu::CPUHarvest::default(),
-list_of_io: Vec::default(),
 memory: mem::MemHarvest::default(),
 swap: mem::MemHarvest::default(),
-list_of_temperature_sensor: Vec::default(),
+temperature_sensors: Vec::default(),
 list_of_processes: Vec::default(),
 grouped_list_of_processes: None,
-list_of_disks: Vec::default(),
+disks: Vec::default(),
+io: disks::IOHarvest::default(),
 network: network::NetworkHarvest::default(),
 last_collection_time: Instant::now(),
 }
@@ -56,11 +50,11 @@ impl Default for Data {

 impl Data {
 pub fn first_run_cleanup(&mut self) {
-self.list_of_io = Vec::new();
-self.list_of_temperature_sensor = Vec::new();
+self.io = disks::IOHarvest::default();
+self.temperature_sensors = Vec::new();
 self.list_of_processes = Vec::new();
 self.grouped_list_of_processes = None;
-self.list_of_disks = Vec::new();
+self.disks = Vec::new();

 self.network.first_run_cleanup();
 self.memory = mem::MemHarvest::default();
@@ -149,6 +143,20 @@ impl DataState {
 // CPU
 self.data.cpu = cpu::get_cpu_data_list(&self.sys);

+// Disks
+if let Ok(disks) = disks::get_disk_usage_list().await {
+self.data.disks = disks;
+}
+if let Ok(io) = disks::get_io_usage_list(false).await {
+self.data.io = io;
+}
+
+// Temp
+if let Ok(temp) = temperature::get_temperature_data(&self.sys, &self.temperature_type).await
+{
+self.data.temperature_sensors = temp;
+}
+
 // What we want to do: For timed data, if there is an error, just do not add. For other data, just don't update!
 set_if_valid(
 &processes::get_sorted_processes_list(
@@ -163,19 +171,7 @@ impl DataState {
 &mut self.data.list_of_processes,
 );

-set_if_valid(
-&disks::get_disk_usage_list().await,
-&mut self.data.list_of_disks,
-);
-push_if_valid(
-&disks::get_io_usage_list(false).await,
-&mut self.data.list_of_io,
-);
-set_if_valid(
-&temperature::get_temperature_data(&self.sys, &self.temperature_type).await,
-&mut self.data.list_of_temperature_sensor,
-);

 // Update time
 self.data.last_collection_time = current_instant;

 // Filter out stale timed entries
@@ -192,15 +188,6 @@ impl DataState {
 for stale in stale_list {
 self.prev_pid_stats.remove(&stale);
 }
-self.data.list_of_io = self
-.data
-.list_of_io
-.iter()
-.cloned()
-.filter(|entry| {
-clean_instant.duration_since(entry.instant).as_secs() <= self.stale_max_seconds
-})
-.collect::<Vec<_>>();

 self.last_clean = clean_instant;
 }
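Note how update_data now handles disks, I/O, and temperatures with plain if let Ok(...) blocks instead of the removed set_if_valid/push_if_valid helpers: on an error the previous harvest is simply left in place, matching the "just don't update" comment. The pattern in isolation, with a hypothetical fetch function standing in for the real heim-based collectors:

```rust
/// Hypothetical stand-in for an async collector such as
/// disks::get_disk_usage_list().
async fn fetch_disks() -> Result<Vec<String>, ()> {
    Ok(vec!["/dev/sda1".to_string()])
}

struct State {
    disks: Vec<String>,
}

impl State {
    async fn update_data(&mut self) {
        // On Err, keep whatever was harvested last time instead of clearing it.
        if let Ok(disks) = fetch_disks().await {
            self.disks = disks;
        }
    }
}
```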
@@ -6,10 +6,7 @@ pub struct CPUData {
 pub cpu_usage: f64,
 }

-#[derive(Default, Debug, Clone)]
-pub struct CPUHarvest {
-pub cpu_vec: Vec<CPUData>,
-}
+pub type CPUHarvest = Vec<CPUData>;

 pub fn get_cpu_data_list(sys: &System) -> CPUHarvest {
 let cpu_data = sys.get_processor_list();
@@ -22,5 +19,5 @@ pub fn get_cpu_data_list(sys: &System) -> CPUHarvest {
 });
 }

-CPUHarvest { cpu_vec }
+cpu_vec
 }
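Turning CPUHarvest into a type alias for Vec<CPUData> is what lets the farmer iterate with harvested_data.cpu.iter() instead of reaching through a cpu_vec field. A minimal sketch of the new shape (the average_usage helper is hypothetical, and the CPUData fields are abbreviated from the surrounding hunks):

```rust
#[derive(Default, Debug, Clone)]
pub struct CPUData {
    pub cpu_name: String,
    pub cpu_usage: f64,
}

pub type CPUHarvest = Vec<CPUData>;

/// Hypothetical helper: with the alias, a harvest is just a Vec, so it can
/// be indexed and iterated directly (no `.cpu_vec`).
fn average_usage(harvest: &CPUHarvest) -> f64 {
    if harvest.is_empty() {
        return 0.0;
    }
    harvest.iter().map(|c| c.cpu_usage).sum::<f64>() / harvest.len() as f64
}
```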
@@ -1,11 +1,10 @@
 use futures::stream::StreamExt;
 use heim::units::information;
-use std::time::Instant;

 #[derive(Debug, Clone, Default)]
-pub struct DiskData {
-pub name: Box<str>,
-pub mount_point: Box<str>,
+pub struct DiskHarvest {
+pub name: String,
+pub mount_point: String,
 pub free_space: u64,
 pub used_space: u64,
 pub total_space: u64,
@@ -13,18 +12,13 @@ pub struct DiskData {

 #[derive(Clone, Debug)]
 pub struct IOData {
-pub mount_point: Box<str>,
 pub read_bytes: u64,
 pub write_bytes: u64,
 }

-#[derive(Debug, Clone)]
-pub struct IOPackage {
-pub io_hash: std::collections::HashMap<String, IOData>,
-pub instant: Instant,
-}
+pub type IOHarvest = std::collections::HashMap<String, IOData>;

-pub async fn get_io_usage_list(get_physical: bool) -> crate::utils::error::Result<IOPackage> {
+pub async fn get_io_usage_list(get_physical: bool) -> crate::utils::error::Result<IOHarvest> {
 let mut io_hash: std::collections::HashMap<String, IOData> = std::collections::HashMap::new();
 if get_physical {
 let mut physical_counter_stream = heim::disk::io_counters_physical();
@@ -34,7 +28,6 @@ pub async fn get_io_usage_list(get_physical: bool) -> crate::utils::error::Result<IOPackage> {
 io_hash.insert(
 mount_point.to_string(),
 IOData {
-mount_point: Box::from(mount_point),
 read_bytes: io.read_bytes().get::<information::megabyte>(),
 write_bytes: io.write_bytes().get::<information::megabyte>(),
 },
@@ -48,7 +41,6 @@ pub async fn get_io_usage_list(get_physical: bool) -> crate::utils::error::Result<IOPackage> {
 io_hash.insert(
 mount_point.to_string(),
 IOData {
-mount_point: Box::from(mount_point),
 read_bytes: io.read_bytes().get::<information::byte>(),
 write_bytes: io.write_bytes().get::<information::byte>(),
 },
@@ -56,14 +48,11 @@ pub async fn get_io_usage_list(get_physical: bool) -> crate::utils::error::Result<IOPackage> {
 }
 }

-Ok(IOPackage {
-io_hash,
-instant: Instant::now(),
-})
+Ok(io_hash)
 }

-pub async fn get_disk_usage_list() -> crate::utils::error::Result<Vec<DiskData>> {
-let mut vec_disks: Vec<DiskData> = Vec::new();
+pub async fn get_disk_usage_list() -> crate::utils::error::Result<Vec<DiskHarvest>> {
+let mut vec_disks: Vec<DiskHarvest> = Vec::new();
 let mut partitions_stream = heim::disk::partitions_physical();

 while let Some(part) = partitions_stream.next().await {
@@ -71,23 +60,21 @@ pub async fn get_disk_usage_list() -> crate::utils::error::Result<Vec<DiskData>> {
 let partition = part;
 let usage = heim::disk::usage(partition.mount_point().to_path_buf()).await?;

-vec_disks.push(DiskData {
+vec_disks.push(DiskHarvest {
 free_space: usage.free().get::<information::byte>(),
 used_space: usage.used().get::<information::byte>(),
 total_space: usage.total().get::<information::byte>(),
-mount_point: Box::from(
-partition
-.mount_point()
-.to_str()
-.unwrap_or("Name Unavailable"),
-),
-name: Box::from(
-partition
-.device()
-.unwrap_or_else(|| std::ffi::OsStr::new("Name Unavailable"))
-.to_str()
-.unwrap_or("Name Unavailable"),
-),
+mount_point: (partition
+.mount_point()
+.to_str()
+.unwrap_or("Name Unavailable"))
+.to_string(),
+name: (partition
+.device()
+.unwrap_or_else(|| std::ffi::OsStr::new("Name Unavailable"))
+.to_str()
+.unwrap_or("Name Unavailable"))
+.to_string(),
 });
 }
 }
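IOHarvest is likewise reduced to a plain HashMap of per-device counters, which is why eat_disks can look an entry up with device.name.split('/').last(). A minimal sketch of that lookup, with a trimmed-down IOData and a hypothetical helper (whether the map key is the device or mount name depends on the collector, so treat the key choice as an assumption):

```rust
use std::collections::HashMap;

/// Trimmed-down IOData (the mount_point field was dropped by this commit anyway).
pub struct IOData {
    pub read_bytes: u64,
    pub write_bytes: u64,
}

pub type IOHarvest = HashMap<String, IOData>;

/// Hypothetical helper mirroring the lookup in eat_disks: take the last path
/// component of a device name like "/dev/sda1" and use it as the map key.
fn io_for_device<'a>(io: &'a IOHarvest, device_name: &str) -> Option<&'a IOData> {
    device_name.split('/').last().and_then(|trimmed| io.get(trimmed))
}
```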
@@ -18,7 +18,7 @@ impl Default for ProcessSorting {
 }

 #[derive(Debug, Clone, Default)]
-pub struct ProcessData {
+pub struct ProcessHarvest {
 pub pid: u32,
 pub cpu_usage_percent: f64,
 pub mem_usage_percent: f64,
@@ -182,9 +182,9 @@ fn convert_ps(
 process: &str, cpu_usage: f64, cpu_percentage: f64,
 prev_pid_stats: &mut HashMap<String, (f64, Instant)>, use_current_cpu_total: bool,
 curr_time: &Instant,
-) -> std::io::Result<ProcessData> {
+) -> std::io::Result<ProcessHarvest> {
 if process.trim().to_string().is_empty() {
-return Ok(ProcessData {
+return Ok(ProcessHarvest {
 pid: 0,
 name: "".to_string(),
 mem_usage_percent: 0.0,
@@ -205,7 +205,7 @@ fn convert_ps(
 .parse::<f64>()
 .unwrap_or(0_f64);

-Ok(ProcessData {
+Ok(ProcessHarvest {
 pid,
 name,
 mem_usage_percent,
@@ -225,8 +225,8 @@ pub fn get_sorted_processes_list(
 sys: &System, prev_idle: &mut f64, prev_non_idle: &mut f64,
 prev_pid_stats: &mut std::collections::HashMap<String, (f64, Instant)>,
 use_current_cpu_total: bool, mem_total_kb: u64, curr_time: &Instant,
-) -> crate::utils::error::Result<Vec<ProcessData>> {
-let mut process_vector: Vec<ProcessData> = Vec::new();
+) -> crate::utils::error::Result<Vec<ProcessHarvest>> {
+let mut process_vector: Vec<ProcessHarvest> = Vec::new();

 if cfg!(target_os = "linux") {
 // Linux specific - this is a massive pain... ugh.
@@ -282,7 +282,7 @@ pub fn get_sorted_processes_list(
 process_val.name().to_string()
 };

-process_vector.push(ProcessData {
+process_vector.push(ProcessHarvest {
 pid: process_val.pid() as u32,
 name,
 mem_usage_percent: process_val.memory() as f64 * 100.0 / mem_total_kb as f64,
@@ -296,7 +296,7 @@ pub fn get_sorted_processes_list(
 }

 pub fn sort_processes(
-process_vector: &mut Vec<ProcessData>, sorting_method: &ProcessSorting, reverse_order: bool,
+process_vector: &mut Vec<ProcessHarvest>, sorting_method: &ProcessSorting, reverse_order: bool,
 ) {
 // Always sort alphabetically first!
 process_vector.sort_by(|a, b| get_ordering(&a.name, &b.name, false));
@@ -3,9 +3,9 @@ use heim::units::thermodynamic_temperature;
 use std::cmp::Ordering;
 use sysinfo::{ComponentExt, System, SystemExt};

-#[derive(Debug, Clone)]
-pub struct TempData {
-pub component_name: Box<str>,
+#[derive(Default, Debug, Clone)]
+pub struct TempHarvest {
+pub component_name: String,
 pub temperature: f32,
 }

@@ -24,15 +24,15 @@ impl Default for TemperatureType {

 pub async fn get_temperature_data(
 sys: &System, temp_type: &TemperatureType,
-) -> crate::utils::error::Result<Vec<TempData>> {
-let mut temperature_vec: Vec<TempData> = Vec::new();
+) -> crate::utils::error::Result<Vec<TempHarvest>> {
+let mut temperature_vec: Vec<TempHarvest> = Vec::new();

 if cfg!(target_os = "linux") {
 let mut sensor_data = heim::sensors::temperatures();
 while let Some(sensor) = sensor_data.next().await {
 if let Ok(sensor) = sensor {
-temperature_vec.push(TempData {
-component_name: Box::from(sensor.unit()),
+temperature_vec.push(TempHarvest {
+component_name: sensor.unit().to_string(),
 temperature: match temp_type {
 TemperatureType::Celsius => sensor
 .current()
@@ -52,8 +52,8 @@ pub async fn get_temperature_data(
 } else {
 let sensor_data = sys.get_components_list();
 for component in sensor_data {
-temperature_vec.push(TempData {
-component_name: Box::from(component.get_label()),
+temperature_vec.push(TempHarvest {
+component_name: component.get_label().to_string(),
 temperature: match temp_type {
 TemperatureType::Celsius => component.get_temperature(),
 TemperatureType::Kelvin => component.get_temperature() + 273.15,
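The TemperatureType match above converts the sensor's Celsius reading into the unit the user asked for; only the Celsius and Kelvin arms are visible in these hunks, so treat the Fahrenheit arm below as an assumption about the elided code. The conversions themselves are standard:

```rust
enum TemperatureType {
    Celsius,
    Kelvin,
    Fahrenheit,
}

/// Convert a Celsius reading into the requested unit. The Kelvin offset
/// matches the hunk above; the Fahrenheit arm is assumed.
fn convert_celsius(celsius: f32, temp_type: &TemperatureType) -> f32 {
    match temp_type {
        TemperatureType::Celsius => celsius,
        TemperatureType::Kelvin => celsius + 273.15,
        TemperatureType::Fahrenheit => celsius * 9.0 / 5.0 + 32.0,
    }
}
```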
@@ -1,6 +1,6 @@
 use crate::{
 app, constants,
-data_conversion::{ConvertedCpuData, ConvertedProcessData},
+data_conversion::{ConvertedCpuData, ConvertedProcessHarvest},
 utils::{error, gen_util::*},
 };
 use std::cmp::max;
@@ -94,8 +94,8 @@ pub struct CanvasData {
 pub network_data_tx: Vec<(f64, f64)>,
 pub disk_data: Vec<Vec<String>>,
 pub temp_sensor_data: Vec<Vec<String>>,
-pub process_data: Vec<ConvertedProcessData>,
-pub grouped_process_data: Vec<ConvertedProcessData>,
+pub process_data: Vec<ConvertedProcessHarvest>,
+pub grouped_process_data: Vec<ConvertedProcessHarvest>,
 pub mem_label: String,
 pub swap_label: String,
 pub mem_data: Vec<(f64, f64)>,
@@ -951,7 +951,7 @@ fn draw_search_field<B: backend::Backend>(
 fn draw_processes_table<B: backend::Backend>(
 f: &mut Frame<B>, app_state: &mut app::App, draw_loc: Rect,
 ) {
-let process_data: &[ConvertedProcessData] = if app_state.is_grouped() {
+let process_data: &[ConvertedProcessHarvest] = if app_state.is_grouped() {
 &app_state.canvas_data.grouped_process_data
 } else {
 &app_state.canvas_data.process_data
@@ -971,7 +971,7 @@ fn draw_processes_table<B: backend::Backend>(
 app_state.currently_selected_process_position,
 );

-let sliced_vec: Vec<ConvertedProcessData> = (&process_data[start_position as usize..]).to_vec();
+let sliced_vec: Vec<ConvertedProcessHarvest> = (&process_data[start_position as usize..]).to_vec();
 let mut process_counter = 0;

 // Draw!
@@ -1,7 +1,7 @@
 // TODO: Store like three minutes of data, then change how much is shown based on scaling!
 pub const STALE_MAX_MILLISECONDS: u64 = 180 * 1000; // We wish to store at most 60 seconds worth of data. This may change in the future, or be configurable.
 pub const TIME_STARTS_FROM: u64 = 60 * 1000;
-pub const TICK_RATE_IN_MILLISECONDS: u64 = 200; // We use this as it's a good value to work with.
+pub const TICK_RATE_IN_MILLISECONDS: u64 = 200; // How fast the screen refreshes
 pub const DEFAULT_REFRESH_RATE_IN_MILLISECONDS: u128 = 1000;
 pub const MAX_KEY_TIMEOUT_IN_MILLISECONDS: u128 = 1000;
 pub const NUM_COLOURS: i32 = 256;
@@ -2,8 +2,9 @@
 //! can actually handle.

 use crate::{
+app::data_farmer,
 app::data_harvester,
-app::data_janitor,
+app::App,
 constants,
 utils::gen_util::{get_exact_byte_values, get_simple_byte_values},
 };
@@ -21,7 +22,7 @@ pub struct ConvertedNetworkData {
 }

 #[derive(Clone, Default, Debug)]
-pub struct ConvertedProcessData {
+pub struct ConvertedProcessHarvest {
 pub pid: u32,
 pub name: String,
 pub cpu_usage: String,
@@ -55,15 +56,16 @@ impl From<&CpuPoint> for (f64, f64) {
 }
 }

-pub fn update_temp_row(
-app_data: &data_harvester::Data, temp_type: &data_harvester::temperature::TemperatureType,
-) -> Vec<Vec<String>> {
+pub fn update_temp_row(app: &App) -> Vec<Vec<String>> {
 let mut sensor_vector: Vec<Vec<String>> = Vec::new();

-if (&app_data.list_of_temperature_sensor).is_empty() {
+let current_data = &app.data_collection;
+let temp_type = &app.temperature_type;
+
+if current_data.temp_harvest.is_empty() {
 sensor_vector.push(vec!["No Sensors Found".to_string(), "".to_string()])
 } else {
-for sensor in &app_data.list_of_temperature_sensor {
+for sensor in &current_data.temp_harvest {
 sensor_vector.push(vec![
 sensor.component_name.to_string(),
 (sensor.temperature.ceil() as u64).to_string()
@@ -79,44 +81,18 @@ pub fn update_temp_row(
 sensor_vector
 }

-pub fn update_disk_row(app_data: &data_harvester::Data) -> Vec<Vec<String>> {
+pub fn update_disk_row(current_data: &data_farmer::DataCollection) -> Vec<Vec<String>> {
 let mut disk_vector: Vec<Vec<String>> = Vec::new();
-for disk in &app_data.list_of_disks {
-let io_activity = {
-let mut final_result = ("0B/s".to_string(), "0B/s".to_string());
-if app_data.list_of_io.len() > 2 {
-if let Some(io_package) = &app_data.list_of_io.last() {
-if let Some(trimmed_mount) = disk.name.to_string().split('/').last() {
-let prev_io_package = &app_data.list_of_io[app_data.list_of_io.len() - 2];
-
-let io_hashmap = &io_package.io_hash;
-let prev_io_hashmap = &prev_io_package.io_hash;
-let time_difference = io_package
-.instant
-.duration_since(prev_io_package.instant)
-.as_secs_f64();
-if io_hashmap.contains_key(trimmed_mount)
-&& prev_io_hashmap.contains_key(trimmed_mount)
-{
-// Ideally change this...
-let ele = &io_hashmap[trimmed_mount];
-let prev = &prev_io_hashmap[trimmed_mount];
-let read_bytes_per_sec = ((ele.read_bytes - prev.read_bytes) as f64
-/ time_difference) as u64;
-let write_bytes_per_sec = ((ele.write_bytes - prev.write_bytes) as f64
-/ time_difference) as u64;
-let converted_read = get_simple_byte_values(read_bytes_per_sec, false);
-let converted_write =
-get_simple_byte_values(write_bytes_per_sec, false);
-final_result = (
-format!("{:.*}{}/s", 0, converted_read.0, converted_read.1),
-format!("{:.*}{}/s", 0, converted_write.0, converted_write.1),
-);
-}
-}
-}
-}
-final_result
+for (itx, disk) in current_data.disk_harvest.iter().enumerate() {
+let io_activity = if current_data.io_labels.len() > itx {
+let converted_read = get_simple_byte_values(current_data.io_labels[itx].0, false);
+let converted_write = get_simple_byte_values(current_data.io_labels[itx].1, false);
+(
+format!("{:.*}{}/s", 0, converted_read.0, converted_read.1),
+format!("{:.*}{}/s", 0, converted_write.0, converted_write.1),
+)
+} else {
+("0B/s".to_string(), "0B/s".to_string())
 };

 let converted_free_space = get_simple_byte_values(disk.free_space, false);
@@ -143,8 +119,8 @@ pub fn update_disk_row(app_data: &data_harvester::Data) -> Vec<Vec<String>> {

 pub fn simple_update_process_row(
 app_data: &data_harvester::Data, matching_string: &str, use_pid: bool,
-) -> (Vec<ConvertedProcessData>, Vec<ConvertedProcessData>) {
-let process_vector: Vec<ConvertedProcessData> = app_data
+) -> (Vec<ConvertedProcessHarvest>, Vec<ConvertedProcessHarvest>) {
+let process_vector: Vec<ConvertedProcessHarvest> = app_data
 .list_of_processes
 .iter()
 .filter(|process| {
@@ -161,7 +137,7 @@ pub fn simple_update_process_row(
 .map(|process| return_mapped_process(process))
 .collect::<Vec<_>>();

-let mut grouped_process_vector: Vec<ConvertedProcessData> = Vec::new();
+let mut grouped_process_vector: Vec<ConvertedProcessHarvest> = Vec::new();
 if let Some(grouped_list_of_processes) = &app_data.grouped_list_of_processes {
 grouped_process_vector = grouped_list_of_processes
 .iter()
@@ -186,8 +162,8 @@ pub fn simple_update_process_row(
 pub fn regex_update_process_row(
 app_data: &data_harvester::Data, regex_matcher: &std::result::Result<Regex, regex::Error>,
 use_pid: bool,
-) -> (Vec<ConvertedProcessData>, Vec<ConvertedProcessData>) {
-let process_vector: Vec<ConvertedProcessData> = app_data
+) -> (Vec<ConvertedProcessHarvest>, Vec<ConvertedProcessHarvest>) {
+let process_vector: Vec<ConvertedProcessHarvest> = app_data
 .list_of_processes
 .iter()
 .filter(|process| {
@@ -204,7 +180,7 @@ pub fn regex_update_process_row(
 .map(|process| return_mapped_process(process))
 .collect::<Vec<_>>();

-let mut grouped_process_vector: Vec<ConvertedProcessData> = Vec::new();
+let mut grouped_process_vector: Vec<ConvertedProcessHarvest> = Vec::new();
 if let Some(grouped_list_of_processes) = &app_data.grouped_list_of_processes {
 grouped_process_vector = grouped_list_of_processes
 .iter()
@@ -226,8 +202,10 @@ pub fn regex_update_process_row(
 (process_vector, grouped_process_vector)
 }

-fn return_mapped_process(process: &data_harvester::processes::ProcessData) -> ConvertedProcessData {
-ConvertedProcessData {
+fn return_mapped_process(
+process: &data_harvester::processes::ProcessHarvest,
+) -> ConvertedProcessHarvest {
+ConvertedProcessHarvest {
 pid: process.pid,
 name: process.name.to_string(),
 cpu_usage: format!("{:.1}%", process.cpu_usage_percent),
@@ -237,7 +215,7 @@ fn return_mapped_process(process: &data_harvester::processes::ProcessData) -> ConvertedProcessData {
 }

 pub fn update_cpu_data_points(
-show_avg_cpu: bool, current_data: &data_janitor::DataCollection,
+show_avg_cpu: bool, current_data: &data_farmer::DataCollection,
 ) -> Vec<ConvertedCpuData> {
 let mut cpu_data_vector: Vec<ConvertedCpuData> = Vec::new();
 let current_time = current_data.current_instant;
@@ -260,9 +238,7 @@ pub fn update_cpu_data_points(
 cpu_data_vector[itx_offset].cpu_name = if show_avg_cpu && itx_offset == 0 {
 "AVG".to_string()
 } else {
-current_data.cpu_harvest.cpu_vec[itx]
-.cpu_name
-.to_uppercase()
+current_data.cpu_harvest[itx].cpu_name.to_uppercase()
 };
 }

@@ -285,7 +261,7 @@ pub fn update_cpu_data_points(
 cpu_data_vector
 }

-pub fn update_mem_data_points(current_data: &data_janitor::DataCollection) -> Vec<(f64, f64)> {
+pub fn update_mem_data_points(current_data: &data_farmer::DataCollection) -> Vec<(f64, f64)> {
 let mut result: Vec<(f64, f64)> = Vec::new();
 let current_time = current_data.current_instant;

@@ -306,7 +282,7 @@ pub fn update_mem_data_points(current_data: &data_janitor::DataCollection) -> Vec<(f64, f64)> {
 result
 }

-pub fn update_swap_data_points(current_data: &data_janitor::DataCollection) -> Vec<(f64, f64)> {
+pub fn update_swap_data_points(current_data: &data_farmer::DataCollection) -> Vec<(f64, f64)> {
 let mut result: Vec<(f64, f64)> = Vec::new();
 let current_time = current_data.current_instant;

@@ -327,7 +303,7 @@ pub fn update_swap_data_points(current_data: &data_janitor::DataCollection) -> Vec<(f64, f64)> {
 result
 }

-pub fn update_mem_labels(current_data: &data_janitor::DataCollection) -> (String, String) {
+pub fn update_mem_labels(current_data: &data_farmer::DataCollection) -> (String, String) {
 let mem_label = if current_data.memory_harvest.mem_total_in_mb == 0 {
 "".to_string()
 } else {
@@ -360,13 +336,11 @@ pub fn update_mem_labels(current_data: &data_janitor::DataCollection) -> (String, String) {
 )
 };

-debug!("{:?}", mem_label);
-
 (mem_label, swap_label)
 }

 pub fn convert_network_data_points(
-current_data: &data_janitor::DataCollection,
+current_data: &data_farmer::DataCollection,
 ) -> ConvertedNetworkData {
 let mut rx: Vec<(f64, f64)> = Vec::new();
 let mut tx: Vec<(f64, f64)> = Vec::new();
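After this change update_disk_row no longer re-derives rates from two IOPackage snapshots; it only formats the per-second values the farmer already stored in io_labels. A sketch of just that formatting step, using a hypothetical format_rate in place of the real get_simple_byte_values helper (whose exact signature is not spelled out in these hunks):

```rust
/// Hypothetical stand-in for get_simple_byte_values: scale a bytes-per-second
/// figure into a short human-readable rate, e.g. 2048 -> "2KB/s".
fn format_rate(bytes_per_sec: u64) -> String {
    const UNITS: [&str; 4] = ["B", "KB", "MB", "GB"];
    let mut value = bytes_per_sec as f64;
    let mut unit = 0;
    while value >= 1000.0 && unit < UNITS.len() - 1 {
        value /= 1000.0;
        unit += 1;
    }
    format!("{:.0}{}/s", value, UNITS[unit])
}

/// Build the two I/O columns for disk `itx`, falling back to "0B/s" when the
/// farmer has not computed a rate yet (mirrors the io_labels length check).
fn io_columns(io_labels: &[(u64, u64)], itx: usize) -> (String, String) {
    match io_labels.get(itx) {
        Some((read, write)) => (format_rate(*read), format_rate(*write)),
        None => ("0B/s".to_string(), "0B/s".to_string()),
    }
}
```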
src/main.rs (15 changed lines)
@@ -36,7 +36,7 @@ mod constants;
 mod data_conversion;

 use app::data_harvester;
-use app::data_harvester::processes::ProcessData;
+use app::data_harvester::processes::ProcessHarvest;
 use constants::TICK_RATE_IN_MILLISECONDS;
 use data_conversion::*;
 use std::collections::BTreeMap;
@@ -203,13 +203,14 @@ fn main() -> error::Result<()> {
 }

 loop {
+// TODO: [OPT] this should not block... let's properly use tick rates and non-blocking, okay?
 if let Ok(recv) = rx.recv_timeout(Duration::from_millis(TICK_RATE_IN_MILLISECONDS)) {
 match recv {
 Event::KeyInput(event) => {
 if event.modifiers.is_empty() {
 // If only a code, and no modifiers, don't bother...

-// Required to catch for while typing
+// Required catch for searching - otherwise you couldn't search with q.
 if event.code == KeyCode::Char('q') && !app.is_in_search_widget() {
 break;
 }
@@ -233,7 +234,7 @@ fn main() -> error::Result<()> {
 if let KeyModifiers::CONTROL = event.modifiers {
 match event.code {
 KeyCode::Char('c') => break,
-KeyCode::Char('f') => app.toggle_searching(), // Note that this is fine for now, assuming '/' does not do anything other than search.
+KeyCode::Char('f') => app.enable_searching(),
 KeyCode::Left | KeyCode::Char('h') => app.move_left(),
 KeyCode::Right | KeyCode::Char('l') => app.move_right(),
 KeyCode::Up | KeyCode::Char('k') => app.move_up(),
@@ -245,6 +246,7 @@ fn main() -> error::Result<()> {
 app.reset();
 }
 }
+// TODO: [SEARCH] Rename "simple" search to just... search without cases...
 KeyCode::Char('s') => app.toggle_simple_search(),
 KeyCode::Char('a') => app.skip_cursor_beginning(),
 KeyCode::Char('e') => app.skip_cursor_end(),
@@ -280,11 +282,10 @@ fn main() -> error::Result<()> {
 app.canvas_data.total_tx_display = network_data.total_tx_display;

 // Disk
-app.canvas_data.disk_data = update_disk_row(&app.data);
+app.canvas_data.disk_data = update_disk_row(&app.data_collection);

 // Temperatures
-app.canvas_data.temp_sensor_data =
-update_temp_row(&app.data, &app.temperature_type);
+app.canvas_data.temp_sensor_data = update_temp_row(&app);
 // Memory
 app.canvas_data.mem_data = update_mem_data_points(&app.data_collection);
 app.canvas_data.swap_data = update_swap_data_points(&app.data_collection);
@@ -349,7 +350,7 @@ fn handle_process_sorting(app: &mut app::App) {
 process_map
 .iter()
 .map(|(name, data)| {
-ProcessData {
+ProcessHarvest {
 pid: 0, // Irrelevant
 cpu_usage_percent: data.0,
 mem_usage_percent: data.1,
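The main loop shown above leans on rx.recv_timeout(Duration::from_millis(TICK_RATE_IN_MILLISECONDS)) as a crude tick, which is what the new TODO about non-blocking input is pointing at: the thread wakes on an event or, at the latest, after one tick. Reduced to std channels with placeholder event types (not the program's actual Event enum), the shape is:

```rust
use std::sync::mpsc;
use std::time::Duration;

enum Event {
    KeyInput(char),
    Update,
}

fn run(rx: mpsc::Receiver<Event>) {
    const TICK_RATE_IN_MILLISECONDS: u64 = 200;
    loop {
        // Blocks for at most one tick; on timeout we simply fall through and redraw.
        match rx.recv_timeout(Duration::from_millis(TICK_RATE_IN_MILLISECONDS)) {
            Ok(Event::KeyInput(c)) => {
                if c == 'q' {
                    break;
                }
            }
            Ok(Event::Update) => { /* refresh canvas data */ }
            Err(mpsc::RecvTimeoutError::Timeout) => { /* tick: redraw */ }
            Err(mpsc::RecvTimeoutError::Disconnected) => break,
        }
    }
}
```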