Mirror of https://github.com/ClementTsang/bottom, synced 2025-02-15 12:48:28 +00:00

Commit 10b7aa6f78 (parent 13f6dfc529): Converted mem over, will need to move the label over too

8 changed files with 193 additions and 185 deletions
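For orientation, the commit replaces the timestamped Vec<mem::MemData> series in the harvester with a single mem::MemHarvest snapshot, and moves the timed series into data_janitor's DataCollection, which timestamps each snapshot as it is eaten. A minimal, self-contained sketch of that shape change (editorial illustration only; the mem_percent helper and its zero-total guard are not part of the commit):

    use std::time::Instant;

    // Old shape: the harvester stored one timestamped entry per poll.
    #[allow(dead_code)]
    struct MemData {
        mem_total_in_mb: u64,
        mem_used_in_mb: u64,
        instant: Instant,
    }

    // New shape: the harvester keeps only the latest totals; the timestamp is
    // attached later by the data janitor when the snapshot is eaten.
    #[derive(Default, Clone)]
    struct MemHarvest {
        mem_total_in_mb: u64,
        mem_used_in_mb: u64,
    }

    // Percentage math matching eat_memory_and_swap, plus a guard for a zero total.
    fn mem_percent(h: &MemHarvest) -> f64 {
        if h.mem_total_in_mb == 0 {
            0.0
        } else {
            h.mem_used_in_mb as f64 * 100.0 / h.mem_total_in_mb as f64
        }
    }

    fn main() {
        let snapshot = MemHarvest { mem_total_in_mb: 16_000, mem_used_in_mb: 4_000 };
        let stamped = (Instant::now(), mem_percent(&snapshot)); // roughly what the janitor stores
        println!("{:.1}% at {:?}", stamped.1, stamped.0);
    }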
@@ -27,8 +27,8 @@ fn push_if_valid<T: std::clone::Clone>(result: &Result<T>, vector_to_push: &mut
 pub struct Data {
     pub list_of_cpu_packages: Vec<cpu::CPUPackage>,
     pub list_of_io: Vec<disks::IOPackage>,
-    pub memory: Vec<mem::MemData>,
-    pub swap: Vec<mem::MemData>,
+    pub memory: mem::MemHarvest,
+    pub swap: mem::MemHarvest,
     pub list_of_temperature_sensor: Vec<temperature::TempData>,
     pub network: network::NetworkHarvest,
     pub list_of_processes: Vec<processes::ProcessData>,
@@ -42,8 +42,8 @@ impl Default for Data {
         Data {
             list_of_cpu_packages: Vec::default(),
             list_of_io: Vec::default(),
-            memory: Vec::default(),
-            swap: Vec::default(),
+            memory: mem::MemHarvest::default(),
+            swap: mem::MemHarvest::default(),
             list_of_temperature_sensor: Vec::default(),
             list_of_processes: Vec::default(),
             grouped_list_of_processes: None,
@@ -58,12 +58,14 @@ impl Data {
     pub fn first_run_cleanup(&mut self) {
         self.list_of_cpu_packages = Vec::new();
         self.list_of_io = Vec::new();
-        self.memory = Vec::new();
-        self.swap = Vec::new();
         self.list_of_temperature_sensor = Vec::new();
         self.list_of_processes = Vec::new();
         self.grouped_list_of_processes = None;
         self.list_of_disks = Vec::new();
+
+        self.network.first_run_cleanup();
+        self.memory = mem::MemHarvest::default();
+        self.swap = mem::MemHarvest::default();
     }
 }

@@ -74,6 +76,7 @@ pub struct DataState {
     prev_pid_stats: HashMap<String, (f64, Instant)>,
     prev_idle: f64,
     prev_non_idle: f64,
+    mem_total_kb: u64,
     temperature_type: temperature::TemperatureType,
     last_clean: Instant, // Last time stale data was cleared
     use_current_cpu_total: bool,
@@ -88,6 +91,7 @@ impl Default for DataState {
             prev_pid_stats: HashMap::new(),
             prev_idle: 0_f64,
             prev_non_idle: 0_f64,
+            mem_total_kb: 0,
             temperature_type: temperature::TemperatureType::Celsius,
             last_clean: Instant::now(),
             use_current_cpu_total: false,
@@ -106,6 +110,9 @@ impl DataState {

     pub fn init(&mut self) {
         self.sys.refresh_all();
+        self.mem_total_kb = self.sys.get_total_memory();
+        futures::executor::block_on(self.update_data());
+        self.data.first_run_cleanup();
     }

     pub async fn update_data(&mut self) {
@@ -129,20 +136,20 @@ impl DataState {
         )
         .await;

+        // Mem and swap
+        if let Ok(memory) = mem::get_mem_data_list().await {
+            self.data.memory = memory;
+        }
+
+        if let Ok(swap) = mem::get_swap_data_list().await {
+            self.data.swap = swap;
+        }
+
         // What we want to do: For timed data, if there is an error, just do not add. For other data, just don't update!
         push_if_valid(
             &cpu::get_cpu_data_list(&self.sys, &current_instant),
             &mut self.data.list_of_cpu_packages,
         );
-
-        push_if_valid(
-            &mem::get_mem_data_list(&current_instant).await,
-            &mut self.data.memory,
-        );
-        push_if_valid(
-            &mem::get_swap_data_list(&current_instant).await,
-            &mut self.data.swap,
-        );
         set_if_valid(
             &processes::get_sorted_processes_list(
                 &self.sys,
@@ -150,6 +157,7 @@ impl DataState {
                 &mut self.prev_non_idle,
                 &mut self.prev_pid_stats,
                 self.use_current_cpu_total,
+                self.mem_total_kb,
                 &current_instant,
             ),
             &mut self.data.list_of_processes,
@@ -185,8 +193,6 @@ impl DataState {
             self.prev_pid_stats.remove(&stale);
         }

-        // TODO: [OPT] cleaning stale network
-
         self.data.list_of_cpu_packages = self
             .data
             .list_of_cpu_packages
@@ -197,26 +203,6 @@ impl DataState {
                 })
                 .collect::<Vec<_>>();

-            self.data.memory = self
-                .data
-                .memory
-                .iter()
-                .cloned()
-                .filter(|entry| {
-                    clean_instant.duration_since(entry.instant).as_secs() <= self.stale_max_seconds
-                })
-                .collect::<Vec<_>>();
-
-            self.data.swap = self
-                .data
-                .swap
-                .iter()
-                .cloned()
-                .filter(|entry| {
-                    clean_instant.duration_since(entry.instant).as_secs() <= self.stale_max_seconds
-                })
-                .collect::<Vec<_>>();
-
             self.data.list_of_io = self
                 .data
                 .list_of_io
@@ -1,30 +1,35 @@
 use heim::units::information;
-use std::time::Instant;

 #[derive(Debug, Clone)]
-pub struct MemData {
+pub struct MemHarvest {
     pub mem_total_in_mb: u64,
     pub mem_used_in_mb: u64,
-    pub instant: Instant,
 }

-pub async fn get_mem_data_list(curr_time: &Instant) -> crate::utils::error::Result<MemData> {
+impl Default for MemHarvest {
+    fn default() -> Self {
+        MemHarvest {
+            mem_total_in_mb: 0,
+            mem_used_in_mb: 0,
+        }
+    }
+}
+
+pub async fn get_mem_data_list() -> crate::utils::error::Result<MemHarvest> {
     let memory = heim::memory::memory().await?;

-    Ok(MemData {
+    Ok(MemHarvest {
         mem_total_in_mb: memory.total().get::<information::megabyte>(),
         mem_used_in_mb: memory.total().get::<information::megabyte>()
             - memory.available().get::<information::megabyte>(),
-        instant: *curr_time,
     })
 }

-pub async fn get_swap_data_list(curr_time: &Instant) -> crate::utils::error::Result<MemData> {
+pub async fn get_swap_data_list() -> crate::utils::error::Result<MemHarvest> {
     let memory = heim::memory::swap().await?;

-    Ok(MemData {
+    Ok(MemHarvest {
         mem_total_in_mb: memory.total().get::<information::megabyte>(),
         mem_used_in_mb: memory.used().get::<information::megabyte>(),
-        instant: *curr_time,
     })
 }
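As a quick orientation for callers, a sketch of how the converted harvesters might be exercised; this is an editorial illustration only — the `use` path copies the one added to data_janitor.rs and the blocking executor mirrors how main.rs already drives update_data(), but the function itself does not exist in the codebase:

    // Editorial sketch, not part of the commit.
    use crate::data_harvester::mem;

    pub fn print_mem_and_swap() -> crate::utils::error::Result<()> {
        // Both harvesters are async and return a single MemHarvest snapshot.
        let memory = futures::executor::block_on(mem::get_mem_data_list())?;
        let swap = futures::executor::block_on(mem::get_swap_data_list())?;
        println!(
            "RAM: {} MB used of {} MB, SWP: {} MB used of {} MB",
            memory.mem_used_in_mb, memory.mem_total_in_mb, swap.mem_used_in_mb, swap.mem_total_in_mb
        );
        Ok(())
    }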
@@ -12,6 +12,13 @@ pub struct NetworkHarvest {
     pub total_tx: u64,
 }

+impl NetworkHarvest {
+    pub fn first_run_cleanup(&mut self) {
+        self.rx = 0;
+        self.tx = 0;
+    }
+}
+
 pub async fn get_network_data(
     sys: &System, prev_net_access_time: &Instant, prev_net_rx: &mut u64, prev_net_tx: &mut u64,
     curr_time: &Instant,
@@ -21,8 +21,7 @@ impl Default for ProcessSorting {
 pub struct ProcessData {
     pub pid: u32,
     pub cpu_usage_percent: f64,
-    pub mem_usage_percent: Option<f64>,
-    pub mem_usage_kb: Option<u64>,
+    pub mem_usage_percent: f64,
     pub name: String,
     pub pid_vec: Option<Vec<u32>>,
 }
@@ -188,9 +187,8 @@ fn convert_ps(
         return Ok(ProcessData {
             pid: 0,
             name: "".to_string(),
-            mem_usage_percent: None,
-            mem_usage_kb: None,
-            cpu_usage_percent: 0_f64,
+            mem_usage_percent: 0.0,
+            cpu_usage_percent: 0.0,
             pid_vec: None,
         });
     }
@@ -201,19 +199,16 @@ fn convert_ps(
         .parse::<u32>()
         .unwrap_or(0);
     let name = (&process[11..61]).trim().to_string();
-    let mem_usage_percent = Some(
-        (&process[62..])
-            .trim()
-            .to_string()
-            .parse::<f64>()
-            .unwrap_or(0_f64),
-    );
+    let mem_usage_percent = (&process[62..])
+        .trim()
+        .to_string()
+        .parse::<f64>()
+        .unwrap_or(0_f64);

     Ok(ProcessData {
         pid,
         name,
         mem_usage_percent,
-        mem_usage_kb: None,
         cpu_usage_percent: linux_cpu_usage(
             pid,
             cpu_usage,
@@ -229,7 +224,7 @@ fn convert_ps(
 pub fn get_sorted_processes_list(
     sys: &System, prev_idle: &mut f64, prev_non_idle: &mut f64,
     prev_pid_stats: &mut std::collections::HashMap<String, (f64, Instant)>,
-    use_current_cpu_total: bool, curr_time: &Instant,
+    use_current_cpu_total: bool, mem_total_kb: u64, curr_time: &Instant,
 ) -> crate::utils::error::Result<Vec<ProcessData>> {
     let mut process_vector: Vec<ProcessData> = Vec::new();

@@ -241,7 +236,6 @@ pub fn get_sorted_processes_list(
         .output()?;
     let ps_stdout = String::from_utf8_lossy(&ps_result.stdout);
     let split_string = ps_stdout.split('\n');
-    //debug!("{:?}", split_string);
     let cpu_calc = cpu_usage_calculation(prev_idle, prev_non_idle);
     if let Ok((cpu_usage, cpu_percentage)) = cpu_calc {
         let process_stream = split_string.collect::<Vec<&str>>();
@@ -291,8 +285,7 @@ pub fn get_sorted_processes_list(
             process_vector.push(ProcessData {
                 pid: process_val.pid() as u32,
                 name,
-                mem_usage_percent: None,
-                mem_usage_kb: Some(process_val.memory()),
+                mem_usage_percent: process_val.memory() as f64 * 100.0 / mem_total_kb as f64,
                 cpu_usage_percent: f64::from(process_val.cpu_usage()),
                 pid_vec: None,
             });
@@ -1,4 +1,4 @@
-use crate::{data_harvester::network, data_harvester::Data};
+use crate::data_harvester::{mem, network, Data};
 /// In charge of cleaning and managing data. I couldn't think of a better
 /// name for the file.
 use std::time::Instant;
@@ -15,6 +15,8 @@ pub struct TimedData {
     pub cpu_data: JoinedDataPoints,
+    pub mem_data: JoinedDataPoints,
+    pub swap_data: JoinedDataPoints,
     pub temp_data: JoinedDataPoints,
     pub io_data: JoinedDataPoints,
 }

 /// AppCollection represents the pooled data stored within the main app
@@ -31,9 +33,9 @@ pub struct DataCollection {
     pub current_instant: Instant,
     pub timed_data_vec: Vec<(Instant, TimedData)>,
     pub network_harvest: network::NetworkHarvest,
+    pub memory_harvest: mem::MemHarvest,
+    pub swap_harvest: mem::MemHarvest,
     // pub process_data: ProcessData,
-    // pub disk_data: DiskData,
-    // pub temp_data: TempData,
 }

 impl Default for DataCollection {
@@ -42,6 +44,8 @@ impl Default for DataCollection {
             current_instant: Instant::now(),
             timed_data_vec: Vec::default(),
             network_harvest: network::NetworkHarvest::default(),
+            memory_harvest: mem::MemHarvest::default(),
+            swap_harvest: mem::MemHarvest::default(),
             // process_data: ProcessData::default(),
         }
     }
@@ -54,6 +58,52 @@ impl DataCollection {
         let harvested_time = harvested_data.last_collection_time;
         let mut new_entry = TimedData::default();

+        // Network
+        self.eat_network(&harvested_data, &harvested_time, &mut new_entry);
+
+        // Memory and Swap
+        self.eat_memory_and_swap(&harvested_data, &harvested_time, &mut new_entry);
+
+        // And we're done eating.
+        self.current_instant = harvested_time;
+        self.timed_data_vec.push((harvested_time, new_entry));
+    }
+
+    fn eat_memory_and_swap(
+        &mut self, harvested_data: &Data, harvested_time: &Instant, new_entry: &mut TimedData,
+    ) {
+        // Memory
+        let mem_percent = harvested_data.memory.mem_used_in_mb as f64
+            / harvested_data.memory.mem_total_in_mb as f64
+            * 100.0;
+        let mem_joining_pts = if let Some((time, last_pt)) = self.timed_data_vec.last() {
+            generate_joining_points(&time, last_pt.mem_data.0, &harvested_time, mem_percent)
+        } else {
+            Vec::new()
+        };
+        let mem_pt = (mem_percent, mem_joining_pts);
+        new_entry.mem_data = mem_pt;
+
+        // Swap
+        let swap_percent = harvested_data.swap.mem_used_in_mb as f64
+            / harvested_data.swap.mem_total_in_mb as f64
+            * 100.0;
+        let swap_joining_pt = if let Some((time, last_pt)) = self.timed_data_vec.last() {
+            generate_joining_points(&time, last_pt.swap_data.0, &harvested_time, swap_percent)
+        } else {
+            Vec::new()
+        };
+        let swap_pt = (swap_percent, swap_joining_pt);
+        new_entry.swap_data = swap_pt;
+
+        // In addition copy over latest data for easy reference
+        self.memory_harvest = harvested_data.memory.clone();
+        self.swap_harvest = harvested_data.swap.clone();
+    }
+
+    fn eat_network(
+        &mut self, harvested_data: &Data, harvested_time: &Instant, new_entry: &mut TimedData,
+    ) {
+        // RX
         let rx_joining_pts = if let Some((time, last_pt)) = self.timed_data_vec.last() {
             generate_joining_points(
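generate_joining_points itself is defined elsewhere in data_janitor.rs and is not shown in this diff. Judging only from the call sites above and from how the joiner points are consumed later (`time_from_start - joiner_offset as f64`), it appears to return interpolated (offset, value) pairs that bridge the previous sample to the new one. A rough sketch of that assumed behavior — the tuple types, the 50-step count, and the millisecond offsets are all guesses, not taken from the source:

    use std::time::Instant;

    // Assumed sketch of generate_joining_points-like behavior: linearly
    // interpolate between the previous and current sample, returning
    // (offset back from curr_time in ms, interpolated value) pairs.
    fn sketch_joining_points(
        prev_time: &Instant, prev_value: f64, curr_time: &Instant, curr_value: f64,
    ) -> Vec<(u64, f64)> {
        const STEPS: u64 = 50; // assumed; echoes the 50-step loop removed from data conversion
        let span_ms = curr_time.duration_since(*prev_time).as_millis() as u64;
        (1..STEPS)
            .map(|i| {
                let frac = i as f64 / STEPS as f64;
                let offset_back_ms = span_ms - (span_ms as f64 * frac) as u64;
                (offset_back_ms, prev_value + (curr_value - prev_value) * frac)
            })
            .collect()
    }

    fn main() {
        let earlier = Instant::now();
        std::thread::sleep(std::time::Duration::from_millis(10));
        let now = Instant::now();
        let pts = sketch_joining_points(&earlier, 20.0, &now, 40.0);
        println!("{} joiner points, first = {:?}", pts.len(), pts.first());
    }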
@@ -82,12 +132,8 @@
         let tx_pt = (harvested_data.network.tx as f64, tx_joining_pts);
         new_entry.tx_data = tx_pt;

-        // Copy over data
+        // In addition copy over latest data for easy reference
         self.network_harvest = harvested_data.network.clone();
-
-        // And we're done eating.
-        self.current_instant = harvested_time;
-        self.timed_data_vec.push((harvested_time, new_entry));
     }
 }

@@ -585,12 +585,13 @@ fn draw_memory_graph<B: backend::Backend>(f: &mut Frame<B>, app_state: &app::App

     let x_axis: Axis<String> = Axis::default()
         .style(Style::default().fg(GRAPH_COLOUR))
-        .bounds([0.0, constants::TIME_STARTS_FROM as f64 * 10.0]);
+        .bounds([0.0, constants::TIME_STARTS_FROM as f64]);
     let y_axis = Axis::default()
         .style(Style::default().fg(GRAPH_COLOUR))
         .bounds([-0.5, 100.5]) // Offset as the zero value isn't drawn otherwise...
         .labels(&["0%", "100%"]);

+    // TODO: [OPT] Move this
     let mem_name = "RAM:".to_string()
         + &format!(
             "{:3}%",
@@ -159,7 +159,7 @@ pub fn simple_update_process_row(
                 process.name.to_ascii_lowercase().contains(matching_string)
             }
         })
-        .map(|process| return_mapped_process(process, app_data))
+        .map(|process| return_mapped_process(process))
         .collect::<Vec<_>>();

     let mut grouped_process_vector: Vec<ConvertedProcessData> = Vec::new();
@@ -177,7 +177,7 @@ pub fn simple_update_process_row(
                     process.name.to_ascii_lowercase().contains(matching_string)
                 }
             })
-            .map(|process| return_mapped_process(process, app_data))
+            .map(|process| return_mapped_process(process))
             .collect::<Vec<_>>();
     }

@@ -202,7 +202,7 @@ pub fn regex_update_process_row(
                 true
             }
         })
-        .map(|process| return_mapped_process(process, app_data))
+        .map(|process| return_mapped_process(process))
         .collect::<Vec<_>>();

     let mut grouped_process_vector: Vec<ConvertedProcessData> = Vec::new();
@@ -220,34 +220,19 @@ pub fn regex_update_process_row(
                     true
                 }
             })
-            .map(|process| return_mapped_process(process, app_data))
+            .map(|process| return_mapped_process(process))
             .collect::<Vec<_>>();
     }

     (process_vector, grouped_process_vector)
 }

-fn return_mapped_process(
-    process: &data_harvester::processes::ProcessData, app_data: &data_harvester::Data,
-) -> ConvertedProcessData {
+fn return_mapped_process(process: &data_harvester::processes::ProcessData) -> ConvertedProcessData {
     ConvertedProcessData {
         pid: process.pid,
         name: process.name.to_string(),
         cpu_usage: format!("{:.1}%", process.cpu_usage_percent),
-        mem_usage: format!(
-            "{:.1}%",
-            if let Some(mem_usage) = process.mem_usage_percent {
-                mem_usage
-            } else if let Some(mem_usage_kb) = process.mem_usage_kb {
-                if let Some(mem_data) = app_data.memory.last() {
-                    (mem_usage_kb / 1000) as f64 / mem_data.mem_total_in_mb as f64 * 100_f64 // TODO: [OPT] Get rid of this
-                } else {
-                    0_f64
-                }
-            } else {
-                0_f64
-            }
-        ),
+        mem_usage: format!("{:.1}%", process.mem_usage_percent),
         group: vec![],
     }
 }
@@ -331,71 +316,65 @@ pub fn update_cpu_data_points(
     cpu_data_vector
 }

-pub fn update_mem_data_points(app_data: &data_harvester::Data) -> Vec<(f64, f64)> {
-    convert_mem_data(&app_data.memory)
-}
-
-pub fn update_swap_data_points(app_data: &data_harvester::Data) -> Vec<(f64, f64)> {
-    convert_mem_data(&app_data.swap)
-}
-
-pub fn update_mem_data_values(app_data: &data_harvester::Data) -> Vec<(u64, u64)> {
-    let mut result: Vec<(u64, u64)> = Vec::new();
-    result.push(get_most_recent_mem_values(&app_data.memory));
-    result.push(get_most_recent_mem_values(&app_data.swap));
-
-    result
-}
-
-fn get_most_recent_mem_values(mem_data: &[data_harvester::mem::MemData]) -> (u64, u64) {
-    let mut result: (u64, u64) = (0, 0);
-
-    if !mem_data.is_empty() {
-        if let Some(most_recent) = mem_data.last() {
-            result.0 = most_recent.mem_used_in_mb;
-            result.1 = most_recent.mem_total_in_mb;
-        }
-    }
-
-    result
-}
-
-fn convert_mem_data(mem_data: &[data_harvester::mem::MemData]) -> Vec<(f64, f64)> {
+pub fn update_mem_data_points(current_data: &data_janitor::DataCollection) -> Vec<(f64, f64)> {
     let mut result: Vec<(f64, f64)> = Vec::new();
+    let current_time = current_data.current_instant;

-    for data in mem_data {
-        let current_time = std::time::Instant::now();
-        let new_entry = (
-            ((TIME_STARTS_FROM as f64
-                - current_time.duration_since(data.instant).as_millis() as f64)
-                * 10_f64)
-                .floor(),
-            if data.mem_total_in_mb == 0 {
-                -1000.0
-            } else {
-                (data.mem_used_in_mb as f64 * 100_f64) / data.mem_total_in_mb as f64
-            },
-        );
+    for (time, data) in &current_data.timed_data_vec {
+        let time_from_start: f64 = (TIME_STARTS_FROM as f64
+            - current_time.duration_since(*time).as_millis() as f64)
+            .floor();

-        // Now, inject our joining points...
-        if !result.is_empty() {
-            let previous_element_data = *(result.last().unwrap());
-            for idx in 0..50 {
-                result.push((
-                    previous_element_data.0
-                        + ((new_entry.0 - previous_element_data.0) / 50.0 * f64::from(idx)),
-                    previous_element_data.1
-                        + ((new_entry.1 - previous_element_data.1) / 50.0 * f64::from(idx)),
-                ));
-            }
-        }
+        //Insert joiner points
+        for &(joiner_offset, joiner_val) in &data.mem_data.1 {
+            let offset_time = time_from_start - joiner_offset as f64;
+            result.push((offset_time, joiner_val));
+        }

-        result.push(new_entry);
+        result.push((time_from_start, data.mem_data.0));
     }

     result
 }

+pub fn update_swap_data_points(current_data: &data_janitor::DataCollection) -> Vec<(f64, f64)> {
+    let mut result: Vec<(f64, f64)> = Vec::new();
+    let current_time = current_data.current_instant;
+
+    for (time, data) in &current_data.timed_data_vec {
+        let time_from_start: f64 = (TIME_STARTS_FROM as f64
+            - current_time.duration_since(*time).as_millis() as f64)
+            .floor();
+
+        //Insert joiner points
+        for &(joiner_offset, joiner_val) in &data.swap_data.1 {
+            let offset_time = time_from_start - joiner_offset as f64;
+            result.push((offset_time, joiner_val));
+        }
+
+        result.push((time_from_start, data.swap_data.0));
+    }
+
+    result
+}
+
+pub fn update_mem_labels(current_data: &data_janitor::DataCollection) -> Vec<(u64, u64)> {
+    let mut result: Vec<(u64, u64)> = Vec::new();
+
+    // This wants (u64, u64) values - left is usage in MB, right is total in MB
+    result.push((
+        current_data.memory_harvest.mem_used_in_mb,
+        current_data.memory_harvest.mem_total_in_mb,
+    ));
+
+    result.push((
+        current_data.swap_harvest.mem_used_in_mb,
+        current_data.swap_harvest.mem_total_in_mb,
+    ));
+
+    result
+}
+
 pub fn convert_network_data_points(
     current_data: &data_janitor::DataCollection,
 ) -> ConvertedNetworkData {
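The time-to-x mapping above is easy to sanity-check in isolation: each sample's x value is TIME_STARTS_FROM minus the milliseconds elapsed since it was harvested, which is why the canvas x-axis bound drops from TIME_STARTS_FROM * 10 to TIME_STARTS_FROM now that the old `* 10_f64` scaling is gone. A small sketch (the 60 000 ms value for TIME_STARTS_FROM is only an assumed example; the real constant lives in constants.rs and is not shown in this diff):

    // Editorial sketch of the mapping used by update_mem_data_points and
    // update_swap_data_points. TIME_STARTS_FROM below is an assumed example value.
    const TIME_STARTS_FROM: u64 = 60_000; // assume a 60-second window, in milliseconds

    fn time_from_start(elapsed_ms: u64) -> f64 {
        (TIME_STARTS_FROM as f64 - elapsed_ms as f64).floor()
    }

    fn main() {
        // A just-harvested sample sits at the right edge of the [0, TIME_STARTS_FROM]
        // x bounds set in canvas.rs; older samples drift left and eventually go negative.
        assert!((time_from_start(0) - 60_000.0).abs() < f64::EPSILON);
        assert!((time_from_start(15_000) - 45_000.0).abs() < f64::EPSILON);
        assert!(time_from_start(61_000) < 0.0);
        println!("x-mapping sanity checks passed");
    }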
@@ -433,8 +412,22 @@ pub fn convert_network_data_points(
             ));
         }

-        rx.push((time_from_start, data.rx_data.0));
-        tx.push((time_from_start, data.tx_data.0));
+        rx.push((
+            time_from_start,
+            if data.rx_data.0 > 0.0 {
+                (data.rx_data.0).log(2.0)
+            } else {
+                0.0
+            },
+        ));
+        tx.push((
+            time_from_start,
+            if data.rx_data.0 > 0.0 {
+                (data.rx_data.0).log(2.0)
+            } else {
+                0.0
+            },
+        ));
     }

     let total_rx_converted_result: (f64, String);
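The network points are now pushed on a log2 scale with a guard for zero traffic. A short sketch of that scaling in isolation (editorial illustration; the sample byte counts are made up):

    // Plotting log2(bytes) keeps small and large transfer totals on one readable
    // axis, and the `> 0.0` guard avoids pushing negative infinity when nothing
    // was transferred during the poll.
    fn scale_for_plot(bytes: f64) -> f64 {
        if bytes > 0.0 {
            bytes.log(2.0)
        } else {
            0.0
        }
    }

    fn main() {
        assert_eq!(scale_for_plot(0.0), 0.0); // idle link stays on the baseline
        assert!((scale_for_plot(1024.0) - 10.0).abs() < 1e-9); // 1 KiB -> 10
        assert!((scale_for_plot(1_048_576.0) - 20.0).abs() < 1e-9); // 1 MiB -> 20
        println!("log scaling sanity checks passed");
    }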
src/main.rs (47 changed lines)
@@ -179,7 +179,6 @@ fn main() -> error::Result<()> {
     let (rtx, rrx) = mpsc::channel();
     {
         let tx = tx;
-        let mut first_run = true;
         let temp_type = app.temperature_type.clone();
         thread::spawn(move || {
             let tx = tx.clone();
@@ -191,21 +190,11 @@ fn main() -> error::Result<()> {
             if let Ok(message) = rrx.try_recv() {
                 match message {
                     ResetEvent::Reset => {
-                        //debug!("Received reset message");
-                        first_run = true;
-                        data_state.data = app::data_harvester::Data::default();
+                        data_state.data.first_run_cleanup();
                     }
                 }
             }
             futures::executor::block_on(data_state.update_data());
-
-            if first_run {
-                // Fix for if you set a really long time for update periods (and just gives a faster first value)
-                data_state.data.first_run_cleanup(); // TODO: [OPT] we can remove this later.
-                thread::sleep(Duration::from_millis(250));
-                futures::executor::block_on(data_state.update_data());
-                first_run = false;
-            }
             tx.send(Event::Update(Box::from(data_state.data.clone())))
                 .unwrap(); // TODO: [UNWRAP] Might be required, it's in a closure and idk how to deal with it
             thread::sleep(Duration::from_millis(update_rate_in_milliseconds as u64));
@@ -295,9 +284,9 @@ fn main() -> error::Result<()> {
                 app.canvas_data.disk_data = update_disk_row(&app.data);
                 app.canvas_data.temp_sensor_data =
                     update_temp_row(&app.data, &app.temperature_type);
-                app.canvas_data.mem_data = update_mem_data_points(&app.data);
-                app.canvas_data.memory_labels = update_mem_data_values(&app.data);
-                app.canvas_data.swap_data = update_swap_data_points(&app.data);
+                app.canvas_data.mem_data = update_mem_data_points(&app.data_collection);
+                app.canvas_data.swap_data = update_swap_data_points(&app.data_collection);
+                app.canvas_data.memory_labels = update_mem_labels(&app.data_collection);
                 app.canvas_data.cpu_data =
                     update_cpu_data_points(app.show_average_cpu, &app.data);
             }
@@ -325,7 +314,7 @@ fn main() -> error::Result<()> {
     Ok(())
 }

-type TempProcess = (f64, Option<f64>, Option<u64>, Vec<u32>);
+type TempProcess = (f64, f64, Vec<u32>);

 fn handle_process_sorting(app: &mut app::App) {
     // Handle combining multi-pid processes to form one entry in table.
@@ -338,23 +327,12 @@ fn handle_process_sorting(app: &mut app::App) {
     // Fields for tuple: CPU%, MEM%, MEM_KB, PID_VEC
     let mut process_map: BTreeMap<String, TempProcess> = BTreeMap::new();
     for process in &app.data.list_of_processes {
-        let entry_val =
-            process_map
-                .entry(process.name.clone())
-                .or_insert((0.0, None, None, vec![]));
-        if let Some(mem_usage) = process.mem_usage_percent {
-            entry_val.0 += process.cpu_usage_percent;
-            if let Some(m) = &mut entry_val.1 {
-                *m += mem_usage;
-            }
-            entry_val.3.push(process.pid);
-        } else if let Some(mem_usage_kb) = process.mem_usage_kb {
-            entry_val.0 += process.cpu_usage_percent;
-            if let Some(m) = &mut entry_val.2 {
-                *m += mem_usage_kb;
-            }
-            entry_val.3.push(process.pid);
-        }
+        let entry_val = process_map
+            .entry(process.name.clone())
+            .or_insert((0.0, 0.0, vec![]));
+        entry_val.0 += process.cpu_usage_percent;
+        entry_val.1 += process.mem_usage_percent;
+        entry_val.2.push(process.pid);
     }

     // Now... turn this back into the exact same vector... but now with merged processes!
@@ -366,9 +344,8 @@ fn handle_process_sorting(app: &mut app::App) {
                     pid: 0, // Irrelevant
                     cpu_usage_percent: data.0,
                     mem_usage_percent: data.1,
-                    mem_usage_kb: data.2,
                     name: name.clone(),
-                    pid_vec: Some(data.3.clone()),
+                    pid_vec: Some(data.2.clone()),
                 }
             })
             .collect::<Vec<_>>(),