refactor/bug: Array bounds checking, fix CPU jump

This commit is contained in:
Clement Tsang 2020-04-06 23:04:04 -04:00 committed by GitHub
parent 0a63ee46ef
commit 9127cb1468
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
9 changed files with 223 additions and 206 deletions

View file

@ -30,6 +30,7 @@ dirs = "2.0.2"
fern = "0.6.0"
futures = "0.3.4"
heim = "0.0.10"
itertools = "0.9.0"
log = "0.4.8"
regex = "1.3"
sysinfo = "0.12"

View file

@ -583,6 +583,7 @@ impl App {
cpu_widget_state.scroll_state.current_scroll_position = new_position;
cpu_widget_state.scroll_state.previous_scroll_position = 0;
}
self.is_resized = true;
}
}
BottomWidgetType::CpuLegend => {
@ -600,6 +601,7 @@ impl App {
cpu_widget_state.scroll_state.current_scroll_position = new_position;
cpu_widget_state.scroll_state.previous_scroll_position = 0;
}
self.is_resized = true;
}
}
BottomWidgetType::Proc => {
@ -614,6 +616,7 @@ impl App {
.search_state
.is_enabled = false;
}
self.is_resized = true;
}
}
BottomWidgetType::ProcSearch => {
@ -629,6 +632,7 @@ impl App {
.is_enabled = false;
self.move_widget_selection_up();
}
self.is_resized = true;
}
}
_ => {}
@ -1943,7 +1947,7 @@ impl App {
if let Some(cpu_widget_state) = self
.cpu_state
.widget_states
.get_mut(&self.current_widget.widget_id)
.get_mut(&(self.current_widget.widget_id - 1))
{
cpu_widget_state.scroll_state.current_scroll_position = 0;
cpu_widget_state.scroll_state.scroll_direction = ScrollDirection::UP;
@ -2010,7 +2014,7 @@ impl App {
if let Some(cpu_widget_state) = self
.cpu_state
.widget_states
.get_mut(&self.current_widget.widget_id)
.get_mut(&(self.current_widget.widget_id - 1))
{
let cap = if is_filtering_or_searching {
self.canvas_data.cpu_data.len()

View file

@ -54,8 +54,7 @@ pub struct DataCollection {
pub process_harvest: Vec<processes::ProcessHarvest>,
pub disk_harvest: Vec<disks::DiskHarvest>,
pub io_harvest: disks::IOHarvest,
pub io_labels: Vec<(u64, u64)>,
io_prev: Vec<(u64, u64)>,
pub io_labels_and_prev: Vec<((u64, u64), (u64, u64))>,
pub temp_harvest: Vec<temperature::TempHarvest>,
}
@ -72,8 +71,7 @@ impl Default for DataCollection {
process_harvest: Vec::default(),
disk_harvest: Vec::default(),
io_harvest: disks::IOHarvest::default(),
io_labels: Vec::default(),
io_prev: Vec::default(),
io_labels_and_prev: Vec::default(),
temp_harvest: Vec::default(),
}
}
@ -89,8 +87,7 @@ impl DataCollection {
self.process_harvest = Vec::default();
self.disk_harvest = Vec::default();
self.io_harvest = disks::IOHarvest::default();
self.io_labels = Vec::default();
self.io_prev = Vec::default();
self.io_labels_and_prev = Vec::default();
self.temp_harvest = Vec::default();
}
@ -219,20 +216,18 @@ impl DataCollection {
// Note this only pre-calculates the data points - the names will be
// within the local copy of cpu_harvest. Since it's all sequential
// it probably doesn't matter anyways.
for (itx, cpu) in harvested_data.cpu.iter().enumerate() {
let cpu_joining_pts = if let Some((time, last_pt)) = self.timed_data_vec.last() {
generate_joining_points(
*time,
last_pt.cpu_data[itx].0,
harvested_time,
cpu.cpu_usage,
)
} else {
Vec::new()
};
let cpu_pt = (cpu.cpu_usage, cpu_joining_pts);
new_entry.cpu_data.push(cpu_pt);
if let Some((time, last_pt)) = self.timed_data_vec.last() {
for (cpu, last_pt_data) in harvested_data.cpu.iter().zip(&last_pt.cpu_data) {
let cpu_joining_pts =
generate_joining_points(*time, last_pt_data.0, harvested_time, cpu.cpu_usage);
let cpu_pt = (cpu.cpu_usage, cpu_joining_pts);
new_entry.cpu_data.push(cpu_pt);
}
} else {
for cpu in harvested_data.cpu.iter() {
let cpu_pt = (cpu.cpu_usage, Vec::new());
new_entry.cpu_data.push(cpu_pt);
}
}
self.cpu_harvest = harvested_data.cpu.clone();
@ -257,19 +252,16 @@ impl DataCollection {
let io_r_pt = io.read_bytes;
let io_w_pt = io.write_bytes;
if self.io_labels.len() <= itx {
self.io_prev.push((io_r_pt, io_w_pt));
self.io_labels.push((0, 0));
} else {
let r_rate = ((io_r_pt - self.io_prev[itx].0) as f64
/ time_since_last_harvest)
.round() as u64;
let w_rate = ((io_w_pt - self.io_prev[itx].1) as f64
/ time_since_last_harvest)
.round() as u64;
if self.io_labels_and_prev.len() <= itx {
self.io_labels_and_prev.push(((0, 0), (io_r_pt, io_w_pt)));
} else if let Some((io_curr, io_prev)) = self.io_labels_and_prev.get_mut(itx) {
let r_rate =
((io_r_pt - io_prev.0) as f64 / time_since_last_harvest).round() as u64;
let w_rate =
((io_w_pt - io_prev.1) as f64 / time_since_last_harvest).round() as u64;
self.io_labels[itx] = (r_rate, w_rate);
self.io_prev[itx] = (io_r_pt, io_w_pt);
*io_curr = (r_rate, w_rate);
*io_prev = (io_r_pt, io_w_pt);
}
}
}

View file

@ -1,3 +1,4 @@
use itertools::izip;
use std::cmp::max;
use std::collections::HashMap;
@ -442,103 +443,101 @@ impl Painter {
let col_draw_locs = self
.col_constraints
.iter()
.enumerate()
.map(|(itx, col_constraint)| {
.zip(&row_draw_locs)
.map(|(col_constraint, row_draw_loc)| {
Layout::default()
.constraints(col_constraint.as_ref())
.direction(Direction::Horizontal)
.split(row_draw_locs[itx])
.split(*row_draw_loc)
})
.collect::<Vec<_>>();
let col_row_draw_locs = self
.col_row_constraints
.iter()
.enumerate()
.map(|(col_itx, col_row_constraints)| {
.zip(&col_draw_locs)
.map(|(col_row_constraints, row_draw_loc)| {
col_row_constraints
.iter()
.enumerate()
.map(|(itx, col_row_constraint)| {
.zip(row_draw_loc)
.map(|(col_row_constraint, col_draw_loc)| {
Layout::default()
.constraints(col_row_constraint.as_ref())
.direction(Direction::Vertical)
.split(col_draw_locs[col_itx][itx])
.split(*col_draw_loc)
})
.collect::<Vec<_>>()
})
.collect::<Vec<_>>();
// Now... draw!
self.layout_constraints.iter().enumerate().for_each(
|(row_itx, col_constraint_vec)| {
col_constraint_vec.iter().enumerate().for_each(
|(col_itx, col_row_constraint_vec)| {
col_row_constraint_vec.iter().enumerate().for_each(
|(col_row_itx, widget_constraints)| {
let widget_draw_locs = Layout::default()
.constraints(widget_constraints.as_ref())
.direction(Direction::Horizontal)
.split(
col_row_draw_locs[row_itx][col_itx][col_row_itx],
);
izip!(
&self.layout_constraints,
col_row_draw_locs,
&self.widget_layout.rows
)
.for_each(|(row_constraint_vec, row_draw_loc, cols)| {
izip!(row_constraint_vec, row_draw_loc, &cols.children).for_each(
|(col_constraint_vec, col_draw_loc, col_rows)| {
izip!(col_constraint_vec, col_draw_loc, &col_rows.children).for_each(
|(col_row_constraint_vec, col_row_draw_loc, widgets)| {
// Note that col_row_constraint_vec CONTAINS the widget constraints
let widget_draw_locs = Layout::default()
.constraints(col_row_constraint_vec.as_ref())
.direction(Direction::Horizontal)
.split(col_row_draw_loc);
for (widget_itx, widget) in self.widget_layout.rows[row_itx]
.children[col_itx]
.children[col_row_itx]
.children
.iter()
.enumerate()
{
match widget.widget_type {
Empty => {}
Cpu => self.draw_cpu(
&mut f,
app_state,
widget_draw_locs[widget_itx],
widget.widget_id,
),
Mem => self.draw_memory_graph(
&mut f,
app_state,
widget_draw_locs[widget_itx],
widget.widget_id,
),
Net => self.draw_network(
&mut f,
app_state,
widget_draw_locs[widget_itx],
widget.widget_id,
),
Temp => self.draw_temp_table(
&mut f,
app_state,
widget_draw_locs[widget_itx],
true,
widget.widget_id,
),
Disk => self.draw_disk_table(
&mut f,
app_state,
widget_draw_locs[widget_itx],
true,
widget.widget_id,
),
Proc => self.draw_process_and_search(
&mut f,
app_state,
widget_draw_locs[widget_itx],
true,
widget.widget_id,
),
_ => {}
}
for (widget, widget_draw_loc) in
widgets.children.iter().zip(widget_draw_locs)
{
match widget.widget_type {
Empty => {}
Cpu => self.draw_cpu(
&mut f,
app_state,
widget_draw_loc,
widget.widget_id,
),
Mem => self.draw_memory_graph(
&mut f,
app_state,
widget_draw_loc,
widget.widget_id,
),
Net => self.draw_network(
&mut f,
app_state,
widget_draw_loc,
widget.widget_id,
),
Temp => self.draw_temp_table(
&mut f,
app_state,
widget_draw_loc,
true,
widget.widget_id,
),
Disk => self.draw_disk_table(
&mut f,
app_state,
widget_draw_loc,
true,
widget.widget_id,
),
Proc => self.draw_process_and_search(
&mut f,
app_state,
widget_draw_loc,
true,
widget.widget_id,
),
_ => {}
}
},
);
},
);
},
);
}
},
);
},
);
});
}
})?;

View file

@ -83,10 +83,12 @@ pub fn gen_n_styles(num_to_gen: i32) -> Vec<Style> {
];
let mut h: f32 = 0.4; // We don't need random colours... right?
for _i in 0..(num_to_gen - 10) {
h = gen_hsv(h);
let result = hsv_to_rgb(h, 0.5, 0.95);
colour_vec.push(Style::default().fg(Color::Rgb(result.0, result.1, result.2)));
if num_to_gen - 10 > 0 {
for _i in 0..(num_to_gen - 10) {
h = gen_hsv(h);
let result = hsv_to_rgb(h, 0.5, 0.95);
colour_vec.push(Style::default().fg(Color::Rgb(result.0, result.1, result.2)));
}
}
colour_vec

View file

@ -1,4 +1,5 @@
use crate::app;
use itertools::izip;
/// A somewhat jury-rigged solution to simulate a variable intrinsic layout for
/// table widths. Note that this will do one main pass to try to properly
@ -20,20 +21,24 @@ pub fn get_variable_intrinsic_widths(
.map(|&desired_width_ratio| (desired_width_ratio * total_width as f64) as i32)
.collect::<Vec<_>>();
for (itx, desired_width) in desired_widths.into_iter().enumerate() {
resulting_widths[itx] = if desired_width < width_thresholds[itx] as i32 {
for (desired_width, resulting_width, width_threshold) in izip!(
desired_widths.into_iter(),
resulting_widths.iter_mut(),
width_thresholds
) {
*resulting_width = if desired_width < *width_threshold as i32 {
// Try to take threshold, else, 0
if remaining_width < width_thresholds[itx] as i32 {
if remaining_width < *width_threshold as i32 {
0
} else {
remaining_width -= width_thresholds[itx] as i32;
width_thresholds[itx] as u16
remaining_width -= *width_threshold as i32;
*width_threshold as u16
}
} else {
// Take as large as possible
if remaining_width < desired_width {
// Check the biggest chunk possible
if remaining_width < width_thresholds[itx] as i32 {
if remaining_width < *width_threshold as i32 {
0
} else {
let temp_width = remaining_width;
@ -46,7 +51,7 @@ pub fn get_variable_intrinsic_widths(
}
};
if resulting_widths[itx] == 0 {
if *resulting_width == 0 {
break;
} else {
last_index += 1;

View file

@ -97,34 +97,38 @@ impl CpuBasicWidget for Painter {
let mut row_counter = num_cpus;
let mut start_index = 0;
for (itx, chunk) in chunks.iter().enumerate() {
let to_divide = REQUIRED_COLUMNS - itx;
let how_many_cpus = min(
remaining_height,
(row_counter / to_divide) + (if row_counter % to_divide == 0 { 0 } else { 1 }),
);
row_counter -= how_many_cpus;
let end_index = min(start_index + how_many_cpus, num_cpus);
let cpu_column: Vec<Text<'_>> = (start_index..end_index)
.map(|cpu_index| {
Text::Styled(
(&cpu_bars[cpu_index]).into(),
self.colours.cpu_colour_styles
[cpu_index as usize % self.colours.cpu_colour_styles.len()],
)
})
.collect::<Vec<_>>();
// Explicitly check... don't want an accidental DBZ or underflow
if REQUIRED_COLUMNS > itx {
let to_divide = REQUIRED_COLUMNS - itx;
let how_many_cpus = min(
remaining_height,
(row_counter / to_divide)
+ (if row_counter % to_divide == 0 { 0 } else { 1 }),
);
row_counter -= how_many_cpus;
let end_index = min(start_index + how_many_cpus, num_cpus);
let cpu_column: Vec<Text<'_>> = (start_index..end_index)
.map(|cpu_index| {
Text::Styled(
(&cpu_bars[cpu_index]).into(),
self.colours.cpu_colour_styles
[cpu_index as usize % self.colours.cpu_colour_styles.len()],
)
})
.collect::<Vec<_>>();
start_index += how_many_cpus;
start_index += how_many_cpus;
let margined_loc = Layout::default()
.direction(Direction::Horizontal)
.constraints([Constraint::Percentage(100)].as_ref())
.horizontal_margin(1)
.split(*chunk);
let margined_loc = Layout::default()
.direction(Direction::Horizontal)
.constraints([Constraint::Percentage(100)].as_ref())
.horizontal_margin(1)
.split(*chunk);
Paragraph::new(cpu_column.iter())
.block(Block::default())
.render(f, margined_loc[0]);
Paragraph::new(cpu_column.iter())
.block(Block::default())
.render(f, margined_loc[0]);
}
}
}
}

View file

@ -141,10 +141,11 @@ impl CpuGraphWidget for Painter {
let show_avg_cpu = app_state.app_config_fields.show_average_cpu;
let dataset_vector: Vec<Dataset<'_>> = cpu_data
.iter()
.zip(&cpu_widget_state.core_show_vec)
.enumerate()
.rev()
.filter_map(|(itx, cpu)| {
if cpu_widget_state.core_show_vec[itx] {
.filter_map(|(itx, (cpu, cpu_show_vec))| {
if *cpu_show_vec {
Some(
Dataset::default()
.marker(if use_dot {
@ -231,20 +232,28 @@ impl CpuGraphWidget for Painter {
let show_avg_cpu = app_state.app_config_fields.show_average_cpu;
let cpu_rows = sliced_cpu_data.iter().enumerate().filter_map(|(itx, cpu)| {
let cpu_string_row: Vec<Cow<'_, str>> = if cpu_widget_state.is_showing_tray {
vec![
Cow::Borrowed(&cpu.cpu_name),
if cpu_widget_state.core_show_vec[itx + start_position as usize] {
"[*]".into()
} else {
"[ ]".into()
},
]
} else if show_disabled_data || cpu_widget_state.core_show_vec[itx] {
vec![
Cow::Borrowed(&cpu.cpu_name),
Cow::Borrowed(&cpu.legend_value),
]
let cpu_string_row: Vec<Cow<'_, str>> = if let Some(cpu_core_show_vec) =
cpu_widget_state
.core_show_vec
.get(itx + start_position as usize)
{
if cpu_widget_state.is_showing_tray {
vec![
Cow::Borrowed(&cpu.cpu_name),
if *cpu_core_show_vec {
"[*]".into()
} else {
"[ ]".into()
},
]
} else if show_disabled_data || *cpu_core_show_vec {
vec![
Cow::Borrowed(&cpu.cpu_name),
Cow::Borrowed(&cpu.legend_value),
]
} else {
Vec::new()
}
} else {
Vec::new()
};

View file

@ -68,36 +68,36 @@ pub fn convert_temp_row(app: &App) -> Vec<Vec<String>> {
pub fn convert_disk_row(current_data: &data_farmer::DataCollection) -> Vec<Vec<String>> {
let mut disk_vector: Vec<Vec<String>> = Vec::new();
for (itx, disk) in current_data.disk_harvest.iter().enumerate() {
let io_activity = if current_data.io_labels.len() > itx {
let converted_read = get_simple_byte_values(current_data.io_labels[itx].0, false);
let converted_write = get_simple_byte_values(current_data.io_labels[itx].1, false);
(
current_data
.disk_harvest
.iter()
.zip(&current_data.io_labels_and_prev)
.for_each(|(disk, (io_label, _io_prev))| {
let converted_read = get_simple_byte_values(io_label.0, false);
let converted_write = get_simple_byte_values(io_label.1, false);
let io_activity = (
format!("{:.*}{}/s", 0, converted_read.0, converted_read.1),
format!("{:.*}{}/s", 0, converted_write.0, converted_write.1),
)
} else {
("0B/s".to_string(), "0B/s".to_string())
};
);
let converted_free_space = get_simple_byte_values(disk.free_space, false);
let converted_total_space = get_simple_byte_values(disk.total_space, false);
disk_vector.push(vec![
disk.name.to_string(),
disk.mount_point.to_string(),
format!(
"{:.0}%",
disk.used_space as f64 / disk.total_space as f64 * 100_f64
),
format!("{:.*}{}", 0, converted_free_space.0, converted_free_space.1),
format!(
"{:.*}{}",
0, converted_total_space.0, converted_total_space.1
),
io_activity.0,
io_activity.1,
]);
}
let converted_free_space = get_simple_byte_values(disk.free_space, false);
let converted_total_space = get_simple_byte_values(disk.total_space, false);
disk_vector.push(vec![
disk.name.to_string(),
disk.mount_point.to_string(),
format!(
"{:.0}%",
disk.used_space as f64 / disk.total_space as f64 * 100_f64
),
format!("{:.*}{}", 0, converted_free_space.0, converted_free_space.1),
format!(
"{:.*}{}",
0, converted_total_space.0, converted_total_space.1
),
io_activity.0,
io_activity.1,
]);
});
disk_vector
}
@ -121,26 +121,28 @@ pub fn convert_cpu_data_points(
for (itx, cpu) in data.cpu_data.iter().enumerate() {
// Check if the vector exists yet
let itx_offset = itx;
if cpu_data_vector.len() <= itx_offset {
cpu_data_vector.push(ConvertedCpuData::default());
cpu_data_vector[itx_offset].cpu_name =
current_data.cpu_harvest[itx].cpu_name.clone();
if cpu_data_vector.len() <= itx {
let mut new_cpu_data = ConvertedCpuData::default();
new_cpu_data.cpu_name = if let Some(cpu_harvest) = current_data.cpu_harvest.get(itx)
{
cpu_harvest.cpu_name.clone()
} else {
String::default()
};
cpu_data_vector.push(new_cpu_data);
}
cpu_data_vector[itx_offset].legend_value = format!("{:.0}%", cpu.0.round());
if let Some(cpu_data) = cpu_data_vector.get_mut(itx) {
cpu_data.legend_value = format!("{:.0}%", cpu.0.round());
//Insert joiner points
for &(joiner_offset, joiner_val) in &cpu.1 {
let offset_time = time_from_start + joiner_offset as f64;
cpu_data_vector[itx_offset]
.cpu_data
.push((-offset_time, joiner_val));
//Insert joiner points
for &(joiner_offset, joiner_val) in &cpu.1 {
let offset_time = time_from_start + joiner_offset as f64;
cpu_data.cpu_data.push((-offset_time, joiner_val));
}
cpu_data.cpu_data.push((-time_from_start, cpu.0));
}
cpu_data_vector[itx_offset]
.cpu_data
.push((-time_from_start, cpu.0));
}
if *time == current_time {
@ -269,7 +271,6 @@ pub fn get_rx_tx_data_points(
current_data.current_instant
};
// TODO: [REFACTOR] Can we use collect on this, CPU, and MEM?
for (time, data) in &current_data.timed_data_vec {
let time_from_start: f64 = (current_time.duration_since(*time).as_millis() as f64).floor();