diff --git a/Cargo.toml b/Cargo.toml index 08329e4c..f81ce845 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -93,7 +93,7 @@ indexmap = "2.0.0" itertools = "0.11.0" kstring = { version = "2.0.0", features = ["arc"] } log = { version = "0.4.20", optional = true } -nvml-wrapper = { version = "0.9.0", optional = true } +nvml-wrapper = { version = "0.9.0", optional = true, features = ["legacy-functions"] } once_cell = "1.18.0" regex = "1.9.4" serde = { version = "=1.0.188 ", features = ["derive"] } diff --git a/docs/content/configuration/command-line-flags.md b/docs/content/configuration/command-line-flags.md index e0a7192f..17f78f8a 100644 --- a/docs/content/configuration/command-line-flags.md +++ b/docs/content/configuration/command-line-flags.md @@ -20,7 +20,7 @@ see information on these flags by running `btm -h`, or run `btm --help` to displ | --disable_click | Disables mouse clicks. | | -m, --dot_marker | Uses a dot marker for graphs. | | --enable_cache_memory | Enable collecting and displaying cache and buffer memory. | -| --enable_gpu_memory | Enable collecting and displaying GPU memory usage. | +| --enable_gpu | Enable collecting and displaying GPU usage. | | -e, --expanded | Expand the default widget upon starting the app. | | -f, --fahrenheit | Sets the temperature type to Fahrenheit. | | -g, --group | Groups processes with the same name by default. | diff --git a/docs/content/configuration/config-file/flags.md b/docs/content/configuration/config-file/flags.md index eeca23a3..ed9d0aef 100644 --- a/docs/content/configuration/config-file/flags.md +++ b/docs/content/configuration/config-file/flags.md @@ -38,7 +38,7 @@ each time: | `network_use_binary_prefix` | Boolean | Displays the network widget with binary prefixes. | | `network_use_bytes` | Boolean | Displays the network widget using bytes. | | `network_use_log` | Boolean | Displays the network widget with a log scale. | -| `enable_gpu_memory` | Boolean | Shows the GPU memory widget. | +| `enable_gpu` | Boolean | Enables collecting and displaying GPU usage in the memory, temperature, and process widgets. | | `retention` | String (human readable time, such as "10m", "1h", etc.) | How much data is stored at once in terms of time. | | `unnormalized_cpu` | Boolean | Show process CPU% without normalizing over the number of cores. | | `expanded_on_startup` | Boolean | Expand the default widget upon starting the app. | diff --git a/docs/content/configuration/config-file/processes.md b/docs/content/configuration/config-file/processes.md index 2871e1cd..8a2c57b2 100644 --- a/docs/content/configuration/config-file/processes.md +++ b/docs/content/configuration/config-file/processes.md @@ -7,5 +7,5 @@ You can configure which columns are shown by the process widget by setting the ` ```toml [processes] # Pick which columns you want to use in any order. -columns = ["cpu%", "mem%", "pid", "name", "read", "write", "tread", "twrite", "state", "user", "time"] +columns = ["cpu%", "mem%", "pid", "name", "read", "write", "tread", "twrite", "state", "user", "time", "gmem%", "gpu%"] ``` diff --git a/docs/content/usage/widgets/memory.md b/docs/content/usage/widgets/memory.md index 5059d2bf..4304a3f7 100644 --- a/docs/content/usage/widgets/memory.md +++ b/docs/content/usage/widgets/memory.md @@ -13,7 +13,7 @@ If the total RAM or swap available is 0, then it is automatically hidden from th One can also adjust the displayed time range through either the keyboard or mouse, with a range of 30s to 600s.
-This widget can also be configured to display Nvidia GPU memory usage (`--enable_gpu_memory`) or cache memory usage (`--enable_cache_memory`). +This widget can also be configured to display Nvidia GPU memory usage (`--enable_gpu` on Linux/Windows) or cache memory usage (`--enable_cache_memory`). ## Key bindings diff --git a/docs/content/usage/widgets/process.md b/docs/content/usage/widgets/process.md index 2a11e3a7..83139da1 100644 --- a/docs/content/usage/widgets/process.md +++ b/docs/content/usage/widgets/process.md @@ -32,6 +32,12 @@ It can also additionally display the following columns: - Process running time +With the `--enable_gpu` flag (on Linux/Windows) and the GPU process columns enabled in the configuration, it can also display: + +- GPU memory usage percentage +- GPU core utilization percentage + + See [the processes configuration page](../../configuration/config-file/processes.md) on how to customize which columns are shown. @@ -147,6 +153,9 @@ Note all keywords are case-insensitive. To search for a process/command that col | `user` | `user=root` | Matches by user; supports regex | | `state` | `state=running` | Matches by state; supports regex | | `()` | `( AND ) OR ` | Group together a condition | +| `gmem` | `gmem > 1000 b` | Matches the GPU memory column in terms of bytes; supports comparison operators | +| `gmem%` | `gmem% < 0.5` | Matches the GPU memory column in terms of percent; supports comparison operators | +| `gpu%` | `gpu% > 0` | Matches the GPU usage column in terms of percent; supports comparison operators | #### Comparison operators @@ -207,6 +216,8 @@ Note that key bindings are generally case-sensitive. | ++I++ | Invert the current sort | | ++"%"++ | Toggle between values and percentages for memory usage | | ++t++ , ++f5++ | Toggle tree mode | +| ++M++ | Sort by GPU memory usage, press again to reverse sorting order | +| ++C++ | Sort by GPU usage, press again to reverse sorting order | ### Sort sub-widget diff --git a/docs/content/usage/widgets/temperature.md b/docs/content/usage/widgets/temperature.md index 34d28c98..3cadc9a8 100644 --- a/docs/content/usage/widgets/temperature.md +++ b/docs/content/usage/widgets/temperature.md @@ -10,6 +10,8 @@ The temperature widget provides a table of temperature sensors and their current The temperature widget provides the sensor name as well as its current temperature. +This widget can also be configured to display Nvidia GPU temperatures (`--enable_gpu` on Linux/Windows). + ## Key bindings Note that key bindings are generally case-sensitive. diff --git a/sample_configs/default_config.toml b/sample_configs/default_config.toml index e7b4058e..7d6a9f28 100644 --- a/sample_configs/default_config.toml +++ b/sample_configs/default_config.toml @@ -74,7 +74,7 @@ # Hides advanced options to stop a process on Unix-like systems. #disable_advanced_kill = false -# Shows GPU(s) memory -#enable_gpu_memory = false +# Shows GPU(s) information +#enable_gpu = false # Shows cache and buffer memory #enable_cache_memory = false # How much data is stored at once in terms of time. @@ -83,7 +83,7 @@ # These are flags around the process widget. #[processes] -#columns = ["PID", "Name", "CPU%", "Mem%", "R/s", "W/s", "T.Read", "T.Write", "User", "State"] +#columns = ["PID", "Name", "CPU%", "Mem%", "R/s", "W/s", "T.Read", "T.Write", "User", "State", "GMEM%", "GPU%"] # These are all the components that support custom theming. Note that colour support # will depend on terminal support. @@ -103,7 +103,7 @@ #swap_color="LightYellow" # Represents the colour ARC will use in the memory legend and graph.
#arc_color="LightCyan" -# Represents the colour the GPU will use in the memory legend and graph. +# Represents the colour the GPU will use in the legend and graph. #gpu_core_colors=["LightGreen", "LightBlue", "LightRed", "Cyan", "Green", "Blue", "Red"] # Represents the colour rx will use in the network legend and graph. #rx_color="LightCyan" diff --git a/src/app.rs b/src/app.rs index 468eeed8..32f0af06 100644 --- a/src/app.rs +++ b/src/app.rs @@ -60,7 +60,7 @@ pub struct AppConfigFields { pub use_old_network_legend: bool, pub table_gap: u16, pub disable_click: bool, - pub enable_gpu_memory: bool, + pub enable_gpu: bool, pub enable_cache_memory: bool, pub show_table_scroll_position: bool, pub is_advanced_kill: bool, @@ -1277,6 +1277,30 @@ impl App { disk.set_index(3); } } + #[cfg(feature = "gpu")] + 'M' => { + if let BottomWidgetType::Proc = self.current_widget.widget_type { + if let Some(proc_widget_state) = self + .states + .proc_state + .get_mut_widget_state(self.current_widget.widget_id) + { + proc_widget_state.select_column(ProcWidgetColumn::GpuMem); + } + } + } + #[cfg(feature = "gpu")] + 'C' => { + if let BottomWidgetType::Proc = self.current_widget.widget_type { + if let Some(proc_widget_state) = self + .states + .proc_state + .get_mut_widget_state(self.current_widget.widget_id) + { + proc_widget_state.select_column(ProcWidgetColumn::GpuUtil); + } + } + } '?' => { self.help_dialog_state.is_showing_help = true; self.is_force_redraw = true; @@ -2702,7 +2726,14 @@ impl App { { if (x >= *tlc_x && y >= *tlc_y) && (x <= *brc_x && y <= *brc_y) { - battery_widget_state.currently_selected_battery_index = itx; + if itx >= self.converted_data.battery_data.len() { + // range check to keep within current data + battery_widget_state.currently_selected_battery_index = + self.converted_data.battery_data.len() - 1; + } else { + battery_widget_state.currently_selected_battery_index = + itx; + } break; } } diff --git a/src/app/data_harvester.rs b/src/app/data_harvester.rs index e4571597..d140e29e 100644 --- a/src/app/data_harvester.rs +++ b/src/app/data_harvester.rs @@ -2,7 +2,7 @@ use std::time::{Duration, Instant}; -#[cfg(target_os = "linux")] +#[cfg(any(target_os = "linux", feature = "gpu"))] use hashbrown::HashMap; #[cfg(feature = "battery")] use starship_battery::{Battery, Manager}; @@ -125,6 +125,11 @@ pub struct DataCollector { #[cfg(target_family = "unix")] user_table: processes::UserTable, + + #[cfg(feature = "gpu")] + gpu_pids: Option>>, + #[cfg(feature = "gpu")] + gpus_total_mem: Option, } impl DataCollector { @@ -153,6 +158,10 @@ impl DataCollector { filters, #[cfg(target_family = "unix")] user_table: Default::default(), + #[cfg(feature = "gpu")] + gpu_pids: None, + #[cfg(feature = "gpu")] + gpus_total_mem: None, } } @@ -288,18 +297,47 @@ impl DataCollector { self.update_cpu_usage(); self.update_memory_usage(); - self.update_processes(); self.update_temps(); + #[cfg(feature = "battery")] + self.update_batteries(); + #[cfg(feature = "gpu")] + self.update_gpus(); // update_gpus before procs for gpu_pids but after temps for appending + self.update_processes(); self.update_network_usage(); self.update_disks(); - #[cfg(feature = "battery")] - self.update_batteries(); - // Update times for future reference. 
self.last_collection_time = self.data.collection_time; } + #[cfg(feature = "gpu")] + #[inline] + fn update_gpus(&mut self) { + if self.widgets_to_harvest.use_gpu { + #[cfg(feature = "nvidia")] + if let Some(data) = nvidia::get_nvidia_vecs( + &self.temperature_type, + &self.filters.temp_filter, + &self.widgets_to_harvest, + ) { + if let Some(mut temp) = data.temperature { + if let Some(sensors) = &mut self.data.temperature_sensors { + sensors.append(&mut temp); + } else { + self.data.temperature_sensors = Some(temp); + } + } + if let Some(mem) = data.memory { + self.data.gpu = Some(mem); + } + if let Some(proc) = data.procs { + self.gpu_pids = Some(proc.1); + self.gpus_total_mem = Some(proc.0); + } + } + } + } + #[inline] fn update_cpu_usage(&mut self) { if self.widgets_to_harvest.use_cpu { @@ -365,11 +403,6 @@ impl DataCollector { { self.data.arc = memory::arc::get_arc_usage(); } - - #[cfg(feature = "gpu")] - if self.widgets_to_harvest.use_gpu { - self.data.gpu = memory::gpu::get_gpu_mem_usage(); - } } } diff --git a/src/app/data_harvester/cpu.rs b/src/app/data_harvester/cpu.rs index a827b26c..843df161 100644 --- a/src/app/data_harvester/cpu.rs +++ b/src/app/data_harvester/cpu.rs @@ -1,8 +1,4 @@ //! Data collection for CPU usage and load average. -//! -//! For CPU usage, Linux, macOS, and Windows are handled by Heim, FreeBSD by sysinfo. -//! -//! For load average, macOS and Linux are supported through Heim, FreeBSD by sysinfo. pub mod sysinfo; pub use self::sysinfo::*; diff --git a/src/app/data_harvester/memory.rs b/src/app/data_harvester/memory.rs index 2154b00c..dee65d8d 100644 --- a/src/app/data_harvester/memory.rs +++ b/src/app/data_harvester/memory.rs @@ -15,9 +15,6 @@ cfg_if::cfg_if! { } } -#[cfg(feature = "gpu")] -pub mod gpu; - #[cfg(feature = "zfs")] pub mod arc; diff --git a/src/app/data_harvester/memory/gpu.rs b/src/app/data_harvester/memory/gpu.rs deleted file mode 100644 index 6fd66ba3..00000000 --- a/src/app/data_harvester/memory/gpu.rs +++ /dev/null @@ -1,47 +0,0 @@ -use super::MemHarvest; - -/// Return GPU memory usage. -#[cfg(feature = "gpu")] -pub(crate) fn get_gpu_mem_usage() -> Option> { - // As we add more support, expand on this. - - #[cfg(feature = "nvidia")] - get_nvidia_mem_usage() -} - -/// Returns the memory usage of NVIDIA cards. 
-#[inline] -#[cfg(feature = "nvidia")] -fn get_nvidia_mem_usage() -> Option> { - use crate::data_harvester::nvidia::NVML_DATA; - - if let Ok(nvml) = &*NVML_DATA { - if let Ok(num_gpu) = nvml.device_count() { - let mut results = Vec::with_capacity(num_gpu as usize); - for i in 0..num_gpu { - if let Ok(device) = nvml.device_by_index(i) { - if let (Ok(name), Ok(mem)) = (device.name(), device.memory_info()) { - // add device memory in bytes - results.push(( - name, - MemHarvest { - total_bytes: mem.total, - used_bytes: mem.used, - use_percent: if mem.total == 0 { - None - } else { - Some(mem.used as f64 / mem.total as f64 * 100.0) - }, - }, - )); - } - } - } - Some(results) - } else { - None - } - } else { - None - } -} diff --git a/src/app/data_harvester/nvidia.rs b/src/app/data_harvester/nvidia.rs index 9619da64..19abdb9e 100644 --- a/src/app/data_harvester/nvidia.rs +++ b/src/app/data_harvester/nvidia.rs @@ -1,4 +1,149 @@ +use hashbrown::HashMap; +use nvml_wrapper::enum_wrappers::device::TemperatureSensor; +use nvml_wrapper::enums::device::UsedGpuMemory; use nvml_wrapper::{error::NvmlError, Nvml}; use once_cell::sync::Lazy; +use crate::app::Filter; + +use crate::app::layout_manager::UsedWidgets; +use crate::data_harvester::memory::MemHarvest; +use crate::data_harvester::temperature::{ + convert_temp_unit, is_temp_filtered, TempHarvest, TemperatureType, +}; + pub static NVML_DATA: Lazy> = Lazy::new(Nvml::init); + +pub struct GpusData { + pub memory: Option>, + pub temperature: Option>, + pub procs: Option<(u64, Vec>)>, +} + +/// Returns the GPU data from NVIDIA cards. +#[inline] +pub fn get_nvidia_vecs( + temp_type: &TemperatureType, filter: &Option, widgets_to_harvest: &UsedWidgets, +) -> Option { + if let Ok(nvml) = &*NVML_DATA { + if let Ok(num_gpu) = nvml.device_count() { + let mut temp_vec = Vec::with_capacity(num_gpu as usize); + let mut mem_vec = Vec::with_capacity(num_gpu as usize); + let mut proc_vec = Vec::with_capacity(num_gpu as usize); + let mut total_mem = 0; + for i in 0..num_gpu { + if let Ok(device) = nvml.device_by_index(i) { + if let Ok(name) = device.name() { + if widgets_to_harvest.use_mem { + if let Ok(mem) = device.memory_info() { + mem_vec.push(( + name.clone(), + MemHarvest { + total_bytes: mem.total, + used_bytes: mem.used, + use_percent: if mem.total == 0 { + None + } else { + Some(mem.used as f64 / mem.total as f64 * 100.0) + }, + }, + )); + } + } + if widgets_to_harvest.use_temp && is_temp_filtered(filter, &name) { + if let Ok(temperature) = device.temperature(TemperatureSensor::Gpu) { + let temperature = temperature as f32; + let temperature = convert_temp_unit(temperature, temp_type); + temp_vec.push(TempHarvest { + name: name.clone(), + temperature, + }); + } + } + } + if widgets_to_harvest.use_proc { + let mut procs = HashMap::new(); + if let Ok(gpu_procs) = device.process_utilization_stats(None) { + for proc in gpu_procs { + let pid = proc.pid; + let gpu_util = proc.sm_util + proc.enc_util + proc.dec_util; + procs.insert(pid, (0, gpu_util)); + } + } + if let Ok(compute_procs) = device.running_compute_processes() { + for proc in compute_procs { + let pid = proc.pid; + let gpu_mem = match proc.used_gpu_memory { + UsedGpuMemory::Used(val) => val, + UsedGpuMemory::Unavailable => 0, + }; + if let Some(prev) = procs.get(&pid) { + procs.insert(pid, (gpu_mem, prev.1)); + } else { + procs.insert(pid, (gpu_mem, 0)); + } + } + } + // Use the legacy API too but prefer newer API results + if let Ok(graphics_procs) = device.running_graphics_processes_v2() { + for proc in 
graphics_procs { + let pid = proc.pid; + let gpu_mem = match proc.used_gpu_memory { + UsedGpuMemory::Used(val) => val, + UsedGpuMemory::Unavailable => 0, + }; + if let Some(prev) = procs.get(&pid) { + procs.insert(pid, (gpu_mem, prev.1)); + } else { + procs.insert(pid, (gpu_mem, 0)); + } + } + } + if let Ok(graphics_procs) = device.running_graphics_processes() { + for proc in graphics_procs { + let pid = proc.pid; + let gpu_mem = match proc.used_gpu_memory { + UsedGpuMemory::Used(val) => val, + UsedGpuMemory::Unavailable => 0, + }; + if let Some(prev) = procs.get(&pid) { + procs.insert(pid, (gpu_mem, prev.1)); + } else { + procs.insert(pid, (gpu_mem, 0)); + } + } + } + if !procs.is_empty() { + proc_vec.push(procs); + } + // running total for proc % + if let Ok(mem) = device.memory_info() { + total_mem += mem.total; + } + } + } + } + Some(GpusData { + memory: if !mem_vec.is_empty() { + Some(mem_vec) + } else { + None + }, + temperature: if !temp_vec.is_empty() { + Some(temp_vec) + } else { + None + }, + procs: if !proc_vec.is_empty() { + Some((total_mem, proc_vec)) + } else { + None + }, + }) + } else { + None + } + } else { + None + } +} diff --git a/src/app/data_harvester/processes.rs b/src/app/data_harvester/processes.rs index 5d14930e..7d042635 100644 --- a/src/app/data_harvester/processes.rs +++ b/src/app/data_harvester/processes.rs @@ -83,6 +83,18 @@ pub struct ProcessHarvest { /// This is the process' user. pub user: Cow<'static, str>, + + /// Gpu memory usage as bytes. + #[cfg(feature = "gpu")] + pub gpu_mem: u64, + + /// Gpu memory usage as percentage. + #[cfg(feature = "gpu")] + pub gpu_mem_percent: f32, + + /// Gpu utilization as a percentage. + #[cfg(feature = "gpu")] + pub gpu_util: u32, // TODO: Additional fields // pub rss_kb: u64, // pub virt_kb: u64, @@ -98,6 +110,12 @@ impl ProcessHarvest { self.total_read_bytes += rhs.total_read_bytes; self.total_write_bytes += rhs.total_write_bytes; self.time += rhs.time; + #[cfg(feature = "gpu")] + { + self.gpu_mem += rhs.gpu_mem; + self.gpu_util += rhs.gpu_util; + self.gpu_mem_percent += rhs.gpu_mem_percent; + } } } diff --git a/src/app/data_harvester/processes/linux.rs b/src/app/data_harvester/processes/linux.rs index 8f50823b..0a1ed539 100644 --- a/src/app/data_harvester/processes/linux.rs +++ b/src/app/data_harvester/processes/linux.rs @@ -250,6 +250,12 @@ fn read_proc( uid, user, time, + #[cfg(feature = "gpu")] + gpu_mem: 0, + #[cfg(feature = "gpu")] + gpu_mem_percent: 0.0, + #[cfg(feature = "gpu")] + gpu_util: 0, }, new_process_times, )) @@ -326,7 +332,8 @@ pub(crate) fn linux_process_data( let pid = process.pid; let prev_proc_details = pid_mapping.entry(pid).or_default(); - if let Ok((process_harvest, new_process_times)) = read_proc( + #[allow(unused_mut)] + if let Ok((mut process_harvest, new_process_times)) = read_proc( prev_proc_details, process, cpu_usage, @@ -336,6 +343,23 @@ pub(crate) fn linux_process_data( total_memory, user_table, ) { + #[cfg(feature = "gpu")] + if let Some(gpus) = &collector.gpu_pids { + gpus.iter().for_each(|gpu| { + // add mem/util for all gpus to pid + if let Some((mem, util)) = gpu.get(&(pid as u32)) { + process_harvest.gpu_mem += mem; + process_harvest.gpu_util += util; + } + }); + if let Some(gpu_total_mem) = &collector.gpus_total_mem { + process_harvest.gpu_mem_percent = (process_harvest.gpu_mem as f64 + / *gpu_total_mem as f64 + * 100.0) + as f32; + } + } + prev_proc_details.cpu_time = new_process_times; prev_proc_details.total_read_bytes = process_harvest.total_read_bytes; 
prev_proc_details.total_write_bytes = process_harvest.total_write_bytes; diff --git a/src/app/data_harvester/processes/unix/process_ext.rs b/src/app/data_harvester/processes/unix/process_ext.rs index 999a893f..93ef3a94 100644 --- a/src/app/data_harvester/processes/unix/process_ext.rs +++ b/src/app/data_harvester/processes/unix/process_ext.rs @@ -97,6 +97,12 @@ pub(crate) trait UnixProcessExt { }) .unwrap_or_else(|| "N/A".into()), time: Duration::from_secs(process_val.run_time()), + #[cfg(feature = "gpu")] + gpu_mem: 0, + #[cfg(feature = "gpu")] + gpu_mem_percent: 0.0, + #[cfg(feature = "gpu")] + gpu_util: 0, }); } diff --git a/src/app/data_harvester/processes/windows.rs b/src/app/data_harvester/processes/windows.rs index b0b93b16..11c58cf3 100644 --- a/src/app/data_harvester/processes/windows.rs +++ b/src/app/data_harvester/processes/windows.rs @@ -67,6 +67,26 @@ pub fn sysinfo_process_data( let disk_usage = process_val.disk_usage(); let process_state = (process_val.status().to_string(), 'R'); + + #[cfg(feature = "gpu")] + let (gpu_mem, gpu_util, gpu_mem_percent) = { + let mut gpu_mem = 0; + let mut gpu_util = 0; + let mut gpu_mem_percent = 0.0; + if let Some(gpus) = &collector.gpu_pids { + gpus.iter().for_each(|gpu| { + // add mem/util for all gpus to pid + if let Some((mem, util)) = gpu.get(&process_val.pid().as_u32()) { + gpu_mem += mem; + gpu_util += util; + } + }); + } + if let Some(gpu_total_mem) = &collector.gpus_total_mem { + gpu_mem_percent = (gpu_mem as f64 / *gpu_total_mem as f64 * 100.0) as f32; + } + (gpu_mem, gpu_util, gpu_mem_percent) + }; process_vector.push(ProcessHarvest { pid: process_val.pid().as_u32() as _, parent_pid: process_val.parent().map(|p| p.as_u32() as _), @@ -95,6 +115,12 @@ pub fn sysinfo_process_data( } else { Duration::from_secs(process_val.run_time()) }, + #[cfg(feature = "gpu")] + gpu_mem, + #[cfg(feature = "gpu")] + gpu_util, + #[cfg(feature = "gpu")] + gpu_mem_percent, }); } diff --git a/src/app/data_harvester/temperature.rs b/src/app/data_harvester/temperature.rs index ea30f760..8287d585 100644 --- a/src/app/data_harvester/temperature.rs +++ b/src/app/data_harvester/temperature.rs @@ -13,9 +13,6 @@ cfg_if::cfg_if! 
{ } } -#[cfg(feature = "nvidia")] -pub mod nvidia; - use crate::app::Filter; #[derive(Default, Debug, Clone)] @@ -40,7 +37,15 @@ fn convert_celsius_to_fahrenheit(celsius: f32) -> f32 { (celsius * (9.0 / 5.0)) + 32.0 } -fn is_temp_filtered(filter: &Option, text: &str) -> bool { +pub fn convert_temp_unit(temp: f32, temp_type: &TemperatureType) -> f32 { + match temp_type { + TemperatureType::Celsius => temp, + TemperatureType::Kelvin => convert_celsius_to_kelvin(temp), + TemperatureType::Fahrenheit => convert_celsius_to_fahrenheit(temp), + } +} + +pub fn is_temp_filtered(filter: &Option, text: &str) -> bool { if let Some(filter) = filter { let mut ret = filter.is_list_ignored; for r in &filter.list { diff --git a/src/app/data_harvester/temperature/linux.rs b/src/app/data_harvester/temperature/linux.rs index 72ca9eef..5b6fbbb0 100644 --- a/src/app/data_harvester/temperature/linux.rs +++ b/src/app/data_harvester/temperature/linux.rs @@ -9,10 +9,7 @@ use anyhow::Result; use hashbrown::{HashMap, HashSet}; use super::{is_temp_filtered, TempHarvest, TemperatureType}; -use crate::app::{ - data_harvester::temperature::{convert_celsius_to_fahrenheit, convert_celsius_to_kelvin}, - Filter, -}; +use crate::app::{data_harvester::temperature::convert_temp_unit, Filter}; const EMPTY_NAME: &str = "Unknown"; @@ -31,14 +28,6 @@ fn read_temp(path: &Path) -> Result { / 1_000.0) } -fn convert_temp_unit(temp: f32, temp_type: &TemperatureType) -> f32 { - match temp_type { - TemperatureType::Celsius => temp, - TemperatureType::Kelvin => convert_celsius_to_kelvin(temp), - TemperatureType::Fahrenheit => convert_celsius_to_fahrenheit(temp), - } -} - /// Get all candidates from hwmon and coretemp. It will also return the number of entries from hwmon. fn get_hwmon_candidates() -> (HashSet, usize) { let mut dirs = HashSet::default(); @@ -359,11 +348,6 @@ pub fn get_temperature_data( add_thermal_zone_temperatures(&mut results.temperatures, temp_type, filter); } - #[cfg(feature = "nvidia")] - { - super::nvidia::add_nvidia_data(&mut results.temperatures, temp_type, filter)?; - } - Ok(Some(results.temperatures)) } diff --git a/src/app/data_harvester/temperature/nvidia.rs b/src/app/data_harvester/temperature/nvidia.rs deleted file mode 100644 index 00368157..00000000 --- a/src/app/data_harvester/temperature/nvidia.rs +++ /dev/null @@ -1,40 +0,0 @@ -use nvml_wrapper::enum_wrappers::device::TemperatureSensor; - -use super::{ - convert_celsius_to_fahrenheit, convert_celsius_to_kelvin, is_temp_filtered, TempHarvest, - TemperatureType, -}; -use crate::app::Filter; -use crate::data_harvester::nvidia::NVML_DATA; -use crate::utils::error; - -pub fn add_nvidia_data( - temperature_vec: &mut Vec, temp_type: &TemperatureType, filter: &Option, -) -> error::Result<()> { - if let Ok(nvml) = &*NVML_DATA { - if let Ok(gpu_num) = nvml.device_count() { - for i in 0..gpu_num { - if let Ok(device) = nvml.device_by_index(i) { - if let (Ok(name), Ok(temperature)) = - (device.name(), device.temperature(TemperatureSensor::Gpu)) - { - if is_temp_filtered(filter, &name) { - let temperature = temperature as f32; - let temperature = match temp_type { - TemperatureType::Celsius => temperature, - TemperatureType::Kelvin => convert_celsius_to_kelvin(temperature), - TemperatureType::Fahrenheit => { - convert_celsius_to_fahrenheit(temperature) - } - }; - - temperature_vec.push(TempHarvest { name, temperature }); - } - } - } - } - } - } - - Ok(()) -} diff --git a/src/app/data_harvester/temperature/sysinfo.rs b/src/app/data_harvester/temperature/sysinfo.rs index 
d72901aa..908ff736 100644 --- a/src/app/data_harvester/temperature/sysinfo.rs +++ b/src/app/data_harvester/temperature/sysinfo.rs @@ -33,11 +33,6 @@ pub fn get_temperature_data( } } - #[cfg(feature = "nvidia")] - { - super::nvidia::add_nvidia_data(&mut temperature_vec, temp_type, filter)?; - } - // For RockPro64 boards on FreeBSD, they apparently use "hw.temperature" for sensors. #[cfg(target_os = "freebsd")] { diff --git a/src/app/query.rs b/src/app/query.rs index 97809a0e..7cbabafa 100644 --- a/src/app/query.rs +++ b/src/app/query.rs @@ -127,6 +127,44 @@ pub fn parse_query( Ok(And { lhs, rhs }) } + #[inline] + fn process_prefix_units(query: &mut VecDeque, value: &mut f64) { + // If no unit, assume base. + // + // Furthermore, base must be PEEKED at initially, and will + // require (likely) prefix_type specific checks + // Lastly, if it *is* a unit, remember to POP! + if let Some(potential_unit) = query.front() { + if potential_unit.eq_ignore_ascii_case("tb") { + *value *= TERA_LIMIT_F64; + query.pop_front(); + } else if potential_unit.eq_ignore_ascii_case("tib") { + *value *= TEBI_LIMIT_F64; + query.pop_front(); + } else if potential_unit.eq_ignore_ascii_case("gb") { + *value *= GIGA_LIMIT_F64; + query.pop_front(); + } else if potential_unit.eq_ignore_ascii_case("gib") { + *value *= GIBI_LIMIT_F64; + query.pop_front(); + } else if potential_unit.eq_ignore_ascii_case("mb") { + *value *= MEGA_LIMIT_F64; + query.pop_front(); + } else if potential_unit.eq_ignore_ascii_case("mib") { + *value *= MEBI_LIMIT_F64; + query.pop_front(); + } else if potential_unit.eq_ignore_ascii_case("kb") { + *value *= KILO_LIMIT_F64; + query.pop_front(); + } else if potential_unit.eq_ignore_ascii_case("kib") { + *value *= KIBI_LIMIT_F64; + query.pop_front(); + } else if potential_unit.eq_ignore_ascii_case("b") { + query.pop_front(); + } + } + } + fn process_prefix(query: &mut VecDeque, inside_quotation: bool) -> Result { if let Some(queue_top) = query.pop_front() { if inside_quotation { @@ -389,47 +427,11 @@ pub fn parse_query( | PrefixType::Wps | PrefixType::TRead | PrefixType::TWrite => { - // If no unit, assume base. - // - // Furthermore, base must be PEEKED at initially, and will - // require (likely) prefix_type specific checks - // Lastly, if it *is* a unit, remember to POP! 
- if let Some(potential_unit) = query.front() { - if potential_unit.eq_ignore_ascii_case("tb") { - value *= TERA_LIMIT_F64; - query.pop_front(); - } else if potential_unit.eq_ignore_ascii_case("tib") - { - value *= TEBI_LIMIT_F64; - query.pop_front(); - } else if potential_unit.eq_ignore_ascii_case("gb") - { - value *= GIGA_LIMIT_F64; - query.pop_front(); - } else if potential_unit.eq_ignore_ascii_case("gib") - { - value *= GIBI_LIMIT_F64; - query.pop_front(); - } else if potential_unit.eq_ignore_ascii_case("mb") - { - value *= MEGA_LIMIT_F64; - query.pop_front(); - } else if potential_unit.eq_ignore_ascii_case("mib") - { - value *= MEBI_LIMIT_F64; - query.pop_front(); - } else if potential_unit.eq_ignore_ascii_case("kb") - { - value *= KILO_LIMIT_F64; - query.pop_front(); - } else if potential_unit.eq_ignore_ascii_case("kib") - { - value *= KIBI_LIMIT_F64; - query.pop_front(); - } else if potential_unit.eq_ignore_ascii_case("b") { - query.pop_front(); - } - } + process_prefix_units(query, &mut value); + } + #[cfg(feature = "gpu")] + PrefixType::GMem => { + process_prefix_units(query, &mut value); } _ => {} } @@ -626,6 +628,12 @@ pub enum PrefixType { State, User, Time, + #[cfg(feature = "gpu")] + PGpu, + #[cfg(feature = "gpu")] + GMem, + #[cfg(feature = "gpu")] + PGMem, __Nonexhaustive, } @@ -637,32 +645,41 @@ impl std::str::FromStr for PrefixType { // TODO: Didn't add mem_bytes, total_read, and total_write // for now as it causes help to be clogged. - let result = if multi_eq_ignore_ascii_case!(s, "cpu" | "cpu%") { - PCpu - } else if multi_eq_ignore_ascii_case!(s, "mem" | "mem%") { - PMem - } else if multi_eq_ignore_ascii_case!(s, "memb") { - MemBytes - } else if multi_eq_ignore_ascii_case!(s, "read" | "r/s" | "rps") { - Rps - } else if multi_eq_ignore_ascii_case!(s, "write" | "w/s" | "wps") { - Wps - } else if multi_eq_ignore_ascii_case!(s, "tread" | "t.read") { - TRead - } else if multi_eq_ignore_ascii_case!(s, "twrite" | "t.write") { - TWrite - } else if multi_eq_ignore_ascii_case!(s, "pid") { - Pid - } else if multi_eq_ignore_ascii_case!(s, "state") { - State - } else if multi_eq_ignore_ascii_case!(s, "user") { - User - } else if multi_eq_ignore_ascii_case!(s, "time") { - Time - } else { - Name - }; + let mut result = Name; + if multi_eq_ignore_ascii_case!(s, "cpu" | "cpu%") { + result = PCpu; + } else if multi_eq_ignore_ascii_case!(s, "mem" | "mem%") { + result = PMem; + } else if multi_eq_ignore_ascii_case!(s, "memb") { + result = MemBytes; + } else if multi_eq_ignore_ascii_case!(s, "read" | "r/s" | "rps") { + result = Rps; + } else if multi_eq_ignore_ascii_case!(s, "write" | "w/s" | "wps") { + result = Wps; + } else if multi_eq_ignore_ascii_case!(s, "tread" | "t.read") { + result = TRead; + } else if multi_eq_ignore_ascii_case!(s, "twrite" | "t.write") { + result = TWrite; + } else if multi_eq_ignore_ascii_case!(s, "pid") { + result = Pid; + } else if multi_eq_ignore_ascii_case!(s, "state") { + result = State; + } else if multi_eq_ignore_ascii_case!(s, "user") { + result = User; + } else if multi_eq_ignore_ascii_case!(s, "time") { + result = Time; + } + #[cfg(feature = "gpu")] + { + if multi_eq_ignore_ascii_case!(s, "gmem") { + result = GMem; + } else if multi_eq_ignore_ascii_case!(s, "gmem%") { + result = PGMem; + } else if multi_eq_ignore_ascii_case!(s, "gpu%") { + result = PGpu; + } + } Ok(result) } } @@ -801,6 +818,24 @@ impl Prefix { process.total_write_bytes as f64, numerical_query.value, ), + #[cfg(feature = "gpu")] + PrefixType::PGpu => matches_condition( + 
&numerical_query.condition, + process.gpu_util as f64, + numerical_query.value, + ), + #[cfg(feature = "gpu")] + PrefixType::GMem => matches_condition( + &numerical_query.condition, + process.gpu_mem as f64, + numerical_query.value, + ), + #[cfg(feature = "gpu")] + PrefixType::PGMem => matches_condition( + &numerical_query.condition, + process.gpu_mem_percent as f64, + numerical_query.value, + ), _ => true, }, ComparableQuery::Time(time_query) => match prefix_type { diff --git a/src/args.rs b/src/args.rs index dcdc6e98..9d3cccca 100644 --- a/src/args.rs +++ b/src/args.rs @@ -464,10 +464,10 @@ use CPU (3) as the default instead. }, #[cfg(feature = "gpu")] { - Arg::new("enable_gpu_memory") - .long("enable_gpu_memory") + Arg::new("enable_gpu") + .long("enable_gpu") .action(ArgAction::SetTrue) - .help("Enable collecting and displaying GPU memory usage.") + .help("Enable collecting and displaying GPU usage.") }, #[cfg(not(target_os = "windows"))] { diff --git a/src/constants.rs b/src/constants.rs index bf73529a..dff619b9 100644 --- a/src/constants.rs +++ b/src/constants.rs @@ -327,7 +327,7 @@ pub const CPU_HELP_TEXT: [&str; 2] = [ "Mouse scroll Scrolling over an CPU core/average shows only that entry on the chart", ]; -pub const PROCESS_HELP_TEXT: [&str; 15] = [ +pub const PROCESS_HELP_TEXT: [&str; 17] = [ "3 - Process widget", "dd, F9 Kill the selected process", "c Sort by CPU usage, press again to reverse", @@ -343,9 +343,11 @@ pub const PROCESS_HELP_TEXT: [&str; 15] = [ "t, F5 Toggle tree mode", "+, -, click Collapse/expand a branch while in tree mode", "click on header Sorts the entries by that column, click again to invert the sort", + "C Sort by GPU usage, press again to reverse", + "M Sort by GPU memory usage, press again to reverse", ]; -pub const SEARCH_HELP_TEXT: [&str; 48] = [ +pub const SEARCH_HELP_TEXT: [&str; 51] = [ "4 - Process search widget", "Esc Close the search widget (retains the filter)", "Ctrl-a Skip to the start of the search query", @@ -373,6 +375,9 @@ pub const SEARCH_HELP_TEXT: [&str; 48] = [ "twrite, t.write ex: twrite = 1", "user ex: user = root", "state ex: state = running", + "gpu% ex: gpu% < 4.2", + "gmem ex: gmem < 100 kb", + "gmem% ex: gmem% < 4.2", "", "Comparison operators:", "= ex: cpu = 1", @@ -581,8 +586,8 @@ pub const CONFIG_TEXT: &str = r#"# This is a default config file for bottom. Al #network_use_log = false # Hides advanced options to stop a process on Unix-like systems. #disable_advanced_kill = false -# Shows GPU(s) memory -#enable_gpu_memory = false +# Shows GPU(s) information +#enable_gpu = false # Shows cache and buffer memory #enable_cache_memory = false # How much data is stored at once in terms of time. @@ -591,7 +596,7 @@ pub const CONFIG_TEXT: &str = r#"# This is a default config file for bottom. Al # These are flags around the process widget. #[processes] -#columns = ["PID", "Name", "CPU%", "Mem%", "R/s", "W/s", "T.Read", "T.Write", "User", "State"] +#columns = ["PID", "Name", "CPU%", "Mem%", "R/s", "W/s", "T.Read", "T.Write", "User", "State", "GMEM%", "GPU%"] # These are all the components that support custom theming. Note that colour support # will depend on terminal support. @@ -611,7 +616,7 @@ pub const CONFIG_TEXT: &str = r#"# This is a default config file for bottom. Al #swap_color="LightYellow" # Represents the colour ARC will use in the memory legend and graph. #arc_color="LightCyan" -# Represents the colour the GPU will use in the memory legend and graph. +# Represents the colour the GPU will use in the legend and graph.
#gpu_core_colors=["LightGreen", "LightBlue", "LightRed", "Cyan", "Green", "Blue", "Red"] # Represents the colour rx will use in the network legend and graph. #rx_color="LightCyan" diff --git a/src/options.rs b/src/options.rs index 8fe1fdd9..031b3cc4 100644 --- a/src/options.rs +++ b/src/options.rs @@ -100,7 +100,7 @@ pub struct ConfigFlags { network_use_bytes: Option, network_use_log: Option, network_use_binary_prefix: Option, - enable_gpu_memory: Option, + enable_gpu: Option, enable_cache_memory: Option, retention: Option, } @@ -278,7 +278,7 @@ pub fn build_app( use_old_network_legend: is_flag_enabled!(use_old_network_legend, matches, config), table_gap: u16::from(!(is_flag_enabled!(hide_table_gap, matches, config))), disable_click: is_flag_enabled!(disable_click, matches, config), - enable_gpu_memory: get_enable_gpu_memory(matches, config), + enable_gpu: get_enable_gpu(matches, config), enable_cache_memory: get_enable_cache_memory(matches, config), show_table_scroll_position: is_flag_enabled!(show_table_scroll_position, matches, config), is_advanced_kill, @@ -433,7 +433,7 @@ pub fn build_app( use_cpu: used_widget_set.get(&Cpu).is_some() || used_widget_set.get(&BasicCpu).is_some(), use_mem, use_cache: use_mem && get_enable_cache_memory(matches, config), - use_gpu: use_mem && get_enable_gpu_memory(matches, config), + use_gpu: use_mem && get_enable_gpu(matches, config), use_net: used_widget_set.get(&Net).is_some() || used_widget_set.get(&BasicNet).is_some(), use_proc: used_widget_set.get(&Proc).is_some(), use_disk: used_widget_set.get(&Disk).is_some(), @@ -742,14 +742,6 @@ fn get_default_widget_and_count( fn get_use_battery(matches: &ArgMatches, config: &Config) -> bool { #[cfg(feature = "battery")] { - if let Ok(battery_manager) = Manager::new() { - if let Ok(batteries) = battery_manager.batteries() { - if batteries.count() == 0 { - return false; - } - } - } - if matches.get_flag("battery") { return true; } else if let Some(flags) = &config.flags { @@ -757,20 +749,28 @@ fn get_use_battery(matches: &ArgMatches, config: &Config) -> bool { return battery; } } + + if let Ok(battery_manager) = Manager::new() { + if let Ok(batteries) = battery_manager.batteries() { + if batteries.count() == 0 { + return false; + } + } + } } false } #[allow(unused_variables)] -fn get_enable_gpu_memory(matches: &ArgMatches, config: &Config) -> bool { +fn get_enable_gpu(matches: &ArgMatches, config: &Config) -> bool { #[cfg(feature = "gpu")] { - if matches.get_flag("enable_gpu_memory") { + if matches.get_flag("enable_gpu") { return true; } else if let Some(flags) = &config.flags { - if let Some(enable_gpu_memory) = flags.enable_gpu_memory { - return enable_gpu_memory; + if let Some(enable_gpu) = flags.enable_gpu { + return enable_gpu; } } } diff --git a/src/widgets/process_table.rs b/src/widgets/process_table.rs index 14d4bcda..5aa03c9a 100644 --- a/src/widgets/process_table.rs +++ b/src/widgets/process_table.rs @@ -91,6 +91,12 @@ fn make_column(column: ProcColumn) -> SortColumn { User => SortColumn::soft(User, Some(0.05)), State => SortColumn::hard(State, 7), Time => SortColumn::new(Time), + #[cfg(feature = "gpu")] + GpuMem => SortColumn::new(GpuMem).default_descending(), + #[cfg(feature = "gpu")] + GpuMemPercent => SortColumn::new(GpuMemPercent).default_descending(), + #[cfg(feature = "gpu")] + GpuUtilPercent => SortColumn::new(GpuUtilPercent).default_descending(), } } @@ -117,6 +123,10 @@ pub enum ProcWidgetColumn { User, State, Time, + #[cfg(feature = "gpu")] + GpuMem, + #[cfg(feature = "gpu")] + GpuUtil, } 
impl<'de> Deserialize<'de> for ProcWidgetColumn { @@ -140,6 +150,10 @@ impl<'de> Deserialize<'de> for ProcWidgetColumn { "state" => Ok(ProcWidgetColumn::State), "user" => Ok(ProcWidgetColumn::User), "time" => Ok(ProcWidgetColumn::Time), + #[cfg(feature = "gpu")] + "gmem" | "gmem%" => Ok(ProcWidgetColumn::GpuMem), + #[cfg(feature = "gpu")] + "gpu%" => Ok(ProcWidgetColumn::GpuUtil), _ => Err(Error::custom("doesn't match any column type")), } } @@ -277,6 +291,16 @@ impl ProcWidgetState { ProcWidgetColumn::User => User, ProcWidgetColumn::State => State, ProcWidgetColumn::Time => Time, + #[cfg(feature = "gpu")] + ProcWidgetColumn::GpuMem => { + if mem_vals { + GpuMem + } else { + GpuMemPercent + } + } + #[cfg(feature = "gpu")] + ProcWidgetColumn::GpuUtil => GpuUtilPercent, }; make_column(col) @@ -318,6 +342,10 @@ impl ProcWidgetState { State => ProcWidgetColumn::State, User => ProcWidgetColumn::User, Time => ProcWidgetColumn::Time, + #[cfg(feature = "gpu")] + GpuMem | GpuMemPercent => ProcWidgetColumn::GpuMem, + #[cfg(feature = "gpu")] + GpuUtilPercent => ProcWidgetColumn::GpuUtil, } }) .collect::>(); @@ -743,6 +771,23 @@ impl ProcWidgetState { _ => unreachable!(), } + self.sort_table.set_data(self.column_text()); + self.force_data_update(); + } + } + #[cfg(feature = "gpu")] + if let Some(index) = self.column_mapping.get_index_of(&ProcWidgetColumn::GpuMem) { + if let Some(mem) = self.get_mut_proc_col(index) { + match mem { + ProcColumn::GpuMem => { + *mem = ProcColumn::GpuMemPercent; + } + ProcColumn::GpuMemPercent => { + *mem = ProcColumn::GpuMem; + } + _ => unreachable!(), + } + self.sort_table.set_data(self.column_text()); self.force_data_update(); } @@ -1029,6 +1074,10 @@ mod test { num_similar: 0, disabled: false, time: Duration::from_secs(0), + #[cfg(feature = "gpu")] + gpu_mem_usage: MemUsage::Percent(1.1), + #[cfg(feature = "gpu")] + gpu_usage: 0, }; let b = ProcWidgetData { diff --git a/src/widgets/process_table/proc_widget_column.rs b/src/widgets/process_table/proc_widget_column.rs index 91d7277d..ca32b0e3 100644 --- a/src/widgets/process_table/proc_widget_column.rs +++ b/src/widgets/process_table/proc_widget_column.rs @@ -24,6 +24,12 @@ pub enum ProcColumn { State, User, Time, + #[cfg(feature = "gpu")] + GpuMem, + #[cfg(feature = "gpu")] + GpuMemPercent, + #[cfg(feature = "gpu")] + GpuUtilPercent, } impl<'de> Deserialize<'de> for ProcColumn { @@ -47,6 +53,12 @@ impl<'de> Deserialize<'de> for ProcColumn { "state" => Ok(ProcColumn::State), "user" => Ok(ProcColumn::User), "time" => Ok(ProcColumn::Time), + #[cfg(feature = "gpu")] + "gmem" => Ok(ProcColumn::GpuMem), + #[cfg(feature = "gpu")] + "gmem%" => Ok(ProcColumn::GpuMemPercent), + #[cfg(feature = "gpu")] + "gpu%" => Ok(ProcColumn::GpuUtilPercent), _ => Err(Error::custom("doesn't match any column type")), } } @@ -78,6 +90,12 @@ impl ColumnHeader for ProcColumn { ProcColumn::State => "State", ProcColumn::User => "User", ProcColumn::Time => "Time", + #[cfg(feature = "gpu")] + ProcColumn::GpuMem => "GMEM", + #[cfg(feature = "gpu")] + ProcColumn::GpuMemPercent => "GMEM%", + #[cfg(feature = "gpu")] + ProcColumn::GpuUtilPercent => "GPU%", } .into() } @@ -98,6 +116,12 @@ impl ColumnHeader for ProcColumn { ProcColumn::State => "State", ProcColumn::User => "User", ProcColumn::Time => "Time", + #[cfg(feature = "gpu")] + ProcColumn::GpuMem => "GMEM", + #[cfg(feature = "gpu")] + ProcColumn::GpuMemPercent => "GMEM%", + #[cfg(feature = "gpu")] + ProcColumn::GpuUtilPercent => "GPU%", } .into() } @@ -158,6 +182,16 @@ impl SortsRow for ProcColumn { 
ProcColumn::Time => { data.sort_by(|a, b| sort_partial_fn(descending)(a.time, b.time)); } + #[cfg(feature = "gpu")] + ProcColumn::GpuMem | ProcColumn::GpuMemPercent => { + data.sort_by(|a, b| { + sort_partial_fn(descending)(&a.gpu_mem_usage, &b.gpu_mem_usage) + }); + } + #[cfg(feature = "gpu")] + ProcColumn::GpuUtilPercent => { + data.sort_by(|a, b| sort_partial_fn(descending)(a.gpu_usage, b.gpu_usage)); + } } } } diff --git a/src/widgets/process_table/proc_widget_data.rs b/src/widgets/process_table/proc_widget_data.rs index 418d9997..e234d8ec 100644 --- a/src/widgets/process_table/proc_widget_data.rs +++ b/src/widgets/process_table/proc_widget_data.rs @@ -181,6 +181,10 @@ pub struct ProcWidgetData { pub num_similar: u64, pub disabled: bool, pub time: Duration, + #[cfg(feature = "gpu")] + pub gpu_mem_usage: MemUsage, + #[cfg(feature = "gpu")] + pub gpu_usage: u32, } impl ProcWidgetData { @@ -216,6 +220,14 @@ impl ProcWidgetData { num_similar: 1, disabled: false, time: process.time, + #[cfg(feature = "gpu")] + gpu_mem_usage: if is_mem_percent { + MemUsage::Percent(process.gpu_mem_percent) + } else { + MemUsage::Bytes(process.gpu_mem) + }, + #[cfg(feature = "gpu")] + gpu_usage: process.gpu_util, } } @@ -248,6 +260,18 @@ impl ProcWidgetData { self.wps += other.wps; self.total_read += other.total_read; self.total_write += other.total_write; + #[cfg(feature = "gpu")] + { + self.gpu_mem_usage = match (&self.gpu_mem_usage, &other.gpu_mem_usage) { + (MemUsage::Percent(a), MemUsage::Percent(b)) => MemUsage::Percent(a + b), + (MemUsage::Bytes(a), MemUsage::Bytes(b)) => MemUsage::Bytes(a + b), + (MemUsage::Percent(_), MemUsage::Bytes(_)) + | (MemUsage::Bytes(_), MemUsage::Percent(_)) => { + unreachable!("trying to add together two different memory usage types!") + } + }; + self.gpu_usage += other.gpu_usage; + } } fn to_string(&self, column: &ProcColumn) -> String { @@ -264,6 +288,10 @@ impl ProcWidgetData { ProcColumn::State => self.process_char.to_string(), ProcColumn::User => self.user.clone(), ProcColumn::Time => format_time(self.time), + #[cfg(feature = "gpu")] + ProcColumn::GpuMem | ProcColumn::GpuMemPercent => self.gpu_mem_usage.to_string(), + #[cfg(feature = "gpu")] + ProcColumn::GpuUtilPercent => format!("{:.1}%", self.gpu_usage), } } } @@ -298,6 +326,10 @@ impl DataToCell for ProcWidgetData { } ProcColumn::User => self.user.clone(), ProcColumn::Time => format_time(self.time), + #[cfg(feature = "gpu")] + ProcColumn::GpuMem | ProcColumn::GpuMemPercent => self.gpu_mem_usage.to_string(), + #[cfg(feature = "gpu")] + ProcColumn::GpuUtilPercent => format!("{:.1}%", self.gpu_usage), }, calculated_width, )) diff --git a/tests/arg_tests.rs b/tests/arg_tests.rs index c233c14e..3bb05e95 100644 --- a/tests/arg_tests.rs +++ b/tests/arg_tests.rs @@ -178,10 +178,10 @@ fn test_battery_flag() { #[cfg_attr(feature = "gpu", ignore)] fn test_gpu_flag() { btm_command() - .arg("--enable_gpu_memory") + .arg("--enable_gpu") .assert() .failure() .stderr(predicate::str::contains( - "unexpected argument '--enable_gpu_memory' found", + "unexpected argument '--enable_gpu' found", )); }
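
For reference, a minimal configuration sketch exercising the options introduced by this patch (assuming a build with the `gpu` feature enabled; the section and column names are taken from the docs and sample config above, and everything else is left at its defaults):

```toml
# Enables GPU data collection; equivalent to passing `--enable_gpu` on the
# command line. GPU data then feeds the memory graph, the temperature table,
# and the process columns below.
[flags]
enable_gpu = true

[processes]
# "gmem%" and "gpu%" are the new GPU memory % and GPU utilization % columns.
columns = ["pid", "name", "cpu%", "mem%", "gmem%", "gpu%", "state", "user", "time"]
```

In the process search, the new `gmem`, `gmem%`, and `gpu%` keywords accept comparison operators, e.g. `gmem > 500mb` or `gpu% > 0`.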