diff --git a/README.md b/README.md
index 0ea2bc0c..0e12a89a 100644
--- a/README.md
+++ b/README.md
@@ -3,7 +3,7 @@
 [![codecov](https://codecov.io/gh/con/duct/graph/badge.svg?token=JrPazw0Vn4)](https://codecov.io/gh/con/duct)
 [![PyPI version](https://badge.fury.io/py/con-duct.svg)](https://badge.fury.io/py/con-duct)
 
-## Installation
+## Installation
 
     pip install con-duct
 
@@ -54,15 +54,15 @@ options:
                         Output template to use when printing the summary
                         following execution. (default: Exit Code: {exit_code}
                         Command: {command} Log files location: {logs_prefix}
-                        Wall Clock Time: {wall_clock_time} sec Memory Peak
-                        Usage (RSS): {peak_rss} Memory Average Usage (RSS):
-                        {average_rss} Virtual Memory Peak Usage (VSZ):
-                        {peak_vsz} Virtual Memory Average Usage (VSZ):
-                        {average_vsz} Memory Peak Percentage: {peak_pmem}
-                        Memory Average Percentage: {average_pmem} CPU Peak
-                        Usage: {peak_pcpu} Average CPU Usage: {average_pcpu}
-                        Samples Collected: {num_samples} Reports Written:
-                        {num_reports} )
+                        Wall Clock Time: {wall_clock_time:.3f} sec Memory Peak
+                        Usage (RSS): {peak_rss} bytes Memory Average Usage
+                        (RSS): {average_rss} bytes Virtual Memory Peak Usage
+                        (VSZ): {peak_vsz} bytes Virtual Memory Average Usage
+                        (VSZ): {average_vsz} bytes Memory Peak Percentage:
+                        {peak_pmem} Memory Average Percentage: {average_pmem}
+                        CPU Peak Usage: {peak_pcpu} Average CPU Usage:
+                        {average_pcpu} Samples Collected: {num_samples}
+                        Reports Written: {num_reports} )
   --clobber             Replace log files if they already exist. (default:
                         False)
   -q, --quiet           Suppress duct output. (default: False)
diff --git a/src/con_duct/__main__.py b/src/con_duct/__main__.py
index b0931b9a..56e3b312 100644
--- a/src/con_duct/__main__.py
+++ b/src/con_duct/__main__.py
@@ -27,11 +27,11 @@
     "Exit Code: {exit_code}\n"
     "Command: {command}\n"
     "Log files location: {logs_prefix}\n"
-    "Wall Clock Time: {wall_clock_time} sec\n"
-    "Memory Peak Usage (RSS): {peak_rss}\n"
-    "Memory Average Usage (RSS): {average_rss}\n"
-    "Virtual Memory Peak Usage (VSZ): {peak_vsz}\n"
-    "Virtual Memory Average Usage (VSZ): {average_vsz}\n"
+    "Wall Clock Time: {wall_clock_time:.3f} sec\n"
+    "Memory Peak Usage (RSS): {peak_rss} bytes\n"
+    "Memory Average Usage (RSS): {average_rss} bytes\n"
+    "Virtual Memory Peak Usage (VSZ): {peak_vsz} bytes\n"
+    "Virtual Memory Average Usage (VSZ): {average_vsz} bytes\n"
     "Memory Peak Percentage: {peak_pmem}\n"
     "Memory Average Percentage: {average_pmem}\n"
     "CPU Peak Usage: {peak_pcpu}\n"
@@ -41,6 +41,11 @@
 )
 
 
+def assert_num(*values: Any) -> None:
+    for value in values:
+        assert isinstance(value, (float, int))
+
+
 class Outputs(str, Enum):
     ALL = "all"
     NONE = "none"
@@ -83,8 +88,8 @@ class SystemInfo:
 class ProcessStats:
     pcpu: float  # %CPU
     pmem: float  # %MEM
-    rss: int  # Memory Resident Set Size in KiB
-    vsz: int  # Virtual Memory size in KiB
+    rss: int  # Memory Resident Set Size in Bytes
+    vsz: int  # Virtual Memory size in Bytes
     timestamp: str
 
     def max(self, other: ProcessStats) -> ProcessStats:
@@ -96,6 +101,12 @@ def max(self, other: ProcessStats) -> ProcessStats:
             timestamp=max(self.timestamp, other.timestamp),
         )
 
+    def __post_init__(self) -> None:
+        self._validate()
+
+    def _validate(self) -> None:
+        assert_num(self.pcpu, self.pmem, self.rss, self.vsz)
+
 
 @dataclass
 class LogPaths:
@@ -158,6 +169,9 @@ class Averages:
     num_samples: int = 0
 
     def update(self: Averages, other: Sample) -> None:
+        assert_num(
+            other.total_rss, other.total_vsz, other.total_pmem, other.total_pcpu
+        )
         self.num_samples += 1
         self.rss += (other.total_rss - self.rss) / self.num_samples
         self.vsz += (other.total_vsz - self.vsz) / self.num_samples
@@ -166,6 +180,9 @@ def update(self: Averages, other: Sample) -> None:
 
     @classmethod
     def from_sample(cls, sample: Sample) -> Averages:
+        assert_num(
+            sample.total_rss, sample.total_vsz, sample.total_pmem, sample.total_pcpu
+        )
         return cls(
             rss=sample.total_rss,
             vsz=sample.total_vsz,
@@ -217,8 +234,8 @@ def for_json(self) -> dict[str, Any]:
             "totals": {  # total of all processes during this sample
                 "pmem": self.total_pmem,
                 "pcpu": self.total_pcpu,
-                "rss_kb": self.total_rss,
-                "vsz_kb": self.total_vsz,
+                "rss": self.total_rss,
+                "vsz": self.total_vsz,
             },
             "averages": asdict(self.averages) if self.averages.num_samples >= 1 else {},
         }
@@ -313,14 +330,14 @@ def collect_sample(self) -> Sample:
         )
         for line in output.splitlines()[1:]:
             if line:
-                pid, pcpu, pmem, rss, vsz, etime, cmd = line.split(maxsplit=6)
+                pid, pcpu, pmem, rss_kib, vsz_kib, etime, cmd = line.split(maxsplit=6)
                 sample.add_pid(
                     int(pid),
                     ProcessStats(
                         pcpu=float(pcpu),
                         pmem=float(pmem),
-                        rss=int(rss),
-                        vsz=int(vsz),
+                        rss=int(rss_kib) * 1024,
+                        vsz=int(vsz_kib) * 1024,
                         timestamp=datetime.now().astimezone().isoformat(),
                     ),
                 )
@@ -345,46 +362,30 @@ def execution_summary(self) -> dict[str, Any]:
             "exit_code": self.process.returncode,
             "command": self.command,
             "logs_prefix": self.log_paths.prefix,
-            "wall_clock_time": f"{self.elapsed_time:.3f} sec",
+            "wall_clock_time": self.elapsed_time,
             "peak_rss": (
-                f"{self.max_values.total_rss} KiB"
-                if self.max_values.stats
-                else "unknown"
+                self.max_values.total_rss if self.max_values.stats else "unknown"
             ),
             "average_rss": (
-                f"{self.averages.rss:.3f} KiB"
-                if self.averages.num_samples >= 1
-                else "unknown"
+                self.averages.rss if self.averages.num_samples >= 1 else "unknown"
             ),
             "peak_vsz": (
-                f"{self.max_values.total_vsz} KiB"
-                if self.max_values.stats
-                else "unknown"
+                self.max_values.total_vsz if self.max_values.stats else "unknown"
             ),
             "average_vsz": (
-                f"{self.averages.vsz:.3f} KiB"
-                if self.averages.num_samples >= 1
-                else "unknown"
+                self.averages.vsz if self.averages.num_samples >= 1 else "unknown"
             ),
             "peak_pmem": (
-                f"{self.max_values.total_pmem}%"
-                if self.max_values.stats
-                else "unknown%"
+                self.max_values.total_pmem if self.max_values.stats else "unknown"
             ),
             "average_pmem": (
-                f"{self.averages.pmem:.3f}%"
-                if self.averages.num_samples >= 1
-                else "unknown%"
+                self.averages.pmem if self.averages.num_samples >= 1 else "unknown"
             ),
             "peak_pcpu": (
-                f"{self.max_values.total_pcpu}%"
-                if self.max_values.stats
-                else "unknown%"
+                self.max_values.total_pcpu if self.max_values.stats else "unknown"
             ),
             "average_pcpu": (
-                f"{self.averages.pcpu:.3f}%"
-                if self.averages.num_samples >= 1
-                else "unknown%"
+                self.averages.pcpu if self.averages.num_samples >= 1 else "unknown"
             ),
             "num_samples": self.averages.num_samples,
             "num_reports": self.number,
diff --git a/test/test_report.py b/test/test_report.py
index 94d4b83d..0132f2c7 100644
--- a/test/test_report.py
+++ b/test/test_report.py
@@ -1,4 +1,6 @@
 from __future__ import annotations
+from datetime import datetime
+import pytest
 from con_duct.__main__ import Averages, ProcessStats, Sample
 
 stat0 = ProcessStats(
@@ -89,3 +91,46 @@ def test_averages_three_samples() -> None:
     averages.update(sample2)
     averages.update(sample2)
     assert averages.pcpu == (stat0.pcpu + (2 * stat1.pcpu)) / 3
+
+
+@pytest.mark.parametrize(
+    "pcpu, pmem, rss, vsz",
+    [
+        (1.0, 1.1, 1024, 1025),
+        (0.5, 0.7, 20.48, 40.96),
+        (1, 2, 3, 4),
+        (0, 0.0, 0, 0.0),
+        (2.5, 3.5, 8192, 16384),
+        (100.0, 99.9, 65536, 131072),
+    ]
+)
+def test_process_stats_green(pcpu: float, pmem: float, rss: int, vsz: int) -> None:
+    # Assert does not raise
+    ProcessStats(
+        pcpu=pcpu,
+        pmem=pmem,
+        rss=rss,
+        vsz=vsz,
+        timestamp=datetime.now().astimezone().isoformat(),
+    )
+
+
+@pytest.mark.parametrize(
+    "pcpu, pmem, rss, vsz",
+    [
+        ("only", 1.1, 1024, 1025),
+        (0.5, "takes", 20.48, 40.96),
+        (1, 2, "one", 4),
+        (1, 2, 3, "value"),
+        ("2", "fail", "or", "more"),
+    ]
+)
+def test_process_stats_red(pcpu: float, pmem: float, rss: int, vsz: int) -> None:
+    with pytest.raises(AssertionError):
+        ProcessStats(
+            pcpu=pcpu,
+            pmem=pmem,
+            rss=rss,
+            vsz=vsz,
+            timestamp=datetime.now().astimezone().isoformat(),
+        )
diff --git a/test/test_validation.py b/test/test_validation.py
index 00c28abd..f6ce7c21 100644
--- a/test/test_validation.py
+++ b/test/test_validation.py
@@ -1,6 +1,6 @@
 import argparse
 import pytest
-from con_duct.__main__ import Arguments, Outputs, RecordTypes
+from con_duct.__main__ import Arguments, Outputs, RecordTypes, assert_num
 
 
 def test_sample_less_than_report_interval() -> None:
@@ -52,3 +52,14 @@ def test_sample_equal_greater_than_report_interval() -> None:
         summary_format="",
         quiet=False,
     )
+
+
+@pytest.mark.parametrize("input_value", [0, 1, 2, -1, 100, 0.001, -1.68])
+def test_assert_num_green(input_value: int) -> None:
+    assert_num(input_value)
+
+
+@pytest.mark.parametrize("input_value", ["hi", "0", "one"])
+def test_assert_num_red(input_value: int) -> None:
+    with pytest.raises(AssertionError):
+        assert_num(input_value)
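
Illustration (not part of the patch): `execution_summary()` now returns raw numbers instead of pre-formatted strings, so `str.format` can apply specs such as `:.3f` and the unit labels move into the template, which is why the patched format string above gains ` bytes` suffixes. A minimal sketch with hypothetical values; `template` mirrors two lines of the patched format string:

    # Hypothetical stand-ins for a real execution_summary() result.
    template = (
        "Wall Clock Time: {wall_clock_time:.3f} sec\n"
        "Memory Peak Usage (RSS): {peak_rss} bytes\n"
    )
    summary = {
        "wall_clock_time": 1.2345678,  # raw float seconds, no pre-baked " sec"
        "peak_rss": 1024 * 1024,       # ps reports KiB; collect_sample now scales by 1024
    }
    print(template.format(**summary), end="")
    # -> Wall Clock Time: 1.235 sec
    # -> Memory Peak Usage (RSS): 1048576 bytes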
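
Likewise, a quick sketch of the fail-fast validation added here (names and import path are those used by the new tests): `ProcessStats.__post_init__` routes every numeric field through `assert_num`, so a stray string from `ps` output parsing is rejected at construction time instead of corrupting later averages:

    from datetime import datetime

    from con_duct.__main__ import ProcessStats, assert_num

    assert_num(0.5, 1.2, 2048, 4096)  # plain numbers pass silently

    try:
        ProcessStats(
            pcpu="0.5",  # deliberately a string, as in test_process_stats_red
            pmem=1.2,
            rss=2048,
            vsz=4096,
            timestamp=datetime.now().astimezone().isoformat(),
        )
    except AssertionError:
        print("non-numeric field rejected by __post_init__")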