From 832602c5b6a071ede3e7ad4df2fc3a3f5de4b423 Mon Sep 17 00:00:00 2001
From: EdmundGoodman
Date: Sat, 13 Apr 2024 20:02:02 +0100
Subject: [PATCH] Add optional flag to specify output directory, and update
 README and TODO

---
 README.md                                | 33 ++++++++++++++++++++----
 TODO.md                                  |  8 +++---
 src/hpc_multibench/main.py               | 11 +++++++-
 src/hpc_multibench/test_bench.py         |  5 ++--
 src/hpc_multibench/test_plan.py          |  5 +++-
 src/hpc_multibench/tui/interactive_ui.py |  4 ++-
 6 files changed, 52 insertions(+), 14 deletions(-)

diff --git a/README.md b/README.md
index 3ccf6db..13d0c80 100644
--- a/README.md
+++ b/README.md
@@ -12,29 +12,52 @@ command line.
 
 ### Interactively reviewing sample results
 
-First, populate the `results/` directory with the run outputs to review. For
-example for the `parallelism` test plan in the `hpccg-rs-kudu-results` submodule:
+Using the `parallelism` test plan in the `hpccg-rs-kudu-results` submodule as
+an example, we can interactively view the data as follows:
 
 ```bash
-rm -rf results && mkdir results/
-rsync -av generated_results/hpccg-rs-kudu-results/ results/ --exclude=.git/
-poetry run python3 -m hpc_multibench -y results/_test_plans/parallelism.yaml interactive
+poetry run python3 -m hpc_multibench \
+    -y generated_results/hpccg-rs-kudu-results/_test_plans/parallelism.yaml \
+    -o generated_results/hpccg-rs-kudu-results/ \
+    interactive
 ```
 
 This will open a terminal-user interface allowing interactive visualisation of
 results. This is rendered inside the terminal, and as such does not require
 X-forwarding to be set up to present data and plot graphs.
 
+We can see the required `-y` flag being used to select the YAML file for the
+test plan, and the optional `-o` flag to point to the directory containing the
+sample data. The `interactive` subcommand then runs the program in interactive
+mode.
+
 ### Dispatching runs
 
+On a system with Slurm installed, runs can be dispatched as follows:
+
 ```bash
+poetry run python3 -m hpc_multibench \
+    -y generated_results/hpccg-rs-kudu-results/_test_plans/parallelism.yaml \
+    record
 ```
 
+Since the `-o` flag is not specified here, it will default to writing out the
+files to a directory called `results/` at the root of the repository.
+
 ### Reviewing runs non-interactively
 
+Run results can also be viewed non-interactively as follows:
+
 ```bash
+poetry run python3 -m hpc_multibench \
+    -y generated_results/hpccg-rs-kudu-results/_test_plans/parallelism.yaml \
+    -o generated_results/hpccg-rs-kudu-results/ \
+    report
 ```
 
+This will open a sequence of matplotlib windows and write out any export data
+as specified within the YAML file.
+
 ## System requirements
 
 Due to the libraries for parsing the YAML schema, Python >=3.10 is required.
diff --git a/TODO.md b/TODO.md
index 4cbf4db..77299f6 100644
--- a/TODO.md
+++ b/TODO.md
@@ -7,8 +7,8 @@
   - [x] Results analysis
     - [x] Result aggregation
     - [x] Result analysis
-    - [ ] Statistical re-runs
-  - [ ] YAML schema
+    - [x] Statistical re-runs
+  - [x] YAML schema
     - [x] Multiple nested arguments
     - [x] Pairwise arguments
     - [ ] Default groups for executables
@@ -25,12 +25,12 @@
     - Spawn runs for benches/individual executables
     - View results/analysis of runs
     - Modify YAML via inbuilt editor?
-  - [ ] Final plan
+  - [x] Final plan
     - [x] Tree-style bench hierarchy explorer
       - (is this useful? perhaps better as a list?)
     - [x] Listing argument variations for test bench
    - [x] Listing sbatch file contents for run configuration
     - [x] metric table for parent test bench (even when run config selected), requires improving analysis code
     - [x] metric plotting --"--
-    - [ ] run configuration/bench button to send off jobs to slurm
+    - [x] run configuration/bench button to send off jobs to slurm
     - [x] modal dialog to wait on slurm queue (could be an async function?)
diff --git a/src/hpc_multibench/main.py b/src/hpc_multibench/main.py
index 9ace7c6..f9393de 100755
--- a/src/hpc_multibench/main.py
+++ b/src/hpc_multibench/main.py
@@ -8,6 +8,8 @@
 from hpc_multibench.test_plan import TestPlan
 from hpc_multibench.tui.interactive_ui import UserInterface
 
+DEFAULT_BASE_OUTPUTS_DIRECTORY = Path("results/")
+
 
 def get_parser() -> ArgumentParser:  # pragma: no cover
     """Get the argument parser for the tool."""
@@ -23,6 +25,13 @@ def get_parser() -> ArgumentParser:  # pragma: no cover
         type=Path,
         help="the path to the configuration YAML file",
     )
+    parser.add_argument(
+        "-o",
+        "--outputs-directory",
+        type=Path,
+        default=DEFAULT_BASE_OUTPUTS_DIRECTORY,
+        help="the path to the directory to write run outputs to",
+    )
     sub_parsers = parser.add_subparsers(dest="command", required=True)
 
     parser_record = sub_parsers.add_parser(
@@ -60,7 +69,7 @@ def get_parser() -> ArgumentParser:  # pragma: no cover
 def main() -> None:  # pragma: no cover
     """Run the tool."""
     args = get_parser().parse_args()
-    test_plan = TestPlan(args.yaml_path)
+    test_plan = TestPlan(args.yaml_path, args.outputs_directory)
 
     if args.command == "record":
         test_plan.record_all(args)
diff --git a/src/hpc_multibench/test_bench.py b/src/hpc_multibench/test_bench.py
index 83f9cb7..ec94d61 100755
--- a/src/hpc_multibench/test_bench.py
+++ b/src/hpc_multibench/test_bench.py
@@ -28,7 +28,6 @@
 from hpc_multibench.uncertainties import UFloat, ufloat
 from hpc_multibench.yaml_model import BenchModel, RunConfigurationModel
 
-BASE_OUTPUT_DIRECTORY = Path("results/")
 DRY_RUN_SEPARATOR = "\n\n++++++++++\n\n\n"
 
 
@@ -74,11 +73,13 @@ def __init__(
         name: str,
         run_configuration_models: dict[str, RunConfigurationModel],
         bench_model: BenchModel,
+        base_output_directory: Path,
     ) -> None:
         """Instantiate the test bench."""
         self.name = name
         self.run_configuration_models = run_configuration_models
         self.bench_model = bench_model
+        self.base_output_directory = base_output_directory
 
         # Validate that all configurations named in the test bench are defined
         # in the test plan
@@ -92,7 +93,7 @@ def __init__(
     @property
     def output_directory(self) -> Path:
         """Get the output directory for the test bench."""
-        return BASE_OUTPUT_DIRECTORY / self.name
+        return self.base_output_directory / self.name
 
     @property
     def instantiations(self) -> list[dict[str, Any]]:
diff --git a/src/hpc_multibench/test_plan.py b/src/hpc_multibench/test_plan.py
index 7e34f4e..474d402 100755
--- a/src/hpc_multibench/test_plan.py
+++ b/src/hpc_multibench/test_plan.py
@@ -13,9 +13,11 @@
 class TestPlan:
     """The test plan defined from YAML for a tool run."""
 
-    def __init__(self, yaml_path: Path) -> None:
+    def __init__(self, yaml_path: Path, base_output_directory: Path) -> None:
         """Instantiate the test plan from a YAML file."""
         self.yaml_path = yaml_path
+        self.base_output_directory = base_output_directory
+
         test_plan_model = TestPlanModel.from_yaml(yaml_path)
         self.benches = [
             TestBench(
@@ -26,6 +28,7 @@ def __init__(self, yaml_path: Path) -> None:
                     if name in bench_model.run_configurations
                 },
                 bench_model=bench_model,
+                base_output_directory=base_output_directory,
             )
             for bench_name, bench_model in test_plan_model.benches.items()
         ]
diff --git a/src/hpc_multibench/tui/interactive_ui.py b/src/hpc_multibench/tui/interactive_ui.py
index 3457448..d591f21 100755
--- a/src/hpc_multibench/tui/interactive_ui.py
+++ b/src/hpc_multibench/tui/interactive_ui.py
@@ -418,7 +418,9 @@ def get_plot_model(
 
     def action_reload_test_plan(self) -> None:
         """Reload the test plan for the user interface."""
-        self.test_plan = TestPlan(self.test_plan.yaml_path)
+        self.test_plan = TestPlan(
+            self.test_plan.yaml_path, self.test_plan.base_output_directory
+        )
         self.initialise_test_plan_tree()
         self.update_all_tabs()
 
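
As a supplementary note (not part of the patch above), the sketch below illustrates the behaviour this change introduces: the new `-o`/`--outputs-directory` option is a `Path`-typed argparse flag that falls back to `results/` when omitted, and each test bench then writes beneath whichever base directory is chosen, as in `TestBench.output_directory`. The standalone parser and the example bench name `parallelism` are purely illustrative.

```python
from argparse import ArgumentParser
from pathlib import Path

# Mirrors the option added to get_parser() in main.py: a Path-typed flag
# that defaults to "results/" at the repository root when omitted.
DEFAULT_BASE_OUTPUTS_DIRECTORY = Path("results/")

parser = ArgumentParser(prog="hpc_multibench")
parser.add_argument(
    "-o",
    "--outputs-directory",
    type=Path,
    default=DEFAULT_BASE_OUTPUTS_DIRECTORY,
    help="the path to the directory to write run outputs to",
)

# With no -o flag the default is used; an explicit -o overrides it.
default_dir = parser.parse_args([]).outputs_directory
custom_dir = parser.parse_args(
    ["-o", "generated_results/hpccg-rs-kudu-results/"]
).outputs_directory
print(default_dir)  # results
print(custom_dir)   # generated_results/hpccg-rs-kudu-results

# Each test bench then writes beneath this base directory, as in
# TestBench.output_directory (base_output_directory / bench name).
print(custom_dir / "parallelism")
```

Passing the base directory down through `TestPlan` into each `TestBench`, rather than relying on the old module-level `BASE_OUTPUT_DIRECTORY` constant, is what allows pre-recorded results such as the `hpccg-rs-kudu-results` submodule to be reviewed in place.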