diff --git a/rebench/configurator.py b/rebench/configurator.py
index 36ac7a1e..4c1bf920 100644
--- a/rebench/configurator.py
+++ b/rebench/configurator.py
@@ -26,6 +26,7 @@
 from .configuration_error import ConfigurationError
 from .model.experiment import Experiment
 from .model.exp_run_details import ExpRunDetails
+from .model.exp_variables import ExpVariables
 from .model.reporting import Reporting
 from .model.executor import Executor
 from .output import UIError
@@ -181,13 +182,15 @@ def validate_gauge_adapters(raw_config):
 class Configurator(object):
 
     def __init__(self, raw_config, data_store, ui, cli_options=None, cli_reporter=None,
-                 exp_name=None, data_file=None, build_log=None, run_filter=None):
+                 exp_name=None, data_file=None, build_log=None, run_filter=None, machine=None):
         self._raw_config_for_debugging = raw_config  # kept around for debugging only
 
         self.build_log = build_log or raw_config.get('build_log', 'build.log')
         self.data_file = data_file or raw_config.get('default_data_file', 'rebench.data')
         self._exp_name = exp_name or raw_config.get('default_experiment', 'all')
         self.artifact_review = raw_config.get('artifact_review', False)
+        self.machine = machine
+        self.machines = raw_config.get('machines', {})
 
         self.config_dir = raw_config.get('__dir__', None)
         self.config_file = raw_config.get('__file__', None)
@@ -201,9 +204,19 @@ def __init__(self, raw_config, data_store, ui, cli_options=None, cli_reporter=No
             invocations = 1
             iterations = 1
 
-        self._root_run_details = ExpRunDetails.compile(
-            raw_config.get('runs', {}), ExpRunDetails.default(
-                invocations, iterations))
+        raw_machine_config = raw_config.get('machines', {})
+        if machine and machine not in raw_machine_config:
+            raise ValueError(
+                ("The machine configuration '%s' was selected "
+                 + "but not found under the 'machines:' key.") % machine)
+
+        self.base_run_details = self._assemble_base_run_details(
+            raw_machine_config.get(machine, {}),
+            raw_config.get('runs', {}), invocations, iterations)
+
+        self.base_variables = ExpVariables.compile(
+            raw_machine_config.get(machine, {}), ExpVariables.empty())
+
         self._root_reporting = Reporting.compile(
             raw_config.get('reporting', {}), Reporting.empty(cli_reporter),
             cli_options, ui)
@@ -235,6 +248,13 @@ def __init__(self, raw_config, data_store, ui, cli_options=None, cli_reporter=No
         experiments = raw_config.get('experiments', {})
         self._experiments = self._compile_experiments(experiments)
 
+    def _assemble_base_run_details(self, machine_raw, run_config, invocations, iterations):
+        machine_config = ExpRunDetails.compile(
+            machine_raw, ExpRunDetails.default(invocations, iterations))
+
+        return ExpRunDetails.compile(
+            run_config, machine_config)
+
     @property
     def use_rebench_db(self):
         report_results = self.options is None or self.options.use_data_reporting
@@ -290,10 +310,6 @@ def experiment_name(self):
     def reporting(self):
         return self._root_reporting
 
-    @property
-    def run_details(self):
-        return self._root_run_details
-
     def has_executor(self, executor_name):
         return executor_name in self._executors
 
diff --git a/rebench/model/experiment.py b/rebench/model/experiment.py
index fda66dc8..013cae6a 100644
--- a/rebench/model/experiment.py
+++ b/rebench/model/experiment.py
@@ -40,8 +40,8 @@ def compile(cls, name, exp, configurator):
         reporting = Reporting.compile(
             exp.get('reporting', {}), configurator.reporting,
             configurator.options, configurator.ui)
-        run_details = ExpRunDetails.compile(exp, configurator.run_details)
-        variables = ExpVariables.compile(exp, ExpVariables.empty())
+        run_details = ExpRunDetails.compile(exp, configurator.base_run_details)
+        variables = ExpVariables.compile(exp, configurator.base_variables)
 
         executions = exp.get('executions')
         suites = exp.get('suites')
diff --git a/rebench/rebench.py b/rebench/rebench.py
index 0ce88896..b2ccc8ea 100755
--- a/rebench/rebench.py
+++ b/rebench/rebench.py
@@ -115,6 +115,8 @@ def shell_options(self):
             '-B', '--without-building', action='store_false', dest='do_builds',
             help='Disables execution of build commands for executors and suites.',
             default=True)
+        execution.add_argument('-m', '--machine', action='store', dest='machine',
+                               default=None, help='Name of the machine configuration to be used.')
         execution.add_argument(
             '-s', '--scheduler', action='store', dest='scheduler',
             default='batch',
@@ -251,7 +253,7 @@ def run(self, argv=None):
             config = load_config(args.config[0])
             self._config = Configurator(config, data_store, self.ui,
                                         args, cli_reporter, exp_name, args.data_file,
-                                        args.build_log, exp_filter)
+                                        args.build_log, exp_filter, args.machine)
         except ConfigurationError as exc:
             raise UIError(exc.message + "\n", exc)
         except ValueError as exc:
diff --git a/rebench/tests/configurator_compile_test.py b/rebench/tests/configurator_compile_test.py
index 4dfc89da..c498d48c 100644
--- a/rebench/tests/configurator_compile_test.py
+++ b/rebench/tests/configurator_compile_test.py
@@ -1,3 +1,4 @@
+# pylint: disable=redefined-outer-name
 import pytest
 
 from ..configurator import validate_config, Configurator
@@ -73,6 +74,9 @@ def create_raw_configuration():
             ]
         }
     },
+    'machines': {
+        'testMachine': {}
+    },
     'executors': {
         'TestExec': {'path': 'path', 'executable': 'exec'}
     },
@@ -99,7 +103,7 @@ def add_setting(config, elem_name, key, value):
     elif elem_name == 'runs':
         config['runs'] = {key: value}
     elif elem_name == 'machine':
-        config['machines'] = {'testMachine': {key: value}}
+        config['machines']['testMachine'][key] = value
 
 
 def create_test_input():
@@ -132,10 +136,14 @@ def test_generated_config_is_valid(high_elem, low_elem, val_key, high_val, low_v
 @pytest.mark.parametrize("high_elem, low_elem, val_key, high_val, low_val",
                          create_test_input())
-def test_experiment_with_higher_priority_setting(ui, data_store, high_elem, low_elem, val_key, high_val, low_val):
+def test_experiment_with_higher_priority_setting(
+        ui, data_store, high_elem, low_elem, val_key, high_val, low_val):
     raw_config = create_config(high_elem, low_elem, val_key, high_val,
                                low_val)
+    assert_expected_value_in_config(ui, data_store, raw_config, val_key, high_val)
+
 
-    cnf = Configurator(raw_config, data_store, ui)
+def assert_expected_value_in_config(ui, data_store, raw_config, val_key, high_val):
+    cnf = Configurator(raw_config, data_store, ui, machine='testMachine')
     runs = list(cnf.get_runs())
 
     assert len(runs) == 1
@@ -158,3 +166,22 @@ def test_experiment_with_higher_priority_setting(ui, data_store, high_elem, low_
         high_val = high_val[0]
 
     assert getattr(run, val_key) == high_val
+
+
+def create_machine_test_input():
+    result = []
+
+    for key, value in RUN_DETAILS.items():
+        result.append((key, value[1]))
+
+    for key, value in VARIABLES.items():
+        result.append((key, value[1]))
+    return result
+
+
+@pytest.mark.parametrize("key, value", create_machine_test_input())
+def test_machine_settings_being_used(ui, data_store, key, value):
+    raw_config = create_raw_configuration()
+    add_setting(raw_config, 'machine', key, value)
+
+    assert_expected_value_in_config(ui, data_store, raw_config, key, value)