diff --git a/.flake8 b/.flake8
deleted file mode 100644
index 260dd214..00000000
--- a/.flake8
+++ /dev/null
@@ -1,9 +0,0 @@
-########################
-# Flake8 Configuration #
-########################
-
-[flake8]
-
-per-file-ignores =
-    # Imported but unused
-    */__init__.py:F401,F403
diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 10ee3412..b31fc375 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -4,31 +4,8 @@
 # `pre-commit run --all-files` as by default only changed files are checked
 
 repos:
-- repo: https://github.com/psf/black
-  rev: 23.10.0
-  hooks:
-  - id: black
-    description: The uncompromising code formatter
-- repo: https://github.com/pycqa/isort
-  rev: 5.12.0
-  hooks:
-  - id: isort
-    name: isort (python)
-  - id: isort
-    name: isort (cython)
-    types: [cython]
-  - id: isort
-    name: isort (pyi)
-    types: [pyi]
-- repo: https://github.com/nbQA-dev/nbQA
-  rev: 1.7.0
-  hooks:
-  - id: nbqa-black
-  - id: nbqa-pyupgrade
-    args: [--py36-plus]
-  - id: nbqa-isort
 - repo: https://github.com/pre-commit/pre-commit-hooks
-  rev: v4.5.0
+  rev: v5.0.0
   hooks:
   - id: check-yaml
     description: Check yaml files for parseable syntax
@@ -44,3 +21,19 @@ repos:
     description: Fix empty lines at ends of files
   - id: detect-private-key
     description: Detects the presence of private keys
+- repo: https://github.com/astral-sh/ruff-pre-commit
+  # Ruff version.
+  rev: v0.6.9
+  hooks:
+    # Run the linter.
+    - id: ruff
+      args:
+        - --fix
+        - --config
+        - pyproject.toml
+
+    # Run the formatter.
+    - id: ruff-format
+      args:
+        - --config
+        - pyproject.toml
diff --git a/doc/conf.py b/doc/conf.py
index 51ae4d36..93865e8e 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -2,15 +2,19 @@
 #
 # For the full list of built-in configuration values, see the documentation:
 # https://www.sphinx-doc.org/en/master/usage/configuration.html
+from __future__ import annotations
 
 import inspect
 
+import sphinx
+
 # -- Project information -----------------------------------------------------
 # https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
 
-project = 'petab-select'
-copyright = '2023, The PEtab Select developers'
-author = 'The PEtab Select developers'
+project = "PEtab Select"
+copyright = "2024, The PEtab Select developers"
+author = "The PEtab Select developers"
+
 
 # -- General configuration ---------------------------------------------------
 # https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
@@ -29,8 +33,8 @@
     "sphinx_autodoc_typehints",
 ]
 
-templates_path = ['_templates']
-exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
+templates_path = ["_templates"]
+exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
 
 
 intersphinx_mapping = {
@@ -53,7 +57,7 @@
 # -- Options for HTML output -------------------------------------------------
 # https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
 
-html_theme = 'sphinx_rtd_theme'
+html_theme = "sphinx_rtd_theme"
 # html_static_path = ['_static']
 
 html_logo = "logo/logo-wide.svg"
@@ -73,5 +77,5 @@ def autodoc_skip_member(app, what, name, obj, skip, options):
     return None
 
 
-def setup(app: "sphinx.application.Sphinx"):
+def setup(app: sphinx.application.Sphinx):
    app.connect("autodoc-skip-member", autodoc_skip_member, priority=0)
diff --git a/doc/examples/example_cli_famos.ipynb b/doc/examples/example_cli_famos.ipynb
index 04fc8f9b..5956c661 100644
--- a/doc/examples/example_cli_famos.ipynb
+++ b/doc/examples/example_cli_famos.ipynb
@@ -32,9 +32,7 @@
     "from pathlib import Path\n",
     "\n",
     "from example_cli_famos_helpers import (\n",
-    "    expected_criterion_values,\n",
     "    parse_summary_to_progress_list,\n",
-    "    petab_select_problem_yaml,\n",
     ")\n",
     "\n",
     "output_path = Path().resolve() / \"output_famos\"\n",
@@ -50,16 +48,9 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "import numpy as np\n",
-    "import pandas as pd\n",
-    "import petab\n",
+    "from petab_select import Method\n",
     "\n",
-    "import petab_select\n",
-    "from petab_select import ESTIMATE, FamosCandidateSpace, Method, Model\n",
-    "from petab_select.constants import Criterion\n",
-    "from petab_select.model import default_compare\n",
-    "\n",
-    "state = str(output_path / 'state.dill')\n",
+    "state = str(output_path / \"state.dill\")\n",
     "\n",
     "# Each iteration of model selection is described as a 2-tuple here.\n",
     "# First value is the model selection method.\n",
@@ -231,7 +222,7 @@
    "metadata": {},
    "outputs": [],
    "source": [
-    "progress_list = parse_summary_to_progress_list(output_path / 'summary.tsv')"
+    "progress_list = parse_summary_to_progress_list(output_path / \"summary.tsv\")"
    ]
   },
  {
diff --git a/doc/examples/example_cli_famos_calibration_tool.py b/doc/examples/example_cli_famos_calibration_tool.py
index 2116ffd7..c78cabbe 100644
--- a/doc/examples/example_cli_famos_calibration_tool.py
+++ b/doc/examples/example_cli_famos_calibration_tool.py
@@ -1,10 +1,8 @@
 import sys
 
-import pandas as pd
 from example_cli_famos_helpers import calibrate
 
 import petab_select
-from petab_select import ESTIMATE, Criterion, Model
 
 models_yaml = sys.argv[1]
 calibrated_models_yaml = sys.argv[2]
@@ -24,6 +22,6 @@
     (predecessor_model_hash,) = predecessor_model_hashes
 else:
     print(
-        'The models of this iteration somehow have different predecessor models.\n'
-        + '\n'.join(predecessor_model_hashes)
+        "The models of this iteration somehow have different predecessor models.\n"
+        + "\n".join(predecessor_model_hashes)
     )
diff --git a/doc/examples/example_cli_famos_helpers.py b/doc/examples/example_cli_famos_helpers.py
index b075a138..532950cd 100644
--- a/doc/examples/example_cli_famos_helpers.py
+++ b/doc/examples/example_cli_famos_helpers.py
@@ -1,11 +1,10 @@
 from pathlib import Path
-from typing import List, Tuple
 
 import pandas as pd
 from more_itertools import one
 
 import petab_select
-from petab_select import ESTIMATE, MODEL_HASH, Criterion, Method, Model
+from petab_select import MODEL_HASH, Criterion, Method, Model
 
 input_path = (
     Path(__file__).resolve().parent.parent.parent
@@ -43,9 +42,9 @@ def calibrate(
 
 def parse_summary_to_progress_list(
     summary_tsv: str,
-) -> List[Tuple[Method, set]]:
+) -> list[tuple[Method, set]]:
     """Get progress information from the summary file."""
-    df_raw = pd.read_csv(summary_tsv, sep='\t')
+    df_raw = pd.read_csv(summary_tsv, sep="\t")
     df = df_raw.loc[~pd.isnull(df_raw["predecessor change"])]
 
     parameter_list = list(
diff --git a/doc/examples/workflow_cli.ipynb b/doc/examples/workflow_cli.ipynb
index a88e9a7c..21f42da5 100644
--- a/doc/examples/workflow_cli.ipynb
+++ b/doc/examples/workflow_cli.ipynb
@@ -147,7 +147,7 @@
    }
   ],
   "source": [
-    "with open(output_path / 'uncalibrated_models_1.yaml') as f:\n",
+    "with open(output_path / \"uncalibrated_models_1.yaml\") as f:\n",
     "    print(f.read())"
   ]
  },
@@ -261,7 +261,7 @@
    }
   ],
   "source": [
-    "with open('model_selection/calibrated_models_1.yaml') as f:\n",
+    "with open(\"model_selection/calibrated_models_1.yaml\") as f:\n",
     "    print(f.read())"
   ]
  },
@@ -362,7 +362,7 @@
    }
   ],
   "source": [
-    "with open(output_path / 'uncalibrated_models_2.yaml') as f:\n",
+    "with open(output_path / \"uncalibrated_models_2.yaml\") as f:\n",
     "    print(f.read())"
   ]
  },
@@ -441,7 +441,7 @@
    }
   ],
   "source": [
-    "with open('model_selection/calibrated_M1_4.yaml') as f:\n",
+    "with open(\"model_selection/calibrated_M1_4.yaml\") as f:\n",
     "    print(f.read())"
   ]
  },
@@ -501,7 +501,7 @@
    }
   ],
   "source": [
-    "with open(output_path / 'uncalibrated_models_3.yaml') as f:\n",
+    "with open(output_path / \"uncalibrated_models_3.yaml\") as f:\n",
     "    print(f.read())"
   ]
  },
@@ -566,7 +566,7 @@
    }
   ],
   "source": [
-    "with open('model_selection/calibrated_M1_7.yaml') as f:\n",
+    "with open(\"model_selection/calibrated_M1_7.yaml\") as f:\n",
     "    print(f.read())"
   ]
  },
@@ -605,7 +605,7 @@
    }
   ],
   "source": [
-    "with open(output_path / 'uncalibrated_models_4.yaml') as f:\n",
+    "with open(output_path / \"uncalibrated_models_4.yaml\") as f:\n",
     "    print(f.read())"
   ]
  },
@@ -699,7 +699,7 @@
    }
   ],
   "source": [
-    "with open(output_path / 'uncalibrated_models_5.yaml') as f:\n",
+    "with open(output_path / \"uncalibrated_models_5.yaml\") as f:\n",
     "    print(f.read())"
   ]
  },
@@ -765,7 +765,7 @@
    }
   ],
   "source": [
-    "with open(output_path / 'best_model.yaml') as f:\n",
+    "with open(output_path / \"best_model.yaml\") as f:\n",
     "    print(f.read())"
   ]
  },
diff --git a/doc/examples/workflow_python.ipynb b/doc/examples/workflow_python.ipynb
index beb7d19c..eaf0acc2 100644
--- a/doc/examples/workflow_python.ipynb
+++ b/doc/examples/workflow_python.ipynb
@@ -46,31 +46,30 @@
    ],
    "source": [
     "import petab_select\n",
-    "from petab_select import ForwardCandidateSpace, Model\n",
+    "from petab_select import Model\n",
     "from petab_select.constants import (\n",
     "    CANDIDATE_SPACE,\n",
     "    MODELS,\n",
-    "    TERMINATE,\n",
     "    UNCALIBRATED_MODELS,\n",
     ")\n",
     "\n",
-    "BOLD_TEXT = '\\033[1m'\n",
-    "NORMAL_TEXT = '\\033[0m'\n",
+    "BOLD_TEXT = \"\\033[1m\"\n",
+    "NORMAL_TEXT = \"\\033[0m\"\n",
     "\n",
     "# Load the PEtab Select problem.\n",
     "select_problem = petab_select.Problem.from_yaml(\n",
-    "    'model_selection/petab_select_problem.yaml'\n",
+    "    \"model_selection/petab_select_problem.yaml\"\n",
     ")\n",
     "# Fake criterion values as a surrogate for a model calibration tool.\n",
     "fake_criterion = {\n",
-    "    'M1_0': 200,\n",
-    "    'M1_1': 150,\n",
-    "    'M1_2': 140,\n",
-    "    'M1_3': 130,\n",
-    "    'M1_4': -40,\n",
-    "    'M1_5': -70,\n",
-    "    'M1_6': -110,\n",
-    "    'M1_7': 50,\n",
+    "    \"M1_0\": 200,\n",
+    "    \"M1_1\": 150,\n",
+    "    \"M1_2\": 140,\n",
+    "    \"M1_3\": 130,\n",
+    "    \"M1_4\": -40,\n",
+    "    \"M1_5\": -70,\n",
+    "    \"M1_6\": -110,\n",
+    "    \"M1_7\": 50,\n",
     "}\n",
     "\n",
     "\n",
@@ -327,7 +326,7 @@
     "\n",
     "for candidate_model in iteration_results[MODELS].values():\n",
     "    if candidate_model.get_hash() == local_best_model.get_hash():\n",
-    "        print(BOLD_TEXT + 'BEST MODEL OF CURRENT ITERATION' + NORMAL_TEXT)\n",
+    "        print(BOLD_TEXT + \"BEST MODEL OF CURRENT ITERATION\" + NORMAL_TEXT)\n",
     "    print_model(candidate_model)"
   ]
  },
@@ -377,7 +376,7 @@
     "\n",
     "for candidate_model in iteration_results[MODELS].values():\n",
     "    if candidate_model.get_hash() == local_best_model.get_hash():\n",
-    "        print(BOLD_TEXT + 'BEST MODEL OF CURRENT ITERATION' + NORMAL_TEXT)\n",
+    "        print(BOLD_TEXT + \"BEST MODEL OF CURRENT ITERATION\" + NORMAL_TEXT)\n",
     "    print_model(candidate_model)"
   ]
  },
@@ -420,7 +419,7 @@
     "\n",
     "for candidate_model in iteration_results[MODELS].values():\n",
     "    if candidate_model.get_hash() == local_best_model.get_hash():\n",
-    "        print(BOLD_TEXT + 'BEST MODEL OF CURRENT ITERATION' + NORMAL_TEXT)\n",
+    "        print(BOLD_TEXT + \"BEST MODEL OF CURRENT ITERATION\" + NORMAL_TEXT)\n",
     "    print_model(candidate_model)"
   ]
  },
@@ -467,7 +466,7 @@
    }
   ],
   "source": [
-    "print(f'Number of candidate models: {len(iteration_results[MODELS])}.')"
+    "print(f\"Number of candidate models: {len(iteration_results[MODELS])}.\")"
   ]
  },
 {
diff --git a/doc/index.rst b/doc/index.rst
index 62c081a6..8cefeba6 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -3,17 +3,64 @@
    You can adapt this file completely to your liking, but it should at least
    contain the root `toctree` directive.
 
-Welcome to petab-select's documentation!
+Welcome to PEtab Select's documentation!
 ========================================
 
+PEtab Select brings
+`model selection <https://en.wikipedia.org/wiki/Model_selection>`_ to
+`PEtab <https://petab.readthedocs.io>`_. PEtab Select comprises file
+formats, a Python library and a command line interface.
+
+Model selection is the process of choosing the best model from a set of
+candidate models. PEtab Select provides a standardized and compact way to
+specify the candidate model space, and implements a number of model
+selection algorithms and criteria.
+
+Supported model selection algorithms:
+
+* brute force
+* `forward selection <https://en.wikipedia.org/wiki/Stepwise_regression>`_
+* `backward selection <https://en.wikipedia.org/wiki/Stepwise_regression>`_
+* `FAMoS <https://doi.org/10.1371/journal.pcbi.1007230>`_
+
+Supported model selection criteria:
+
+* (`corrected <https://en.wikipedia.org/wiki/Akaike_information_criterion>`_)
+  `Akaike Information Criterion <https://en.wikipedia.org/wiki/Akaike_information_criterion>`_ (AIC / AICc)
+* `Bayesian Information Criterion <https://en.wikipedia.org/wiki/Bayesian_information_criterion>`_ (BIC)
+
+Model calibration is performed outside of PEtab Select. For example,
+PEtab Select is well-integrated with:
+
+* `BasiCO <https://basico.readthedocs.io>`_
+  (`example `_)
+* `PEtab.jl <https://sebapersson.github.io/PEtab.jl>`_
+  (`example `_)
+* `pyPESTO <https://pypesto.readthedocs.io>`_
+  (`example `_)
+
+Other model calibration tools can easily be integrated using the provided
+Python library or command line interface.
+
+Installation
+------------
+
+The Python 3 package provides both the Python and command-line (CLI)
+interfaces, and can be installed from PyPI with:
+
+.. code-block:: bash
+
+   pip install petab-select
+
+
 .. toctree::
    :maxdepth: 2
    :caption: Contents:
 
+   problem_definition
    examples
    Test Suite
    api
-   problem_definition
 
 
 Indices and tables
diff --git a/petab_select/__init__.py b/petab_select/__init__.py
index c15e690a..d4395bab 100644
--- a/petab_select/__init__.py
+++ b/petab_select/__init__.py
@@ -1,8 +1,8 @@
 """Model selection extension for PEtab."""
 
-from . import plot
 import sys
 
+from . import plot  # noqa: F401
 from .candidate_space import *
 from .constants import *
 from .criteria import *
@@ -16,5 +16,5 @@
 __all__ = [
     x
     for x in dir(sys.modules[__name__])
-    if not x.startswith('_') and x != 'sys'
+    if not x.startswith("_") and x != "sys"
 ]
diff --git a/petab_select/candidate_space.py b/petab_select/candidate_space.py
index ec41c1ef..865bebf6 100644
--- a/petab_select/candidate_space.py
+++ b/petab_select/candidate_space.py
@@ -1,21 +1,21 @@
 """Classes and methods related to candidate spaces."""
+
 import abc
 import bisect
 import copy
 import csv
 import logging
 import warnings
+from collections.abc import Callable
 from pathlib import Path
-from typing import Any, Callable, Dict, List, Optional, Sequence, Type, Union
+from typing import Any
 
 import numpy as np
 from more_itertools import one
 
 from .constants import (
     ESTIMATE,
-    METHOD,
     METHOD_SCHEME,
-    MODELS,
     NEXT_METHOD,
     PREDECESSOR_MODEL,
     PREVIOUS_METHODS,
@@ -29,12 +29,12 @@
 from .model import Model, ModelHash, default_compare
 
 __all__ = [
-    'BackwardCandidateSpace',
-    'BruteForceCandidateSpace',
-    'CandidateSpace',
-    'FamosCandidateSpace',
-    'ForwardCandidateSpace',
-    'LateralCandidateSpace',
+    "BackwardCandidateSpace",
+    "BruteForceCandidateSpace",
+    "CandidateSpace",
+    "FamosCandidateSpace",
+    "ForwardCandidateSpace",
+    "LateralCandidateSpace",
 ]
 
 
@@ -97,11 +97,11 @@ def __init__(
         self,
         method: Method,
         criterion: Criterion,
         # TODO add MODEL_TYPE = Union[str, Model], str for VIRTUAL_INITIAL_MODEL
-        predecessor_model: Optional[Model] = None,
-        excluded_hashes: Optional[list[ModelHash]] = None,
+        predecessor_model: Model | None = None,
+        excluded_hashes: list[ModelHash] | None = None,
         limit: TYPE_LIMIT = np.inf,
         summary_tsv: TYPE_PATH = None,
-        previous_predecessor_model: Optional[Model] = None,
+        previous_predecessor_model: Model | None = None,
         calibrated_models: dict[ModelHash, Model] = None,
     ):
         """See class attributes for arguments."""
@@ -133,7 +133,7 @@ def __init__(
         self.latest_iteration_calibrated_models = {}
 
     def set_iteration_user_calibrated_models(
-        self, user_calibrated_models: Optional[dict[str, Model]]
+        self, user_calibrated_models: dict[str, Model] | None
     ) -> None:
         """Hide previously-calibrated models from the calibration tool.
 
@@ -166,7 +166,7 @@ def set_iteration_user_calibrated_models(
                 is not None
             ):
                 logging.info(
-                    f'Using user-supplied result for: {model.get_hash()}'
+                    f"Using user-supplied result for: {model.get_hash()}"
                 )
                 user_model_copy = copy.deepcopy(user_model)
                 user_model_copy.predecessor_model_hash = (
@@ -217,7 +217,7 @@ def get_iteration_calibrated_models(
         )
         return combined_calibrated_models
 
-    def write_summary_tsv(self, row: List[Any]):
+    def write_summary_tsv(self, row: list[Any]):
         """Write the summary of the last iteration to a TSV file.
 
         The destination is defined in ``self.summary_tsv``.
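
# A minimal sketch (an illustration, not part of this changeset) of how the
# summary TSV written by `write_summary_tsv` can be read back, mirroring
# `parse_summary_to_progress_list` in the FAMoS example helpers above. The
# column names come from `_setup_summary_tsv` below; the file location is an
# assumption based on the FAMoS example's `output_famos` directory.
import pandas as pd

summary_df = pd.read_csv("output_famos/summary.tsv", sep="\t")
print(summary_df[["method", "# candidates", "current model criterion"]])
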
@@ -233,11 +233,11 @@ def write_summary_tsv(self, row: list[Any]):
         if not isinstance(row, list):
             row = [
                 row,
-                *([''] * 5),
+                *([""] * 5),
             ]
 
-        with open(self.summary_tsv, 'a', encoding="utf-8") as f:
-            writer = csv.writer(f, delimiter='\t')
+        with open(self.summary_tsv, "a", encoding="utf-8") as f:
+            writer = csv.writer(f, delimiter="\t")
             writer.writerow(row)
 
     def _setup_summary_tsv(self):
@@ -247,20 +247,20 @@
         if not self.summary_tsv.exists():
             self.write_summary_tsv(
                 [
-                    'method',
-                    '# candidates',
-                    'predecessor change',
-                    'current model criterion',
-                    'current model',
-                    'candidate changes',
+                    "method",
+                    "# candidates",
+                    "predecessor change",
+                    "current model criterion",
+                    "current model",
+                    "candidate changes",
                 ]
             )
 
     @classmethod
     def read_arguments_from_yaml_dict(
         cls,
-        yaml_dict: Dict[str, str],
-    ) -> Dict[str, Union[str, Model]]:
+        yaml_dict: dict[str, str],
+    ) -> dict[str, str | Model]:
         """Parse settings that were stored in YAML.
 
         Args:
@@ -305,7 +305,7 @@ def is_plausible(self, model: Model) -> bool:
         """
         return True
 
-    def distance(self, model: Model) -> Union[None, float, int]:
+    def distance(self, model: Model) -> None | float | int:
         """Compute the distance between two models that are neighbors.
 
         Args:
@@ -321,7 +321,7 @@
     def accept(
         self,
         model: Model,
-        distance: Union[None, float, int],
+        distance: None | float | int,
     ) -> None:
         """Add a candidate model to the candidate space.
 
@@ -350,7 +350,7 @@ def n_accepted(self) -> int:
 
     def excluded(
         self,
-        model_hash: Union[Model, ModelHash],
+        model_hash: Model | ModelHash,
     ) -> bool:
         """Check whether a model is excluded.
 
@@ -378,7 +378,7 @@ def _consider_method(self, model: Model) -> bool:
         """
         return True
 
-    def consider(self, model: Union[Model, None]) -> bool:
+    def consider(self, model: Model | None) -> bool:
         """Add a candidate model, if it should be added.
 
         Args:
@@ -403,9 +403,10 @@
             return False
         if self.excluded(model):
             warnings.warn(
-                f'Model `{model.get_hash()}` has been previously excluded '
-                'from the candidate space so is skipped here.',
+                f"Model `{model.get_hash()}` has been previously excluded "
+                "from the candidate space so is skipped here.",
                 RuntimeWarning,
+                stacklevel=2,
             )
             return True
         if not self.is_plausible(model):
@@ -420,9 +421,7 @@
         self.models = []
         self.distances = []
 
-    def set_predecessor_model(
-        self, predecessor_model: Union[Model, str, None]
-    ):
+    def set_predecessor_model(self, predecessor_model: Model | str | None):
         """Set the predecessor model.
 
         See class attributes for arguments.
@@ -433,16 +432,16 @@
             and self.method not in VIRTUAL_INITIAL_MODEL_METHODS
         ):
             raise ValueError(
-                f'A virtual initial model was requested for a method ({self.method}) that does not support them.'
+                f"A virtual initial model was requested for a method ({self.method}) that does not support them."
             )
 
-    def get_predecessor_model(self) -> Union[str, Model]:
+    def get_predecessor_model(self) -> str | Model:
         """Get the predecessor model."""
         return self.predecessor_model
 
     def set_excluded_hashes(
         self,
-        hashes: Union[Model, ModelHash, list[Union[Model, ModelHash]]],
+        hashes: Model | ModelHash | list[Model | ModelHash],
         extend: bool = False,
     ) -> None:
         """Set the excluded hashes.
@@ -453,7 +452,7 @@
             extend:
                 Whether to replace or extend the current excluded hashes.
         """
-        if isinstance(hashes, (Model, ModelHash)):
+        if isinstance(hashes, Model | ModelHash):
             hashes = [hashes]
         excluded_hashes = set()
         for potential_hash in hashes:
@@ -473,7 +472,7 @@
             The hashes of excluded models.
         """
         try:
-            return getattr(self, "excluded_hashes")
+            return self.excluded_hashes
         except AttributeError:
             self.excluded_hashes = set()
             return self.get_excluded_hashes()
@@ -517,9 +516,9 @@ def wrapper():
 
     def reset(
         self,
-        predecessor_model: Optional[Union[Model, str, None]] = None,
+        predecessor_model: Model | str | None = None,
         # FIXME change `Any` to some `TYPE_MODEL_HASH` (e.g. union of str/int/float)
-        excluded_hashes: Optional[list[ModelHash]] = None,
+        excluded_hashes: list[ModelHash] | None = None,
         limit: TYPE_LIMIT = None,
     ) -> None:
         """Reset the candidate models, optionally reinitialize with a model.
@@ -549,8 +548,8 @@
     def distances_in_estimated_parameters(
         self,
         model: Model,
-        predecessor_model: Optional[Model] = None,
-    ) -> Dict[str, Union[float, int]]:
+        predecessor_model: Model | None = None,
+    ) -> dict[str, float | int]:
         """Distance between two models in model space, using different metrics.
 
         All metrics are in terms of estimated parameters.
@@ -582,9 +581,9 @@
             model0.petab_yaml
         ):
             raise NotImplementedError(
-                'Computation of distances between different PEtab problems is '
-                'currently not supported. This error is also raised if the same '
-                'PEtab problem is read from YAML files in different locations.'
+                "Computation of distances between different PEtab problems is "
+                "currently not supported. This error is also raised if the same "
+                "PEtab problem is read from YAML files in different locations."
             )
 
         # All parameters from the PEtab problem are used in the computation.
@@ -596,9 +595,9 @@
                 parameters0 = np.array([ESTIMATE for _ in parameter_ids])
             else:
                 raise NotImplementedError(
-                    'Distances for the virtual initial model have not yet been '
+                    "Distances for the virtual initial model have not yet been "
                     f'implemented for the method "{self.method}". Please notify the'
-                    'developers.'
+                    "developers."
                 )
         else:
             parameter_ids = list(model0.petab_parameters)
@@ -611,10 +610,10 @@
             # in all subspaces.
             if model0.petab_yaml.resolve() != model1.petab_yaml.resolve():
                 raise ValueError(
-                    'Computing the distance between different models that '
+                    "Computing the distance between different models that "
                     'have different "base" PEtab problems is not yet '
-                    f'supported. First base PEtab problem: {model0.petab_yaml}.'
-                    f' Second base PEtab problem: {model1.petab_yaml}.'
+                    f"supported. First base PEtab problem: {model0.petab_yaml}."
+                    f" Second base PEtab problem: {model1.petab_yaml}."
                 )
             parameters1 = np.array(
                 model1.get_parameter_values(parameter_ids=parameter_ids)
@@ -634,8 +633,8 @@
         # TODO constants? e.g. Distance.L1 and Distance.Size
         distances = {
-            'l1': l1,
-            'size': size,
+            "l1": l1,
+            "size": size,
         }
 
         return distances
@@ -679,7 +678,7 @@ class ForwardCandidateSpace(CandidateSpace):
     def __init__(
         self,
         *args,
-        predecessor_model: Optional[Union[Model, str]] = None,
+        predecessor_model: Model | str | None = None,
         max_steps: int = None,
         **kwargs,
     ):
@@ -690,15 +689,15 @@
         if predecessor_model is None:
             predecessor_model = VIRTUAL_INITIAL_MODEL
         super().__init__(
-            method=Method.FORWARD if self.direction == 1 else Method.BACKWARD,
             *args,
+            method=Method.FORWARD if self.direction == 1 else Method.BACKWARD,
             predecessor_model=predecessor_model,
             **kwargs,
         )
 
     def is_plausible(self, model: Model) -> bool:
         distances = self.distances_in_estimated_parameters(model)
-        n_steps = self.direction * distances['size']
+        n_steps = self.direction * distances["size"]
 
         if self.max_steps is not None and n_steps > self.max_steps:
             raise StopIteration(
@@ -709,7 +708,7 @@
         # increases (or decreases, if `self.direction == -1`), and no
         # previously estimated parameters become fixed.
         if self.predecessor_model == VIRTUAL_INITIAL_MODEL or (
-            n_steps > 0 and distances['l1'] == n_steps
+            n_steps > 0 and distances["l1"] == n_steps
         ):
             return True
         return False
@@ -718,7 +717,7 @@ def distance(self, model: Model) -> int:
         # TODO calculated here and `is_plausible`. Rewrite to only calculate
         # once?
         distances = self.distances_in_estimated_parameters(model)
-        return distances['l1']
+        return distances["l1"]
 
     def _consider_method(self, model) -> bool:
         """See :meth:`CandidateSpace._consider_method`."""
@@ -855,23 +854,23 @@ class FamosCandidateSpace(CandidateSpace):
     }
 
     forwarded_inner = [
-        '_consider_method',
+        "_consider_method",
     ]
     _consider_method = None
     forwarded_super_and_inner = [
-        'reset_accepted',
-        'set_predecessor_model',
-        'set_excluded_hashes',
-        'set_limit',
+        "reset_accepted",
+        "set_predecessor_model",
+        "set_excluded_hashes",
+        "set_limit",
     ]
 
     def __init__(
         self,
         *args,
-        predecessor_model: Optional[Union[Model, str, None]] = None,
-        critical_parameter_sets: List = [],
-        swap_parameter_sets: List = [],
-        method_scheme: Dict[tuple, str] = None,
+        predecessor_model: Model | str | None = None,
+        critical_parameter_sets: list = [],
+        swap_parameter_sets: list = [],
+        method_scheme: dict[tuple, str] = None,
         n_reattempts: int = 0,
         consecutive_laterals: bool = False,
         **kwargs,
@@ -907,7 +906,7 @@
             and not self.check_critical(predecessor_model)
         ):
             raise ValueError(
-                f'Provided predecessor model {predecessor_model.parameters} does not contain necessary critical parameters {self.critical_parameter_sets}. Provide a valid predecessor model.'
+                f"Provided predecessor model {predecessor_model.parameters} does not contain necessary critical parameters {self.critical_parameter_sets}. Provide a valid predecessor model."
             )
 
         if (
@@ -921,22 +920,20 @@
         # FIXME remove `None` from the resulting `inner_methods` set?
         inner_methods = set.union(
             *[
-                set(
-                    [
-                        *(
-                            method_pattern
-                            if method_pattern is not None
-                            else (None,)
-                        ),
-                        next_method,
-                    ]
-                )
+                {
+                    *(
+                        method_pattern
+                        if method_pattern is not None
+                        else (None,)
+                    ),
+                    next_method,
+                }
                 for method_pattern, next_method in self.method_scheme.items()
             ]
         )
         if Method.LATERAL in inner_methods and not self.swap_parameter_sets:
             raise ValueError(
-                f"Use of the lateral method with FAMoS requires `swap_parameter_sets`."
+                "Use of the lateral method with FAMoS requires `swap_parameter_sets`."
             )
 
         for method in inner_methods:
@@ -947,8 +944,8 @@
                 Method.MOST_DISTANT,
             ]:
                 raise NotImplementedError(
-                    f'Methods FAMoS can swap to are `Method.FORWARD`, `Method.BACKWARD` and `Method.LATERAL`, not {method}. \
-                        Check if the method_scheme scheme provided is correct.'
+                    f"Methods FAMoS can swap to are `Method.FORWARD`, `Method.BACKWARD` and `Method.LATERAL`, not {method}. \
+                        Check if the method_scheme provided is correct."
                 )
 
         self.inner_candidate_spaces = {
@@ -978,8 +975,8 @@
         ]
 
         super().__init__(
-            method=self.method,
             *args,
+            method=self.method,
             predecessor_model=predecessor_model,
             **kwargs,
         )
@@ -992,7 +989,8 @@
             and (Method.LATERAL,) not in self.method_scheme
         ):
             raise ValueError(
-                "Please provide a method to switch to after a lateral search, if not enabling the `consecutive_laterals` option."
+                "Please provide a method to switch to after a lateral search, "
+                "if not enabling the `consecutive_laterals` option."
             )
 
         if self.n_reattempts:
@@ -1032,11 +1030,10 @@
     def update_after_calibration(
         self,
         *args,
-        iteration_calibrated_models: Dict[str, Model],
+        iteration_calibrated_models: dict[str, Model],
         **kwargs,
     ) -> None:
         """See `CandidateSpace.update_after_calibration`."""
-
         super().update_after_calibration(
             *args,
             iteration_calibrated_models=iteration_calibrated_models,
@@ -1070,12 +1067,12 @@
 
     def update_from_iteration_calibrated_models(
         self,
-        iteration_calibrated_models: Dict[str, Model],
+        iteration_calibrated_models: dict[str, Model],
     ) -> bool:
         """Update ``self.best_models`` with the latest
         ``iteration_calibrated_models`` and determine if there was a new
-        best model. If so, return ``False``. ``True`` otherwise."""
-
+        best model. If so, return ``False``. ``True`` otherwise.
+        """
         go_into_switch_method = True
         for model in iteration_calibrated_models.values():
             if (
@@ -1113,7 +1110,8 @@
 
     def insert_model_into_best_models(self, model_to_insert: Model) -> None:
         """Inserts a model into the list of best_models which are sorted
-        w.r.t. the criterion specified."""
+        w.r.t. the criterion specified.
+        """
         insert_index = bisect.bisect_left(
             [
                 model.get_criterion(self.criterion)
@@ -1123,11 +1121,11 @@
         )
         self.best_models.insert(insert_index, model_to_insert)
 
-    def consider(self, model: Union[Model, None]) -> bool:
+    def consider(self, model: Model | None) -> bool:
         """Re-define ``consider`` of FAMoS to be the ``consider``
         method of the ``inner_candidate_space``. Update all the attributes
-        changed in the ``consider`` method."""
-
+        changed in the ``consider`` method.
+        """
         if self.limit.reached():
             return False
 
@@ -1158,7 +1156,8 @@
 
     def check_swap(self, model: Model) -> bool:
         """Check if parameters that are swapped are contained in the
-        same swap parameter set."""
+        same swap parameter set.
+        """
         if self.method != Method.LATERAL:
             return True
 
@@ -1178,7 +1177,6 @@
 
     def check_critical(self, model: Model) -> bool:
         """Check if the model contains all necessary critical parameters"""
-
         estimated_parameters_ids = set(model.get_estimated_parameter_ids_all())
         for critical_set in self.critical_parameter_sets:
             if not estimated_parameters_ids.intersection(set(critical_set)):
@@ -1189,8 +1187,8 @@
     def switch_method(
         self,
     ) -> None:
         """Switch to the next method with respect to the history
-        of methods used and the switching scheme in ``self.method_scheme``."""
-
+        of methods used and the switching scheme in ``self.method_scheme``.
+        """
         previous_method = self.method
         next_method = previous_method
         logging.info("SWITCHING", self.method_history)
@@ -1249,7 +1247,6 @@
 
     def update_method(self, method: Method):
         """Update ``self.method`` to ``method``."""
-
         self.method = method
 
     def switch_inner_candidate_space(
@@ -1262,7 +1259,6 @@
             excluded_hashes:
                 Hashes of excluded models.
         """
-
         # if self.method != Method.MOST_DISTANT:
         self.inner_candidate_space = self.inner_candidate_spaces[self.method]
         # reset the next inner candidate space with the current history of all
@@ -1276,8 +1272,8 @@
         self,
     ):
         """Jump to most distant model with respect to the history of all
-        calibrated models."""
-
+        calibrated models.
+        """
         predecessor_model = self.get_most_distant()
 
         logging.info("JUMPING: ", predecessor_model.parameters)
@@ -1320,7 +1316,6 @@
         If not we choose the model in a subspace that has least distance
         to this complement model.
         """
-
         most_distance = 0
         most_distant_indices = []
 
@@ -1367,15 +1362,12 @@
             raise StopIteration("No most_distant model found. Terminating")
 
         most_distant_parameter_values = [
-            str(index).replace('1', ESTIMATE) for index in most_distant_indices
+            str(index).replace("1", ESTIMATE) for index in most_distant_indices
         ]
 
-        most_distant_parameters = {
-            parameter_id: index
-            for parameter_id, index in zip(
-                parameter_ids, most_distant_parameter_values
-            )
-        }
+        most_distant_parameters = dict(
+            zip(parameter_ids, most_distant_parameter_values, strict=True)
+        )
 
         most_distant_model = Model(
             petab_yaml=model.petab_yaml,
@@ -1399,7 +1391,7 @@ class LateralCandidateSpace(CandidateSpace):
     def __init__(
         self,
         *args,
-        predecessor_model: Union[Model, None],
+        predecessor_model: Model | None,
         max_steps: int = None,
         **kwargs,
     ):
@@ -1409,8 +1401,8 @@
                 Maximal allowed number of swap moves. If 0 then there is no maximum.
         """
         super().__init__(
-            method=Method.LATERAL,
             *args,
+            method=Method.LATERAL,
             predecessor_model=predecessor_model,
             **kwargs,
         )
@@ -1419,14 +1411,14 @@
     def is_plausible(self, model: Model) -> bool:
         if self.predecessor_model is None:
             raise ValueError(
-                f"The predecessor_model is still None. Provide an appropriate predecessor_model"
+                "The predecessor_model is still None. Provide an appropriate predecessor_model"
             )
         distances = self.distances_in_estimated_parameters(model)
 
         # If max_number_of_steps is non-zero and the number of steps made is
         # larger then move is not plausible.
-        if self.max_steps is not None and distances['l1'] > 2 * self.max_steps:
+        if self.max_steps is not None and distances["l1"] > 2 * self.max_steps:
             raise StopIteration(
                 f"Maximal number of steps for method {self.method} exceeded. Stop sending candidate models."
             )
@@ -1435,12 +1427,12 @@
         # the same, but some estimated parameters have become fixed and vice
         # versa.
         if (
-            distances['size'] == 0
+            distances["size"] == 0
             and
             # distances['size'] == 0 implies L1 % 2 == 0.
             # FIXME here and elsewhere, deal with models that are equal
             # except for the values of their fixed parameters.
-            distances['l1'] > 0
+            distances["l1"] > 0
         ):
             return True
         return False
@@ -1461,8 +1453,8 @@ def __init__(self, *args, **kwargs):
         #    'brute force candidate space.'
         # )
         super().__init__(
-            method=Method.BRUTE_FORCE,
             *args,
+            method=Method.BRUTE_FORCE,
             **kwargs,
         )
@@ -1479,7 +1471,7 @@ def _consider_method(self, model):
 }
 
 
-def method_to_candidate_space_class(method: Method) -> Type[CandidateSpace]:
+def method_to_candidate_space_class(method: Method) -> type[CandidateSpace]:
     """Get a candidate space class, given its method name.
 
     Args:
@@ -1493,8 +1485,8 @@
     candidate_space_class = candidate_space_classes.get(method, None)
     if candidate_space_class is None:
         raise NotImplementedError(
-            f'The provided method `{method}` does not correspond to an '
-            'implemented candidate space.'
+            f"The provided method `{method}` does not correspond to an "
+            "implemented candidate space."
         )
     return candidate_space_class
diff --git a/petab_select/cli.py b/petab_select/cli.py
index 56ec38f6..f318205e 100644
--- a/petab_select/cli.py
+++ b/petab_select/cli.py
@@ -1,6 +1,7 @@
 """The PEtab Select command-line interface."""
+
 from pathlib import Path
-from typing import Any, Dict, List
+from typing import Any
 
 import click
 import dill
@@ -12,35 +13,35 @@
 from . import ui
 from .candidate_space import CandidateSpace
 from .constants import CANDIDATE_SPACE, MODELS, PETAB_YAML, TERMINATE
-from .model import Model, ModelHash, models_from_yaml_list, models_to_yaml_list
+from .model import ModelHash, models_from_yaml_list, models_to_yaml_list
 from .problem import Problem
 
 
-def read_state(filename: str) -> Dict[str, Any]:
-    with open(filename, 'rb') as f:
+def read_state(filename: str) -> dict[str, Any]:
+    with open(filename, "rb") as f:
         state = dill.load(f)
 
-    state['problem'] = dill.loads(state['problem'])
-    state['candidate_space'] = dill.loads(state['candidate_space'])
+    state["problem"] = dill.loads(state["problem"])
+    state["candidate_space"] = dill.loads(state["candidate_space"])
 
     return state
 
 
 def write_state(
-    state: Dict[str, Any],
+    state: dict[str, Any],
     filename: str,
-) -> Dict[str, Any]:
-    with open(filename, 'wb') as f:
+) -> dict[str, Any]:
+    with open(filename, "wb") as f:
         dill.dump(state, f)
 
 
 def get_state(
     problem: Problem,
     candidate_space: CandidateSpace,
-) -> Dict[str, Any]:
+) -> dict[str, Any]:
     state = {
-        'problem': dill.dumps(problem),
-        'candidate_space': dill.dumps(candidate_space),
+        "problem": dill.dumps(problem),
+        "candidate_space": dill.dumps(candidate_space),
     }
     return state
@@ -52,32 +53,32 @@ def cli():
 
 @cli.command("start_iteration")
 @click.option(
-    '--problem',
-    '-p',
-    'problem_yaml',
-    help='The PEtab Select YAML problem file.',
+    "--problem",
+    "-p",
+    "problem_yaml",
+    help="The PEtab Select YAML problem file.",
 )
 @click.option(
-    '--state',
-    '-s',
-    'state_dill',
+    "--state",
+    "-s",
+    "state_dill",
     type=str,
-    help='The file that stores the state.',
+    help="The file that stores the state.",
 )
 @click.option(
-    '--output-uncalibrated-models',
-    '-u',
-    'uncalibrated_models_yaml',
+    "--output-uncalibrated-models",
+    "-u",
+    "uncalibrated_models_yaml",
     type=str,
-    help='The file where uncalibrated models from this iteration will be stored.',
+    help="The file where uncalibrated models from this iteration will be stored.",
 )
 @click.option(
-    '--method',
-    '-m',
-    'method',
+    "--method",
+    "-m",
+    "method",
     type=str,
     default=None,
-    help='The method used to identify the candidate models. Defaults to the method in the problem YAML.',
+    help="The method used to identify the candidate models. Defaults to the method in the problem YAML.",
 )
 # @click.option(
 #     '--previous-predecessor-model',
 #     '-P',
 #     'previous_predecessor_model_yaml',
 #     type=str,
 #     default=None,
 #     help=(
 #         '(Optional) The predecessor model used in the previous iteration '
 #         'of model selection.'
 #     ),
 # )
@@ -108,48 +109,48 @@
 @click.option(
-    '--limit',
-    '-l',
-    'limit',
+    "--limit",
+    "-l",
+    "limit",
     type=float,
     default=np.inf,
-    help='(Optional) Limit the number of models in the output.',
+    help="(Optional) Limit the number of models in the output.",
 )
 @click.option(
-    '--limit-sent',
-    '-L',
-    'limit_sent',
+    "--limit-sent",
+    "-L",
+    "limit_sent",
    type=float,
     default=np.inf,
     help=(
-        '(Optional) Limit the number of models sent to the candidate space '
-        '(which are possibly rejected and excluded from the output).'
+        "(Optional) Limit the number of models sent to the candidate space "
+        "(which are possibly rejected and excluded from the output)."
     ),
 )
 @click.option(
-    '--relative-paths/--absolute-paths',
-    'relative_paths',
+    "--relative-paths/--absolute-paths",
+    "relative_paths",
     type=bool,
     default=False,
-    help='Whether to output paths relative to the output file.',
+    help="Whether to output paths relative to the output file.",
 )
 @click.option(
-    '--excluded-models',
-    '-e',
-    'excluded_model_files',
+    "--excluded-models",
+    "-e",
+    "excluded_model_files",
     type=str,
     multiple=True,
     default=None,
-    help='Exclude models in this file.',
+    help="Exclude models in this file.",
 )
 @click.option(
-    '--excluded-model-hashes',
-    '-E',
-    'excluded_model_hash_files',
+    "--excluded-model-hashes",
+    "-E",
+    "excluded_model_hash_files",
     type=str,
     multiple=True,
     default=None,
-    help='Exclude model hashes in this file (one model hash per line).',
+    help="Exclude model hashes in this file (one model hash per line).",
 )
 def start_iteration(
     problem_yaml: str,
@@ -163,8 +164,8 @@ def start_iteration(
     limit: float = np.inf,
     limit_sent: float = np.inf,
     relative_paths: bool = False,
-    excluded_model_files: List[str] = None,
-    excluded_model_hash_files: List[str] = None,
+    excluded_model_files: list[str] = None,
+    excluded_model_hash_files: list[str] = None,
 ) -> None:
     """Search for candidate models in the model space.
 
@@ -190,8 +191,8 @@ def start_iteration(
             "Changing method in the middle of a run is currently not "
             "supported. Delete the state to start with a new method."
         )
-        problem = state['problem']
-        candidate_space = state['candidate_space']
+        problem = state["problem"]
+        candidate_space = state["candidate_space"]
 
     excluded_models = []
     # TODO seems like default is `()`, not `None`...
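
# A minimal sketch of the state round-trip implemented by `get_state`,
# `write_state`, and `read_state` above, which carries the problem and
# candidate space between CLI invocations. `problem` and `candidate_space`
# stand in for already-constructed objects; the file name is hypothetical.
state = get_state(problem=problem, candidate_space=candidate_space)
write_state(state, "state.dill")  # persisted when one CLI command finishes
state = read_state("state.dill")  # restored when the next command starts
problem = state["problem"]
candidate_space = state["candidate_space"]
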
@@ -203,8 +204,8 @@ def start_iteration( excluded_model_hashes = [] if excluded_model_hash_files is not None: for excluded_model_hash_file in excluded_model_hash_files: - with open(excluded_model_hash_file, 'r') as f: - excluded_model_hashes += f.read().split('\n') + with open(excluded_model_hash_file) as f: + excluded_model_hashes += f.read().split("\n") excluded_hashes = [ excluded_model.get_hash() for excluded_model in excluded_models @@ -277,48 +278,48 @@ def start_iteration( @cli.command("end_iteration") @click.option( - '--state', - '-s', - 'state_dill', + "--state", + "-s", + "state_dill", type=str, - help='The file that stores the state.', + help="The file that stores the state.", ) @click.option( - '--output-models', - '-m', - 'models_yaml', + "--output-models", + "-m", + "models_yaml", type=str, help="The file where this iteration's calibrated models will be stored.", ) @click.option( - '--output-metadata', - '-d', - 'metadata_yaml', + "--output-metadata", + "-d", + "metadata_yaml", type=str, help="The file where this iteration's metadata will be stored.", ) @click.option( - '--calibrated-models', - '-c', - 'calibrated_models_yamls', + "--calibrated-models", + "-c", + "calibrated_models_yamls", type=str, multiple=True, help=( - 'The calibration results for the uncalibrated models of this iteration.' + "The calibration results for the uncalibrated models of this iteration." ), ) @click.option( - '--relative-paths/--absolute-paths', - 'relative_paths', + "--relative-paths/--absolute-paths", + "relative_paths", type=bool, default=False, - help='Whether to output paths relative to the output file.', + help="Whether to output paths relative to the output file.", ) def end_iteration( state_dill: str, models_yaml: str, metadata_yaml: str, - calibrated_models_yamls: List[str] = None, + calibrated_models_yamls: list[str] = None, relative_paths: bool = False, ) -> None: """Finalize a model selection iteration. @@ -328,8 +329,8 @@ def end_iteration( """ # Setup state state = read_state(state_dill) - problem = state['problem'] - candidate_space = state['candidate_space'] + problem = state["problem"] + candidate_space = state["candidate_space"] calibrated_models = {} if calibrated_models_yamls: @@ -366,38 +367,38 @@ def end_iteration( metadata = { TERMINATE: iteration_results[TERMINATE], } - with open(metadata_yaml, 'w') as f: + with open(metadata_yaml, "w") as f: yaml.dump(metadata, f) @cli.command("model_to_petab") @click.option( - '--model', - '-m', - 'models_yamls', + "--model", + "-m", + "models_yamls", multiple=True, - help='The PEtab Select model YAML file.', + help="The PEtab Select model YAML file.", ) @click.option( - '--output', - '-o', - 'output_path', + "--output", + "-o", + "output_path", type=str, - help='The directory where the PEtab files will be output.', + help="The directory where the PEtab files will be output.", ) @click.option( - '--model_id', - '-i', - 'model_id', + "--model_id", + "-i", + "model_id", type=str, default=None, help=( - '(Optional) The ID of the model to use, in case the YAML file ' - 'contains multiple models.' + "(Optional) The ID of the model to use, in case the YAML file " + "contains multiple models." 
), ) def model_to_petab( - models_yamls: List[str], + models_yamls: list[str], output_path: str, model_id: str = None, ) -> None: @@ -419,17 +420,17 @@ def model_to_petab( for model in models: if model.model_id == model_id: if model0 is not None: - raise ValueError('There are multiple models with this ID.') + raise ValueError("There are multiple models with this ID.") model0 = model # TODO could `break` here and remove the above `ValueError` # and the `model0` logic if model0 is None: - raise ValueError('Could not find a model with the specified model ID.') + raise ValueError("Could not find a model with the specified model ID.") if model_id is not None and model0.model_id != model_id: raise ValueError( - 'The ID of the model from the YAML file does not match the ' - 'specified ID.' + "The ID of the model from the YAML file does not match the " + "specified ID." ) result = ui.model_to_petab(model0, output_path) @@ -438,22 +439,22 @@ def model_to_petab( @cli.command("models_to_petab") @click.option( - '--models', - '-m', - 'models_yamls', + "--models", + "-m", + "models_yamls", type=str, multiple=True, - help='The PEtab Select model YAML file, containing a list of models.', + help="The PEtab Select model YAML file, containing a list of models.", ) @click.option( - '--output', - '-o', - 'output_path_prefix', + "--output", + "-o", + "output_path_prefix", type=str, - help='The directory where the PEtab files will be output. The PEtab files will be stored in a model-specific subdirectory.', + help="The directory where the PEtab files will be output. The PEtab files will be stored in a model-specific subdirectory.", ) def models_to_petab( - models_yamls: List[str], + models_yamls: list[str], output_path_prefix: str, ) -> None: """Create a PEtab problem for each model in a PEtab Select model YAML file. @@ -472,22 +473,22 @@ def models_to_petab( models.extend(models_from_yaml_list(models_yaml)) model_ids = pd.Series([model.model_id for model in models]) - duplicates = '\n'.join(set(model_ids[model_ids.duplicated()])) + duplicates = "\n".join(set(model_ids[model_ids.duplicated()])) if duplicates: raise ValueError( - 'It appears that the provided PEtab Select model YAML file ' - 'contains multiple models with the same ID. The following ' - f'duplicates were detected: {duplicates}' + "It appears that the provided PEtab Select model YAML file " + "contains multiple models with the same ID. 
The following " + f"duplicates were detected: {duplicates}" ) results = ui.models_to_petab( models, output_path_prefix=output_path_prefix, ) - result_string = '\n'.join( + result_string = "\n".join( [ - '\t'.join([model.model_id, result[PETAB_YAML]]) - for model, result in zip(models, results) + "\t".join([model.model_id, result[PETAB_YAML]]) + for model, result in zip(models, results, strict=False) ] ) print(result_string) @@ -495,53 +496,53 @@ def models_to_petab( @cli.command("get_best") @click.option( - '--problem', - '-p', - 'problem_yaml', + "--problem", + "-p", + "problem_yaml", type=str, - help='The PEtab Select YAML problem file.', + help="The PEtab Select YAML problem file.", ) @click.option( - '--models', - '-m', - 'models_yamls', + "--models", + "-m", + "models_yamls", type=str, multiple=True, - help='A list of calibrated models.', + help="A list of calibrated models.", ) @click.option( - '--output', - '-o', - 'output', + "--output", + "-o", + "output", type=str, - help='The file where the best model will be stored.', + help="The file where the best model will be stored.", ) @click.option( - '--state', - '-s', - 'state_filename', + "--state", + "-s", + "state_filename", type=str, default=None, - help='The file that stores the state.', + help="The file that stores the state.", ) @click.option( - '--criterion', - '-c', - 'criterion', + "--criterion", + "-c", + "criterion", type=str, default=None, - help='The criterion by which models will be compared.', + help="The criterion by which models will be compared.", ) @click.option( - '--relative-paths/--absolute-paths', - 'relative_paths', + "--relative-paths/--absolute-paths", + "relative_paths", type=bool, default=False, - help='Whether to output paths relative to the output file.', + help="Whether to output paths relative to the output file.", ) def get_best( problem_yaml: str, - models_yamls: List[str], + models_yamls: list[str], output: str, state_filename: str = None, criterion: str = None, diff --git a/petab_select/constants.py b/petab_select/constants.py index 607780aa..692e0198 100644 --- a/petab_select/constants.py +++ b/petab_select/constants.py @@ -1,9 +1,10 @@ """Constants for the PEtab Select package.""" + import string import sys from enum import Enum from pathlib import Path -from typing import Dict, List, Literal, Union +from typing import Literal # Zero-indexed column/row indices MODEL_ID_COLUMN = 0 @@ -13,33 +14,31 @@ PARAMETER_DEFINITIONS_START = 2 HEADER_ROW = 0 -PARAMETER_VALUE_DELIMITER = ';' -CODE_DELIMITER = '-' -ESTIMATE = 'estimate' +PARAMETER_VALUE_DELIMITER = ";" +CODE_DELIMITER = "-" +ESTIMATE = "estimate" PETAB_ESTIMATE_FALSE = 0 PETAB_ESTIMATE_TRUE = 1 # TYPING_PATH = Union[str, Path] -TYPE_PATH = Union[str, Path] +TYPE_PATH = str | Path # Model space file columns # TODO ensure none of these occur twice in the column header (this would # suggest that a parameter has a conflicting name) # MODEL_ID = 'modelId' # TODO already defined, reorganize constants # YAML = 'YAML' # FIXME -MODEL_ID = 'model_id' -MODEL_SUBSPACE_ID = 'model_subspace_id' -MODEL_SUBSPACE_INDICES = 'model_subspace_indices' -MODEL_CODE = 'model_code' -MODEL_HASH = 'model_hash' -MODEL_HASHES = 'model_hashes' -MODEL_HASH_DELIMITER = '-' -MODEL_SUBSPACE_INDICES_HASH_DELIMITER = '.' 
+MODEL_ID = "model_id" +MODEL_SUBSPACE_ID = "model_subspace_id" +MODEL_SUBSPACE_INDICES = "model_subspace_indices" +MODEL_CODE = "model_code" +MODEL_HASH = "model_hash" +MODEL_HASHES = "model_hashes" +MODEL_HASH_DELIMITER = "-" +MODEL_SUBSPACE_INDICES_HASH_DELIMITER = "." MODEL_SUBSPACE_INDICES_HASH_MAP = ( # [0-9]+[A-Z]+[a-z] - string.digits - + string.ascii_uppercase - + string.ascii_lowercase + string.digits + string.ascii_uppercase + string.ascii_lowercase ) PETAB_HASH_DIGEST_SIZE = None # If `predecessor_model_hash` is defined for a model, it is the ID of the model that the @@ -47,17 +46,17 @@ # only (optionally) set by the PEtab calibration tool. It is not defined by the # PEtab Select model selection problem (but may be subsequently stored in the # PEtab Select model report format. -PREDECESSOR_MODEL_HASH = 'predecessor_model_hash' -PETAB_PROBLEM = 'petab_problem' -PETAB_YAML = 'petab_yaml' -SBML = 'sbml' -HASH = 'hash' +PREDECESSOR_MODEL_HASH = "predecessor_model_hash" +PETAB_PROBLEM = "petab_problem" +PETAB_YAML = "petab_yaml" +SBML = "sbml" +HASH = "hash" # MODEL_SPACE_FILE_NON_PARAMETER_COLUMNS = [MODEL_ID, PETAB_YAML] MODEL_SPACE_FILE_NON_PARAMETER_COLUMNS = [MODEL_SUBSPACE_ID, PETAB_YAML] # COMPARED_MODEL_ID = 'compared_'+MODEL_ID -YAML_FILENAME = 'yaml' +YAML_FILENAME = "yaml" # DISTANCES = { # FORWARD: { @@ -74,39 +73,39 @@ # }, # } -CRITERIA = 'criteria' +CRITERIA = "criteria" -PARAMETERS = 'parameters' +PARAMETERS = "parameters" # PARAMETER_ESTIMATE = 'parameter_estimate' -ESTIMATED_PARAMETERS = 'estimated_parameters' +ESTIMATED_PARAMETERS = "estimated_parameters" # Problem keys -CRITERION = 'criterion' -METHOD = 'method' -VERSION = 'version' -MODEL_SPACE_FILES = 'model_space_files' -PROBLEM_ID = 'problem_id' - -CANDIDATE_SPACE = 'candidate_space' -CANDIDATE_SPACE_ARGUMENTS = 'candidate_space_arguments' -METHOD_SCHEME = 'method_scheme' -PREVIOUS_METHODS = 'previous_methods' -NEXT_METHOD = 'next_method' -PREDECESSOR_MODEL = 'predecessor_model' - -MODEL = 'model' -MODELS = 'models' -UNCALIBRATED_MODELS = 'uncalibrated_models' -TERMINATE = 'terminate' +CRITERION = "criterion" +METHOD = "method" +VERSION = "version" +MODEL_SPACE_FILES = "model_space_files" +PROBLEM_ID = "problem_id" + +CANDIDATE_SPACE = "candidate_space" +CANDIDATE_SPACE_ARGUMENTS = "candidate_space_arguments" +METHOD_SCHEME = "method_scheme" +PREVIOUS_METHODS = "previous_methods" +NEXT_METHOD = "next_method" +PREDECESSOR_MODEL = "predecessor_model" + +MODEL = "model" +MODELS = "models" +UNCALIBRATED_MODELS = "uncalibrated_models" +TERMINATE = "terminate" # Parameters can be fixed to a value, or estimated if indicated with the string # `ESTIMATE`. -TYPE_PARAMETER = Union[float, int, Literal[ESTIMATE]] -TYPE_PARAMETER_OPTIONS = List[TYPE_PARAMETER] +TYPE_PARAMETER = float | int | Literal[ESTIMATE] +TYPE_PARAMETER_OPTIONS = list[TYPE_PARAMETER] # Parameter ID -> parameter value mapping. -TYPE_PARAMETER_DICT = Dict[str, TYPE_PARAMETER] +TYPE_PARAMETER_DICT = dict[str, TYPE_PARAMETER] # Parameter ID -> multiple possible parameter values. -TYPE_PARAMETER_OPTIONS_DICT = Dict[str, TYPE_PARAMETER_OPTIONS] +TYPE_PARAMETER_OPTIONS_DICT = dict[str, TYPE_PARAMETER_OPTIONS] TYPE_CRITERION = float @@ -115,34 +114,34 @@ class Method(str, Enum): """String literals for model selection methods.""" #: The backward stepwise method. - BACKWARD = 'backward' + BACKWARD = "backward" #: The brute-force method. - BRUTE_FORCE = 'brute_force' + BRUTE_FORCE = "brute_force" #: The FAMoS method. 
- FAMOS = 'famos' + FAMOS = "famos" #: The forward stepwise method. - FORWARD = 'forward' + FORWARD = "forward" #: The lateral, or swap, method. - LATERAL = 'lateral' + LATERAL = "lateral" #: The jump-to-most-distant-model method. - MOST_DISTANT = 'most_distant' + MOST_DISTANT = "most_distant" class Criterion(str, Enum): """String literals for model selection criteria.""" #: The Akaike information criterion. - AIC = 'AIC' + AIC = "AIC" #: The corrected Akaike information criterion. - AICC = 'AICc' + AICC = "AICc" #: The Bayesian information criterion. - BIC = 'BIC' + BIC = "BIC" #: The likelihood. - LH = 'LH' + LH = "LH" #: The log-likelihood. - LLH = 'LLH' + LLH = "LLH" #: The negative log-likelihood. - NLLH = 'NLLH' + NLLH = "NLLH" #: Methods that move through model space by taking steps away from some model. @@ -159,7 +158,7 @@ class Criterion(str, Enum): ] #: Virtual initial models can be used to initialize some initial model methods. -VIRTUAL_INITIAL_MODEL = 'virtual_initial_model' +VIRTUAL_INITIAL_MODEL = "virtual_initial_model" #: Methods that are compatible with a virtual initial model. VIRTUAL_INITIAL_MODEL_METHODS = [ Method.BACKWARD, @@ -170,6 +169,6 @@ class Criterion(str, Enum): __all__ = [ x for x in dir(sys.modules[__name__]) - if not x.startswith('_') - and x not in ('sys', "Enum", "Path", "Dict", "List", "Literal", "Union") + if not x.startswith("_") + and x not in ("sys", "Enum", "Path", "Dict", "List", "Literal", "Union") ] diff --git a/petab_select/criteria.py b/petab_select/criteria.py index a976a0fb..86125009 100644 --- a/petab_select/criteria.py +++ b/petab_select/criteria.py @@ -9,10 +9,10 @@ from .constants import PETAB_PROBLEM, Criterion # LH,; LLH,; NLLH, __all__ = [ - 'calculate_aic', - 'calculate_aicc', - 'calculate_bic', - 'CriterionComputer', + "calculate_aic", + "calculate_aicc", + "calculate_bic", + "CriterionComputer", ] @@ -22,7 +22,7 @@ class CriterionComputer: def __init__( self, - model: 'petab_select.model.Model', + model: "petab_select.model.Model", ): self.model = model self._petab_problem = None @@ -54,7 +54,7 @@ def __call__(self, criterion: Criterion) -> float: Returns: The criterion value. """ - return getattr(self, 'get_' + criterion.value.lower())() + return getattr(self, "get_" + criterion.value.lower())() def get_aic(self) -> float: """Get the Akaike information criterion.""" @@ -109,7 +109,7 @@ def get_lh(self) -> float: return np.exp(-1 * nllh) raise ValueError( - 'Please supply the likelihood (LH) or a compatible transformation. Compatible transformations: log(LH), -log(LH).' + "Please supply the likelihood (LH) or a compatible transformation. Compatible transformations: log(LH), -log(LH)." ) def get_n_estimated(self) -> int: @@ -135,7 +135,7 @@ def get_n_priors(self) -> int: OBJECTIVE_PRIOR_TYPE in df and OBJECTIVE_PRIOR_PARAMETERS in df ): raise NotImplementedError( - 'Currently expect that prior types are specified with prior parameters (no default values). Please provide an example for implementation.' + "Currently expect that prior types are specified with prior parameters (no default values). Please provide an example for implementation." ) # Expect that the number of non-empty values in both objective prior columns @@ -145,7 +145,7 @@ def get_n_priors(self) -> int: == df[OBJECTIVE_PRIOR_PARAMETERS].notna().sum() ): raise NotImplementedError( - 'Some objective prior values are missing.' + "Some objective prior values are missing." 
) number_of_priors = df[OBJECTIVE_PRIOR_TYPE].notna().sum() diff --git a/petab_select/handlers.py b/petab_select/handlers.py index 573acf7d..856e0d46 100644 --- a/petab_select/handlers.py +++ b/petab_select/handlers.py @@ -1,7 +1,7 @@ -from typing import Callable, Union +from collections.abc import Callable # `float` for `np.inf` -TYPE_LIMIT = Union[float, int] +TYPE_LIMIT = float | int # TODO exclusions handler diff --git a/petab_select/misc.py b/petab_select/misc.py index 9417944c..b2e493fb 100644 --- a/petab_select/misc.py +++ b/petab_select/misc.py @@ -1,7 +1,7 @@ import hashlib # import json -from typing import Any, List, Optional, Union +from typing import Any from .constants import ( # TYPE_PARAMETER_OPTIONS_DICT, ESTIMATE, @@ -10,7 +10,7 @@ ) __all__ = [ - 'parameter_string_to_value', + "parameter_string_to_value", ] @@ -30,7 +30,7 @@ def hashify(x: Any, **kwargs) -> str: """ # return int(hashlib.sha256(str(x).encode('utf-8')).hexdigest(), 16) return hashlib.blake2b( - str(x).encode('utf-8'), + str(x).encode("utf-8"), **kwargs, ).hexdigest() @@ -50,7 +50,7 @@ def hash_str(str_: str, **kwargs): return hashify(str_, **kwargs) -def hash_list(list_: List, **kwargs): +def hash_list(list_: list, **kwargs): return hashify(list(list_), **kwargs) @@ -64,8 +64,8 @@ def snake_case_to_camel_case(string: str) -> str: Returns: The string, in camel case. """ - string_pieces = string.split('_') - string_camel = '' + string_pieces = string.split("_") + string_camel = "" for string_piece in string_pieces: string_camel += string_piece[0].upper() + string_piece[1:] return string_camel @@ -74,7 +74,7 @@ def snake_case_to_camel_case(string: str) -> str: def parameter_string_to_value( parameter_string: str, passthrough_estimate: bool = False, -) -> Union[float, int, str]: +) -> float | int | str: """Cast a parameter value from string to numeric. 
Args: @@ -90,7 +90,7 @@ def parameter_string_to_value( if parameter_string == ESTIMATE: if passthrough_estimate: return parameter_string - raise ValueError('Please handle estimated parameters differently.') + raise ValueError("Please handle estimated parameters differently.") float_value = float(parameter_string) int_value = int(float_value) diff --git a/petab_select/model.py b/petab_select/model.py index 573b934f..d1c00c20 100644 --- a/petab_select/model.py +++ b/petab_select/model.py @@ -1,11 +1,11 @@ """The `Model` class.""" + from __future__ import annotations -import string import warnings from os.path import relpath from pathlib import Path -from typing import TYPE_CHECKING, Any, Dict, List, Optional, Tuple, Union +from typing import TYPE_CHECKING, Any import petab.v1 as petab import yaml @@ -24,7 +24,6 @@ MODEL_SUBSPACE_INDICES_HASH_MAP, PARAMETERS, PETAB_ESTIMATE_TRUE, - PETAB_HASH_DIGEST_SIZE, PETAB_PROBLEM, PETAB_YAML, PREDECESSOR_MODEL_HASH, @@ -36,9 +35,6 @@ ) from .criteria import CriterionComputer from .misc import ( - hash_list, - hash_parameter_dict, - hash_str, parameter_string_to_value, ) from .petab import PetabMixin @@ -47,11 +43,11 @@ from .problem import Problem __all__ = [ - 'Model', - 'default_compare', - 'models_from_yaml_list', - 'models_to_yaml_list', - 'ModelHash', + "Model", + "default_compare", + "models_from_yaml_list", + "models_to_yaml_list", + "ModelHash", ] @@ -139,14 +135,14 @@ def __init__( petab_yaml: TYPE_PATH, model_subspace_id: str = None, model_id: str = None, - model_subspace_indices: List[int] = None, + model_subspace_indices: list[int] = None, predecessor_model_hash: str = None, - parameters: Dict[str, Union[int, float]] = None, - estimated_parameters: Dict[str, Union[int, float]] = None, - criteria: Dict[str, float] = None, + parameters: dict[str, int | float] = None, + estimated_parameters: dict[str, int | float] = None, + criteria: dict[str, float] = None, # Optionally provided to reduce repeated parsing of `petab_yaml`. - petab_problem: Optional[petab.Problem] = None, - model_hash: Optional[Any] = None, + petab_problem: petab.Problem | None = None, + model_hash: Any | None = None, ): self.model_id = model_id self.model_subspace_id = model_subspace_id @@ -196,14 +192,15 @@ def set_criterion(self, criterion: Criterion, value: float) -> None: """ if criterion in self.criteria: warnings.warn( - 'Overwriting saved criterion value. ' - f'Criterion: {criterion}. Value: {self.get_criterion(criterion)}.' + "Overwriting saved criterion value. " + f"Criterion: {criterion}. Value: {self.get_criterion(criterion)}.", + stacklevel=2, ) # FIXME debug why value is overwritten during test case 0002. if False: print( - 'Overwriting saved criterion value. ' - f'Criterion: {criterion}. Value: {self.get_criterion(criterion)}.' + "Overwriting saved criterion value. " + f"Criterion: {criterion}. Value: {self.get_criterion(criterion)}." ) breakpoint() self.criteria[criterion] = value @@ -223,7 +220,7 @@ def get_criterion( criterion: Criterion, compute: bool = True, raise_on_failure: bool = True, - ) -> Union[TYPE_CRITERION, None]: + ) -> TYPE_CRITERION | None: """Get a criterion value for the model. Args: @@ -285,7 +282,7 @@ def compute_criterion( def set_estimated_parameters( self, - estimated_parameters: Dict[str, float], + estimated_parameters: dict[str, float], scaled: bool = False, ) -> None: """Set the estimated parameters. 
@@ -306,10 +303,10 @@ def set_estimated_parameters(      @staticmethod     def from_dict( -        model_dict: Dict[str, Any], +        model_dict: dict[str, Any],         base_path: TYPE_PATH = None,         petab_problem: petab.Problem = None, -    ) -> 'Model': +    ) -> Model:         """Generate a model from a dictionary of attributes.          Args: @@ -334,7 +331,9 @@ def from_dict(         unknown_attributes = set(model_dict).difference(Model.converters_load)         if unknown_attributes:             warnings.warn( -                'Ignoring unknown attributes: ' + ', '.join(unknown_attributes) +                "Ignoring unknown attributes: " +                + ", ".join(unknown_attributes), +                stacklevel=2,             )          if base_path is not None: @@ -349,7 +348,7 @@ def from_dict(         return Model(**model_dict)      @staticmethod -    def from_yaml(model_yaml: TYPE_PATH) -> 'Model': +    def from_yaml(model_yaml: TYPE_PATH) -> Model:         """Generate a model from a PEtab Select model YAML file.          Args: @@ -369,10 +368,10 @@ def from_yaml(model_yaml: TYPE_PATH) -> 'Model':             if len(model_dict) <= 1:                 raise             raise ValueError( -                'The provided YAML file contains a list with greater than ' -                'one element. Use the `models_from_yaml_list` method or ' -                'provide a PEtab Select model YAML file with only one ' -                'model specified.' +                "The provided YAML file contains a list with more than " +                "one element. Use the `models_from_yaml_list` method or " +                "provide a PEtab Select model YAML file with only one " +                "model specified."             )         return Model.from_dict(model_dict, base_path=Path(model_yaml).parent)  @@ -380,8 +379,8 @@      def to_dict(         self,         resolve_paths: bool = True, -        paths_relative_to: Union[str, Path] = None, -    ) -> Dict[str, Any]: +        paths_relative_to: str | Path = None, +    ) -> dict[str, Any]:         """Generate a dictionary from the attributes of a :class:`Model` instance.          Args: @@ -429,15 +428,15 @@ def to_yaml(self, petab_yaml: TYPE_PATH, *args, **kwargs) -> None:         # FIXME change `getattr(self, PETAB_YAML)` to be relative to         # destination?         # kind of fixed, as the path will be resolved in `to_dict`. -        with open(petab_yaml, 'w') as f: +        with open(petab_yaml, "w") as f:             yaml.dump(self.to_dict(*args, **kwargs), f)         # yaml.dump(self.to_dict(), str(petab_yaml))      def to_petab(         self,         output_path: TYPE_PATH = None, -        set_estimated_parameters: Optional[bool] = None, -    ) -> Dict[str, Union[petab.Problem, TYPE_PATH]]: +        set_estimated_parameters: bool | None = None, +    ) -> dict[str, petab.Problem | TYPE_PATH]:         """Generate a PEtab problem.          Args: @@ -483,9 +482,9 @@ def to_petab(             # Else the parameter is to be fixed.             else:                 petab_problem.parameter_df.loc[parameter_id, ESTIMATE] = 0 -                petab_problem.parameter_df.loc[ -                    parameter_id, NOMINAL_VALUE -                ] = parameter_string_to_value(parameter_value) +                petab_problem.parameter_df.loc[parameter_id, NOMINAL_VALUE] = ( +                    parameter_string_to_value(parameter_value) +                )                 # parameter_value          petab_yaml = None @@ -520,7 +519,7 @@ def get_hash(self) -> str:      def __hash__(self) -> None:         """Use `Model.get_hash` instead.""" -        raise NotImplementedError('Use `Model.get_hash() instead.`') +        raise NotImplementedError("Use `Model.get_hash()` instead.")      def __str__(self):         """Get a print-ready string representation of the model.          Returns:             The print-ready string representation, in TSV format. 
""" - parameter_ids = '\t'.join(self.parameters.keys()) - parameter_values = '\t'.join(str(v) for v in self.parameters.values()) - header = '\t'.join([MODEL_ID, PETAB_YAML, parameter_ids]) - data = '\t'.join( + parameter_ids = "\t".join(self.parameters.keys()) + parameter_values = "\t".join(str(v) for v in self.parameters.values()) + header = "\t".join([MODEL_ID, PETAB_YAML, parameter_ids]) + data = "\t".join( [self.model_id, str(self.petab_yaml), parameter_values] ) # header = f'{MODEL_ID}\t{PETAB_YAML}\t{parameter_ids}' # data = f'{self.model_id}\t{self.petab_yaml}\t{parameter_values}' - return f'{header}\n{data}' + return f"{header}\n{data}" - def get_mle(self) -> Dict[str, float]: + def get_mle(self) -> dict[str, float]: """Get the maximum likelihood estimate of the model.""" """ FIXME(dilpath) @@ -574,7 +573,7 @@ def get_mle(self) -> Dict[str, float]: # TODO pass - def get_estimated_parameter_ids_all(self) -> List[str]: + def get_estimated_parameter_ids_all(self) -> list[str]: estimated_parameter_ids = [] # Add all estimated parameters in the PEtab problem. @@ -609,8 +608,8 @@ def get_estimated_parameter_ids_all(self) -> List[str]: def get_parameter_values( self, - parameter_ids: Optional[List[str]] = None, - ) -> List[TYPE_PARAMETER]: + parameter_ids: list[str] | None = None, + ) -> list[TYPE_PARAMETER]: """Get parameter values. Includes ``ESTIMATE`` for parameters that should be estimated. @@ -666,14 +665,18 @@ def default_compare( """ if not model1.has_criterion(criterion): warnings.warn( - f'Model "{model1.model_id}" does not provide a value for the criterion "{criterion}".' + f'Model "{model1.model_id}" does not provide a value for the ' + f'criterion "{criterion}".', + stacklevel=2, ) return False if model0 == VIRTUAL_INITIAL_MODEL or model0 is None: return True if criterion_threshold < 0: warnings.warn( - 'The provided criterion threshold is negative. The absolute value will be used instead.' + "The provided criterion threshold is negative. " + "The absolute value will be used instead.", + stacklevel=2, ) criterion_threshold = abs(criterion_threshold) if criterion in [ @@ -695,14 +698,14 @@ def default_compare( > model0.get_criterion(criterion) + criterion_threshold ) else: - raise NotImplementedError(f'Unknown criterion: {criterion}.') + raise NotImplementedError(f"Unknown criterion: {criterion}.") def models_from_yaml_list( model_list_yaml: TYPE_PATH, petab_problem: petab.Problem = None, allow_single_model: bool = True, -) -> List[Model]: +) -> list[Model]: """Generate a model from a PEtab Select list of model YAML file. 
Args: @@ -733,7 +736,7 @@ def models_from_yaml_list( petab_problem=petab_problem, ) ] - raise ValueError('The YAML file does not contain a list of models.') + raise ValueError("The YAML file does not contain a list of models.") return [ Model.from_dict( @@ -770,7 +773,7 @@ def models_to_yaml_list( continue if model == VIRTUAL_INITIAL_MODEL: continue - warnings.warn(f"Unexpected model, skipping: {model}.") + warnings.warn(f"Unexpected model, skipping: {model}.", stacklevel=2) skipped_indices.append(index) models = [ model @@ -785,7 +788,7 @@ def models_to_yaml_list( model.to_dict(paths_relative_to=paths_relative_to) for model in models ] model_dicts = None if not model_dicts else model_dicts - with open(output_yaml, 'w') as f: + with open(output_yaml, "w") as f: yaml.dump(model_dicts, f) @@ -870,8 +873,8 @@ def __getnewargs_ex__(self): return ( (), { - 'model_subspace_id': self.model_subspace_id, - 'model_subspace_indices_hash': self.model_subspace_indices_hash, + "model_subspace_id": self.model_subspace_id, + "model_subspace_indices_hash": self.model_subspace_indices_hash, # 'petab_hash': self.petab_hash, }, ) @@ -923,7 +926,7 @@ def __deepcopy__(self, memo): # ) @staticmethod - def from_hash(model_hash: Union[str, ModelHash]) -> ModelHash: + def from_hash(model_hash: str | ModelHash) -> ModelHash: """Reconstruct a :class:`ModelHash` object. Args: @@ -939,7 +942,7 @@ def from_hash(model_hash: Union[str, ModelHash]) -> ModelHash: if model_hash == VIRTUAL_INITIAL_MODEL: return ModelHash( model_subspace_id=VIRTUAL_INITIAL_MODEL, - model_subspace_indices_hash='', + model_subspace_indices_hash="", # petab_hash=VIRTUAL_INITIAL_MODEL, ) @@ -955,7 +958,7 @@ def from_hash(model_hash: Union[str, ModelHash]) -> ModelHash: ) @staticmethod - def from_model(model: Model) -> "ModelHash": + def from_model(model: Model) -> ModelHash: """Create a hash for a model. Args: @@ -965,8 +968,8 @@ def from_model(model: Model) -> "ModelHash": Returns: The model hash. """ - model_subspace_id = '' - model_subspace_indices_hash = '' + model_subspace_id = "" + model_subspace_indices_hash = "" if model.model_subspace_id is not None: model_subspace_id = model.model_subspace_id model_subspace_indices_hash = ( @@ -993,12 +996,12 @@ def hash_model_subspace_indices(model_subspace_indices: list[int]) -> str: The hash. """ try: - return ''.join( + return "".join( MODEL_SUBSPACE_INDICES_HASH_MAP[index] for index in model_subspace_indices ) except KeyError: - return MODEL_SUBSPACE_INDICES_HAS_HASH_DELIMITER.join( + return MODEL_SUBSPACE_INDICES_HASH_DELIMITER.join( str(i) for i in model_subspace_indices ) diff --git a/petab_select/model_space.py b/petab_select/model_space.py index 2b19c53c..f3237ae9 100644 --- a/petab_select/model_space.py +++ b/petab_select/model_space.py @@ -1,10 +1,12 @@ """The `ModelSpace` class and related methods.""" + import itertools import logging import warnings +from collections.abc import Iterable from pathlib import Path from tempfile import NamedTemporaryFile -from typing import Any, Iterable, List, Optional, TextIO, Union, get_args +from typing import Any, TextIO, get_args import numpy as np import pandas as pd @@ -99,7 +101,7 @@ def line2row( delimiter: str = "\t", unpacked: bool = True, convert_parameters_to_float: bool = True, -) -> List: +) -> list: """Parse a line from a model space file. 
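# Hedged sketch of the decision rule in the `default_compare` hunk earlier in
# this diff: criteria such as AIC/AICc/BIC/NLLH are minimized, so a candidate
# must undercut the predecessor by at least the threshold; likelihood-type
# criteria (LH/LLH) are maximized, so it must exceed the predecessor by the
# threshold. Not the library's exact code, just the comparison it reduces to.
def is_improvement(
    old: float, new: float, threshold: float = 0.0, minimize: bool = True
) -> bool:
    threshold = abs(threshold)  # mirrors the warning about negative thresholds
    if minimize:
        return new < old - threshold
    return new > old + threshold

assert is_improvement(old=100.0, new=97.0, threshold=2.0)  # AIC-like: lower wins
assert not is_improvement(old=-50.0, new=-49.0, threshold=2.0, minimize=False)  # LLH-like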
Args: @@ -137,7 +139,7 @@ class ModelSpace: def __init__( self, - model_subspaces: List[ModelSubspace], + model_subspaces: list[ModelSubspace], ): self.model_subspaces = { model_subspace.model_subspace_id: model_subspace @@ -146,7 +148,7 @@ def __init__( @staticmethod def from_files( - filenames: List[TYPE_PATH], + filenames: list[TYPE_PATH], ): """Create a model space from model space files. @@ -163,7 +165,7 @@ def from_files( ] model_subspaces = [] for model_space_df, model_space_filename in zip( - model_space_dfs, filenames + model_space_dfs, filenames, strict=False ): for model_subspace_id, definition in model_space_df.iterrows(): model_subspaces.append( @@ -218,8 +220,9 @@ def search( """ if candidate_space.limit.reached(): warnings.warn( - 'The candidate space has already reached its limit of accepted models.', + "The candidate space has already reached its limit of accepted models.", RuntimeWarning, + stacklevel=2, ) return candidate_space.models @@ -229,7 +232,7 @@ def search_subspaces(only_one_subspace: bool = False): # ID if only_one_subspace and len(self.model_subspaces) > 1: logging.warning( - f'There is more than one model subspace. This can lead to problems for candidate space {candidate_space}, especially if they have different PEtab YAML files.' + f"There is more than one model subspace. This can lead to problems for candidate space {candidate_space}, especially if they have different PEtab YAML files." ) for model_subspace in self.model_subspaces.values(): model_subspace.search( @@ -239,9 +242,9 @@ def search_subspaces(only_one_subspace: bool = False): break elif len(candidate_space.models) > limit: raise ValueError( - 'An unknown error has occurred. Too many models were ' - f'generated. Requested limit: {limit}. Number of ' - f'generated models: {len(candidate_space.models)}.' + "An unknown error has occurred. Too many models were " + f"generated. Requested limit: {limit}. Number of " + f"generated models: {len(candidate_space.models)}." 
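# The `strict=False` arguments added in the hunks above pin down the behaviour
# that ruff's B905 check ("zip without explicit strict") asks to be explicit
# about: with strict=False (the pre-3.10 default), zip silently truncates to
# the shortest iterable; strict=True raises instead. A quick demonstration:
pairs = list(zip([1, 2, 3], ["a", "b"], strict=False))
assert pairs == [(1, "a"), (2, "b")]  # third element silently dropped

try:
    list(zip([1, 2, 3], ["a", "b"], strict=True))
except ValueError as error:
    print(error)  # e.g. "zip() argument 2 is shorter than argument 1"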
) search_subspaces()  @@ -285,24 +288,24 @@ def exclude_model_hashes(self, model_hashes: Iterable[str]):      def reset_exclusions(         self, -        exclusions: Optional[Union[List[Any], None]] = None, +        exclusions: list[Any] | None = None,     ) -> None:         """Reset the exclusions in the model subspaces."""         for model_subspace in self.model_subspaces.values():             model_subspace.reset_exclusions(exclusions)  -def get_model_space_df(df: Union[TYPE_PATH, pd.DataFrame]) -> pd.DataFrame: +def get_model_space_df(df: TYPE_PATH | pd.DataFrame) -> pd.DataFrame:     # model_space_df = pd.read_csv(filename, sep='\t', index_col=MODEL_SUBSPACE_ID)  # FIXME     if isinstance(df, get_args(TYPE_PATH)): -        df = pd.read_csv(df, sep='\t') +        df = pd.read_csv(df, sep="\t")     if df.index.name != MODEL_SUBSPACE_ID:         df.set_index([MODEL_SUBSPACE_ID], inplace=True)     return df   def write_model_space_df(df: pd.DataFrame, filename: TYPE_PATH) -> None: -    df.to_csv(filename, sep='\t', index=True) +    df.to_csv(filename, sep="\t", index=True)   # def get_model_space( diff --git a/petab_select/model_subspace.py b/petab_select/model_subspace.py index fc7847cd..1f077996 100644 --- a/petab_select/model_subspace.py +++ b/petab_select/model_subspace.py @@ -1,8 +1,9 @@ import math import warnings +from collections.abc import Iterable, Iterator from itertools import product from pathlib import Path -from typing import Any, Dict, Iterable, Iterator, List, Optional, Union +from typing import Any  import numpy as np import pandas as pd @@ -27,7 +28,7 @@ from .petab import PetabMixin  __all__ = [ -    'ModelSubspace', +    "ModelSubspace", ]   @@ -60,7 +61,7 @@ def __init__(         model_subspace_id: str,         petab_yaml: str,         parameters: TYPE_PARAMETER_OPTIONS_DICT, -        exclusions: Optional[Union[List[Any], None]] = None, +        exclusions: list[Any] | None = None,     ):         self.model_subspace_id = model_subspace_id         self.parameters = parameters @@ -95,20 +96,21 @@ def check_compatibility_stepwise_method(             != str(self.petab_yaml.resolve())         ):             warnings.warn( -                'The supplied candidate space is initialized with a model ' -                'that has a different PEtab YAML to this model subspace. ' -                'This is currently not supported for stepwise methods ' -                '(e.g. forward or backward). ' -                f'This model subspace: `{self.model_subspace_id}`. ' -                'This model subspace PEtab YAML: ' -                f'`{self.petab_yaml}`. ' -                'The candidate space PEtab YAML: ' -                f'`{candidate_space.predecessor_model.petab_yaml}`. ' +                "The supplied candidate space is initialized with a model " +                "that has a different PEtab YAML to this model subspace. " +                "This is currently not supported for stepwise methods " +                "(e.g. forward or backward). " +                f"This model subspace: `{self.model_subspace_id}`. " +                "This model subspace PEtab YAML: " +                f"`{self.petab_yaml}`. " +                "The candidate space PEtab YAML: " +                f"`{candidate_space.predecessor_model.petab_yaml}`.", +                stacklevel=2,             )             return False         return True  -    def get_models(self, estimated_parameters: List[str]) -> Iterator[Model]: +    def get_models(self, estimated_parameters: list[str]) -> Iterator[Model]:         """Get models in the subspace by estimated parameters.          All models that have the provided ``estimated_parameters`` are returned. @@ -132,10 +134,10 @@ def get_models(self, estimated_parameters: list[str]) -> Iterator[Model]:         """         if set(estimated_parameters).difference(self.parameters):             raise ValueError( -                'Some parameter IDs were provided that are not in the model ' -                'subspace definition. NB: parameters that are only in the ' -                'PEtab parameters table should not be included here. 
' - f'IDs: {set(estimated_parameters).difference(self.parameters)}' + "Some parameter IDs were provided that are not in the model " + "subspace definition. NB: parameters that are only in the " + "PEtab parameters table should not be included here. " + f"IDs: {set(estimated_parameters).difference(self.parameters)}" ) fixed_parameter_ids = [ parameter_id @@ -149,9 +151,9 @@ def get_models(self, estimated_parameters: List[str]) -> Iterator[Model]: ] if parameters_cannot_be_fixed_error: raise ValueError( - 'Models with the following fixed parameters were requested; ' - 'however, there is no such model in this subspace: ' - f'{parameters_cannot_be_fixed_error}.' + "Models with the following fixed parameters were requested; " + "however, there is no such model in this subspace: " + f"{parameters_cannot_be_fixed_error}." ) # Identify possible values for each of the fixed parameters. fixed_options = [ @@ -168,6 +170,7 @@ def get_models(self, estimated_parameters: List[str]) -> Iterator[Model]: zip( fixed_parameter_ids, fixed_parameter_values, + strict=False, ) ) parameters = { @@ -246,15 +249,13 @@ def continue_searching( # Should already be handled elsewhere (e.g. # `self.check_compatibility_stepwise_method`). raise NotImplementedError( - f'The default parameter set for a candidate space with the virtual initial model and method {candidate_space.method} is not implemented. Please report if this is desired.' + f"The default parameter set for a candidate space with the virtual initial model and method {candidate_space.method} is not implemented. Please report if this is desired." ) else: old_estimated_all = set() old_fixed_all = set() if isinstance(candidate_space.predecessor_model, Model): - old_estimated_all = ( - candidate_space.predecessor_model.get_estimated_parameter_ids_all() - ) + old_estimated_all = candidate_space.predecessor_model.get_estimated_parameter_ids_all() old_fixed_all = [ parameter_id for parameter_id in self.parameters_all @@ -288,7 +289,7 @@ def continue_searching( == new_must_estimate ): raise ValueError( - 'Unexpected error (sets that should be equal are not).' + "Unexpected error (sets that should be equal are not)." ) new_can_estimate_optional = ( set(self.can_estimate) @@ -484,7 +485,9 @@ def continue_searching( elif candidate_space.method == Method.BRUTE_FORCE: # TODO remove list? for parameterization in list(product(*self.parameters.values())): - parameters = dict(zip(self.parameters, parameterization)) + parameters = dict( + zip(self.parameters, parameterization, strict=False) + ) model = self.parameters_to_model(parameters) # Skip models that are excluded. if model is None: @@ -584,15 +587,15 @@ def continue_searching( else: raise NotImplementedError( - 'The requested method is not yet implemented in the model ' - f'subspace interface: `{candidate_space.method}`.' + "The requested method is not yet implemented in the model " + f"subspace interface: `{candidate_space.method}`." ) def send_model_to_candidate_space( self, model: Model, candidate_space: CandidateSpace, - exclude: Optional[bool] = False, + exclude: bool | None = False, # use_exclusions: Optional[bool] = True, ) -> bool: """Send a model to a candidate space for consideration. 
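# How the brute-force branch above enumerates a subspace: the Cartesian
# product of the per-parameter options, with each combination zipped back
# onto the parameter IDs. A self-contained sketch with hypothetical values;
# the real code builds `Model` objects and applies exclusions.
from itertools import product

parameters = {"k1": [0.2], "k2": [0.1, "estimate"], "k3": ["estimate"]}

for parameterization in product(*parameters.values()):
    # The keys and the combination align by construction, so strict=False
    # cannot truncate here; it just satisfies the explicit-strict rule.
    model_parameters = dict(zip(parameters, parameterization, strict=False))
    print(model_parameters)
# {'k1': 0.2, 'k2': 0.1, 'k3': 'estimate'}
# {'k1': 0.2, 'k2': 'estimate', 'k3': 'estimate'}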
@@ -676,7 +679,7 @@ def excluded(      def reset_exclusions(         self,         # TODO change typing with `List[Any]` to some `List[TYPE_MODEL_HASH]` -        exclusions: Optional[Union[List[Any], None]] = None, +        exclusions: list[Any] | None = None,     ):         self.exclusions = set()         if exclusions is not None: @@ -684,8 +687,8 @@      def reset(         self, -        exclusions: Optional[Union[List[Any], None]] = None, -        limit: Optional[int] = None, +        exclusions: list[Any] | None = None, +        limit: int | None = None,     ):         self.reset_exclusions(exclusions=exclusions)         if limit is not None: @@ -694,9 +697,9 @@     @staticmethod     def from_definition(         model_subspace_id: str, -        definition: Union[Dict[str, str], pd.Series], +        definition: dict[str, str] | pd.Series,         parent_path: TYPE_PATH = None, -    ) -> 'ModelSubspace': +    ) -> "ModelSubspace":         """Create a :class:`ModelSubspace` from a definition.          Args: @@ -725,7 +728,7 @@ def from_definition(             parameters=parameters,         )  -    def indices_to_model(self, indices: List[int]) -> Union[Model, None]: +    def indices_to_model(self, indices: list[int]) -> Model | None:         """Get a model from the subspace, by indices of possible parameter values.          Model exclusions are handled here. @@ -753,7 +756,7 @@      def indices_to_parameters(         self, -        indices: List[int], +        indices: list[int],     ) -> TYPE_PARAMETER_DICT:         """Convert parameter indices to values.          Args: @@ -766,7 +769,9 @@         """         parameters = {             parameter_id: self.parameters[parameter_id][index] -            for parameter_id, index in zip(self.parameters, indices) +            for parameter_id, index in zip( +                self.parameters, indices, strict=False +            )         }         return parameters  @@ -782,9 +787,9 @@ def parameters_to_indices(self, parameters: TYPE_PARAMETER_DICT):         """         if set(self.parameters).symmetric_difference(parameters):             raise ValueError( -                'Parameter IDs differ between the stored and provided ' -                'values: ' -                f'{set(self.parameters).symmetric_difference(parameters)}' +                "Parameter IDs differ between the stored and provided " +                "values: " +                f"{set(self.parameters).symmetric_difference(parameters)}"             )         indices = []         for parameter_id, parameter_values in self.parameters.items():             try:                 index = parameter_values.index(parameters[parameter_id])             except ValueError:                 raise ValueError( -                    f'The following value for the parameter {parameter_id} is ' -                    f'not in the model subspace: {parameters[parameter_id]}.' +                    f"The following value for the parameter {parameter_id} is " +                    f"not in the model subspace: {parameters[parameter_id]}."                 )             indices.append(index)         return indices  @@ -801,7 +806,7 @@     def parameters_to_model(         self,         parameters: TYPE_PARAMETER_DICT, -    ) -> Union[Model, None]: +    ) -> Model | None:         """Convert parameter values to a model.          Args: @@ -826,7 +831,7 @@ def parameters_all(self) -> TYPE_PARAMETER_DICT:         return {**self.petab_parameters, **self.parameters}      @property -    def can_fix(self) -> List[str]: +    def can_fix(self) -> list[str]:         """Parameters that can be fixed, according to the subspace.          Parameters that are fixed as part of the PEtab problem are not @@ -843,7 +848,7 @@     @property -    def can_estimate(self) -> List[str]: +    def can_estimate(self) -> list[str]:         """Parameters that can be estimated, according to the subspace. 
Parameters that are estimated as part of the PEtab problem are not @@ -856,7 +861,7 @@ def can_estimate(self) -> List[str]: ] @property - def can_estimate_all(self) -> List[str]: + def can_estimate_all(self) -> list[str]: """All parameters than can be estimated in this subspace.""" return [ parameter_id @@ -865,7 +870,7 @@ def can_estimate_all(self) -> List[str]: ] @property - def must_fix(self) -> List[str]: + def must_fix(self) -> list[str]: """Subspace parameters that must be fixed. Parameters that are fixed as part of the PEtab problem are not @@ -878,7 +883,7 @@ def must_fix(self) -> List[str]: ] @property - def must_fix_all(self) -> List[str]: + def must_fix_all(self) -> list[str]: """All parameters that must be fixed in this subspace.""" return [ parameter_id @@ -887,7 +892,7 @@ def must_fix_all(self) -> List[str]: ] @property - def must_estimate(self) -> List[str]: + def must_estimate(self) -> list[str]: """Subspace parameters that must be estimated. Does not include parameters that are estimated in the PEtab @@ -900,7 +905,7 @@ def must_estimate(self) -> List[str]: ] @property - def must_estimate_all(self) -> List[str]: + def must_estimate_all(self) -> list[str]: """All parameters that must be estimated in this subspace.""" must_estimate_petab = [ parameter_id @@ -911,8 +916,8 @@ def must_estimate_all(self) -> List[str]: def get_estimated( self, - additional_parameters: Optional[TYPE_PARAMETER_DICT] = None, - ) -> List[str]: + additional_parameters: TYPE_PARAMETER_DICT | None = None, + ) -> list[str]: """Get the IDs of parameters that are estimated. Args: @@ -924,40 +929,42 @@ def get_estimated( The parameter IDs. """ raise NotImplementedError - parameters = [] - for parameter_id, parameter_value in self.parameters_all.items(): - if additional_parameters.get(parameter_id, None) == ESTIMATE: - parameters.append(parameter_id) - continue - if parameter_id in additional_parameters: - # Presumably not estimated. - continue - - old_estimated_all = { - parameter_id - for parameter_id, parameter_values in self.parameters_all.items() - if ( - # Predecessor model sets the parameter to be estimated - ( - candidate_space.predecessor_model.parameters.get( - parameter_id, None - ) - == ESTIMATE - ) - or ( - # Predecessor model takes the default PEtab parameter - parameter_id - not in candidate_space.predecessor_model.parameters - and - # And the default PEtab parameter is estimated - # The PEtab problem of this subspace and the - # `candidate_space` is the same, as verified earlier with - # `self.check_compatibility_stepwise_method`. - self.petab_parameters[parameter_id] == [ESTIMATE] - ) - ) - } + # parameters = [] + # for parameter_id, parameter_value in self.parameters_all.items(): + # if additional_parameters.get(parameter_id, None) == ESTIMATE: + # parameters.append(parameter_id) + # continue + # + # if parameter_id in additional_parameters: + # # Presumably not estimated. 
+ # continue + # + # old_estimated_all = { + # parameter_id + # for parameter_id, parameter_values in self.parameters_all.items() + # if + # ( + # # Predecessor model sets the parameter to be estimated + # ( + # candidate_space.predecessor_model.parameters.get( + # parameter_id, None + # ) + # == ESTIMATE + # ) + # or ( + # # Predecessor model takes the default PEtab parameter + # parameter_id + # not in candidate_space.predecessor_model.parameters + # and + # # And the default PEtab parameter is estimated + # # The PEtab problem of this subspace and the + # # `candidate_space` is the same, as verified earlier with + # # `self.check_compatibility_stepwise_method`. + # self.petab_parameters[parameter_id] == [ESTIMATE] + # ) + # ) + # } def __len__(self) -> int: """Get the number of models in this subspace.""" @@ -967,7 +974,7 @@ def __len__(self) -> int: def decompress_parameter_values( - values: Union[float, int, str], + values: float | int | str, ) -> TYPE_PARAMETER_OPTIONS: """Decompress parameter values. @@ -980,7 +987,7 @@ def decompress_parameter_values( Returns: Parameter values, decompressed into a list. """ - if isinstance(values, (float, int)): + if isinstance(values, float | int): return [values] parameter_strings = list(values.split(PARAMETER_VALUE_DELIMITER)) diff --git a/petab_select/petab.py b/petab_select/petab.py index d121d4d6..8d370c8e 100644 --- a/petab_select/petab.py +++ b/petab_select/petab.py @@ -1,5 +1,4 @@ from pathlib import Path -from typing import List, Optional import petab.v1 as petab from more_itertools import one @@ -26,14 +25,14 @@ class PetabMixin: def __init__( self, - petab_yaml: Optional[TYPE_PATH] = None, - petab_problem: Optional[petab.Problem] = None, + petab_yaml: TYPE_PATH | None = None, + petab_problem: petab.Problem | None = None, parameters_as_lists: bool = False, ): if petab_yaml is None and petab_problem is None: raise ValueError( - 'Please supply at least one of either the location of the ' - 'PEtab problem YAML file, or an instance of the PEtab problem.' + "Please supply at least one of either the location of the " + "PEtab problem YAML file, or an instance of the PEtab problem." ) self.petab_yaml = petab_yaml if self.petab_yaml is not None: @@ -57,7 +56,7 @@ def __init__( } @property - def petab_parameter_ids_estimated(self) -> List[str]: + def petab_parameter_ids_estimated(self) -> list[str]: """Get the IDs of all estimated parameters. Returns: @@ -70,7 +69,7 @@ def petab_parameter_ids_estimated(self) -> List[str]: ] @property - def petab_parameter_ids_fixed(self) -> List[str]: + def petab_parameter_ids_fixed(self) -> list[str]: """Get the IDs of all fixed parameters. 
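# `isinstance(values, float | int)` in `decompress_parameter_values` above
# relies on PEP 604 unions being accepted by isinstance since Python 3.10
# (pyupgrade's UP038 rule rewrites the old `(float, int)` tuple form). A
# minimal sketch; the delimiter value here is assumed for illustration.
def decompress(values: float | int | str, delimiter: str = ";") -> list:
    if isinstance(values, float | int):  # equivalent to isinstance(values, (float, int))
        return [values]
    return values.split(delimiter)

assert decompress(0.1) == [0.1]
assert decompress("0;0.1;estimate") == ["0", "0.1", "estimate"]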
Returns: diff --git a/petab_select/problem.py b/petab_select/problem.py index c4829598..c7e20146 100644 --- a/petab_select/problem.py +++ b/petab_select/problem.py @@ -1,8 +1,9 @@ """The model selection problem class.""" -import abc + +from collections.abc import Callable, Iterable from functools import partial from pathlib import Path -from typing import Any, Callable, Dict, Iterable, Optional, Union +from typing import Any  import yaml @@ -22,7 +23,7 @@ from .model_space import ModelSpace  __all__ = [ -    'Problem', +    "Problem", ] @@ -69,13 +70,13 @@ class Problem:     def __init__(         self,         model_space: ModelSpace, -        candidate_space_arguments: Dict[str, Any] = None, +        candidate_space_arguments: dict[str, Any] = None,         compare: Callable[[Model, Model], bool] = None,         criterion: Criterion = None,         problem_id: str = None,         method: str = None,         version: str = None, -        yaml_path: Union[Path, str] = None, +        yaml_path: Path | str = None,     ):         self.model_space = model_space         self.criterion = criterion @@ -100,7 +101,7 @@ def __str__(self):             f"Version: {self.version}\n"         )  -    def get_path(self, relative_path: Union[str, Path]) -> Path: +    def get_path(self, relative_path: str | Path) -> Path:         """Get the path to a resource, from a relative path.          Args: @@ -145,8 +146,8 @@ def exclude_model_hashes(      @staticmethod     def from_yaml( -        yaml_path: Union[str, Path], -    ) -> 'Problem': +        yaml_path: str | Path, +    ) -> "Problem":         """Generate a problem from a PEtab Select problem YAML file.          Args: @@ -157,13 +158,13 @@ A `Problem` instance.         """         yaml_path = Path(yaml_path) -        with open(yaml_path, 'r') as f: +        with open(yaml_path) as f:             problem_specification = yaml.safe_load(f)          if not problem_specification.get(MODEL_SPACE_FILES, []):             raise KeyError( -                'The model selection problem specification file is missing ' -                'model space files.' +                "The model selection problem specification file is missing " +                "model space files."             )          model_space = ModelSpace.from_files( @@ -211,8 +212,8 @@      def get_best(         self, -        models: Optional[Union[list[Model], dict[ModelHash, Model]]], -        criterion: Optional[Union[str, None]] = None, +        models: list[Model] | dict[ModelHash, Model] | None, +        criterion: str | None = None,         compute_criterion: bool = False,     ) -> Model:         """Get the best model from a collection of models. @@ -252,11 +253,11 @@             best_model = model         if best_model is None:             raise KeyError( -                f'None of the supplied models have a value set for the criterion {criterion}.' +                f"None of the supplied models have a value set for the criterion {criterion}."             )         return best_model  -    def model_hash_to_model(self, model_hash: Union[str, ModelHash]) -> Model: +    def model_hash_to_model(self, model_hash: str | ModelHash) -> Model:         """Get the model that matches a model hash. 
Args: diff --git a/petab_select/ui.py b/petab_select/ui.py index 6797bace..2fb73132 100644 --- a/petab_select/ui.py +++ b/petab_select/ui.py @@ -1,7 +1,6 @@ import copy -import logging from pathlib import Path -from typing import Any, Dict, List, Optional, Union +from typing import Any  import numpy as np import petab.v1 as petab @@ -23,12 +22,12 @@ from .problem import Problem  __all__ = [ -    'start_iteration', -    'end_iteration', -    'model_to_petab', -    'models_to_petab', -    'get_best', -    'write_summary_tsv', +    "start_iteration", +    "end_iteration", +    "model_to_petab", +    "models_to_petab", +    "get_best", +    "write_summary_tsv", ]  @@ -42,14 +41,12 @@ def get_iteration(candidate_space: CandidateSpace) -> dict[str, Any]:  def start_iteration(     problem: Problem, -    candidate_space: Optional[CandidateSpace] = None, -    limit: Union[float, int] = np.inf, -    limit_sent: Union[float, int] = np.inf, -    excluded_hashes: Optional[list[ModelHash]] = None, -    criterion: Optional[Criterion] = None, -    user_calibrated_models: Optional[ -        Union[list[Model], dict[ModelHash, Model]] -    ] = None, +    candidate_space: CandidateSpace | None = None, +    limit: float | int = np.inf, +    limit_sent: float | int = np.inf, +    excluded_hashes: list[ModelHash] | None = None, +    criterion: Criterion | None = None, +    user_calibrated_models: list[Model] | dict[ModelHash, Model] | None = None, ) -> CandidateSpace:     """Search the model space for candidate models.      Args: @@ -93,7 +90,6 @@         :const:`petab_select.constants.MODELS`: The uncalibrated models of the         current iteration.     """ -    """     FIXME(dilpath)      - currently takes predecessor model from @@ -288,8 +284,8 @@ def end_iteration(  def model_to_petab(     model: Model, -    output_path: Optional[TYPE_PATH] = None, -) -> Dict[str, Union[petab.Problem, TYPE_PATH]]: +    output_path: TYPE_PATH | None = None, +) -> dict[str, petab.Problem | TYPE_PATH]:     """Generate the PEtab problem for a model.      Args: @@ -306,9 +302,9 @@  def models_to_petab( -    models: List[Model], -    output_path_prefix: Optional[List[TYPE_PATH]] = None, -) -> List[Dict[str, Union[petab.Problem, TYPE_PATH]]]: +    models: list[Model], +    output_path_prefix: list[TYPE_PATH] | None = None, +) -> list[dict[str, petab.Problem | TYPE_PATH]]:     """Generate the PEtab problems for a list of models.      Args: @@ -332,8 +328,8 @@  def get_best(     problem: Problem, -    models: List[Model], -    criterion: Optional[Union[str, None]] = None, +    models: list[Model], +    criterion: str | None = None, ) -> Model:     """Get the best model from a list of models. 
@@ -355,9 +351,9 @@ def get_best( def write_summary_tsv( problem: Problem, - candidate_space: Optional[CandidateSpace] = None, - previous_predecessor_model: Optional[Union[str, Model]] = None, - predecessor_model: Optional[Model] = None, + candidate_space: CandidateSpace | None = None, + previous_predecessor_model: str | Model | None = None, + predecessor_model: Model | None = None, ) -> None: if candidate_space.summary_tsv is None: return @@ -406,7 +402,7 @@ def write_summary_tsv( and isinstance(candidate_space.predecessor_model, Model) and candidate_space.predecessor_model.predecessor_model_hash is None ): - with open(candidate_space.summary_tsv, 'r') as f: + with open(candidate_space.summary_tsv) as f: if sum(1 for _ in f) > 1: method = Method.MOST_DISTANT diff --git a/petab_select/version.py b/petab_select/version.py index 6312a57b..86d0b954 100644 --- a/petab_select/version.py +++ b/petab_select/version.py @@ -1,2 +1,3 @@ """Version of the model selection extension for PEtab.""" -__version__ = '0.1.13' + +__version__ = "0.1.13" diff --git a/pyproject.toml b/pyproject.toml index 50c68ba9..c4cf16c5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,16 +1,97 @@ [build-system] -requires = [ - "setuptools>=42", - "wheel" -] +requires = ["setuptools>=64", "setuptools-scm>=8"] build-backend = "setuptools.build_meta" -[tool.black] -line-length = 79 -target-version = ['py37', 'py38', 'py39'] -skip-string-normalization = true +[project] +name = "petab_select" +dynamic = ["version"] +maintainers = [ + {name = "Dilan Pathirana", email = "dilan.pathirana@uni-bonn.de"}, +] +authors = [ + {name = "The PEtab Select developers"}, +] +description = "PEtab Select: an extension to PEtab for model selection." +readme = "README.md" +requires-python = ">=3.10" +license = {text = "BSD-3-Clause"} +dependencies = [ + # TODO minimum versions + "more-itertools", + "numpy", + "pandas", + "petab", + "pyyaml", + "click", + "dill", + # TODO: move to [vis]? not required for cli-only usage?! + "matplotlib>=2.2.3", + "toposort", + "networkx", +] +[project.optional-dependencies] +test = [ + "pytest >= 5.4.3", + "pytest-cov >= 2.10.0", + "amici >= 0.11.25", + "fides >= 0.7.5", + "pypesto > 0.2.13", + "tox >= 3.12.4", +] +doc = [ + "sphinx>=3.5.3,<7", + "sphinxcontrib-napoleon>=0.7", + "sphinx-markdown-tables>=0.0.15", + "sphinx-rtd-theme>=0.5.1", + "recommonmark>=0.7.1", + # pin until ubuntu comes with newer pandoc: + # /home/docs/checkouts/readthedocs.org/user_builds/petab-select/envs/63/lib/python3.11/site-packages/nbsphinx/__init__.py:1058: RuntimeWarning: You are using an unsupported version of pandoc (2.9.2.1). + # Your version must be at least (2.14.2) but less than (4.0.0). 
+    "nbsphinx==0.9.1", +    "nbconvert<7.5.0", +    "ipython>=7.21.0", +    "readthedocs-sphinx-ext>=2.2.5", +    "sphinx-autodoc-typehints", +] + +[project.scripts] +petab_select = "petab_select.cli:cli" + +[tool.setuptools_scm]  -[tool.isort] -profile = "black" -line_length = 79 -multi_line_output = 3 +[tool.ruff] +line-length = 79 +exclude = ["amici_models"] +extend-include = ["*.ipynb"] +lint.ignore = [ +    # FIXME: we should be able to remove most of these +    "D103", # Missing docstring in public function +    "S101", # Use of assert detected +    "E501", # Line too long +    "F403", # star import +    "F821", # undefined name +    "T201", # print statement +    "S301", # pickle module used +    "S102", # Use of exec detected +    "S307", # Use of possibly insecure function +    "B006", +    "E722", +    "B904", +    "B007", +    "F841", +] +lint.select = [ +    "F",  # Pyflakes +    "I",  # isort +    # "D",  # pydocstyle (PEP 257) FIXME enable after https://github.com/PEtab-dev/petab_select/pull/67 +    "S",  # flake8-bandit +    "B",  # flake8-bugbear +    "C4",  # flake8-comprehensions +    "T20",  # flake8-print +    "W",  # pycodestyle Warnings +    "E",  # pycodestyle Errors +    "UP",  # pyupgrade +    # "ANN",  # flakes-annotations +] +[tool.ruff.lint.pydocstyle] +convention = "pep257" diff --git a/requirements_dev.txt b/requirements_dev.txt index cb8ddec6..ee8cc8cf 100644 --- a/requirements_dev.txt +++ b/requirements_dev.txt @@ -1,3 +1,4 @@ +git+https://github.com/ICB-DCM/pyPESTO.git@develop#egg=pypesto tox >= 3.12.4 pre-commit >= 2.10.1 flake8 >= 4.0.1 diff --git a/setup.py b/setup.py index b33526c8..64091987 100644 --- a/setup.py +++ b/setup.py @@ -1,11 +1,9 @@ -import os import re -import sys  from setuptools import find_packages, setup  -org = 'PEtab-dev' -repo = 'petab_select' +org = "PEtab-dev" +repo = "petab_select"   def read(fname): @@ -15,12 +13,11 @@ def absolute_links(txt):     """Replace relative petab github links by absolute links.""" -     raw_base = f"(https://raw.githubusercontent.com/{org}/{repo}/main/"     embedded_base = f"(https://github.com/{org}/{repo}/tree/main/"     # iterate over links -    for var in re.findall(r'\[.*?\]\((?!http).*?\)', txt): -        if re.match(r'.*?.(png|svg)\)', var): +    for var in re.findall(r"\[.*?\]\((?!http).*?\)", txt): +        if re.match(r".*?.(png|svg)\)", var):             # link to raw file             rep = var.replace("(", raw_base)         else: @@ -30,80 +27,12 @@     return txt  -minimum_python_version = '3.10' -if sys.version_info < tuple(map(int, minimum_python_version.split('.'))): -    sys.exit(f'PEtab Select requires Python >= {minimum_python_version}')  -# read version from file -__version__ = '' -version_file = os.path.join('petab_select', 'version.py') -# sets __version__ -exec(read(version_file))  # pylint: disable=W0122  # nosec  -ENTRY_POINTS = { -    'console_scripts': [ -        'petab_select = petab_select.cli:cli', -    ] -}  # project metadata # noinspection PyUnresolvedReferences setup( -    name='petab_select', -    version=__version__, -    description='PEtab Select: an extension to PEtab for model selection.', -    long_description=absolute_links(read('README.md')), +    long_description=absolute_links(read("README.md")),     long_description_content_type="text/markdown", -    # author='The PEtab Select developers', -    # author_email='dilan.pathirana@uni-bonn.de', -    url=f'https://github.com/{org}/{repo}', -    packages=find_packages(exclude=['doc*', 'test*']), -    install_requires=[ -        # TODO minimum versions -        'more-itertools', -        'numpy', -        'pandas', -        'petab', -        'pyyaml', -        #'python-libsbml>=5.17.0', -        #'sympy', -        # required for CLI -        'click', -        'dill', -        # plot 
- 'matplotlib>=2.2.3', - #'seaborn', - 'toposort', - ], + url=f"https://github.com/{org}/{repo}", + packages=find_packages(exclude=["doc*", "test*"]), include_package_data=True, - python_requires=f'>={minimum_python_version}', - entry_points=ENTRY_POINTS, - extras_require={ - 'test': [ - 'pytest >= 5.4.3', - 'pytest-cov >= 2.10.0', - 'amici >= 0.11.25', - 'fides >= 0.7.5', - # FIXME - # 'pypesto > 0.2.13', - 'pypesto @ git+https://github.com/ICB-DCM/pyPESTO.git@select_use_old_calibrations#egg=pypesto', - 'tox >= 3.12.4', - 'flake8 >= 4.0.1', - ], - 'doc': [ - 'sphinx>=3.5.3,<7', - 'sphinxcontrib-napoleon>=0.7', - 'sphinx-markdown-tables>=0.0.15', - 'sphinx-rtd-theme>=0.5.1', - 'recommonmark>=0.7.1', - # pin until ubuntu comes with newer pandoc: - # /home/docs/checkouts/readthedocs.org/user_builds/petab-select/envs/63/lib/python3.11/site-packages/nbsphinx/__init__.py:1058: RuntimeWarning: You are using an unsupported version of pandoc (2.9.2.1). - # Your version must be at least (2.14.2) but less than (4.0.0). - 'nbsphinx==0.9.1', - 'nbconvert<7.5.0', - 'ipython>=7.21.0', - 'readthedocs-sphinx-ext>=2.2.5', - 'sphinx-autodoc-typehints', - ], - }, ) diff --git a/test/candidate_space/test_candidate_space.py b/test/candidate_space/test_candidate_space.py index 523f42d7..970a3e00 100644 --- a/test/candidate_space/test_candidate_space.py +++ b/test/candidate_space/test_candidate_space.py @@ -2,9 +2,6 @@ import pandas as pd import pytest -from more_itertools import one - -import petab_select # from petab_select.candidate_space import ( # BackwardCandidateSpace, @@ -14,39 +11,32 @@ # ) from petab_select.constants import ( ESTIMATE, - MODEL_SUBSPACE_ID, - MODELS, - PARAMETER_VALUE_DELIMITER, - PETAB_YAML, - Criterion, ) -from petab_select.model import Model, default_compare from petab_select.model_space import ModelSpace, get_model_space_df -from petab_select.model_subspace import ModelSubspace @pytest.fixture def ordered_model_parameterizations(): good_models_ascending = [ # forward - '00000', - '10000', - '11000', - '11100', - '11110', + "00000", + "10000", + "11000", + "11100", + "11110", # backward - '01110', - '01100', + "01110", + "01100", # forward - '01101', - '01111', + "01101", + "01111", # backward - '00111', - '00011', + "00111", + "00011", ] bad_models = [ - '01011', - '11011', + "01011", + "11011", ] # All good models are unique @@ -94,14 +84,14 @@ def model_space(calibrated_model_space) -> pd.DataFrame: data["model_subspace_id"].append(f"model_subspace_{model}") data["petab_yaml"].append( Path(__file__).parent.parent.parent - / 'doc' - / 'examples' - / 'model_selection' - / 'petab_problem.yaml' + / "doc" + / "examples" + / "model_selection" + / "petab_problem.yaml" + ) + k1, k2, k3, k4, k5 = ( + "0" if parameter == "0" else ESTIMATE for parameter in model ) - k1, k2, k3, k4, k5 = [ - '0' if parameter == '0' else ESTIMATE for parameter in model - ] data["k1"].append(k1) data["k2"].append(k2) data["k3"].append(k3) diff --git a/test/candidate_space/test_famos.py b/test/candidate_space/test_famos.py index 1076bcc7..e7cf12e7 100644 --- a/test/candidate_space/test_famos.py +++ b/test/candidate_space/test_famos.py @@ -1,13 +1,11 @@ from pathlib import Path -from typing import Tuple -import numpy as np import pandas as pd import pytest from more_itertools import one import petab_select -from petab_select import ESTIMATE, FamosCandidateSpace, Method, Model +from petab_select import Method from petab_select.constants import ( CANDIDATE_SPACE, MODEL_HASH, @@ -16,7 +14,6 @@ UNCALIBRATED_MODELS, 
Criterion, ) -from petab_select.model import default_compare @pytest.fixture @@ -99,9 +96,9 @@ def calibrate( value=expected_criterion_values[model.get_hash()], ) - def parse_summary_to_progress_list(summary_tsv: str) -> Tuple[Method, set]: + def parse_summary_to_progress_list(summary_tsv: str) -> tuple[Method, set]: """Get progress information from the summary file.""" - df_raw = pd.read_csv(summary_tsv, sep='\t') + df_raw = pd.read_csv(summary_tsv, sep="\t") df = df_raw.loc[~pd.isnull(df_raw["predecessor change"])] parameter_list = list( @@ -136,9 +133,10 @@ def parse_summary_to_progress_list(summary_tsv: str) -> Tuple[Method, set]: candidate_space.summary_tsv.unlink(missing_ok=True) candidate_space._setup_summary_tsv() - with pytest.raises( - StopIteration, match="No valid models found." - ), pytest.warns(RuntimeWarning) as warning_record: + with ( + pytest.raises(StopIteration, match="No valid models found."), + pytest.warns(RuntimeWarning) as warning_record, + ): while True: # Initialize iteration iteration = petab_select.ui.start_iteration( diff --git a/test/cli/test_cli.py b/test/cli/test_cli.py index c01982d3..0a4dc34d 100644 --- a/test/cli/test_cli.py +++ b/test/cli/test_cli.py @@ -12,22 +12,22 @@ @pytest.fixture def output_path() -> Path: - return base_dir / 'output' + return base_dir / "output" @pytest.fixture def expected_output_path() -> Path: - return base_dir / 'expected_output' + return base_dir / "expected_output" @pytest.fixture def model_yaml() -> Path: - return base_dir / 'input' / 'model.yaml' + return base_dir / "input" / "model.yaml" @pytest.fixture def models_yaml() -> Path: - return base_dir / 'input' / 'models.yaml' + return base_dir / "input" / "models.yaml" @pytest.fixture @@ -42,15 +42,15 @@ def test_model_to_petab( cli_runner, ) -> None: """Test conversion of a model to PEtab problem files.""" - output_path_model = output_path / 'model' + output_path_model = output_path / "model" output_path_model.mkdir(parents=True, exist_ok=True) result = cli_runner.invoke( petab_select.cli.model_to_petab, [ - '--model', + "--model", model_yaml, - '--output', + "--output", output_path_model, ], ) @@ -62,18 +62,18 @@ def test_model_to_petab( ) comparison = filecmp.dircmp( - expected_output_path / 'model', + expected_output_path / "model", output_path_model, ) # The PEtab problem files are as expected. assert not comparison.diff_files assert sorted(comparison.same_files) == [ - 'conditions.tsv', - 'measurements.tsv', - 'model.xml', - 'observables.tsv', - 'parameters.tsv', - 'problem.yaml', + "conditions.tsv", + "measurements.tsv", + "model.xml", + "observables.tsv", + "parameters.tsv", + "problem.yaml", ] @@ -84,15 +84,15 @@ def test_models_to_petab( cli_runner, ) -> None: """Test conversion of multiple models to PEtab problem files.""" - output_path_models = output_path / 'models' + output_path_models = output_path / "models" output_path_models.mkdir(parents=True, exist_ok=True) result = cli_runner.invoke( petab_select.cli.models_to_petab, [ - '--models', + "--models", models_yaml, - '--output', + "--output", output_path_models, ], ) @@ -105,46 +105,46 @@ def test_models_to_petab( ) comparison = filecmp.dircmp( - expected_output_path / 'models' / 'model_1', - output_path_models / 'model_1', + expected_output_path / "models" / "model_1", + output_path_models / "model_1", ) # The first set of PEtab problem files are as expected. 
assert not comparison.diff_files assert sorted(comparison.same_files) == [ - 'conditions.tsv', - 'measurements.tsv', - 'model.xml', - 'observables.tsv', - 'parameters.tsv', - 'problem.yaml', + "conditions.tsv", + "measurements.tsv", + "model.xml", + "observables.tsv", + "parameters.tsv", + "problem.yaml", ] comparison = filecmp.dircmp( - expected_output_path / 'models' / 'model_2', - output_path_models / 'model_2', + expected_output_path / "models" / "model_2", + output_path_models / "model_2", ) # The second set of PEtab problem files are as expected. assert not comparison.diff_files assert sorted(comparison.same_files) == [ - 'conditions.tsv', - 'measurements.tsv', - 'model.xml', - 'observables.tsv', - 'parameters.tsv', - 'problem.yaml', + "conditions.tsv", + "measurements.tsv", + "model.xml", + "observables.tsv", + "parameters.tsv", + "problem.yaml", ] comparison = filecmp.dircmp( - output_path_models / 'model_1', - output_path_models / 'model_2', + output_path_models / "model_1", + output_path_models / "model_2", ) # The first and second set of PEtab problems only differ in their # parameters table and nowhere else. - assert comparison.diff_files == ['parameters.tsv'] + assert comparison.diff_files == ["parameters.tsv"] assert sorted(comparison.same_files) == [ - 'conditions.tsv', - 'measurements.tsv', - 'model.xml', - 'observables.tsv', - 'problem.yaml', + "conditions.tsv", + "measurements.tsv", + "model.xml", + "observables.tsv", + "problem.yaml", ] diff --git a/test/model/test_model.py b/test/model/test_model.py index 1717b4d2..ffe0956d 100644 --- a/test/model/test_model.py +++ b/test/model/test_model.py @@ -10,22 +10,22 @@ @pytest.fixture def output_path() -> Path: - return base_dir / 'output' + return base_dir / "output" @pytest.fixture def expected_output_path() -> Path: - return base_dir / 'expected_output' + return base_dir / "expected_output" @pytest.fixture def model() -> Model: - return Model.from_yaml(base_dir / 'input' / 'model.yaml') + return Model.from_yaml(base_dir / "input" / "model.yaml") def test_model_to_petab(model, output_path, expected_output_path) -> None: """Test conversion of a model to a PEtab problem and files.""" - output_path_petab = output_path / 'petab' + output_path_petab = output_path / "petab" output_path_petab.mkdir(parents=True, exist_ok=True) # TODO test `petab_problem`? Shouldn't be necessary since the generated # files are tested below. @@ -34,16 +34,16 @@ def test_model_to_petab(model, output_path, expected_output_path) -> None: ) comparison = filecmp.dircmp( - expected_output_path / 'petab', + expected_output_path / "petab", output_path_petab, ) # The PEtab problem files are as expected. 
assert not comparison.diff_files assert sorted(comparison.same_files) == [ - 'conditions.tsv', - 'measurements.tsv', - 'model.xml', - 'observables.tsv', - 'parameters.tsv', - 'problem.yaml', + "conditions.tsv", + "measurements.tsv", + "model.xml", + "observables.tsv", + "parameters.tsv", + "problem.yaml", ] diff --git a/test/model_space/test_model_space.py b/test/model_space/test_model_space.py index a9260646..ace5560e 100644 --- a/test/model_space/test_model_space.py +++ b/test/model_space/test_model_space.py @@ -1,33 +1,26 @@ from pathlib import Path -from typing import List -import pandas as pd import pytest from petab_select.candidate_space import ( BackwardCandidateSpace, BruteForceCandidateSpace, ForwardCandidateSpace, - LateralCandidateSpace, ) from petab_select.constants import ( ESTIMATE, - MODEL_SUBSPACE_ID, - PARAMETER_VALUE_DELIMITER, - PETAB_YAML, Criterion, ) -from petab_select.model import Model from petab_select.model_space import ModelSpace base_dir = Path(__file__).parent @pytest.fixture -def model_space_files() -> List[Path]: +def model_space_files() -> list[Path]: return [ - base_dir / 'model_space_file_1.tsv', - base_dir / 'model_space_file_2.tsv', + base_dir / "model_space_file_1.tsv", + base_dir / "model_space_file_2.tsv", ] @@ -45,16 +38,16 @@ def test_model_space_forward_virtual(model_space): # fixed parameters as possible) in the model space. expected_models = [ ( - 'model_subspace_1', - {'k1': 0.2, 'k2': 0.1, 'k3': ESTIMATE, 'k4': 0.0}, + "model_subspace_1", + {"k1": 0.2, "k2": 0.1, "k3": ESTIMATE, "k4": 0.0}, ), ( - 'model_subspace_1', - {'k1': 0.2, 'k2': 0.1, 'k3': ESTIMATE, 'k4': 0.1}, + "model_subspace_1", + {"k1": 0.2, "k2": 0.1, "k3": ESTIMATE, "k4": 0.1}, ), ( - 'model_subspace_2', - {'k1': 0.0, 'k2': 0.0, 'k3': 0.0, 'k4': ESTIMATE}, + "model_subspace_2", + {"k1": 0.0, "k2": 0.0, "k3": 0.0, "k4": ESTIMATE}, ), ] @@ -64,14 +57,14 @@ def test_model_space_forward_virtual(model_space): ] # Search found only expected models. - assert all([model in expected_models for model in models]) + assert all(model in expected_models for model in models) # All expected models have now been added to the candidate space. - assert all([model in models for model in expected_models]) + assert all(model in models for model in expected_models) # Probably unnecessary: same number of models in expectation vs realization assert len(expected_models) == len(candidate_space.models) -@pytest.mark.filterwarnings('ignore:Model has been previously excluded') +@pytest.mark.filterwarnings("ignore:Model has been previously excluded") def test_model_space_backward_virtual(model_space): candidate_space = BackwardCandidateSpace(criterion=Criterion.NLLH) model_space.search(candidate_space) @@ -80,14 +73,14 @@ def test_model_space_backward_virtual(model_space): # initial model is used. This means the expected models are the "smallest" # models (as many fixed parameters as possible) in the model space. expected_models = [ - ('model_subspace_1', {f'k{i}': ESTIMATE for i in range(1, 5)}), + ("model_subspace_1", {f"k{i}": ESTIMATE for i in range(1, 5)}), # This model could be excluded, if the hashes/model comparisons enabled # identification of identical models between different subspaces. # TODO delete above, keep below comment, when implemented... # This model is not included because it is exactly the same as the # other model (same PEtab YAML and parameterization), hence has been # excluded from the candidate space. 
- ('model_subspace_3', {f'k{i}': ESTIMATE for i in range(1, 5)}), + ("model_subspace_3", {f"k{i}": ESTIMATE for i in range(1, 5)}), ] models = [ @@ -96,9 +89,9 @@ def test_model_space_backward_virtual(model_space): ] # Search found only expected models. - assert all([model in expected_models for model in models]) + assert all(model in expected_models for model in models) # All expected models have now been added to the candidate space. - assert all([model in models for model in expected_models]) + assert all(model in models for model in expected_models) # Probably unnecessary: same number of models in expectation vs realization assert len(expected_models) == len(candidate_space.models) @@ -111,56 +104,56 @@ def test_model_space_brute_force_limit(model_space): # result in all models except the last two models in the last model subspace. expected_models = [ ( - 'model_subspace_1', - {'k1': 0.2, 'k2': 0.1, 'k3': ESTIMATE, 'k4': 0.0}, + "model_subspace_1", + {"k1": 0.2, "k2": 0.1, "k3": ESTIMATE, "k4": 0.0}, ), ( - 'model_subspace_1', - {'k1': 0.2, 'k2': 0.1, 'k3': ESTIMATE, 'k4': 0.1}, + "model_subspace_1", + {"k1": 0.2, "k2": 0.1, "k3": ESTIMATE, "k4": 0.1}, ), ( - 'model_subspace_1', - {'k1': 0.2, 'k2': 0.1, 'k3': ESTIMATE, 'k4': ESTIMATE}, + "model_subspace_1", + {"k1": 0.2, "k2": 0.1, "k3": ESTIMATE, "k4": ESTIMATE}, ), ( - 'model_subspace_1', - {'k1': 0.2, 'k2': ESTIMATE, 'k3': ESTIMATE, 'k4': 0.0}, + "model_subspace_1", + {"k1": 0.2, "k2": ESTIMATE, "k3": ESTIMATE, "k4": 0.0}, ), ( - 'model_subspace_1', - {'k1': 0.2, 'k2': ESTIMATE, 'k3': ESTIMATE, 'k4': 0.1}, + "model_subspace_1", + {"k1": 0.2, "k2": ESTIMATE, "k3": ESTIMATE, "k4": 0.1}, ), ( - 'model_subspace_1', - {'k1': 0.2, 'k2': ESTIMATE, 'k3': ESTIMATE, 'k4': ESTIMATE}, + "model_subspace_1", + {"k1": 0.2, "k2": ESTIMATE, "k3": ESTIMATE, "k4": ESTIMATE}, ), ( - 'model_subspace_1', - {'k1': ESTIMATE, 'k2': 0.1, 'k3': ESTIMATE, 'k4': 0.0}, + "model_subspace_1", + {"k1": ESTIMATE, "k2": 0.1, "k3": ESTIMATE, "k4": 0.0}, ), ( - 'model_subspace_1', - {'k1': ESTIMATE, 'k2': 0.1, 'k3': ESTIMATE, 'k4': 0.1}, + "model_subspace_1", + {"k1": ESTIMATE, "k2": 0.1, "k3": ESTIMATE, "k4": 0.1}, ), ( - 'model_subspace_1', - {'k1': ESTIMATE, 'k2': 0.1, 'k3': ESTIMATE, 'k4': ESTIMATE}, + "model_subspace_1", + {"k1": ESTIMATE, "k2": 0.1, "k3": ESTIMATE, "k4": ESTIMATE}, ), ( - 'model_subspace_1', - {'k1': ESTIMATE, 'k2': ESTIMATE, 'k3': ESTIMATE, 'k4': 0.0}, + "model_subspace_1", + {"k1": ESTIMATE, "k2": ESTIMATE, "k3": ESTIMATE, "k4": 0.0}, ), ( - 'model_subspace_1', - {'k1': ESTIMATE, 'k2': ESTIMATE, 'k3': ESTIMATE, 'k4': 0.1}, + "model_subspace_1", + {"k1": ESTIMATE, "k2": ESTIMATE, "k3": ESTIMATE, "k4": 0.1}, ), ( - 'model_subspace_1', - {'k1': ESTIMATE, 'k2': ESTIMATE, 'k3': ESTIMATE, 'k4': ESTIMATE}, + "model_subspace_1", + {"k1": ESTIMATE, "k2": ESTIMATE, "k3": ESTIMATE, "k4": ESTIMATE}, ), ( - 'model_subspace_2', - {'k1': 0.0, 'k2': 0.0, 'k3': 0.0, 'k4': ESTIMATE}, + "model_subspace_2", + {"k1": 0.0, "k2": 0.0, "k3": 0.0, "k4": ESTIMATE}, ), ] @@ -170,14 +163,14 @@ def test_model_space_brute_force_limit(model_space): ] # Search found only expected models. - assert all([model in expected_models for model in models]) + assert all(model in expected_models for model in models) # All expected models have now been added to the candidate space. 
- assert all([model in models for model in expected_models]) + assert all(model in models for model in expected_models) # Probably unnecessary: same number of models in expectation vs realization assert len(expected_models) == len(candidate_space.models) -''' +""" @pytest.fixture def e(): return ESTIMATE @@ -262,4 +255,4 @@ def test_distance(model_space, e): model_space.reset() neighbors = model_space.neighbors(brute_force_candidate_space) assert len(neighbors) == 16 -''' +""" diff --git a/test/model_subspace/test_model_subspace.py b/test/model_subspace/test_model_subspace.py index 167ac6f3..5d3d6de9 100644 --- a/test/model_subspace/test_model_subspace.py +++ b/test/model_subspace/test_model_subspace.py @@ -13,7 +13,6 @@ ) from petab_select.constants import ( ESTIMATE, - MODEL_SUBSPACE_ID, PARAMETER_VALUE_DELIMITER, PETAB_YAML, Criterion, @@ -24,17 +23,17 @@ @pytest.fixture def model_subspace_id_and_definition() -> pd.Series: - model_subspace_id = 'model_subspace_1' + model_subspace_id = "model_subspace_1" data = { PETAB_YAML: Path(__file__).parent.parent.parent - / 'doc' - / 'examples' - / 'model_selection' - / 'petab_problem.yaml', - 'k1': 0.2, - 'k2': PARAMETER_VALUE_DELIMITER.join(['0.1', ESTIMATE]), - 'k3': ESTIMATE, - 'k4': PARAMETER_VALUE_DELIMITER.join(['0', '0.1', ESTIMATE]), + / "doc" + / "examples" + / "model_selection" + / "petab_problem.yaml", + "k1": 0.2, + "k2": PARAMETER_VALUE_DELIMITER.join(["0.1", ESTIMATE]), + "k3": ESTIMATE, + "k4": PARAMETER_VALUE_DELIMITER.join(["0", "0.1", ESTIMATE]), } return model_subspace_id, pd.Series(data=data, dtype=str) @@ -50,16 +49,16 @@ def model_subspace(model_subspace_id_and_definition) -> ModelSubspace: @pytest.fixture def initial_model(model_subspace) -> Model: - estimated_parameters = ['k3', 'k4'] + estimated_parameters = ["k3", "k4"] model = one( model_subspace.get_models(estimated_parameters=estimated_parameters) ) # Initial model is parameterized as expected. 
     assert model.parameters == {
-        'k1': 0.2,
-        'k2': 0.1,
-        'k3': ESTIMATE,
-        'k4': ESTIMATE,
+        "k1": 0.2,
+        "k2": 0.1,
+        "k3": ESTIMATE,
+        "k4": ESTIMATE,
     }
 
     return model
@@ -67,50 +66,46 @@ def initial_model(model_subspace) -> Model:
 
 def test_from_definition(model_subspace):
     """A model subspace definition is parsed correctly."""
     # Model subspace ID is parsed
-    assert model_subspace.model_subspace_id == 'model_subspace_1'
+    assert model_subspace.model_subspace_id == "model_subspace_1"
     # PEtab YAML is parsed
     assert model_subspace.petab_yaml.samefile(
         Path(__file__).parent.parent.parent
-        / 'doc'
-        / 'examples'
-        / 'model_selection'
-        / 'petab_problem.yaml',
+        / "doc"
+        / "examples"
+        / "model_selection"
+        / "petab_problem.yaml",
     )
     # Fixed parameters are parsed
-    assert model_subspace.parameters['k1'] == [0.2]
+    assert model_subspace.parameters["k1"] == [0.2]
     # Parameters with multiple values are parsed
     assert (
-        0.1 in model_subspace.parameters['k2']
-        and ESTIMATE in model_subspace.parameters['k2']
+        0.1 in model_subspace.parameters["k2"]
+        and ESTIMATE in model_subspace.parameters["k2"]
     )
     # Estimated parameters are parsed
-    assert model_subspace.parameters['k3'] == [ESTIMATE]
+    assert model_subspace.parameters["k3"] == [ESTIMATE]
 
 
 def test_get_models(model_subspace):
     """The getter for models with specific estimated parameters works."""
-    estimated_parameters = ['k2', 'k3']
+    estimated_parameters = ["k2", "k3"]
     models = list(
         model_subspace.get_models(estimated_parameters=estimated_parameters)
     )
 
     expected_parameterizations = [
-        {'k1': 0.2, 'k2': ESTIMATE, 'k3': ESTIMATE, 'k4': 0.0},
-        {'k1': 0.2, 'k2': ESTIMATE, 'k3': ESTIMATE, 'k4': 0.1},
+        {"k1": 0.2, "k2": ESTIMATE, "k3": ESTIMATE, "k4": 0.0},
+        {"k1": 0.2, "k2": ESTIMATE, "k3": ESTIMATE, "k4": 0.1},
     ]
     test_parameterizations = [model.parameters for model in models]
 
     # Getter gets only expected models.
     assert all(
-        [
-            test_parameterization in expected_parameterizations
-            for test_parameterization in expected_parameterizations
-        ]
+        test_parameterization in expected_parameterizations
+        for test_parameterization in test_parameterizations
     )
     # Getter gets all expected models.
     assert all(
-        [
-            expected_parameterization in test_parameterizations
-            for expected_parameterization in expected_parameterizations
-        ]
+        expected_parameterization in test_parameterizations
+        for expected_parameterization in expected_parameterizations
     )
@@ -125,10 +120,10 @@ def test_search_forward(model_subspace, initial_model):
     assert len(candidate_space.models) == 1
     # The one model has the expected parameterization.
     expected_parameterization = {
-        'k1': 0.2,
-        'k2': ESTIMATE,
-        'k3': ESTIMATE,
-        'k4': ESTIMATE,
+        "k1": 0.2,
+        "k2": ESTIMATE,
+        "k3": ESTIMATE,
+        "k4": ESTIMATE,
     }
     assert one(candidate_space.models).parameters == expected_parameterization
@@ -156,25 +151,21 @@ def test_search_backward(model_subspace, initial_model):
     # Only two models are possible in the backward direction.
     assert len(candidate_space.models) == 2
     expected_parameterizations = [
-        {'k1': 0.2, 'k2': 0.1, 'k3': ESTIMATE, 'k4': 0},
-        {'k1': 0.2, 'k2': 0.1, 'k3': ESTIMATE, 'k4': 0.1},
+        {"k1": 0.2, "k2": 0.1, "k3": ESTIMATE, "k4": 0},
+        {"k1": 0.2, "k2": 0.1, "k3": ESTIMATE, "k4": 0.1},
     ]
     test_parameterizations = [
         model.parameters for model in candidate_space.models
     ]
 
    # Search found only expected models.
     assert all(
-        [
-            test_parameterization in expected_parameterizations
-            for test_parameterization in test_parameterizations
-        ]
+        test_parameterization in expected_parameterizations
+        for test_parameterization in test_parameterizations
     )
     # Search found all expected models.
     assert all(
-        [
-            expected_parameterization in test_parameterizations
-            for expected_parameterization in expected_parameterizations
-        ]
+        expected_parameterization in test_parameterizations
+        for expected_parameterization in expected_parameterizations
     )
 
     # Test limit via model subspace
@@ -199,29 +190,25 @@ def test_search_brute_force(model_subspace):
     assert len(candidate_space.models) == 6
 
     expected_parameterizations = [
-        {'k1': 0.2, 'k2': 0.1, 'k3': ESTIMATE, 'k4': 0},
-        {'k1': 0.2, 'k2': 0.1, 'k3': ESTIMATE, 'k4': 0.1},
-        {'k1': 0.2, 'k2': 0.1, 'k3': ESTIMATE, 'k4': ESTIMATE},
-        {'k1': 0.2, 'k2': ESTIMATE, 'k3': ESTIMATE, 'k4': 0},
-        {'k1': 0.2, 'k2': ESTIMATE, 'k3': ESTIMATE, 'k4': 0.1},
-        {'k1': 0.2, 'k2': ESTIMATE, 'k3': ESTIMATE, 'k4': ESTIMATE},
+        {"k1": 0.2, "k2": 0.1, "k3": ESTIMATE, "k4": 0},
+        {"k1": 0.2, "k2": 0.1, "k3": ESTIMATE, "k4": 0.1},
+        {"k1": 0.2, "k2": 0.1, "k3": ESTIMATE, "k4": ESTIMATE},
+        {"k1": 0.2, "k2": ESTIMATE, "k3": ESTIMATE, "k4": 0},
+        {"k1": 0.2, "k2": ESTIMATE, "k3": ESTIMATE, "k4": 0.1},
+        {"k1": 0.2, "k2": ESTIMATE, "k3": ESTIMATE, "k4": ESTIMATE},
     ]
     test_parameterizations = [
         model.parameters for model in candidate_space.models
     ]
 
     # Search found only expected models.
     assert all(
-        [
-            test_parameterization in expected_parameterizations
-            for test_parameterization in test_parameterizations
-        ]
+        test_parameterization in expected_parameterizations
+        for test_parameterization in test_parameterizations
     )
     # Search found all expected models.
     assert all(
-        [
-            expected_parameterization in test_parameterizations
-            for expected_parameterization in expected_parameterizations
-        ]
+        expected_parameterization in test_parameterizations
+        for expected_parameterization in expected_parameterizations
     )
 
     limit_accepted_candidates = 3
@@ -257,19 +244,15 @@ def test_search_brute_force(model_subspace):
     assert len(candidate_space.models) == limit_accepted_candidates
     # Search found only expected models.
     assert all(
-        [
-            test_parameterization in expected_parameterizations
-            for test_parameterization in test_parameterizations
-        ]
+        test_parameterization in expected_parameterizations
+        for test_parameterization in test_parameterizations
     )
     # Test exclusions: all models have now been added to the candidate space.
     # TODO ideally with only 3 additional calls to `candidate_space.consider`, assuming
     # the model_subspace excluded the first three models it had already sent.
     assert all(
-        [
-            expected_parameterization in test_parameterizations
-            for expected_parameterization in expected_parameterizations
-        ]
+        expected_parameterization in test_parameterizations
+        for expected_parameterization in expected_parameterizations
     )
 
     # Test limit via model subspace
@@ -296,25 +279,21 @@ def test_search_swap(model_subspace, initial_model):
     assert len(candidate_space.models) == 2
     # The two models have the expected parameterization.
     expected_parameterizations = [
-        {'k1': 0.2, 'k2': ESTIMATE, 'k3': ESTIMATE, 'k4': 0},
-        {'k1': 0.2, 'k2': ESTIMATE, 'k3': ESTIMATE, 'k4': 0.1},
+        {"k1": 0.2, "k2": ESTIMATE, "k3": ESTIMATE, "k4": 0},
+        {"k1": 0.2, "k2": ESTIMATE, "k3": ESTIMATE, "k4": 0.1},
     ]
     test_parameterizations = [
         model.parameters for model in candidate_space.models
     ]
 
    # Search found only expected models.
     assert all(
-        [
-            test_parameterization in expected_parameterizations
-            for test_parameterization in test_parameterizations
-        ]
+        test_parameterization in expected_parameterizations
+        for test_parameterization in test_parameterizations
     )
     # Search found all expected models.
     assert all(
-        [
-            expected_parameterization in test_parameterizations
-            for expected_parameterization in expected_parameterizations
-        ]
+        expected_parameterization in test_parameterizations
+        for expected_parameterization in expected_parameterizations
     )
 
     # Test limit via model subspace
diff --git a/test/pypesto/generate_expected_models.py b/test/pypesto/generate_expected_models.py
index 081b9dbe..7d68bd7d 100644
--- a/test/pypesto/generate_expected_models.py
+++ b/test/pypesto/generate_expected_models.py
@@ -2,15 +2,11 @@
 from pathlib import Path
 
 import fides
-import pandas as pd
 import pypesto.engine
 import pypesto.optimize
 import pypesto.select
-from more_itertools import one
 
 import petab_select
-from petab_select import Model
-from petab_select.constants import CRITERIA, ESTIMATED_PARAMETERS, MODEL
 
 SKIP_TEST_CASES_WITH_PREEXISTING_EXPECTED_MODEL = False
 
 os.environ["AMICI_EXPERIMENTAL_SBML_NONCONST_CLS"] = "1"
@@ -23,20 +19,20 @@
 
 # Do not use computationally-expensive test cases in CI
 skip_test_cases = [
-    '0009',
+    "0009",
 ]
 
-test_cases_path = Path(__file__).resolve().parent.parent.parent / 'test_cases'
+test_cases_path = Path(__file__).resolve().parent.parent.parent / "test_cases"
 
 # Reduce runtime but with high reproducibility
 minimize_options = {
-    'n_starts': 24,
-    'optimizer': pypesto.optimize.FidesOptimizer(
+    "n_starts": 24,
+    "optimizer": pypesto.optimize.FidesOptimizer(
         verbose=0, hessian_update=fides.BFGS()
     ),
-    'engine': pypesto.engine.MultiProcessEngine(),
-    'filename': None,
-    'progress_bar': False,
+    "engine": pypesto.engine.MultiProcessEngine(),
+    "filename": None,
+    "progress_bar": False,
 }
 
 
@@ -47,14 +43,14 @@ def objective_customizer(obj):
 
 # Indentation to match `test_pypesto.py`, to make it easier to keep files similar.
 if True:
-    for test_case_path in test_cases_path.glob('*'):
+    for test_case_path in test_cases_path.glob("*"):
         if test_cases and test_case_path.stem not in test_cases:
             continue
 
         if test_case_path.stem in skip_test_cases:
             continue
 
-        expected_model_yaml = test_case_path / 'expected.yaml'
+        expected_model_yaml = test_case_path / "expected.yaml"
 
         if (
             SKIP_TEST_CASES_WITH_PREEXISTING_EXPECTED_MODEL
@@ -62,11 +58,11 @@ def objective_customizer(obj):
         ):
             # Skip test cases that already have an expected model.
             continue
 
-        print(f'Running test case {test_case_path.stem}')
+        print(f"Running test case {test_case_path.stem}")
 
        # Setup the pyPESTO model selector instance.
         petab_select_problem = petab_select.Problem.from_yaml(
-            test_case_path / 'petab_select_problem.yaml',
+            test_case_path / "petab_select_problem.yaml",
         )
         pypesto_select_problem = pypesto.select.Problem(
             petab_select_problem=petab_select_problem
diff --git a/test/pypesto/regenerate_model_hashes.py b/test/pypesto/regenerate_model_hashes.py
index 9c52487a..95fdfb2a 100644
--- a/test/pypesto/regenerate_model_hashes.py
+++ b/test/pypesto/regenerate_model_hashes.py
@@ -1,6 +1,5 @@
 from pathlib import Path
 
-import pandas as pd
 import yaml
 
 import petab_select
@@ -12,16 +11,16 @@
     PREDECESSOR_MODEL_HASH,
 )
 
-test_cases_path = Path(__file__).resolve().parent.parent.parent / 'test_cases'
+test_cases_path = Path(__file__).resolve().parent.parent.parent / "test_cases"
 
-for test_case_path in test_cases_path.glob('*'):
+for test_case_path in test_cases_path.glob("*"):
     petab_select_problem = petab_select.Problem.from_yaml(
-        test_case_path / 'petab_select_problem.yaml',
+        test_case_path / "petab_select_problem.yaml",
     )
-    expected_model_yaml = test_case_path / 'expected.yaml'
+    expected_model_yaml = test_case_path / "expected.yaml"
 
-    with open(expected_model_yaml, "r") as f:
+    with open(expected_model_yaml) as f:
         model_dict = yaml.safe_load(f)
 
     model = petab_select_problem.model_space.model_subspaces[
diff --git a/test/pypesto/test_pypesto.py b/test/pypesto/test_pypesto.py
index abae6698..d64de876 100644
--- a/test/pypesto/test_pypesto.py
+++ b/test/pypesto/test_pypesto.py
@@ -1,25 +1,18 @@
 import os
 from pathlib import Path
 
-import fides
 import numpy as np
 import pandas as pd
 import pypesto.engine
 import pypesto.optimize
 import pypesto.select
 import pytest
-from more_itertools import one
 
 import petab_select
 from petab_select import Model
 from petab_select.constants import (
-    CANDIDATE_SPACE,
     CRITERIA,
     ESTIMATED_PARAMETERS,
-    MODEL,
-    MODELS,
-    Criterion,
-    Method,
 )
 
 os.environ["AMICI_EXPERIMENTAL_SBML_NONCONST_CLS"] = "1"
@@ -33,17 +26,17 @@
 
 # Do not use computationally-expensive test cases in CI
 skip_test_cases = [
-    '0009',
+    "0009",
 ]
 
-test_cases_path = Path(__file__).resolve().parent.parent.parent / 'test_cases'
+test_cases_path = Path(__file__).resolve().parent.parent.parent / "test_cases"
 
 # Reduce runtime but with high reproducibility
 minimize_options = {
-    'n_starts': 10,
-    'engine': pypesto.engine.MultiProcessEngine(),
-    'filename': None,
-    'progress_bar': False,
+    "n_starts": 10,
+    "engine": pypesto.engine.MultiProcessEngine(),
+    "filename": None,
+    "progress_bar": False,
 }
 
 
@@ -66,10 +59,10 @@ def test_pypesto(test_case_path_stem):
         pytest.skip("Test marked to be skipped.")
 
     test_case_path = test_cases_path / test_case_path_stem
-    expected_model_yaml = test_case_path / 'expected.yaml'
+    expected_model_yaml = test_case_path / "expected.yaml"
 
    # Setup the pyPESTO model selector instance.
     petab_select_problem = petab_select.Problem.from_yaml(
-        test_case_path / 'petab_select_problem.yaml',
+        test_case_path / "petab_select_problem.yaml",
     )
     pypesto_select_problem = pypesto.select.Problem(
         petab_select_problem=petab_select_problem
diff --git a/test/ui/test_ui.py b/test/ui/test_ui.py
index 5cf19056..dcfaac7f 100644
--- a/test/ui/test_ui.py
+++ b/test/ui/test_ui.py
@@ -1,28 +1,14 @@
 from pathlib import Path
-from typing import Tuple
 
-import numpy as np
-import pandas as pd
 import pytest
 from more_itertools import one
 
 import petab_select
-from petab_select import (
-    ESTIMATE,
-    FamosCandidateSpace,
-    ForwardCandidateSpace,
-    Method,
-    Model,
-)
 from petab_select.constants import (
     CANDIDATE_SPACE,
-    MODEL_HASH,
     MODELS,
-    TERMINATE,
     UNCALIBRATED_MODELS,
-    Criterion,
 )
-from petab_select.model import default_compare
 
 
 @pytest.fixture
@@ -39,7 +25,7 @@ def petab_select_problem():
 def test_user_calibrated_models(petab_select_problem):
     """Test handling of user-calibrated models."""
     model_M1_2 = petab_select_problem.model_space.model_subspaces[
-        'M1_2'
+        "M1_2"
     ].indices_to_model((0, 0, 0))
     model_M1_2.set_criterion(
         criterion=petab_select_problem.criterion, value=12.3
diff --git a/tox.ini b/tox.ini
index 264971a9..139c8b0d 100644
--- a/tox.ini
+++ b/tox.ini
@@ -18,23 +18,10 @@ description =
 
 [testenv:base]
 extras = test
+deps =
+    git+https://github.com/ICB-DCM/pyPESTO.git@develop\#egg=pypesto
 commands =
     pytest --cov=petab_select --cov-report=xml --cov-append test -s
     coverage report
 description =
     Test basic functionality
-
-[testenv:flake8]
-skip_install = true
-deps =
-    flake8 >= 3.8.3
-    flake8-bandit >= 2.1.2
-    flake8-bugbear >= 20.1.4
-    flake8-colors >= 0.1.6
-    flake8-comprehensions >= 3.2.3
-    flake8-print >= 3.1.4
-    flake8-docstrings >= 1.6.0
-commands =
-    flake8 petab_select test setup.py
-description =
-    Run flake8 with various plugins.
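
A note on the recurring `all([...])` -> `all(...)` rewrite in the test hunks
above: passing `all()` a generator expression instead of a list comprehension
avoids building an intermediate list and lets `all()` short-circuit at the
first falsy element; this is the kind of pattern ruff's flake8-comprehensions
rules flag. A minimal sketch of the difference, using illustrative data rather
than the repository's fixtures:

    expected_models = [{"k1": 0.2}, {"k1": "estimate"}]
    models = [{"k1": 0.7}, {"k1": 0.2}]

    # Before: the comprehension evaluates every membership test and builds
    # the full list before all() sees any result.
    assert not all([model in expected_models for model in models])

    # After: the generator is consumed lazily, so all() returns as soon as
    # the first non-member is encountered.
    assert not all(model in expected_models for model in models)

On the new tox dependency: in `tox.ini`, an unescaped `#` would start a
comment, so the `#egg=pypesto` fragment of the pip VCS URL is written as
`\#egg=pypesto` to survive ini parsing.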