From 41670f0663dd0286bb030354599acc9daf49716a Mon Sep 17 00:00:00 2001
From: Ryan Ly
Date: Fri, 7 Feb 2025 22:41:44 -0800
Subject: [PATCH 01/14] Update ruff action

---
 .github/workflows/ruff.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/ruff.yml b/.github/workflows/ruff.yml
index 1933fa75e..078a243cc 100644
--- a/.github/workflows/ruff.yml
+++ b/.github/workflows/ruff.yml
@@ -8,4 +8,4 @@ jobs:
       - name: Checkout repo
         uses: actions/checkout@v4
       - name: Run ruff
-        uses: chartboost/ruff-action@v1
+        uses: astral-sh/ruff-action@v1

From 61853c7fe2e11ef3615e00ca1f966df76193c85b Mon Sep 17 00:00:00 2001
From: Ryan Ly
Date: Fri, 7 Feb 2025 22:47:29 -0800
Subject: [PATCH 02/14] Move min reqs testing info to pyproject.toml

---
 .github/PULL_REQUEST_TEMPLATE/release.md |  3 +--
 .github/dependabot.yml                   | 11 -----------
 docs/source/software_process.rst         |  6 +++---
 pyproject.toml                           | 24 ++++++++++++++++--------
 requirements-min.txt                     | 10 ----------
 tox.ini                                  |  4 +---
 6 files changed, 21 insertions(+), 37 deletions(-)
 delete mode 100644 requirements-min.txt

diff --git a/.github/PULL_REQUEST_TEMPLATE/release.md b/.github/PULL_REQUEST_TEMPLATE/release.md
index 82f987164..569c694da 100644
--- a/.github/PULL_REQUEST_TEMPLATE/release.md
+++ b/.github/PULL_REQUEST_TEMPLATE/release.md
@@ -2,8 +2,7 @@ Prepare for release of HDMF [version]

 ### Before merging:
 - [ ] Make sure all PRs to be included in this release have been merged to `dev`.
-- [ ] Major and minor releases: Update dependency ranges in `pyproject.toml` and minimums in
-  `requirements-min.txt` as needed.
+- [ ] Major and minor releases: Update dependency ranges in `pyproject.toml` as needed.
 - [ ] Check legal file dates and information in `Legal.txt`, `license.txt`, `README.rst`, `docs/source/conf.py`,
   and any other locations as needed
 - [ ] Update `pyproject.toml` as needed

diff --git a/.github/dependabot.yml b/.github/dependabot.yml
index 24615639c..d202a332d 100644
--- a/.github/dependabot.yml
+++ b/.github/dependabot.yml
@@ -1,16 +1,5 @@
 version: 2
 updates:
-  # disable checking python requirements files because there are too
-  # many updates and dependabot will not ignore requirements-min.txt
-  # until https://github.com/dependabot/dependabot-core/issues/2883 is resolved
-  # workaround is to continue updating these files manually
-
-  # - package-ecosystem: "pip"
-  #   directory: "/"
-  #   schedule:
-  #     # Check for updates to requirements files and pyproject.toml every week
-  #     interval: "weekly"
-
   - package-ecosystem: "github-actions"
     directory: "/"
     schedule:

diff --git a/docs/source/software_process.rst b/docs/source/software_process.rst
index f3a6c7457..bf765a062 100644
--- a/docs/source/software_process.rst
+++ b/docs/source/software_process.rst
@@ -45,8 +45,9 @@ pyproject.toml_ contains a list of package dependencies and their version ranges
 running HDMF. As a library, upper bound version constraints create more harm than good in the long term (see this
 `blog post`_) so we avoid setting upper bounds on requirements.

-When setting lower bounds, make sure to specify the lower bounds in both pyproject.toml_ and
-requirements-min.txt_. The latter is used in automated testing to ensure that the package runs
+When setting lower bounds, make sure to specify the lower bounds in the ``[project] dependencies`` key and 
+``[project.optional-dependencies] min-reqs`` key in pyproject.toml_. 
+The latter is used in automated testing to ensure that the package runs
 correctly using the minimum versions of dependencies.

 Minimum requirements should be updated manually if a new feature or bug fix is added in a dependency that is required
@@ -56,7 +57,6 @@ minimum version to be as high as it is.

 .. _pyproject.toml: https://github.com/hdmf-dev/hdmf/blob/dev/pyproject.toml
 .. _blog post: https://iscinumpy.dev/post/bound-version-constraints/
-.. _requirements-min.txt: https://github.com/hdmf-dev/hdmf/blob/dev/requirements-min.txt

--------------------
 Testing Requirements

diff --git a/pyproject.toml b/pyproject.toml
index 5a58b6cef..7ccdfa2f2 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -14,7 +14,7 @@ authors = [
 description = "A hierarchical data modeling framework for modern science data standards"
 readme = "README.rst"
 requires-python = ">=3.9"
-license = {text = "BSD-3-Clause"}
+license = "BSD-3-Clause"
 classifiers = [
     "Programming Language :: Python",
     "Programming Language :: Python :: 3.9",
@@ -29,6 +29,7 @@ classifiers = [
     "Intended Audience :: Science/Research",
     "Topic :: Scientific/Engineering :: Medical Science Apps.",
 ]
+# make sure to update min-reqs dependencies below when these lower bounds change
 dependencies = [
     "h5py>=3.1.0",
     "jsonschema>=3.2.0",
@@ -39,6 +40,7 @@ dependencies = [
 dynamic = ["version"]

 [project.optional-dependencies]
+# make sure to update min-reqs dependencies below when these lower bounds change
 tqdm = ["tqdm>=4.41.0"]
 zarr = ["zarr>=2.12.0,<3"]
 sparse = ["scipy>=1.7"]
@@ -72,6 +74,18 @@ docs = [
 # all possible dependencies
 all = ["hdmf[tqdm,zarr,sparse,termset,test,docs]"]

+# minimum requirements of project dependencies for testing (see .github/workflows/run_all_tests.yml)
+min-reqs = [
+    "h5py==3.1.0",
+    "jsonschema==3.2.0",
+    "numpy==1.19.3",
+    "pandas==1.2.0",
+    "ruamel.yaml==0.16.0",
+    "scipy==1.7.0",
+    "tqdm==4.41.0",
+    "zarr==2.12.0",
+]
+
 [project.urls]
 "Homepage" = "https://github.com/hdmf-dev/hdmf"
 "Bug Tracker" = "https://github.com/hdmf-dev/hdmf/issues"
@@ -141,13 +155,8 @@ omit = [
 # force-exclude = "src/hdmf/common/hdmf-common-schema|docs/gallery"

 [tool.ruff]
-lint.select = ["E", "F", "T100", "T201", "T203"]
+lint.select = ["E", "F", "T100", "T201", "T203", "C901"]
 exclude = [
-  ".git",
-  ".tox",
-  "__pycache__",
-  "build/",
-  "dist/",
   "src/hdmf/common/hdmf-common-schema",
   "docs/source/conf.py",
   "src/hdmf/_due.py",
@@ -160,7 +169,6 @@ line-length = 120
 [tool.ruff.lint.per-file-ignores]
 "docs/gallery/*" = ["E402", "T201"]
 "src/*/__init__.py" = ["F401"]
-"setup.py" = ["T201"]
 "test_gallery.py" = ["T201"]

 [tool.ruff.lint.mccabe]

diff --git a/requirements-min.txt b/requirements-min.txt
deleted file mode 100644
index a9fbeb93e..000000000
--- a/requirements-min.txt
+++ /dev/null
@@ -1,10 +0,0 @@
-# minimum versions of package dependencies for installing HDMF
-# NOTE: these should match the minimum bound for dependencies in pyproject.toml
-h5py==3.1.0
-jsonschema==3.2.0
-numpy==1.19.3
-pandas==1.2.0
-ruamel.yaml==0.16.0
-scipy==1.7.0
-tqdm==4.41.0
-zarr==2.12.0

diff --git a/tox.ini b/tox.ini
index 4caa68a4b..37171acb1 100644
--- a/tox.ini
+++ b/tox.ini
@@ -22,14 +22,12 @@ install_command =
     minimum, wheelinstall: python -I -m pip install {opts} {packages}
    upgraded: python -I -m pip install -U {opts} {packages}
    prerelease: python -I -m pip install -U --pre {opts} {packages}
-deps =
-    # which requirements files to use (default: none)
-    minimum: -r requirements-min.txt
 extras =
     # which optional dependency set(s) to use (default: none)
     pytest: test
     gallery: doc
     optional: tqdm,sparse,zarr,termset
+    minimum: min-reqs
 commands =
     # commands to run for every environment
     python --version  # print python version for debugging
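Note (not part of the patch set): PATCH 02/14 replaces requirements-min.txt with the
`min-reqs` optional dependency group, and the comments added to pyproject.toml make
keeping the `==` pins in sync with the `>=` lower bounds a manual step. A minimal
consistency check along the following lines could catch drift. This is an illustrative
sketch only; it assumes Python >= 3.11 (for `tomllib`), the third-party `packaging`
library, and a hypothetical file name `check_min_reqs.py`:

    # check_min_reqs.py -- sketch: flag "pkg==X" pins in the min-reqs extra
    # that do not match the "pkg>=X" lower bound declared elsewhere.
    import tomllib

    from packaging.requirements import Requirement
    from packaging.version import Version

    with open("pyproject.toml", "rb") as f:
        project = tomllib.load(f)["project"]

    extras = project["optional-dependencies"]
    declared = project["dependencies"] + [
        req for name, reqs in extras.items() if name != "min-reqs" for req in reqs
    ]

    # Collect the ">=" lower bound declared for each package.
    lower_bounds = {}
    for dep in declared:
        req = Requirement(dep)
        for spec in req.specifier:
            if spec.operator == ">=":
                lower_bounds[req.name] = Version(spec.version)

    # Every "==" pin in the min-reqs extra should equal the declared lower bound.
    for dep in extras["min-reqs"]:
        req = Requirement(dep)
        pins = [Version(spec.version) for spec in req.specifier if spec.operator == "=="]
        bound = lower_bounds.get(req.name)
        if pins and bound is not None and pins[0] != bound:
            print(f"mismatch: {req.name} pinned at {pins[0]}, lower bound is {bound}")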
From 1d5322910dc82eff0a0d744a4351f413f338d886 Mon Sep 17 00:00:00 2001
From: Ryan Ly
Date: Fri, 7 Feb 2025 22:47:49 -0800
Subject: [PATCH 03/14] Update github-app-token action

---
 .github/workflows/project_action.yml | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/.github/workflows/project_action.yml b/.github/workflows/project_action.yml
index 6037bd4ab..148d4d41d 100644
--- a/.github/workflows/project_action.yml
+++ b/.github/workflows/project_action.yml
@@ -12,10 +12,10 @@ jobs:
     steps:
       - name: GitHub App token
         id: generate_token
-        uses: tibdex/github-app-token@v2
+        uses: actions/create-github-app-token@v1
         with:
-          app_id: ${{ secrets.APP_ID }}
-          private_key: ${{ secrets.APP_PEM }}
+          app-id: ${{ secrets.APP_ID }}
+          private-key: ${{ secrets.APP_PEM }}

       - name: Add to Developer Board
         env:

From 59e60193814bc3e090a3d296937b03b9ffcd342d Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Sat, 8 Feb 2025 07:08:47 +0000
Subject: [PATCH 04/14] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 docs/source/software_process.rst | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/docs/source/software_process.rst b/docs/source/software_process.rst
index bf765a062..96bf470e7 100644
--- a/docs/source/software_process.rst
+++ b/docs/source/software_process.rst
@@ -45,8 +45,8 @@ pyproject.toml_ contains a list of package dependencies and their version ranges
 running HDMF. As a library, upper bound version constraints create more harm than good in the long term (see this
 `blog post`_) so we avoid setting upper bounds on requirements.

-When setting lower bounds, make sure to specify the lower bounds in the ``[project] dependencies`` key and 
-``[project.optional-dependencies] min-reqs`` key in pyproject.toml_. 
+When setting lower bounds, make sure to specify the lower bounds in the ``[project] dependencies`` key and
+``[project.optional-dependencies] min-reqs`` key in pyproject.toml_.
 The latter is used in automated testing to ensure that the package runs
 correctly using the minimum versions of dependencies.
From 82294d80f1116e38c84f55994ec977e11e58ec4d Mon Sep 17 00:00:00 2001
From: Ryan Ly
Date: Fri, 7 Feb 2025 23:37:54 -0800
Subject: [PATCH 05/14] Reduce complexity of DynamicTable.add_row

---
 src/hdmf/common/table.py | 52 +++++++++++++++++++++++-----------------
 1 file changed, 30 insertions(+), 22 deletions(-)

diff --git a/src/hdmf/common/table.py b/src/hdmf/common/table.py
index 2f6401672..d4697f487 100644
--- a/src/hdmf/common/table.py
+++ b/src/hdmf/common/table.py
@@ -638,26 +638,9 @@ def __len__(self):
         """Number of rows in the table"""
         return len(self.id)

-    @docval({'name': 'data', 'type': dict, 'doc': 'the data to put in this row', 'default': None},
-            {'name': 'id', 'type': int, 'doc': 'the ID for the row', 'default': None},
-            {'name': 'enforce_unique_id', 'type': bool, 'doc': 'enforce that the id in the table must be unique',
-             'default': False},
-            {'name': 'check_ragged', 'type': bool, 'default': True,
-             'doc': ('whether or not to check for ragged arrays when adding data to the table. '
-                     'Set to False to avoid checking every element if performance issues occur.')},
-            allow_extra=True)
-    def add_row(self, **kwargs):
-        """
-        Add a row to the table. If *id* is not provided, it will auto-increment.
-        """
-        data, row_id, enforce_unique_id, check_ragged = popargs('data', 'id', 'enforce_unique_id', 'check_ragged',
-                                                                kwargs)
-        data = data if data is not None else kwargs
-
+    def _validate_new_row(self, data: dict):
+        """Validate a row of new data to be added."""
         bad_data = []
-        extra_columns = set(list(data.keys())) - set(list(self.__colids.keys()))
-        missing_columns = set(list(self.__colids.keys())) - set(list(data.keys()))
-
         for colname, colnum in self.__colids.items():
             if colname not in data:
                 raise ValueError("column '%s' missing" % colname)
@@ -675,7 +658,12 @@ def add_row(self, **kwargs):
             msg = ('"%s" is not in the term set.' % ', '.join([str(item) for item in bad_data]))
             raise ValueError(msg)

-        # check to see if any of the extra columns just need to be added
+    def _add_extra_predefined_columns(self, data: dict):
+        """Add columns that are predefined, have not been added, and are present in the new row data.
+        Also check to see if all extra row data keys have corresponding columns in the table.
+        """
+        extra_columns = set(list(data.keys())) - set(list(self.__colids.keys()))
+
         if extra_columns:
             for col in self.__columns__:
                 if col['name'] in extra_columns:
@@ -691,14 +679,34 @@ def add_row(self, **kwargs):
                                    if k not in DynamicTable.__reserved_colspec_keys})
                     extra_columns.remove(col['name'])

-        if extra_columns or missing_columns:
+        if extra_columns:
             raise ValueError(
                 '\n'.join([
                     'row data keys don\'t match available columns',
                     'you supplied {} extra keys: {}'.format(len(extra_columns), extra_columns),
-                    'and were missing {} keys: {}'.format(len(missing_columns), missing_columns)
                 ])
             )
+
+    @docval({'name': 'data', 'type': dict, 'doc': 'the data to put in this row', 'default': None},
+            {'name': 'id', 'type': int, 'doc': 'the ID for the row', 'default': None},
+            {'name': 'enforce_unique_id', 'type': bool, 'doc': 'enforce that the id in the table must be unique',
+             'default': False},
+            {'name': 'check_ragged', 'type': bool, 'default': True,
+             'doc': ('whether or not to check for ragged arrays when adding data to the table. '
+                     'Set to False to avoid checking every element if performance issues occur.')},
+            allow_extra=True)
+    def add_row(self, **kwargs):
+        """
+        Add a row to the table. If *id* is not provided, it will auto-increment.
+        """
+        data, row_id, enforce_unique_id, check_ragged = popargs('data', 'id', 'enforce_unique_id', 'check_ragged',
+                                                                kwargs)
+        data = data if data is not None else kwargs
+
+        self._validate_new_row(data)
+        self._add_extra_predefined_columns(data)
+
         if row_id is None:
             row_id = data.pop('id', None)
         if row_id is None:
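Note (illustrative toy example, not part of the patch set): the refactor above moves
add_row's checks into the `_validate_new_row` and `_add_extra_predefined_columns`
helpers without changing the public behavior, which still looks like this:

    from hdmf.common import DynamicTable, VectorData

    table = DynamicTable(
        name="trials",
        description="a toy table",
        columns=[VectorData(name="start_time", description="trial start (s)", data=[])],
    )
    table.add_row(start_time=0.5)  # ok: matches the declared column
    table.add_row(start_time=1.5, id=7, enforce_unique_id=True)  # explicit, unique id

    try:
        table.add_row(stop_time=2.0)  # no 'start_time' given
    except ValueError as err:
        print(err)  # _validate_new_row reports: column 'start_time' missing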
- """ - data, row_id, enforce_unique_id, check_ragged = popargs('data', 'id', 'enforce_unique_id', 'check_ragged', - kwargs) - data = data if data is not None else kwargs - + def _validate_new_row(self, data: dict): + """Validate a row of new data to be added.""" bad_data = [] - extra_columns = set(list(data.keys())) - set(list(self.__colids.keys())) - missing_columns = set(list(self.__colids.keys())) - set(list(data.keys())) - for colname, colnum in self.__colids.items(): if colname not in data: raise ValueError("column '%s' missing" % colname) @@ -675,7 +658,12 @@ def add_row(self, **kwargs): msg = ('"%s" is not in the term set.' % ', '.join([str(item) for item in bad_data])) raise ValueError(msg) - # check to see if any of the extra columns just need to be added + def _add_extra_predefined_columns(self, data: dict): + """Add columns that are predefined, have not been added, and are present in the new row data. + Also check to see if all extra row data keys have corresponding columns in the table. + """ + extra_columns = set(list(data.keys())) - set(list(self.__colids.keys())) + if extra_columns: for col in self.__columns__: if col['name'] in extra_columns: @@ -691,14 +679,34 @@ def add_row(self, **kwargs): if k not in DynamicTable.__reserved_colspec_keys}) extra_columns.remove(col['name']) - if extra_columns or missing_columns: + if extra_columns: raise ValueError( '\n'.join([ 'row data keys don\'t match available columns', 'you supplied {} extra keys: {}'.format(len(extra_columns), extra_columns), - 'and were missing {} keys: {}'.format(len(missing_columns), missing_columns) ]) ) + + @docval({'name': 'data', 'type': dict, 'doc': 'the data to put in this row', 'default': None}, + {'name': 'id', 'type': int, 'doc': 'the ID for the row', 'default': None}, + {'name': 'enforce_unique_id', 'type': bool, 'doc': 'enforce that the id in the table must be unique', + 'default': False}, + {'name': 'check_ragged', 'type': bool, 'default': True, + 'doc': ('whether or not to check for ragged arrays when adding data to the table. ' + 'Set to False to avoid checking every element if performance issues occur.')}, + allow_extra=True) + def add_row(self, **kwargs): + """ + Add a row to the table. If *id* is not provided, it will auto-increment. 
+ """ + data, row_id, enforce_unique_id, check_ragged = popargs('data', 'id', 'enforce_unique_id', 'check_ragged', + kwargs) + data = data if data is not None else kwargs + + self._validate_new_row(data) + self._add_extra_predefined_columns(data) + + if row_id is None: row_id = data.pop('id', None) if row_id is None: From 1071f54601d51f2a8cdb3b95ece811027a9afb63 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Sat, 8 Feb 2025 07:38:01 +0000 Subject: [PATCH 06/14] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/hdmf/common/table.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/hdmf/common/table.py b/src/hdmf/common/table.py index d4697f487..8b57bd569 100644 --- a/src/hdmf/common/table.py +++ b/src/hdmf/common/table.py @@ -706,7 +706,7 @@ def add_row(self, **kwargs): self._validate_new_row(data) self._add_extra_predefined_columns(data) - + if row_id is None: row_id = data.pop('id', None) if row_id is None: From 5e6a4b3749652e4c482d9aa97fcbf54fbf7b6d26 Mon Sep 17 00:00:00 2001 From: Ryan Ly Date: Fri, 7 Feb 2025 23:48:44 -0800 Subject: [PATCH 07/14] Reduce complexity of is_inherited_spec --- src/hdmf/spec/spec.py | 28 ++++++++++++---------------- 1 file changed, 12 insertions(+), 16 deletions(-) diff --git a/src/hdmf/spec/spec.py b/src/hdmf/spec/spec.py index bbd97b592..0e15cb92c 100644 --- a/src/hdmf/spec/spec.py +++ b/src/hdmf/spec/spec.py @@ -1045,24 +1045,20 @@ def is_inherited_spec(self, **kwargs): return self.is_inherited_type(spec_name) elif spec_name in self.__target_types: return self.is_inherited_target_type(spec_name) + elif super().is_inherited_spec(spec): # attribute spec + return True else: - # attribute spec - if super().is_inherited_spec(spec): - return True + parent_name = spec.parent.name + if parent_name is None: + parent_name = spec.parent.data_type + if isinstance(spec.parent, DatasetSpec): + if (parent_name in self.__datasets and self.is_inherited_dataset(parent_name) and + self.__datasets[parent_name].get_attribute(spec_name) is not None): + return True else: - parent_name = spec.parent.name - if parent_name is None: - parent_name = spec.parent.data_type - if isinstance(spec.parent, DatasetSpec): - if parent_name in self.__datasets: - if self.is_inherited_dataset(parent_name): - if self.__datasets[parent_name].get_attribute(spec_name) is not None: - return True - else: - if parent_name in self.__groups: - if self.is_inherited_group(parent_name): - if self.__groups[parent_name].get_attribute(spec_name) is not None: - return True + if (parent_name in self.__groups and self.is_inherited_group(parent_name) and : + self.__groups[parent_name].get_attribute(spec_name) is not None): + return True return False @docval({'name': 'spec', 'type': Spec, 'doc': 'the specification to check'}) From 672d91c5f35319765c31225f3b81deec72c06311 Mon Sep 17 00:00:00 2001 From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com> Date: Sat, 8 Feb 2025 07:48:51 +0000 Subject: [PATCH 08/14] [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci --- src/hdmf/spec/spec.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/hdmf/spec/spec.py b/src/hdmf/spec/spec.py index 0e15cb92c..67969181b 100644 --- a/src/hdmf/spec/spec.py +++ b/src/hdmf/spec/spec.py @@ -1052,7 +1052,7 @@ def is_inherited_spec(self, **kwargs): if parent_name is None: parent_name = 
From 672d91c5f35319765c31225f3b81deec72c06311 Mon Sep 17 00:00:00 2001
From: "pre-commit-ci[bot]" <66853113+pre-commit-ci[bot]@users.noreply.github.com>
Date: Sat, 8 Feb 2025 07:48:51 +0000
Subject: [PATCH 08/14] [pre-commit.ci] auto fixes from pre-commit.com hooks

for more information, see https://pre-commit.ci
---
 src/hdmf/spec/spec.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/hdmf/spec/spec.py b/src/hdmf/spec/spec.py
index 0e15cb92c..67969181b 100644
--- a/src/hdmf/spec/spec.py
+++ b/src/hdmf/spec/spec.py
@@ -1052,7 +1052,7 @@ def is_inherited_spec(self, **kwargs):
             if parent_name is None:
                 parent_name = spec.parent.data_type
             if isinstance(spec.parent, DatasetSpec):
-                if (parent_name in self.__datasets and self.is_inherited_dataset(parent_name) and 
+                if (parent_name in self.__datasets and self.is_inherited_dataset(parent_name) and
                     self.__datasets[parent_name].get_attribute(spec_name) is not None):
                     return True
             else:
From d1f699bd895a04996a5fd35434c10fbbf706463e Mon Sep 17 00:00:00 2001
From: Ryan Ly
Date: Sat, 8 Feb 2025 00:01:59 -0800
Subject: [PATCH 09/14] Reduce complexity of add_ref

---
 src/hdmf/common/resources.py | 110 ++++++++++++++++++-----------------
 1 file changed, 56 insertions(+), 54 deletions(-)

diff --git a/src/hdmf/common/resources.py b/src/hdmf/common/resources.py
index 1fc731ef5..8ada6cf78 100644
--- a/src/hdmf/common/resources.py
+++ b/src/hdmf/common/resources.py
@@ -544,6 +544,52 @@ def add_ref_termset(self, **kwargs):
         if len(missing_terms)>0:
             return {"missing_terms": missing_terms}

+    def _validate_object(self, container, attribute, field):
+        if attribute is None:  # Trivial Case
+            relative_path = ''
+            object_field = self._check_object_field(file=file,
+                                                    container=container,
+                                                    relative_path=relative_path,
+                                                    field=field)
+        else:  # DataType Attribute Case
+            attribute_object = getattr(container, attribute)  # returns attribute object
+            if isinstance(attribute_object, AbstractContainer):
+                relative_path = ''
+                object_field = self._check_object_field(file=file,
+                                                        container=attribute_object,
+                                                        relative_path=relative_path,
+                                                        field=field)
+            else:  # Non-DataType Attribute Case:
+                obj_mapper = self.type_map.get_map(container)
+                spec = obj_mapper.get_attr_spec(attr_name=attribute)
+                parent_spec = spec.parent  # return the parent spec of the attribute
+                if parent_spec.data_type is None:
+                    while parent_spec.data_type is None:
+                        parent_spec = parent_spec.parent  # find the closest parent with a data_type
+                    parent_cls = self.type_map.get_dt_container_cls(data_type=parent_spec.data_type, autogen=False)
+                    if isinstance(container, parent_cls):
+                        parent = container
+                        # We need to get the path of the spec for relative_path
+                        absolute_path = spec.path
+                        relative_path = absolute_path[absolute_path.find('/')+1:]
+                        object_field = self._check_object_field(file=file,
+                                                                container=parent,
+                                                                relative_path=relative_path,
+                                                                field=field)
+                    else:
+                        msg = 'Container not the nearest data_type'
+                        raise ValueError(msg)
+                else:
+                    parent = container  # container needs to be the parent
+                    absolute_path = spec.path
+                    relative_path = absolute_path[absolute_path.find('/')+1:]
+                    # this regex removes everything prior to the container on the absolute_path
+                    object_field = self._check_object_field(file=file,
+                                                            container=parent,
+                                                            relative_path=relative_path,
+                                                            field=field)
+
+
     @docval({'name': 'container', 'type': (str, AbstractContainer), 'default': None,
             'doc': ('The Container/Data object that uses the key or '
                     'the object_id for the Container/Data object that uses the key.')},
@@ -630,52 +676,7 @@ def add_ref(self, **kwargs):
             msg = 'This entity already exists. Ignoring new entity uri'
             warn(msg, stacklevel=3)

-        #################
-        # Validate Object
-        #################
-        if attribute is None:  # Trivial Case
-            relative_path = ''
-            object_field = self._check_object_field(file=file,
-                                                    container=container,
-                                                    relative_path=relative_path,
-                                                    field=field)
-        else:  # DataType Attribute Case
-            attribute_object = getattr(container, attribute)  # returns attribute object
-            if isinstance(attribute_object, AbstractContainer):
-                relative_path = ''
-                object_field = self._check_object_field(file=file,
-                                                        container=attribute_object,
-                                                        relative_path=relative_path,
-                                                        field=field)
-            else:  # Non-DataType Attribute Case:
-                obj_mapper = self.type_map.get_map(container)
-                spec = obj_mapper.get_attr_spec(attr_name=attribute)
-                parent_spec = spec.parent  # return the parent spec of the attribute
-                if parent_spec.data_type is None:
-                    while parent_spec.data_type is None:
-                        parent_spec = parent_spec.parent  # find the closest parent with a data_type
-                    parent_cls = self.type_map.get_dt_container_cls(data_type=parent_spec.data_type, autogen=False)
-                    if isinstance(container, parent_cls):
-                        parent = container
-                        # We need to get the path of the spec for relative_path
-                        absolute_path = spec.path
-                        relative_path = absolute_path[absolute_path.find('/')+1:]
-                        object_field = self._check_object_field(file=file,
-                                                                container=parent,
-                                                                relative_path=relative_path,
-                                                                field=field)
-                    else:
-                        msg = 'Container not the nearest data_type'
-                        raise ValueError(msg)
-                else:
-                    parent = container  # container needs to be the parent
-                    absolute_path = spec.path
-                    relative_path = absolute_path[absolute_path.find('/')+1:]
-                    # this regex removes everything prior to the container on the absolute_path
-                    object_field = self._check_object_field(file=file,
-                                                            container=parent,
-                                                            relative_path=relative_path,
-                                                            field=field)
+        object_field = self._validate_object(container, attribute, field)

         #######################################
         # Validate Parameters and Populate HERD
@@ -998,7 +999,7 @@ def get_zip_directory(cls, path):
         directory = os.path.dirname(os.path.realpath(path))
         return directory

-    @classmethod
+    @classmethod  # noqa: C901
     @docval({'name': 'path', 'type': str, 'doc': 'The path to the zip file.'})
     def from_zip(cls, **kwargs):
         """
         Method to read in zipped tsv files to populate HERD.
         """
@@ -1075,11 +1076,12 @@ def from_zip(cls, **kwargs):
             msg = "Key Index out of range in EntityKeyTable. Please check for alterations."
             raise ValueError(msg)
-
-        er = HERD(files=files,
-                  keys=keys,
-                  entities=entities,
-                  entity_keys=entity_keys,
-                  objects=objects,
-                  object_keys=object_keys)
+        er = HERD(
+            files=files,
+            keys=keys,
+            entities=entities,
+            entity_keys=entity_keys,
+            objects=objects,
+            object_keys=object_keys
+        )
         return er
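Note (illustrative sketch modeled on the HERD tutorial, not part of the patch set;
`ToyFile` is a hypothetical stand-in for a real file object such as an NWBFile): every
external-resource reference now funnels through the extracted `_validate_object` helper,
which PATCH 10/14 below completes by threading the `file` argument through and returning
`object_field`. A typical call exercising the attribute branch looks roughly like:

    from hdmf import Container
    from hdmf.common import DynamicTable, HERD
    from hdmf.container import HERDManager

    class ToyFile(Container, HERDManager):  # hypothetical file-like owner of the HERD
        pass

    file = ToyFile(name="file")
    species = DynamicTable(name="species", description="a toy species table")
    species.add_column(name="species_name", description="binomial name")
    species.add_row(species_name="Homo sapiens")

    herd = HERD()
    herd.add_ref(
        file=file,
        container=species,
        attribute="species_name",  # resolved by _validate_object's attribute branches
        key="Homo sapiens",
        entity_id="NCBI_TAXON:9606",
        entity_uri="https://www.ncbi.nlm.nih.gov/Taxonomy/Browser/wwwtax.cgi?id=9606",
    )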
From eb41dda30d8b8a6c1101f5f8cc5431b9f1dca5e4 Mon Sep 17 00:00:00 2001
From: Ryan Ly
Date: Sat, 8 Feb 2025 00:03:50 -0800
Subject: [PATCH 10/14] Fix

---
 src/hdmf/common/resources.py | 5 +++--
 src/hdmf/spec/spec.py        | 2 +-
 2 files changed, 4 insertions(+), 3 deletions(-)

diff --git a/src/hdmf/common/resources.py b/src/hdmf/common/resources.py
index 8ada6cf78..e93ccc135 100644
--- a/src/hdmf/common/resources.py
+++ b/src/hdmf/common/resources.py
@@ -544,7 +544,7 @@ def add_ref_termset(self, **kwargs):
         if len(missing_terms)>0:
             return {"missing_terms": missing_terms}

-    def _validate_object(self, container, attribute, field):
+    def _validate_object(self, container, attribute, field, file):
         if attribute is None:  # Trivial Case
             relative_path = ''
             object_field = self._check_object_field(file=file,
@@ -588,6 +588,7 @@ def _validate_object(self, container, attribute, field):
                                                             container=parent,
                                                             relative_path=relative_path,
                                                             field=field)
+        return object_field

     @docval({'name': 'container', 'type': (str, AbstractContainer), 'default': None,
@@ -676,7 +677,7 @@ def add_ref(self, **kwargs):
             msg = 'This entity already exists. Ignoring new entity uri'
             warn(msg, stacklevel=3)

-        object_field = self._validate_object(container, attribute, field)
+        object_field = self._validate_object(container, attribute, field, file)

         #######################################
         # Validate Parameters and Populate HERD

diff --git a/src/hdmf/spec/spec.py b/src/hdmf/spec/spec.py
index 67969181b..cf29c8b0e 100644
--- a/src/hdmf/spec/spec.py
+++ b/src/hdmf/spec/spec.py
@@ -1056,7 +1056,7 @@ def is_inherited_spec(self, **kwargs):
                     self.__datasets[parent_name].get_attribute(spec_name) is not None):
                     return True
             else:
-                if (parent_name in self.__groups and self.is_inherited_group(parent_name) and :
+                if (parent_name in self.__groups and self.is_inherited_group(parent_name) and
                     self.__groups[parent_name].get_attribute(spec_name) is not None):
                     return True
         return False
From f18b9c4bd5d54042f38372ce05308fe005d6128c Mon Sep 17 00:00:00 2001
From: Ryan Ly
Date: Sat, 8 Feb 2025 00:06:27 -0800
Subject: [PATCH 11/14] Cleanup

---
 src/hdmf/backends/hdf5/h5tools.py |  2 +-
 src/hdmf/build/objectmapper.py    |  4 ++--
 src/hdmf/common/resources.py      |  4 ++--
 src/hdmf/common/table.py          |  4 ++--
 src/hdmf/spec/spec.py             | 29 +++++++++++++----------------
 src/hdmf/validate/validator.py    |  4 ++--
 6 files changed, 22 insertions(+), 25 deletions(-)

diff --git a/src/hdmf/backends/hdf5/h5tools.py b/src/hdmf/backends/hdf5/h5tools.py
index d30cef06c..493c4057f 100644
--- a/src/hdmf/backends/hdf5/h5tools.py
+++ b/src/hdmf/backends/hdf5/h5tools.py
@@ -1086,7 +1086,7 @@ def write_link(self, **kwargs):
         self.__set_written(builder)
         return link_obj

-    @docval({'name': 'parent', 'type': Group, 'doc': 'the parent HDF5 object'},  # noqa: C901
+    @docval({'name': 'parent', 'type': Group, 'doc': 'the parent HDF5 object'},
             {'name': 'builder', 'type': DatasetBuilder, 'doc': 'the DatasetBuilder to write'},
             {'name': 'link_data', 'type': bool,
              'doc': 'If not specified otherwise link (True) or copy (False) HDF5 Datasets', 'default': True},

diff --git a/src/hdmf/build/objectmapper.py b/src/hdmf/build/objectmapper.py
index 176de322c..e683e60bf 100644
--- a/src/hdmf/build/objectmapper.py
+++ b/src/hdmf/build/objectmapper.py
@@ -184,7 +184,7 @@ def no_convert(cls, obj_type):
         """
         cls.__no_convert.add(obj_type)

-    @classmethod  # noqa: C901
+    @classmethod
     def convert_dtype(cls, spec, value, spec_dtype=None):  # noqa: C901
         """
         Convert values to the specified dtype. For example, if a literal int
@@ -276,7 +276,7 @@ def __check_convert_numeric(cls, value_type):
                 np.issubdtype(value_dtype, np.integer)):
             raise ValueError("Cannot convert from %s to 'numeric' specification dtype." % value_type)

-    @classmethod  # noqa: C901
+    @classmethod
     def __check_edgecases(cls, spec, value, spec_dtype):  # noqa: C901
         """
         Check edge cases in converting data to a dtype

diff --git a/src/hdmf/common/resources.py b/src/hdmf/common/resources.py
index e93ccc135..2071415a0 100644
--- a/src/hdmf/common/resources.py
+++ b/src/hdmf/common/resources.py
@@ -1000,9 +1000,9 @@ def get_zip_directory(cls, path):
         directory = os.path.dirname(os.path.realpath(path))
         return directory

-    @classmethod  # noqa: C901
+    @classmethod
     @docval({'name': 'path', 'type': str, 'doc': 'The path to the zip file.'})
-    def from_zip(cls, **kwargs):
+    def from_zip(cls, **kwargs):  # noqa: C901
         """
         Method to read in zipped tsv files to populate HERD.
         """

diff --git a/src/hdmf/common/table.py b/src/hdmf/common/table.py
index 8b57bd569..40380b46e 100644
--- a/src/hdmf/common/table.py
+++ b/src/hdmf/common/table.py
@@ -304,7 +304,7 @@ def __gather_columns(cls, name, bases, classdict):
             except AttributeError:  # raises error when "__columns__" is not an attr of item
                 continue

-    @docval({'name': 'name', 'type': str, 'doc': 'the name of this table'},  # noqa: C901
+    @docval({'name': 'name', 'type': str, 'doc': 'the name of this table'},
             {'name': 'description', 'type': str, 'doc': 'a description of what is in this table'},
             {'name': 'id', 'type': ('array_data', 'data', ElementIdentifiers), 'doc': 'the identifiers for this table',
              'default': None},
@@ -749,7 +749,7 @@ def __eq__(self, other):
             return False
         return self.to_dataframe().equals(other.to_dataframe())

-    @docval({'name': 'name', 'type': str, 'doc': 'the name of this VectorData'},  # noqa: C901
+    @docval({'name': 'name', 'type': str, 'doc': 'the name of this VectorData'},
             {'name': 'description', 'type': str, 'doc': 'a description for this column'},
             {'name': 'data', 'type': ('array_data', 'data'),
              'doc': 'a dataset where the first dimension is a concatenation of multiple vectors', 'default': list()},

diff --git a/src/hdmf/spec/spec.py b/src/hdmf/spec/spec.py
index cf29c8b0e..665be4a34 100644
--- a/src/hdmf/spec/spec.py
+++ b/src/hdmf/spec/spec.py
@@ -1062,7 +1062,7 @@ def is_inherited_spec(self, **kwargs):
         return False

     @docval({'name': 'spec', 'type': Spec, 'doc': 'the specification to check'})
-    def is_overridden_spec(self, **kwargs):  # noqa: C901
+    def is_overridden_spec(self, **kwargs):
         ''' Returns 'True' if specification overrides a specification from the parent type '''
         spec = getargs('spec', kwargs)
         spec_name = spec.name
@@ -1086,23 +1086,20 @@ def is_overridden_spec(self, **kwargs):
             return self.is_overridden_dataset(spec_name)
         elif spec_name in self.__data_types:
             return self.is_overridden_type(spec_name)
+        elif super().is_overridden_spec(spec):  # attribute spec
+            return True
         else:
-            if super().is_overridden_spec(spec):  # check if overridden attribute
-                return True
+            parent_name = spec.parent.name
+            if parent_name is None:
+                parent_name = spec.parent.data_type
+            if isinstance(spec.parent, DatasetSpec):
+                if (parent_name in self.__datasets and self.is_overridden_dataset(parent_name) and
+                    self.__datasets[parent_name].is_overridden_spec(spec)):
+                    return True
             else:
-                parent_name = spec.parent.name
-                if parent_name is None:
-                    parent_name = spec.parent.data_type
-                if isinstance(spec.parent, DatasetSpec):
-                    if parent_name in self.__datasets:
-                        if self.is_overridden_dataset(parent_name):
-                            if self.__datasets[parent_name].is_overridden_spec(spec):
-                                return True
-                else:
-                    if parent_name in self.__groups:
-                        if self.is_overridden_group(parent_name):
-                            if self.__groups[parent_name].is_overridden_spec(spec):
-                                return True
+                if (parent_name in self.__groups and self.is_overridden_group(parent_name) and
+                    self.__groups[parent_name].is_overridden_spec(spec)):
+                    return True
         return False

     @docval({'name': 'spec', 'type': (BaseStorageSpec, str), 'doc': 'the specification to check'})

diff --git a/src/hdmf/validate/validator.py b/src/hdmf/validate/validator.py
index d7ec78eaa..1a0be981d 100644
--- a/src/hdmf/validate/validator.py
+++ b/src/hdmf/validate/validator.py
@@ -472,9 +472,9 @@ class GroupValidator(BaseStorageValidator):
     def __init__(self, **kwargs):
         super().__init__(**kwargs)

-    @docval({"name": "builder", "type": GroupBuilder, "doc": "the builder to validate"},  # noqa: C901
+    @docval({"name": "builder", "type": GroupBuilder, "doc": "the builder to validate"},
             returns='a list of Errors', rtype=list)
-    def validate(self, **kwargs):  # noqa: C901
+    def validate(self, **kwargs):
         builder = getargs('builder', kwargs)
         errors = super().validate(builder)
         errors.extend(self.__validate_children(builder))
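Note (illustrative, not from the codebase): PATCH 11/14 and PATCH 12/14 also settle
where a C901 suppression has to live -- ruff reports mccabe complexity on the function
definition line, so a `# noqa: C901` on a decorator line (including a multi-line
`@docval(...)`) is inert and can be dropped. Schematically:

    class Example:
        @classmethod                 # a "# noqa: C901" here suppresses nothing
        def busy(cls, **kwargs):     # noqa: C901  <- C901 is reported on the def line
            ...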
From 63030e3eed41e132bc3b35a6ae858ce68ccf710b Mon Sep 17 00:00:00 2001
From: Ryan Ly
Date: Sat, 8 Feb 2025 00:07:28 -0800
Subject: [PATCH 12/14] add c901 exception

---
 src/hdmf/common/resources.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/hdmf/common/resources.py b/src/hdmf/common/resources.py
index 2071415a0..ff333caac 100644
--- a/src/hdmf/common/resources.py
+++ b/src/hdmf/common/resources.py
@@ -605,7 +605,7 @@ def _validate_object(self, container, attribute, field, file):
             {'name': 'file', 'type': HERDManager, 'doc': 'The file associated with the container.',
              'default': None},
             )
-    def add_ref(self, **kwargs):
+    def add_ref(self, **kwargs):  # noqa: C901
         """
         Add information about an external reference used in this file.

From c81d99fad0926f17949131a6340b15dbc8600061 Mon Sep 17 00:00:00 2001
From: Ryan Ly
Date: Sat, 8 Feb 2025 00:35:24 -0800
Subject: [PATCH 13/14] Update changelog

---
 CHANGELOG.md | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 8e3f480c8..34da87402 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,11 @@
 # HDMF Changelog

+## [Unreleased]
+
+### Changed
+- Removed `requirements-min.txt` in favor of the `min-reqs` optional dependency group in `pyproject.toml`. @rly [#1246](https://github.com/hdmf-dev/hdmf/pull/1246)
+- Updated GitHub actions and ruff configuration. @rly [#1246](https://github.com/hdmf-dev/hdmf/pull/1246)
+
 ## HDMF 4.0.0 (January 22, 2025)

 ### Breaking changes
From 722a772f59c4d528dbc3d676958683afec0330c4 Mon Sep 17 00:00:00 2001
From: Ryan Ly
Date: Sat, 8 Feb 2025 11:18:01 -0800
Subject: [PATCH 14/14] Update ruff.yml

---
 .github/workflows/ruff.yml | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/.github/workflows/ruff.yml b/.github/workflows/ruff.yml
index 078a243cc..da9b4327c 100644
--- a/.github/workflows/ruff.yml
+++ b/.github/workflows/ruff.yml
@@ -8,4 +8,4 @@ jobs:
       - name: Checkout repo
         uses: actions/checkout@v4
       - name: Run ruff
-        uses: astral-sh/ruff-action@v1
+        uses: astral-sh/ruff-action@v3