diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml
index 3f80639c7..0f486273b 100644
--- a/.pre-commit-config.yaml
+++ b/.pre-commit-config.yaml
@@ -18,7 +18,7 @@ repos:
 #    hooks:
 #      - id: black
   - repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: v0.4.4
+    rev: v0.4.9
     hooks:
       - id: ruff
 #  - repo: https://github.com/econchick/interrogate
@@ -26,7 +26,7 @@ repos:
 #    hooks:
 #      - id: interrogate
   - repo: https://github.com/codespell-project/codespell
-    rev: v2.2.6
+    rev: v2.3.0
     hooks:
       - id: codespell
         additional_dependencies:
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 4f11fa143..74e048a19 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,6 +1,15 @@
 # HDMF Changelog
 
-## HDMF 3.14.1 (Upcoming)
+## HDMF 3.14.2 (Upcoming)
+
+### Enhancements
+- Warn when unexpected keys are present in specs. @rly [#1134](https://github.com/hdmf-dev/hdmf/pull/1134)
+- Support appending to zarr arrays. @mavaylon1 [#1136](https://github.com/hdmf-dev/hdmf/pull/1136)
+
+### Bug fixes
+- Fix iterator increment causing an extra +1 added after the end of completion. @CodyCBakerPhD [#1128](https://github.com/hdmf-dev/hdmf/pull/1128)
+
+## HDMF 3.14.1 (June 6, 2024)
 
 ### Bug fixes
 - Excluded unnecessary artifacts from sdist and wheel. @rly [#1119](https://github.com/hdmf-dev/hdmf/pull/1119)
diff --git a/pyproject.toml b/pyproject.toml
index 3a0034087..a089113c0 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -36,12 +36,12 @@ dependencies = [
     "pandas>=1.0.5",
     "ruamel.yaml>=0.16",
     "scipy>=1.4",
+    "zarr >= 2.12.0",
     "importlib-resources; python_version < '3.9'",  # TODO: remove when minimum python version is 3.9
 ]
 dynamic = ["version"]
 
 [project.optional-dependencies]
-zarr = ["zarr>=2.12.0"]
 tqdm = ["tqdm>=4.41.0"]
 termset = ["linkml-runtime>=1.5.5; python_version >= '3.9'",
            "schemasheets>=0.1.23; python_version >= '3.9'",
@@ -117,7 +117,7 @@ omit = [
 # force-exclude = "src/hdmf/common/hdmf-common-schema|docs/gallery"
 
 [tool.ruff]
-select = ["E", "F", "T100", "T201", "T203"]
+lint.select = ["E", "F", "T100", "T201", "T203"]
 exclude = [
   ".git",
   ".tox",
@@ -132,11 +132,11 @@ exclude = [
 ]
 line-length = 120
 
-[tool.ruff.per-file-ignores]
+[tool.ruff.lint.per-file-ignores]
 "docs/gallery/*" = ["E402", "T201"]
 "src/*/__init__.py" = ["F401"]
 "setup.py" = ["T201"]
 "test_gallery.py" = ["T201"]
 
-[tool.ruff.mccabe]
+[tool.ruff.lint.mccabe]
 max-complexity = 17
diff --git a/src/hdmf/backends/hdf5/h5tools.py b/src/hdmf/backends/hdf5/h5tools.py
index 0604881bb..8135d75e7 100644
--- a/src/hdmf/backends/hdf5/h5tools.py
+++ b/src/hdmf/backends/hdf5/h5tools.py
@@ -728,7 +728,7 @@ def __read_dataset(self, h5obj, name=None):
     def _check_str_dtype(self, h5obj):
         dtype = h5obj.dtype
         if dtype.kind == 'O':
-            if dtype.metadata.get('vlen') == str and H5PY_3:
+            if dtype.metadata.get('vlen') is str and H5PY_3:
                 return StrDataset(h5obj, None)
         return h5obj
 
diff --git a/src/hdmf/build/objectmapper.py b/src/hdmf/build/objectmapper.py
index fed678d41..52fbfec49 100644
--- a/src/hdmf/build/objectmapper.py
+++ b/src/hdmf/build/objectmapper.py
@@ -1164,7 +1164,7 @@ def __get_subspec_values(self, builder, spec, manager):
             if not isinstance(builder, DatasetBuilder):  # pragma: no cover
                 raise ValueError("__get_subspec_values - must pass DatasetBuilder with DatasetSpec")
             if (spec.shape is None and getattr(builder.data, 'shape', None) == (1,) and
-                    type(builder.data[0]) != np.void):
+                    type(builder.data[0]) is not np.void):
                 # if a scalar dataset is expected and a 1-element non-compound dataset is given, then read the dataset
                 builder['data'] = builder.data[0]  # use dictionary reference instead of .data to bypass error
             ret[spec] = self.__check_ref_resolver(builder.data)
diff --git a/src/hdmf/data_utils.py b/src/hdmf/data_utils.py
index c03665caa..71f5bdf6d 100644
--- a/src/hdmf/data_utils.py
+++ b/src/hdmf/data_utils.py
@@ -5,17 +5,20 @@
 from warnings import warn
 from typing import Tuple
 from itertools import product, chain
+from zarr import Array as ZarrArray
 
 import h5py
 import numpy as np
 
 from .utils import docval, getargs, popargs, docval_macro, get_data_shape
 
-
 def append_data(data, arg):
     if isinstance(data, (list, DataIO)):
         data.append(arg)
         return data
+    elif isinstance(data, ZarrArray):
+        data.append([arg], axis=0)
+        return data
     elif type(data).__name__ == 'TermSetWrapper':  # circular import
         data.append(arg)
         return data
@@ -386,14 +389,18 @@ def __next__(self):
         :returns: DataChunk object with the data and selection of the current buffer.
         :rtype: DataChunk
         """
-        if self.display_progress:
-            self.progress_bar.update(n=1)
         try:
             buffer_selection = next(self.buffer_selection_generator)
+
+            # Only update after successful iteration
+            if self.display_progress:
+                self.progress_bar.update(n=1)
+
             return DataChunk(data=self._get_data(selection=buffer_selection), selection=buffer_selection)
         except StopIteration:
+            # Allow text to be written to new lines after completion
             if self.display_progress:
-                self.progress_bar.write("\n")  # Allows text to be written to new lines after completion
+                self.progress_bar.write("\n")
             raise StopIteration
 
     def __reduce__(self) -> Tuple[Callable, Iterable]:
diff --git a/src/hdmf/spec/spec.py b/src/hdmf/spec/spec.py
index 1a6e8d987..6d7d29e49 100644
--- a/src/hdmf/spec/spec.py
+++ b/src/hdmf/spec/spec.py
@@ -93,9 +93,13 @@ def build_spec(cls, spec_dict):
         vargs = cls.build_const_args(spec_dict)
         kwargs = dict()
         # iterate through the Spec docval and construct kwargs based on matching values in spec_dict
+        unused_vargs = list(vargs)
         for x in get_docval(cls.__init__):
             if x['name'] in vargs:
                 kwargs[x['name']] = vargs.get(x['name'])
+                unused_vargs.remove(x['name'])
+        if unused_vargs:
+            warn(f'Unexpected keys {unused_vargs} in spec {spec_dict}')
         return cls(**kwargs)
diff --git a/src/hdmf/testing/testcase.py b/src/hdmf/testing/testcase.py
index 798df6fe4..1be4bcecd 100644
--- a/src/hdmf/testing/testcase.py
+++ b/src/hdmf/testing/testcase.py
@@ -174,7 +174,7 @@ def _assert_array_equal(self,
         :param message: custom additional message to show when assertions as part of this assert are failing
         """
         array_data_types = tuple([i for i in get_docval_macro('array_data')
-                                  if (i != list and i != tuple and i != AbstractDataChunkIterator)])
+                                  if (i is not list and i is not tuple and i is not AbstractDataChunkIterator)])
         # We construct array_data_types this way to avoid explicit dependency on h5py, Zarr and other
         # I/O backends. Only list and tuple do not support [()] slicing, and AbstractDataChunkIterator
         # should never occur here. The effective value of array_data_types is then:
diff --git a/src/hdmf/validate/validator.py b/src/hdmf/validate/validator.py
index bdfc15f8f..e39011d9f 100644
--- a/src/hdmf/validate/validator.py
+++ b/src/hdmf/validate/validator.py
@@ -164,7 +164,7 @@ def get_type(data, builder_dtype=None):
         # Empty array
         else:
             # Empty string array
-            if data.dtype.metadata["vlen"] == str:
+            if data.dtype.metadata["vlen"] is str:
                 return "utf", None
             # Undetermined variable length data type.
             else:  # pragma: no cover
diff --git a/tests/unit/spec_tests/test_attribute_spec.py b/tests/unit/spec_tests/test_attribute_spec.py
index 15102e728..bac8e12a3 100644
--- a/tests/unit/spec_tests/test_attribute_spec.py
+++ b/tests/unit/spec_tests/test_attribute_spec.py
@@ -91,3 +91,15 @@ def test_build_spec_no_doc(self):
         msg = "AttributeSpec.__init__: missing argument 'doc'"
         with self.assertRaisesWith(TypeError, msg):
             AttributeSpec.build_spec(spec_dict)
+
+    def test_build_warn_extra_args(self):
+        spec_dict = {
+            'name': 'attribute1',
+            'doc': 'test attribute',
+            'dtype': 'int',
+            'quantity': '?',
+        }
+        msg = ("Unexpected keys ['quantity'] in spec {'name': 'attribute1', 'doc': 'test attribute', "
+               "'dtype': 'int', 'quantity': '?'}")
+        with self.assertWarnsWith(UserWarning, msg):
+            AttributeSpec.build_spec(spec_dict)
diff --git a/tests/unit/spec_tests/test_dataset_spec.py b/tests/unit/spec_tests/test_dataset_spec.py
index 0309aced4..008e8c6fc 100644
--- a/tests/unit/spec_tests/test_dataset_spec.py
+++ b/tests/unit/spec_tests/test_dataset_spec.py
@@ -245,3 +245,15 @@ def test_data_type_property_value(self):
         group = GroupSpec('A group', name='group', data_type_inc=data_type_inc, data_type_def=data_type_def)
         self.assertEqual(group.data_type, data_type)
+
+    def test_build_warn_extra_args(self):
+        spec_dict = {
+            'name': 'dataset1',
+            'doc': 'test dataset',
+            'dtype': 'int',
+            'required': True,
+        }
+        msg = ("Unexpected keys ['required'] in spec {'name': 'dataset1', 'doc': 'test dataset', "
+               "'dtype': 'int', 'required': True}")
+        with self.assertWarnsWith(UserWarning, msg):
+            DatasetSpec.build_spec(spec_dict)
diff --git a/tests/unit/spec_tests/test_group_spec.py b/tests/unit/spec_tests/test_group_spec.py
index 00a937538..31c00cfbb 100644
--- a/tests/unit/spec_tests/test_group_spec.py
+++ b/tests/unit/spec_tests/test_group_spec.py
@@ -314,6 +314,16 @@ def test_get_namespace_spec(self):
         expected = AttributeSpec('namespace', 'the namespace for the data type of this object', 'text',
                                  required=False)
         self.assertDictEqual(GroupSpec.get_namespace_spec(), expected)
 
+    def test_build_warn_extra_args(self):
+        spec_dict = {
+            'name': 'group1',
+            'doc': 'test group',
+            'required': True,
+        }
+        msg = "Unexpected keys ['required'] in spec {'name': 'group1', 'doc': 'test group', 'required': True}"
+        with self.assertWarnsWith(UserWarning, msg):
+            GroupSpec.build_spec(spec_dict)
+
 
 class TestNotAllowedConfig(TestCase):
diff --git a/tests/unit/spec_tests/test_link_spec.py b/tests/unit/spec_tests/test_link_spec.py
index e6c680b7c..38e10886b 100644
--- a/tests/unit/spec_tests/test_link_spec.py
+++ b/tests/unit/spec_tests/test_link_spec.py
@@ -67,3 +67,15 @@ def test_required_is_many(self):
         )
         self.assertEqual(spec.required, req)
         self.assertEqual(spec.is_many(), many)
+
+    def test_build_warn_extra_args(self):
+        spec_dict = {
+            'name': 'link1',
+            'doc': 'test link',
+            'target_type': 'TestType',
+            'required': True,
+        }
+        msg = ("Unexpected keys ['required'] in spec {'name': 'link1', 'doc': 'test link', "
+               "'target_type': 'TestType', 'required': True}")
+        with self.assertWarnsWith(UserWarning, msg):
+            LinkSpec.build_spec(spec_dict)
diff --git a/tests/unit/utils_test/test_data_utils.py b/tests/unit/utils_test/test_data_utils.py
new file mode 100644
index 000000000..2e0df7ba8
--- /dev/null
+++ b/tests/unit/utils_test/test_data_utils.py
@@ -0,0 +1,14 @@
+from hdmf.data_utils import append_data
+from hdmf.testing import TestCase
+
+import numpy as np
+from numpy.testing import assert_array_equal
+import zarr
+
+class TestAppendData(TestCase):
+
+    def test_append_data_zarr(self):
+        zarr_array = zarr.array([1,2,3])
+        new = append_data(zarr_array, 4)
+
+        assert_array_equal(new[:], np.array([1,2,3,4]))
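
The new test above exercises the zarr branch added to append_data. The warning added to Spec.build_spec is covered by the new test_build_warn_extra_args tests; as a minimal sketch of how it surfaces at runtime (illustrative only, not part of the patch, reusing the spec dict from the new attribute-spec test):

    import warnings

    from hdmf.spec import AttributeSpec

    # 'quantity' is not an AttributeSpec constructor argument, so build_spec now warns about it
    spec_dict = {'name': 'attribute1', 'doc': 'test attribute', 'dtype': 'int', 'quantity': '?'}
    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        AttributeSpec.build_spec(spec_dict)
    print(caught[0].message)
    # Unexpected keys ['quantity'] in spec {'name': 'attribute1', 'doc': 'test attribute', 'dtype': 'int', 'quantity': '?'}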