From c908d7b0a348db1632307662f6b0888e743b0c67 Mon Sep 17 00:00:00 2001 From: Ryan Ly Date: Thu, 20 Feb 2025 11:21:41 -0800 Subject: [PATCH] Minor housekeeping updates (#1246) * Update ruff action * Move min reqs testing info to pyproject.toml * Update github-app-token action * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Reduce complexity of DynamicTable.add_row * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Reduce complexity of is_inherited_spec * [pre-commit.ci] auto fixes from pre-commit.com hooks for more information, see https://pre-commit.ci * Reduce complexity of add_ref * Fix * Cleanup * add c901 exception --------- Co-authored-by: pre-commit-ci[bot] <66853113+pre-commit-ci[bot]@users.noreply.github.com> --- .github/PULL_REQUEST_TEMPLATE/release.md | 3 +- .github/dependabot.yml | 11 --- .github/workflows/project_action.yml | 6 +- .github/workflows/ruff.yml | 2 +- CHANGELOG.md | 2 + docs/source/software_process.rst | 6 +- pyproject.toml | 24 +++-- requirements-min.txt | 10 -- src/hdmf/backends/hdf5/h5tools.py | 2 +- src/hdmf/build/objectmapper.py | 4 +- src/hdmf/common/resources.py | 113 ++++++++++++----------- src/hdmf/common/table.py | 56 ++++++----- src/hdmf/spec/spec.py | 57 +++++------- src/hdmf/validate/validator.py | 4 +- tox.ini | 4 +- 15 files changed, 147 insertions(+), 157 deletions(-) delete mode 100644 requirements-min.txt diff --git a/.github/PULL_REQUEST_TEMPLATE/release.md b/.github/PULL_REQUEST_TEMPLATE/release.md index 82f987164..569c694da 100644 --- a/.github/PULL_REQUEST_TEMPLATE/release.md +++ b/.github/PULL_REQUEST_TEMPLATE/release.md @@ -2,8 +2,7 @@ Prepare for release of HDMF [version] ### Before merging: - [ ] Make sure all PRs to be included in this release have been merged to `dev`. -- [ ] Major and minor releases: Update dependency ranges in `pyproject.toml` and minimums in - `requirements-min.txt` as needed. +- [ ] Major and minor releases: Update dependency ranges in `pyproject.toml` as needed. 
- [ ] Check legal file dates and information in `Legal.txt`, `license.txt`, `README.rst`, `docs/source/conf.py`, and any other locations as needed - [ ] Update `pyproject.toml` as needed diff --git a/.github/dependabot.yml b/.github/dependabot.yml index 24615639c..d202a332d 100644 --- a/.github/dependabot.yml +++ b/.github/dependabot.yml @@ -1,16 +1,5 @@ version: 2 updates: - # disable checking python requirements files because there are too - # many updates and dependabot will not ignore requirements-min.txt - # until https://github.com/dependabot/dependabot-core/issues/2883 is resolved - # workaround is to continue updating these files manually - - # - package-ecosystem: "pip" - # directory: "/" - # schedule: - # # Check for updates to requirements files and pyproject.toml every week - # interval: "weekly" - - package-ecosystem: "github-actions" directory: "/" schedule: diff --git a/.github/workflows/project_action.yml b/.github/workflows/project_action.yml index 6037bd4ab..148d4d41d 100644 --- a/.github/workflows/project_action.yml +++ b/.github/workflows/project_action.yml @@ -12,10 +12,10 @@ jobs: steps: - name: GitHub App token id: generate_token - uses: tibdex/github-app-token@v2 + uses: actions/create-github-app-token@v1 with: - app_id: ${{ secrets.APP_ID }} - private_key: ${{ secrets.APP_PEM }} + app-id: ${{ secrets.APP_ID }} + private-key: ${{ secrets.APP_PEM }} - name: Add to Developer Board env: diff --git a/.github/workflows/ruff.yml b/.github/workflows/ruff.yml index 1933fa75e..da9b4327c 100644 --- a/.github/workflows/ruff.yml +++ b/.github/workflows/ruff.yml @@ -8,4 +8,4 @@ jobs: - name: Checkout repo uses: actions/checkout@v4 - name: Run ruff - uses: chartboost/ruff-action@v1 + uses: astral-sh/ruff-action@v3 diff --git a/CHANGELOG.md b/CHANGELOG.md index d24ac481f..547708dd5 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,6 +6,8 @@ - Optimized `get` within `VectorIndex` to be more efficient when retrieving a dataset of references. @mavaylon1 [#1248](https://github.com/hdmf-dev/hdmf/pull/1248) ### Changed +- Removed `requirements-min.txt` in favor of the `min-reqs` optional dependency group in `pyproject.toml`. @rly [#1246](https://github.com/hdmf-dev/hdmf/pull/1246) +- Updated GitHub actions and ruff configuration. @rly [#1246](https://github.com/hdmf-dev/hdmf/pull/1246) - `hdmf.monitor` is unused and undocumented. It has been deprecated and will be removed in HDMF 5.0. @rly [#1245](https://github.com/hdmf-dev/hdmf/pull/1245) ## HDMF 4.0.0 (January 22, 2025) diff --git a/docs/source/software_process.rst b/docs/source/software_process.rst index f3a6c7457..96bf470e7 100644 --- a/docs/source/software_process.rst +++ b/docs/source/software_process.rst @@ -45,8 +45,9 @@ pyproject.toml_ contains a list of package dependencies and their version ranges running HDMF. As a library, upper bound version constraints create more harm than good in the long term (see this `blog post`_) so we avoid setting upper bounds on requirements. -When setting lower bounds, make sure to specify the lower bounds in both pyproject.toml_ and -requirements-min.txt_. The latter is used in automated testing to ensure that the package runs +When setting lower bounds, make sure to specify the lower bounds in the ``[project] dependencies`` key and +``[project.optional-dependencies] min-reqs`` key in pyproject.toml_. +The latter is used in automated testing to ensure that the package runs correctly using the minimum versions of dependencies. 
Minimum requirements should be updated manually if a new feature or bug fix is added in a dependency that is required @@ -56,7 +57,6 @@ minimum version to be as high as it is. .. _pyproject.toml: https://github.com/hdmf-dev/hdmf/blob/dev/pyproject.toml .. _blog post: https://iscinumpy.dev/post/bound-version-constraints/ -.. _requirements-min.txt: https://github.com/hdmf-dev/hdmf/blob/dev/requirements-min.txt -------------------- Testing Requirements diff --git a/pyproject.toml b/pyproject.toml index 5a58b6cef..7ccdfa2f2 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -14,7 +14,7 @@ authors = [ description = "A hierarchical data modeling framework for modern science data standards" readme = "README.rst" requires-python = ">=3.9" -license = {text = "BSD-3-Clause"} +license = "BSD-3-Clause" classifiers = [ "Programming Language :: Python", "Programming Language :: Python :: 3.9", @@ -29,6 +29,7 @@ classifiers = [ "Intended Audience :: Science/Research", "Topic :: Scientific/Engineering :: Medical Science Apps.", ] +# make sure to update min-reqs dependencies below when these lower bounds change dependencies = [ "h5py>=3.1.0", "jsonschema>=3.2.0", @@ -39,6 +40,7 @@ dependencies = [ dynamic = ["version"] [project.optional-dependencies] +# make sure to update min-reqs dependencies below when these lower bounds change tqdm = ["tqdm>=4.41.0"] zarr = ["zarr>=2.12.0,<3"] sparse = ["scipy>=1.7"] @@ -72,6 +74,18 @@ docs = [ # all possible dependencies all = ["hdmf[tqdm,zarr,sparse,termset,test,docs]"] +# minimum requirements of project dependencies for testing (see .github/workflows/run_all_tests.yml) +min-reqs = [ + "h5py==3.1.0", + "jsonschema==3.2.0", + "numpy==1.19.3", + "pandas==1.2.0", + "ruamel.yaml==0.16.0", + "scipy==1.7.0", + "tqdm==4.41.0", + "zarr==2.12.0", +] + [project.urls] "Homepage" = "https://github.com/hdmf-dev/hdmf" "Bug Tracker" = "https://github.com/hdmf-dev/hdmf/issues" @@ -141,13 +155,8 @@ omit = [ # force-exclude = "src/hdmf/common/hdmf-common-schema|docs/gallery" [tool.ruff] -lint.select = ["E", "F", "T100", "T201", "T203"] +lint.select = ["E", "F", "T100", "T201", "T203", "C901"] exclude = [ - ".git", - ".tox", - "__pycache__", - "build/", - "dist/", "src/hdmf/common/hdmf-common-schema", "docs/source/conf.py", "src/hdmf/_due.py", @@ -160,7 +169,6 @@ line-length = 120 [tool.ruff.lint.per-file-ignores] "docs/gallery/*" = ["E402", "T201"] "src/*/__init__.py" = ["F401"] -"setup.py" = ["T201"] "test_gallery.py" = ["T201"] [tool.ruff.lint.mccabe] diff --git a/requirements-min.txt b/requirements-min.txt deleted file mode 100644 index a9fbeb93e..000000000 --- a/requirements-min.txt +++ /dev/null @@ -1,10 +0,0 @@ -# minimum versions of package dependencies for installing HDMF -# NOTE: these should match the minimum bound for dependencies in pyproject.toml -h5py==3.1.0 -jsonschema==3.2.0 -numpy==1.19.3 -pandas==1.2.0 -ruamel.yaml==0.16.0 -scipy==1.7.0 -tqdm==4.41.0 -zarr==2.12.0 diff --git a/src/hdmf/backends/hdf5/h5tools.py b/src/hdmf/backends/hdf5/h5tools.py index d30cef06c..493c4057f 100644 --- a/src/hdmf/backends/hdf5/h5tools.py +++ b/src/hdmf/backends/hdf5/h5tools.py @@ -1086,7 +1086,7 @@ def write_link(self, **kwargs): self.__set_written(builder) return link_obj - @docval({'name': 'parent', 'type': Group, 'doc': 'the parent HDF5 object'}, # noqa: C901 + @docval({'name': 'parent', 'type': Group, 'doc': 'the parent HDF5 object'}, {'name': 'builder', 'type': DatasetBuilder, 'doc': 'the DatasetBuilder to write'}, {'name': 'link_data', 'type': bool, 'doc': 'If not specified 
otherwise link (True) or copy (False) HDF5 Datasets', 'default': True}, diff --git a/src/hdmf/build/objectmapper.py b/src/hdmf/build/objectmapper.py index 176de322c..e683e60bf 100644 --- a/src/hdmf/build/objectmapper.py +++ b/src/hdmf/build/objectmapper.py @@ -184,7 +184,7 @@ def no_convert(cls, obj_type): """ cls.__no_convert.add(obj_type) - @classmethod # noqa: C901 + @classmethod def convert_dtype(cls, spec, value, spec_dtype=None): # noqa: C901 """ Convert values to the specified dtype. For example, if a literal int @@ -276,7 +276,7 @@ def __check_convert_numeric(cls, value_type): np.issubdtype(value_dtype, np.integer)): raise ValueError("Cannot convert from %s to 'numeric' specification dtype." % value_type) - @classmethod # noqa: C901 + @classmethod def __check_edgecases(cls, spec, value, spec_dtype): # noqa: C901 """ Check edge cases in converting data to a dtype diff --git a/src/hdmf/common/resources.py b/src/hdmf/common/resources.py index 1fc731ef5..ff333caac 100644 --- a/src/hdmf/common/resources.py +++ b/src/hdmf/common/resources.py @@ -544,6 +544,53 @@ def add_ref_termset(self, **kwargs): if len(missing_terms)>0: return {"missing_terms": missing_terms} + def _validate_object(self, container, attribute, field, file): + if attribute is None: # Trivial Case + relative_path = '' + object_field = self._check_object_field(file=file, + container=container, + relative_path=relative_path, + field=field) + else: # DataType Attribute Case + attribute_object = getattr(container, attribute) # returns attribute object + if isinstance(attribute_object, AbstractContainer): + relative_path = '' + object_field = self._check_object_field(file=file, + container=attribute_object, + relative_path=relative_path, + field=field) + else: # Non-DataType Attribute Case: + obj_mapper = self.type_map.get_map(container) + spec = obj_mapper.get_attr_spec(attr_name=attribute) + parent_spec = spec.parent # return the parent spec of the attribute + if parent_spec.data_type is None: + while parent_spec.data_type is None: + parent_spec = parent_spec.parent # find the closest parent with a data_type + parent_cls = self.type_map.get_dt_container_cls(data_type=parent_spec.data_type, autogen=False) + if isinstance(container, parent_cls): + parent = container + # We need to get the path of the spec for relative_path + absolute_path = spec.path + relative_path = absolute_path[absolute_path.find('/')+1:] + object_field = self._check_object_field(file=file, + container=parent, + relative_path=relative_path, + field=field) + else: + msg = 'Container not the nearest data_type' + raise ValueError(msg) + else: + parent = container # container needs to be the parent + absolute_path = spec.path + relative_path = absolute_path[absolute_path.find('/')+1:] + # this regex removes everything prior to the container on the absolute_path + object_field = self._check_object_field(file=file, + container=parent, + relative_path=relative_path, + field=field) + return object_field + + @docval({'name': 'container', 'type': (str, AbstractContainer), 'default': None, 'doc': ('The Container/Data object that uses the key or ' 'the object_id for the Container/Data object that uses the key.')}, @@ -558,7 +605,7 @@ def add_ref_termset(self, **kwargs): {'name': 'file', 'type': HERDManager, 'doc': 'The file associated with the container.', 'default': None}, ) - def add_ref(self, **kwargs): + def add_ref(self, **kwargs): # noqa: C901 """ Add information about an external reference used in this file. 
@@ -630,52 +677,7 @@ def add_ref(self, **kwargs): msg = 'This entity already exists. Ignoring new entity uri' warn(msg, stacklevel=3) - ################# - # Validate Object - ################# - if attribute is None: # Trivial Case - relative_path = '' - object_field = self._check_object_field(file=file, - container=container, - relative_path=relative_path, - field=field) - else: # DataType Attribute Case - attribute_object = getattr(container, attribute) # returns attribute object - if isinstance(attribute_object, AbstractContainer): - relative_path = '' - object_field = self._check_object_field(file=file, - container=attribute_object, - relative_path=relative_path, - field=field) - else: # Non-DataType Attribute Case: - obj_mapper = self.type_map.get_map(container) - spec = obj_mapper.get_attr_spec(attr_name=attribute) - parent_spec = spec.parent # return the parent spec of the attribute - if parent_spec.data_type is None: - while parent_spec.data_type is None: - parent_spec = parent_spec.parent # find the closest parent with a data_type - parent_cls = self.type_map.get_dt_container_cls(data_type=parent_spec.data_type, autogen=False) - if isinstance(container, parent_cls): - parent = container - # We need to get the path of the spec for relative_path - absolute_path = spec.path - relative_path = absolute_path[absolute_path.find('/')+1:] - object_field = self._check_object_field(file=file, - container=parent, - relative_path=relative_path, - field=field) - else: - msg = 'Container not the nearest data_type' - raise ValueError(msg) - else: - parent = container # container needs to be the parent - absolute_path = spec.path - relative_path = absolute_path[absolute_path.find('/')+1:] - # this regex removes everything prior to the container on the absolute_path - object_field = self._check_object_field(file=file, - container=parent, - relative_path=relative_path, - field=field) + object_field = self._validate_object(container, attribute, field, file) ####################################### # Validate Parameters and Populate HERD @@ -1000,7 +1002,7 @@ def get_zip_directory(cls, path): @classmethod @docval({'name': 'path', 'type': str, 'doc': 'The path to the zip file.'}) - def from_zip(cls, **kwargs): + def from_zip(cls, **kwargs): # noqa: C901 """ Method to read in zipped tsv files to populate HERD. """ @@ -1075,11 +1077,12 @@ def from_zip(cls, **kwargs): msg = "Key Index out of range in EntityKeyTable. Please check for alterations." 
raise ValueError(msg) - - er = HERD(files=files, - keys=keys, - entities=entities, - entity_keys=entity_keys, - objects=objects, - object_keys=object_keys) + er = HERD( + files=files, + keys=keys, + entities=entities, + entity_keys=entity_keys, + objects=objects, + object_keys=object_keys + ) return er diff --git a/src/hdmf/common/table.py b/src/hdmf/common/table.py index e9de4d345..a574edaa5 100644 --- a/src/hdmf/common/table.py +++ b/src/hdmf/common/table.py @@ -327,7 +327,7 @@ def __gather_columns(cls, name, bases, classdict): except AttributeError: # raises error when "__columns__" is not an attr of item continue - @docval({'name': 'name', 'type': str, 'doc': 'the name of this table'}, # noqa: C901 + @docval({'name': 'name', 'type': str, 'doc': 'the name of this table'}, {'name': 'description', 'type': str, 'doc': 'a description of what is in this table'}, {'name': 'id', 'type': ('array_data', 'data', ElementIdentifiers), 'doc': 'the identifiers for this table', 'default': None}, @@ -661,26 +661,9 @@ def __len__(self): """Number of rows in the table""" return len(self.id) - @docval({'name': 'data', 'type': dict, 'doc': 'the data to put in this row', 'default': None}, - {'name': 'id', 'type': int, 'doc': 'the ID for the row', 'default': None}, - {'name': 'enforce_unique_id', 'type': bool, 'doc': 'enforce that the id in the table must be unique', - 'default': False}, - {'name': 'check_ragged', 'type': bool, 'default': True, - 'doc': ('whether or not to check for ragged arrays when adding data to the table. ' - 'Set to False to avoid checking every element if performance issues occur.')}, - allow_extra=True) - def add_row(self, **kwargs): - """ - Add a row to the table. If *id* is not provided, it will auto-increment. - """ - data, row_id, enforce_unique_id, check_ragged = popargs('data', 'id', 'enforce_unique_id', 'check_ragged', - kwargs) - data = data if data is not None else kwargs - + def _validate_new_row(self, data: dict): + """Validate a row of new data to be added.""" bad_data = [] - extra_columns = set(list(data.keys())) - set(list(self.__colids.keys())) - missing_columns = set(list(self.__colids.keys())) - set(list(data.keys())) - for colname, colnum in self.__colids.items(): if colname not in data: raise ValueError("column '%s' missing" % colname) @@ -698,7 +681,12 @@ def add_row(self, **kwargs): msg = ('"%s" is not in the term set.' % ', '.join([str(item) for item in bad_data])) raise ValueError(msg) - # check to see if any of the extra columns just need to be added + def _add_extra_predefined_columns(self, data: dict): + """Add columns that are predefined, have not been added, and are present in the new row data. + Also check to see if all extra row data keys have corresponding columns in the table. 
+ """ + extra_columns = set(list(data.keys())) - set(list(self.__colids.keys())) + if extra_columns: for col in self.__columns__: if col['name'] in extra_columns: @@ -714,14 +702,34 @@ def add_row(self, **kwargs): if k not in DynamicTable.__reserved_colspec_keys}) extra_columns.remove(col['name']) - if extra_columns or missing_columns: + if extra_columns: raise ValueError( '\n'.join([ 'row data keys don\'t match available columns', 'you supplied {} extra keys: {}'.format(len(extra_columns), extra_columns), - 'and were missing {} keys: {}'.format(len(missing_columns), missing_columns) ]) ) + + @docval({'name': 'data', 'type': dict, 'doc': 'the data to put in this row', 'default': None}, + {'name': 'id', 'type': int, 'doc': 'the ID for the row', 'default': None}, + {'name': 'enforce_unique_id', 'type': bool, 'doc': 'enforce that the id in the table must be unique', + 'default': False}, + {'name': 'check_ragged', 'type': bool, 'default': True, + 'doc': ('whether or not to check for ragged arrays when adding data to the table. ' + 'Set to False to avoid checking every element if performance issues occur.')}, + allow_extra=True) + def add_row(self, **kwargs): + """ + Add a row to the table. If *id* is not provided, it will auto-increment. + """ + data, row_id, enforce_unique_id, check_ragged = popargs('data', 'id', 'enforce_unique_id', 'check_ragged', + kwargs) + data = data if data is not None else kwargs + + self._validate_new_row(data) + self._add_extra_predefined_columns(data) + + if row_id is None: row_id = data.pop('id', None) if row_id is None: @@ -764,7 +772,7 @@ def __eq__(self, other): return False return self.to_dataframe().equals(other.to_dataframe()) - @docval({'name': 'name', 'type': str, 'doc': 'the name of this VectorData'}, # noqa: C901 + @docval({'name': 'name', 'type': str, 'doc': 'the name of this VectorData'}, {'name': 'description', 'type': str, 'doc': 'a description for this column'}, {'name': 'data', 'type': ('array_data', 'data'), 'doc': 'a dataset where the first dimension is a concatenation of multiple vectors', 'default': list()}, diff --git a/src/hdmf/spec/spec.py b/src/hdmf/spec/spec.py index bbd97b592..665be4a34 100644 --- a/src/hdmf/spec/spec.py +++ b/src/hdmf/spec/spec.py @@ -1045,28 +1045,24 @@ def is_inherited_spec(self, **kwargs): return self.is_inherited_type(spec_name) elif spec_name in self.__target_types: return self.is_inherited_target_type(spec_name) + elif super().is_inherited_spec(spec): # attribute spec + return True else: - # attribute spec - if super().is_inherited_spec(spec): - return True + parent_name = spec.parent.name + if parent_name is None: + parent_name = spec.parent.data_type + if isinstance(spec.parent, DatasetSpec): + if (parent_name in self.__datasets and self.is_inherited_dataset(parent_name) and + self.__datasets[parent_name].get_attribute(spec_name) is not None): + return True else: - parent_name = spec.parent.name - if parent_name is None: - parent_name = spec.parent.data_type - if isinstance(spec.parent, DatasetSpec): - if parent_name in self.__datasets: - if self.is_inherited_dataset(parent_name): - if self.__datasets[parent_name].get_attribute(spec_name) is not None: - return True - else: - if parent_name in self.__groups: - if self.is_inherited_group(parent_name): - if self.__groups[parent_name].get_attribute(spec_name) is not None: - return True + if (parent_name in self.__groups and self.is_inherited_group(parent_name) and + self.__groups[parent_name].get_attribute(spec_name) is not None): + return True return False 
@docval({'name': 'spec', 'type': Spec, 'doc': 'the specification to check'}) - def is_overridden_spec(self, **kwargs): # noqa: C901 + def is_overridden_spec(self, **kwargs): ''' Returns 'True' if specification overrides a specification from the parent type ''' spec = getargs('spec', kwargs) spec_name = spec.name @@ -1090,23 +1086,20 @@ def is_overridden_spec(self, **kwargs): # noqa: C901 return self.is_overridden_dataset(spec_name) elif spec_name in self.__data_types: return self.is_overridden_type(spec_name) + elif super().is_overridden_spec(spec): # attribute spec + return True else: - if super().is_overridden_spec(spec): # check if overridden attribute - return True + parent_name = spec.parent.name + if parent_name is None: + parent_name = spec.parent.data_type + if isinstance(spec.parent, DatasetSpec): + if (parent_name in self.__datasets and self.is_overridden_dataset(parent_name) and + self.__datasets[parent_name].is_overridden_spec(spec)): + return True else: - parent_name = spec.parent.name - if parent_name is None: - parent_name = spec.parent.data_type - if isinstance(spec.parent, DatasetSpec): - if parent_name in self.__datasets: - if self.is_overridden_dataset(parent_name): - if self.__datasets[parent_name].is_overridden_spec(spec): - return True - else: - if parent_name in self.__groups: - if self.is_overridden_group(parent_name): - if self.__groups[parent_name].is_overridden_spec(spec): - return True + if (parent_name in self.__groups and self.is_overridden_group(parent_name) and + self.__groups[parent_name].is_overridden_spec(spec)): + return True return False @docval({'name': 'spec', 'type': (BaseStorageSpec, str), 'doc': 'the specification to check'}) diff --git a/src/hdmf/validate/validator.py b/src/hdmf/validate/validator.py index d7ec78eaa..1a0be981d 100644 --- a/src/hdmf/validate/validator.py +++ b/src/hdmf/validate/validator.py @@ -472,9 +472,9 @@ class GroupValidator(BaseStorageValidator): def __init__(self, **kwargs): super().__init__(**kwargs) - @docval({"name": "builder", "type": GroupBuilder, "doc": "the builder to validate"}, # noqa: C901 + @docval({"name": "builder", "type": GroupBuilder, "doc": "the builder to validate"}, returns='a list of Errors', rtype=list) - def validate(self, **kwargs): # noqa: C901 + def validate(self, **kwargs): builder = getargs('builder', kwargs) errors = super().validate(builder) errors.extend(self.__validate_children(builder)) diff --git a/tox.ini b/tox.ini index 4caa68a4b..37171acb1 100644 --- a/tox.ini +++ b/tox.ini @@ -22,14 +22,12 @@ install_command = minimum, wheelinstall: python -I -m pip install {opts} {packages} upgraded: python -I -m pip install -U {opts} {packages} prerelease: python -I -m pip install -U --pre {opts} {packages} -deps = - # which requirements files to use (default: none) - minimum: -r requirements-min.txt extras = # which optional dependency set(s) to use (default: none) pytest: test gallery: doc optional: tqdm,sparse,zarr,termset + minimum: min-reqs commands = # commands to run for every environment python --version # print python version for debugging