diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md deleted file mode 100644 index 35e047d9..00000000 --- a/CONTRIBUTING.md +++ /dev/null @@ -1,121 +0,0 @@ -# Contributing - -Contributions are welcome, and they are greatly appreciated! Every little bit -helps, and credit will always be given. - -You can contribute in many ways: - -## Types of Contributions - -### Report Bugs - -Report bugs at https://github.com/bdpedigo/networkframe/issues. - -If you are reporting a bug, please include: - -- Your operating system name and version. -- Any details about your local setup that might be helpful in troubleshooting. -- Detailed steps to reproduce the bug. - -### Fix Bugs - -Look through the GitHub issues for bugs. Anything tagged with "bug" and "help -wanted" is open to whoever wants to implement it. - -### Implement Features - -Look through the GitHub issues for features. Anything tagged with "enhancement" -and "help wanted" is open to whoever wants to implement it. - -### Write Documentation - -networkframe could always use more documentation, whether as part of the -official networkframe docs, in docstrings, or even on the web in blog posts, -articles, and such. - -### Submit Feedback - -The best way to send feedback is to file an issue at https://github.com/bdpedigo/networkframe/issues. - -If you are proposing a feature: - -- Explain in detail how it would work. -- Keep the scope as narrow as possible, to make it easier to implement. -- Remember that this is a volunteer-driven project, and that contributions - are welcome. - -## Get Started - -Ready to contribute? Here's how to set up `networkframe` for local development. - -1. Fork the `networkframe` repo on GitHub. -2. Clone your fork locally - - ```console - git clone git@github.com:your_name_here/networkframe.git - ``` - -3. Ensure [poetry](https://python-poetry.org/docs/) is installed. -4. Install dependencies and start your virtualenv: - - ```console - poetry install --with dev - ``` - -5. 
Create a branch for local development: - - ```console - git checkout -b name-of-your-bugfix-or-feature - ``` - - Now you can make your changes locally. - -6. When you're done making changes, check that your changes pass the - tests, including testing other Python versions, with tox: - - ```console - poetry run tox - ``` - -7. Commit your changes and push your branch to GitHub: - - ```console - git add . - git commit -m "Your detailed description of your changes." - git push origin name-of-your-bugfix-or-feature - ``` - -8. Submit a pull request through the GitHub website. - -## Pull Request Guidelines - -Before you submit a pull request, check that it meets these guidelines: - -1. The pull request should include tests. -2. If the pull request adds functionality, the docs should be updated. Put - your new functionality into a function with a docstring, and add the - feature to the list in README.md. -3. Check https://github.com/bdpedigo/networkframe/actions - and make sure that the tests pass for all supported Python versions. - -## Tips - -```console -poetry run pytest tests/test_networkframe.py -``` - -To run a subset of tests. - -## Deploying - -A reminder for the maintainers on how to deploy. -Make sure all your changes are committed (including an entry in CHANGELOG.md). -Then run: - -```console -poetry run bump2version patch # possible: major / minor / patch -git push -git push --tags -``` - -GitHub Actions will then deploy to PyPI if tests pass. 
diff --git a/caveclient/auth.py b/caveclient/auth.py index 6d0704db..a3d0bfa0 100644 --- a/caveclient/auth.py +++ b/caveclient/auth.py @@ -1,14 +1,15 @@ +import json +import logging +import os +import urllib +import webbrowser + +import requests + from .base import ( handle_response, ) -import urllib from .endpoints import auth_endpoints_v1, default_global_server_address -import os -import webbrowser -import requests -import json -import logging -import time logger = logging.getLogger(__name__) @@ -23,7 +24,6 @@ def write_token(token, filepath, key, overwrite=True): - if os.path.exists(filepath): with open(filepath, "r") as f: secrets = json.load(f) @@ -45,26 +45,6 @@ def write_token(token, filepath, key, overwrite=True): class AuthClient(object): - """Client to find and use auth tokens to access the dynamic annotation framework services. - - Parameters - ---------- - token_file : str, optional - Path to a JSON key:value file holding your auth token. - By default, "~/.cloudvolume/secrets/cave-secret.json" - (will check deprecated token name "chunkedgraph-secret.json" as well) - token_key : str, optional - Key for the token in the token_file. - By default, "token" - - token : str or None, optional - Direct entry of the token as a string. If provided, overrides the files. - If None, attempts to use the file paths. - - server_address : str, optional, - URL to the auth server. By default, uses a default server address. - """ - def __init__( self, token_file=None, @@ -72,6 +52,25 @@ def __init__( token=None, server_address=default_global_server_address, ): + """Client to find and use auth tokens to access the dynamic annotation framework services. + + Parameters + ---------- + token_file : str, optional + Path to a JSON key:value file holding your auth token. + By default, "~/.cloudvolume/secrets/cave-secret.json" + (will check deprecated token name "chunkedgraph-secret.json" as well) + token_key : str, optional + Key for the token in the token_file. 
+ By default, "token" + + token : str or None, optional + Direct entry of the token as a string. If provided, overrides the files. + If None, attempts to use the file paths. + + server_address : str, optional, + URL to the auth server. By default, uses a default server address. + """ if token_file is None: server = urllib.parse.urlparse(server_address).netloc server_file = server + "-cave-secret.json" diff --git a/changelog.md b/changelog.md deleted file mode 100644 index cffa932d..00000000 --- a/changelog.md +++ /dev/null @@ -1,58 +0,0 @@ -# Changelog - -5.1.0 ----- -* added get_oldest_timestamp call to chunkedgraph - -5.0.1 ------ -* Fixed bug with desired_resolution being set at the client level -was being ignored in >5.0.0 - -5.0.0 ------ -* Added support for the new CAVE Materialization 3.0 API - Includes support for the new materialization API, which allows for - server side conversion of the units of position, and ensures that - all positions are returned with the same units, even after joins. -* Added support for querying databases that were materialized without merging - tables together. This will allow for faster materializations. -* Removed support for LiveLive query from the Materialization 2.0 API client. - Note.. <5.0.0 clients interacting with MaterializationEngine >4.7.0 servers will - use live live query but will doubly convert the units of position if you ask - for a desired resolution, as the old client will also do a conversion server side. -* Fixed interaction with api version querying of servers from individual - clients to work with verify=False. (useful for testing) -* Stored infromation from client about mapping between dataframe and table names - and original column names. -* Added support for suffixes and select columns to be passed by dictionary rather than list - making the selection an application of suffixes more explicit when there are collisions - between column names in joined tables. 
- -Upgrade Notes -~~~~~~~~~~~~~ -Change all select_column calls to pass dictionaries rather than lists. -Change all suffix calls to pass dictionaries rather than lists. -Advocate for your server administrator to upgrade to MaterializationEngine 4.7.0 or later, -so you can use the new MaterializationEngine 3.0 API and client. - - - -### Added -- **JSONStateService**: Neuroglancer URL can be specified for the client under the property `ngl_url`. -For a FrameworkClient with a datastack name, the value is set using the `viewer_site` field from the info client. - -### Changed - -- **JSONStateService**: In `build_neuroglancer_url`, if `ngl_url` is None the url will be pulled from the default client value. -If there is the default value is None, only the URL to the JSON file will be returned. - -## [2.0.1] - 2020-10-20 - -### Fixed -- **AuthClient** : Token creation and setting is more robust. Directories are created if not previously present. - -## [2.0.0] - -### Added -- First release of the unified FrameworkClient and system-wide authentication. \ No newline at end of file diff --git a/docs/changelog.md b/docs/changelog.md index 67259dad..5a68d24a 100644 --- a/docs/changelog.md +++ b/docs/changelog.md @@ -1,3 +1,41 @@ -{% - include-markdown "../CHANGELOG.md" -%} +--- +title: Changelog +--- + +## 5.1.0 + +- Added get_oldest_timestamp call to chunkedgraph + +## 5.0.1 + +- Fixed bug with desired_resolution being set at the client level + was being ignored in >5.0.0 + +## 5.0.0 + +- Added support for the new CAVE Materialization 3.0 API + Includes support for the new materialization API, which allows for + server side conversion of the units of position, and ensures that + all positions are returned with the same units, even after joins. +- Added support for querying databases that were materialized without merging + tables together. This will allow for faster materializations. +- Removed support for LiveLive query from the Materialization 2.0 API client. + Note.. 
<5.0.0 clients interacting with MaterializationEngine >4.7.0 servers will + use live live query but will doubly convert the units of position if you ask + for a desired resolution, as the old client will also do a conversion server side. +- Fixed interaction with api version querying of servers from individual + clients to work with verify=False. (useful for testing) +- Stored information from client about mapping between dataframe and table names + and original column names. +- Added support for suffixes and select columns to be passed by dictionary rather than list + making the selection and application of suffixes more explicit when there are collisions + between column names in joined tables. + +--- + +## Older Upgrade Notes + +Change all select_column calls to pass dictionaries rather than lists. +Change all suffix calls to pass dictionaries rather than lists. +Advocate for your server administrator to upgrade to MaterializationEngine 4.7.0 or later, +so you can use the new MaterializationEngine 3.0 API and client. diff --git a/docs/client_api/annotation.md b/docs/client_api/annotation.md index 432cbfa5..0744d38f 100644 --- a/docs/client_api/annotation.md +++ b/docs/client_api/annotation.md @@ -2,17 +2,13 @@ title: client.annotation --- -Note: the functionality described here will be accurate if the datastack you are using -is using the most up-to-date version of the annotation engine. If something seems -wrong with the documentation here, try checking the version of the annotation -engine returned by your client: - -```python -type(client.annotation) -``` - -Extended documentation for all versions of the annotation client can be found -[here](../extended_api/annotation.md). +!!! note + The functionality described here will be accurate if the datastack you are using is using the most up-to-date version of the annotation service. 
If something seems wrong with the documentation here, try checking the version of the annotation engine returned by your client: + ```python + type(client.annotation) + ``` + Extended documentation for all versions of the annotation client can be found + [here](../extended_api/annotation.md). ::: caveclient.annotationengine.AnnotationClientV2 options: diff --git a/docs/client_api/auth.md b/docs/client_api/auth.md new file mode 100644 index 00000000..9a0c80c6 --- /dev/null +++ b/docs/client_api/auth.md @@ -0,0 +1,10 @@ +--- +title: client.auth +--- + +::: caveclient.auth.AuthClient + options: + heading_level: 2 + show_bases: false + filters: ["!__init__"] + merge_init_into_class: false diff --git a/docs/contributing.md b/docs/contributing.md index f9a79cb8..fb82c161 100644 --- a/docs/contributing.md +++ b/docs/contributing.md @@ -1,3 +1,112 @@ -{% - include-markdown "../CONTRIBUTING.md" -%} +--- +title: Contributing +--- + +Contributions are welcome, and they are greatly appreciated! Every little bit +helps, and credit will always be given. + +You can contribute in many ways: + +## Types of Contributions + +### Report Bugs + +Report bugs to our [issues page](https://github.com/{{ config.repo_name }}/issues). + +If you are reporting a bug, please include: + +- Your operating system name and version. +- Any details about your local setup that might be helpful in troubleshooting. +- Detailed steps to reproduce the bug, in the form of a [minimal reproducible example](https://stackoverflow.com/help/minimal-reproducible-example). + +### Fix Bugs + +Look through the GitHub issues for bugs. Anything tagged with "bug" and "help +wanted" is open to whoever wants to implement it. + +### Implement Features + +Look through the GitHub issues for features. Anything tagged with "enhancement" +and "help wanted" is open to whoever wants to implement it. 
+ +### Write Documentation + +`{{ names.package }}` could always use more documentation, whether as part of the +official `{{ names.package }}` docs, in docstrings, or even on the web in blog posts, +articles, and such. + +### Submit Feedback + +The best way to send feedback is to [create an issue](https://github.com/{{ config.repo_name }}/issues/new) on GitHub. + +If you are proposing a feature: + +- Explain in detail how it would work. +- Keep the scope as narrow as possible, to make it easier to implement. +- Remember that while contributions are welcome, developer/maintainer time is limited. + +## Get Started + +Ready to contribute? Here's how to set up `{{ names.package }}` for local development. + +- [Fork](https://github.com/{{ config.repo_name }}/fork) the repo on GitHub. +- Clone your fork locally + +```console +git clone git@github.com:your_name_here/{{ names.repo_title }}.git +``` + +- Ensure [pip](https://pip.pypa.io/en/stable/installation/) is installed. +- Create a virtual environment (e.g. with `python -m venv .venv`): +- Start your virtualenv: + + ```console + source .venv/bin/activate + ``` + +- Create a branch for local development: + + ```console + git checkout -b name-of-your-bugfix-or-feature + ``` + +- Make your changes locally +- When you're done making changes, check that your changes pass the + tests by running [pytest](https://docs.pytest.org/en/): + + ```console + pytest tests + ``` + + Note that once you submit your pull request, GitHub Actions will run the tests also, + including on multiple operating systems and Python versions. Your pull request will + have to pass on all of these before it can be merged. + +- Commit your changes and push your branch to GitHub: + + ```console + git add . + git commit -m "Your detailed description of your changes." + git push origin name-of-your-bugfix-or-feature + ``` + +- [Submit a pull request](https://github.com/{{ config.repo_name }}/compare) through the GitHub website. 
+ +## Pull Request Guidelines + +Before you submit a pull request, check that it meets these guidelines: + +- The pull request should include tests if adding a new feature. +- The docs should be updated with whatever changes you have made. Put + your new functionality into a function with a docstring, and make sure the new + functionality is documented after building the documentation. + +## Documentation style + +We use [mkdocs](https://www.mkdocs.org/) to build the documentation. In particular, we +use the [mkdocs-material](https://squidfunk.github.io/mkdocs-material/) theme, and a +variety of other extensions. + +!!! note + More information codifying our documentation style and principles coming soon. For + now, just try to follow the style of the existing documentation. diff --git a/docs/glossary.md b/docs/glossary.md index 00fc88fc..e7c80eac 100644 --- a/docs/glossary.md +++ b/docs/glossary.md @@ -1,4 +1,11 @@ -# Glossary +--- +title: Glossary +--- + +!!! warning + This glossary is a work in progress; for now we are documenting the commonly used + terms that we need to define. Please feel free to contribute definitions or + additional terms. - Datastack - Voxel resolution diff --git a/docs/images/logo-inverted.png b/docs/images/logo-inverted.png new file mode 100644 index 00000000..741e3d65 Binary files /dev/null and b/docs/images/logo-inverted.png differ diff --git a/docs/tutorials/annotation.md b/docs/tutorials/annotation.md index e64af75e..5af8ceec 100644 --- a/docs/tutorials/annotation.md +++ b/docs/tutorials/annotation.md @@ -10,35 +10,32 @@ do not include any root ids. An annotation client is accessed with ## Getting existing tables A list of the existing tables for the datastack can be found with - - - -``` python +```python all_tables = client.annotation.get_tables() all_tables[0] ``` Each table has three main properties that can be useful to know: -- `table_name` : The table name, used to refer to it when uploading or - downloading annotations. 
This is also passed through to the table in - the Materialized database. -- `schema_name` : The name of the table's schema from - EMAnnotationSchemas (see below). -- `max_annotation_id` : An upper limit on the number of annotations - already contained in the table. +- `table_name` : The table name, used to refer to it when uploading or + downloading annotations. This is also passed through to the table in + the Materialized database. +- `schema_name` : The name of the table's schema from + EMAnnotationSchemas (see below). +- `max_annotation_id` : An upper limit on the number of annotations + already contained in the table. ## Downloading annotations You can download the JSON representation of a data point through the -[get_annotation()][caveclient.annotationengine.{{ latest_clients.annotation }}.get_annotation] +[get_annotation()]({{ client_api_paths.annotation }}.get_annotation) method. This can be useful if you need to look up information on unmaterialized data, or to see what a properly templated annotation looks like. -``` python +```python table_name = all_tables[0]['table_name'] # 'ais_analysis_soma' annotation_id = 100 client.annotation.get_annotation(annotation_ids=annotation_id, table_name=table_name) @@ -47,10 +44,10 @@ client.annotation.get_annotation(annotation_ids=annotation_id, table_name=table_ ## Create a new table One can create a new table with a specified schema with the -[create_table()][caveclient.annotationengine.{{ latest_clients.annotation }}.create_table] - method: +[create_table()]({{ client_api_paths.annotation }}.create_table) +method: -``` python +```python client.annotation.create_table(table_name='test_table', schema_name='microns_func_coreg', voxel_resolution = [1,1,1], @@ -64,35 +61,35 @@ neuroglancer session, you want this to match the units of that neuroglancer view. 
Note there are some optional metadata parameters to -[create_table()][caveclient.annotationengine.{{ latest_clients.annotation }}.create_table] - -- `notice_text` : This is text that will show up to users who access - this data as a warning. This could be used to warn users that the - data is not complete or checked yet, or to advertise that a - particular publication should be cited when using this table. -- `read_permission` : one of \"PRIVATE\" which means only you can read - data in this table. \"PUBLIC\" (default) which means anyone can read - this table that has read permissions to this dataset. So if and only - if you can read the segmentation results of this data, you can read - this table. \"GROUP\" which means that you must share a common group - with this user for them to be able to read. We need to make a way to - discover what groups you are in and who you share groups with. -- `write_permission`: one of \"PRIVATE\" (default), which means only - you can write to this table. \"PUBLIC\" which means anyone can write - to this table that has write permissions to this dataset. Note - although this means anyone can add data, no annotations are ever - truly overwritten. \"GROUP\" which means that you must share a - common group with this user for them to be able to write. We need to - make a way to discover what groups you are in and who you share - groups with. +[create_table()]({{ client_api_paths.annotation }}.create_table) + +- `notice_text` : This is text that will show up to users who access + this data as a warning. This could be used to warn users that the + data is not complete or checked yet, or to advertise that a + particular publication should be cited when using this table. +- `read_permission` : one of \"PRIVATE\" which means only you can read + data in this table. \"PUBLIC\" (default) which means anyone can read + this table that has read permissions to this dataset. 
So if and only + if you can read the segmentation results of this data, you can read + this table. \"GROUP\" which means that you must share a common group + with this user for them to be able to read. We need to make a way to + discover what groups you are in and who you share groups with. +- `write_permission`: one of \"PRIVATE\" (default), which means only + you can write to this table. \"PUBLIC\" which means anyone can write + to this table that has write permissions to this dataset. Note + although this means anyone can add data, no annotations are ever + truly overwritten. \"GROUP\" which means that you must share a + common group with this user for them to be able to write. We need to + make a way to discover what groups you are in and who you share + groups with. If you change your mind about what you want for metadata, some but not all fields can be updated with -`~caveclient.annotationengine.AnnotationClientV2.update_metadata`{.interpreted-text -role="func"}. This includes the description, the notice_text, and the -permissions, but not the name, schema or voxel resolution. +[update_metadata()]({{ client_api_paths.annotation }}.update_metadata). This includes the +description, the notice_text, and the permissions, but not the name, schema or voxel +resolution. -``` python +```python # to update description client.annotation.update_metadata(table_name='test_table', description="a new description for my table") @@ -116,7 +113,7 @@ The following could would create a new annotation and then upload it to the service. Note that you get back the annotation id(s) of what you uploaded. 
-``` python +```python new_data = {'type': 'microns_func_coreg', 'pt': {'position': [1,2,3]}, 'func_id': 0} @@ -127,13 +124,13 @@ There are methods to simplify annotation uploads if you have a pandas dataframe whose structure mirrors the struction of the annotation schema you want to upload -``` python +```python import pandas as pd df = pd.DataFrame([{'id':0, 'type': 'microns_func_coreg', 'pt_position': [1,2,3]}, - 'func_id': 0}, + 'func_id': 0}, {'id':1, 'type': 'microns_func_coreg', 'pt_position': [3,2,1]}, @@ -147,12 +144,11 @@ IDs. If you leave them blank then the service will assign the IDs for you. There is a similar method for updating -`~caveclient.annotationengine.AnnotationClientV2.update_annotation_df`{.interpreted-text -role="func"} +[update_annotation_df()]({{ client_api_paths.annotation }}.update_annotation_df) ## Staged Annotations -Staged anotations help ensure that the annotations you post follow the +Staged annotations help ensure that the annotations you post follow the appropriate schema, both by providing guides to the field names and locally validating against a schema before uploading. The most common use case for staged annotations is to create a StagedAnnotation object @@ -163,11 +159,9 @@ To get a StagedAnnotation object, you can start with either a table name or a schema name. Here, we\'ll assume that there\'s already a table called \"my_table\" that is running a \"cell_type_local\" schema. If we want to add new annotations to the table, we simply use the table name -with -`~caveclient.annotationengine.AnnotationClientV2.stage_annotations`{.interpreted-text -role="func"}. +with [stage_annotations()]({{ client_api_paths.annotation }}.stage_annotations). -``` python +```python stage = client.annotation.stage_annotations("my_table") ``` @@ -176,7 +170,7 @@ collection of annotations. Every time you add an annotation, it is immediately validated against the schema. 
To add an annotation, use the `add` method: -``` python +```python stage.add( cell_type = "pyramidal_cell", classification_system="excitatory", @@ -198,7 +192,7 @@ You can see the annotations as a list of dictionary records with a table name, this information is stored in the `stage` and you can simply upload it from the client. -``` python +```python client.annotation.upload_staged_annotations(stage) ``` @@ -207,7 +201,7 @@ annotation you are updating, which is not required in the schema otherwise. In order to stage updated annotations, set the `update` parameter to `True` when creating the stage. -``` python +```python update_stage = client.annotation.stage_annotations("my_table", update=True) update_stage.add( id=1, @@ -231,7 +225,7 @@ different than the resolution for the table, you can also set the between the resolution you specify for your own annotations and the resolution that the table expects. -``` python +```python stage = client.annotation.stage_annotations("my_table", annotation_resolution=[8,8,40]) stage.add( cell_type='pyramidal_cell', diff --git a/docs/tutorials/authentication.md b/docs/tutorials/authentication.md index 40fa6178..eec90b68 100644 --- a/docs/tutorials/authentication.md +++ b/docs/tutorials/authentication.md @@ -18,7 +18,7 @@ print(f"My current token is: {auth.token}") ## Getting a new token To get a new token, you will need to manually acquire it. For -convenience, the function [caveclient.auth.get_new_token()][] provides instructions for +convenience, the function [client.auth.get_new_token()]({{ client_api_paths.auth }}.get_new_token) provides instructions for how to get and save the token. 
By default, the token is saved to diff --git a/mkdocs.yml b/mkdocs.yml index eca63bc3..e346a427 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -2,7 +2,7 @@ site_name: CAVEclient site_url: https://bdpedigo.github.io/CAVEclient/ repo_url: https://github.com/bdpedigo/CAVEclient/ repo_name: bdpedigo/CAVEclient -edit_uri: edit/main/docs/ +edit_uri: edit/master/docs/ nav: - Home: index.md - Installation: installation.md @@ -17,10 +17,10 @@ nav: - tutorials/schemas.md - tutorials/state.md - tutorials/materialization.md - - Glossary: glossary.md - Client API: - client_api/index.md - client_api/annotation.md + - client_api/auth.md - client_api/chunkedgraph.md - client_api/l2cache.md - client_api/materialize.md @@ -39,20 +39,22 @@ nav: - extended_api/l2cache.md - extended_api/materialization.md - extended_api/session_config.md + - Glossary: glossary.md - Contributing: contributing.md - Changelog: changelog.md theme: name: material language: en - #logo: assets/logo.png + logo: images/logo-inverted.png + favicon: images/logo-inverted.png palette: scheme: preference - primary: indigo + primary: black accent: indigo icon: repo: fontawesome/brands/github features: - # - navigation.indexes + - navigation.indexes - navigation.instant # - navigation.path - navigation.prune @@ -149,12 +151,16 @@ extra: version: provider: mike names: - title: CAVEclient + repo_title: CAVEclient package: caveclient main_branch: master - clients_api_paths: + client_api_paths: annotation: ../client_api/annotation.md#caveclient.annotationengine.AnnotationClientV2 + auth: ../client_api/auth.md#caveclient.auth.AuthClient + chunkedgraph: ../client_api/chunkedgraph.md#caveclient.chunkedgraph.ChunkedGraphClientV1 materialize: ../client_api/materialize.md#caveclient.materializationengine.MaterializatonClientV3 + l2cache: ../client_api/l2cache.md#caveclient.l2cache.L2CacheClientLegacy + state: ../client_api/state.md#caveclient.state.JSONServiceV1 + # to enable disqus, uncomment the following and put your disqus id below # disqus: disqus_id