From b41fddc187f86b2a90b27ac6b88ca16cd738a6fb Mon Sep 17 00:00:00 2001 From: Chris Brozdowski Date: Thu, 1 Jun 2023 14:50:45 -0500 Subject: [PATCH] Migrate to `mkdocs` (#527) --- .github/workflows/publish.yml | 50 +++++ .gitignore | 14 +- .markdownlint.yaml | 13 ++ CHANGELOG.md | 56 +++-- LICENSE | 17 +- README.md | 35 ++- config/add_dj_module.py | 4 +- config/add_dj_user.py | 4 +- config/dj_config.py | 19 +- docs/build-docs.sh | 40 ++++ docs/config_yaml.md | 58 ----- docs/developer_notes.md | 106 --------- docs/mkdocs.yml | 122 ++++++++++ docs/overrides/nav.html | 9 + docs/src/api/make_pages.py | 17 ++ docs/src/contribute.md | 211 ++++++++++++++++++ docs/src/images/FrankLab.png | Bin 0 -> 10259 bytes docs/src/images/Spyglass.png | Bin 0 -> 75394 bytes docs/src/images/Spyglass.svg | 22 ++ docs/src/index.md | 24 ++ docs/src/installation/index.md | 15 ++ docs/src/installation/local.md | 122 ++++++++++ docs/src/installation/production.md | 148 ++++++++++++ docs/{ => src/misc}/figurl_views.md | 6 +- docs/src/misc/insert_data.md | 94 ++++++++ docs/{ => src/misc}/session_groups.md | 5 +- docs/src/stylesheets/extra.css | 127 +++++++++++ examples/cli_examples/create_session_group.py | 4 +- examples/cli_examples/readme.md | 30 +-- franklab_scripts/alter_tables.py | 4 +- franklab_scripts/nightly_cleanup.py | 6 +- notebooks/00_intro.ipynb | 57 ++--- notebooks/01_spikesorting.ipynb | 77 ++++--- notebooks/02_curation.ipynb | 10 +- notebooks/03_lfp.ipynb | 103 +++++---- notebooks/04_Trodes_position.ipynb | 56 +++-- notebooks/05_DLC_from_scratch.ipynb | 138 +++++++----- notebooks/06_DLC_from_dir.ipynb | 106 ++++----- notebooks/07_linearization.ipynb | 67 +++--- notebooks/08_Extract_Mark_indicators.ipynb | 16 +- ...ecoding_with_GPUs_on_the_GPU_cluster.ipynb | 18 +- notebooks/10_1D_Clusterless_Decoding.ipynb | 44 ++-- notebooks/11_2D_Clusterless_Decoding.ipynb | 32 +-- notebooks/12_Ripple_Detection.ipynb | 59 +++-- notebooks/13_Theta_phase_and_power.ipynb | 12 +- notebooks/4_position_info.ipynb | 59 ++--- pyproject.toml | 14 +- requirements-dev.txt | 6 +- requirements-docs.txt | 9 + setup.cfg | 2 +- setup.py | 5 +- src/spyglass/cli/cli.py | 32 ++- src/spyglass/common/__init__.py | 7 +- src/spyglass/common/common_behav.py | 18 +- src/spyglass/common/common_device.py | 36 ++- src/spyglass/common/common_dio.py | 15 +- src/spyglass/common/common_ephys.py | 124 ++++++---- src/spyglass/common/common_filter.py | 25 ++- src/spyglass/common/common_interval.py | 50 ++++- src/spyglass/common/common_lab.py | 8 +- src/spyglass/common/common_nwbfile.py | 71 ++++-- src/spyglass/common/common_position.py | 131 ++++++++--- src/spyglass/common/common_region.py | 4 +- src/spyglass/common/common_ripple.py | 37 ++- src/spyglass/common/common_sensors.py | 10 +- src/spyglass/common/common_session.py | 18 +- src/spyglass/common/populate_all_common.py | 7 +- .../common/prepopulate/prepopulate.py | 8 +- src/spyglass/data_import/storage_dirs.py | 6 +- src/spyglass/decoding/clusterless.py | 109 ++++++--- src/spyglass/decoding/core.py | 33 ++- .../decoding/dj_decoder_conversion.py | 32 ++- src/spyglass/decoding/sorted_spikes.py | 74 ++++-- src/spyglass/decoding/visualization.py | 70 ++++-- .../decoding/visualization_1D_view.py | 4 +- .../decoding/visualization_2D_view.py | 30 ++- .../figurl_views/SpikeSortingRecordingView.py | 4 +- src/spyglass/figurl_views/SpikeSortingView.py | 8 +- .../prepare_spikesortingview_data.py | 51 +++-- src/spyglass/lfp/__init__.py | 0 src/spyglass/lfp/v1/lfp.py | 127 ++++++++--- 
src/spyglass/position/position_merge.py | 89 +++++--- src/spyglass/position/v1/__init__.py | 16 +- src/spyglass/position/v1/dlc_reader.py | 22 +- src/spyglass/position/v1/dlc_utils.py | 85 +++++-- .../position/v1/position_dlc_centroid.py | 168 ++++++++++---- .../position/v1/position_dlc_cohort.py | 17 +- .../position/v1/position_dlc_model.py | 16 +- .../position/v1/position_dlc_orient.py | 45 +++- .../v1/position_dlc_pose_estimation.py | 23 +- .../position/v1/position_dlc_position.py | 63 ++++-- .../position/v1/position_dlc_project.py | 76 +++++-- .../position/v1/position_dlc_selection.py | 79 +++++-- .../position/v1/position_dlc_training.py | 32 ++- .../position/v1/position_trodes_position.py | 69 ++++-- src/spyglass/sharing/sharing_kachery.py | 16 +- src/spyglass/spikesorting/curation_figurl.py | 16 +- .../spikesorting/merged_sorting_extractor.py | 4 +- src/spyglass/spikesorting/sortingview.py | 14 +- .../spikesorting/sortingview_helper_fn.py | 12 +- .../spikesorting/spikesorting_artifact.py | 60 +++-- .../spikesorting/spikesorting_curation.py | 99 +++++--- .../spikesorting/spikesorting_recording.py | 59 +++-- .../spikesorting/spikesorting_sorting.py | 35 ++- src/spyglass/utils/dj_helper_fn.py | 28 +-- src/spyglass/utils/nwb_helper_fn.py | 24 +- tests/ci_config.py | 6 +- tests/conftest.py | 11 +- tests/data_import/test_insert_sessions.py | 18 +- tests/test_insert_beans.py | 15 +- tests/test_nwb_helper_fn.py | 4 +- tests/trim_beans.py | 14 +- 112 files changed, 3411 insertions(+), 1245 deletions(-) create mode 100644 .github/workflows/publish.yml create mode 100644 .markdownlint.yaml create mode 100644 docs/build-docs.sh delete mode 100644 docs/config_yaml.md delete mode 100644 docs/developer_notes.md create mode 100644 docs/mkdocs.yml create mode 100644 docs/overrides/nav.html create mode 100644 docs/src/api/make_pages.py create mode 100644 docs/src/contribute.md create mode 100644 docs/src/images/FrankLab.png create mode 100644 docs/src/images/Spyglass.png create mode 100644 docs/src/images/Spyglass.svg create mode 100644 docs/src/index.md create mode 100644 docs/src/installation/index.md create mode 100644 docs/src/installation/local.md create mode 100644 docs/src/installation/production.md rename docs/{ => src/misc}/figurl_views.md (89%) create mode 100644 docs/src/misc/insert_data.md rename docs/{ => src/misc}/session_groups.md (94%) create mode 100644 docs/src/stylesheets/extra.css create mode 100644 requirements-docs.txt create mode 100644 src/spyglass/lfp/__init__.py diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml new file mode 100644 index 000000000..ec68508de --- /dev/null +++ b/.github/workflows/publish.yml @@ -0,0 +1,50 @@ +name: Publish docs +on: + pull_request: + branches: + - main + types: + - closed + push: + branches: + - test_branch + +permissions: + contents: write + pull-requests: write + issues: write + repository-projects: write + +jobs: + deploy: + runs-on: ubuntu-latest + env: + REPO_OWNER: ${{ github.repository_owner }} + permissions: + contents: write + issues: write + steps: + - name: Checkout repository + uses: actions/checkout@v3 + with: + token: ${{ secrets.GITHUB_TOKEN }} + fetch-depth: 0 + + - name: Set up Python runtime + uses: actions/setup-python@v4 + with: + python-version: 3.9 + token: ${{ secrets.GITHUB_TOKEN }} + + - name: Setup - pip & config + run: | + pip install -r requirements-docs.txt + git config user.name 'github-actions[bot]' && git config user.email 'github-actions[bot]@users.noreply.github.com' + + - name: Deploy 
+ run: | + echo "OWNER: ${REPO_OWNER}" + bash ./docs/build-docs.sh push $REPO_OWNER + env: + USERNAME: github-actions[bot] + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.gitignore b/.gitignore index 60b278942..0bb39ce83 100644 --- a/.gitignore +++ b/.gitignore @@ -92,9 +92,6 @@ instance/ # Scrapy stuff: .scrapy -# Sphinx documentation -docs/_build/ - # PyBuilder target/ @@ -137,9 +134,6 @@ venv.bak/ # Rope project settings .ropeproject -# mkdocs documentation -/site - # mypy .mypy_cache/ .dmypy.json @@ -179,3 +173,11 @@ dj_local_conf.json !/.vscode/settings.json !/notebooks/dlc_scratch.png !/notebooks/dlc_existing.png + +# Documentation +docs/site/ +docs/src/CHANGELOG.md +docs/src/LICENSE.md +docs/src/notebooks/ +!docs/src/images/*png +temp* \ No newline at end of file diff --git a/.markdownlint.yaml b/.markdownlint.yaml new file mode 100644 index 000000000..5c9c3712b --- /dev/null +++ b/.markdownlint.yaml @@ -0,0 +1,13 @@ +# https://github.com/DavidAnson/markdownlint +# https://github.com/DavidAnson/markdownlint/blob/main/doc/Rules.md +MD007: false # permit indenting 4 spaces instead of 2 +MD013: + line_length: "80" # Line length limits + tables: false # disable for tables + code_blocks: false # disable for code blocks +MD025: false # permit adjacent headings +MD026: false # permit trailing punctuation in headings +MD033: # HTML elements allowed + allowed_elements: + - "img" +MD034: false # Bare URLs OK diff --git a/CHANGELOG.md b/CHANGELOG.md index 52738d553..4b563ef54 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,38 +1,60 @@ -# 0.4.0 (May 22, 2023) -- Updated call to `spikeinterface.preprocessing.whiten` to use dtype np.float16. #446, +# Change Log + +## 0.4.1 (Unreleased) + +- Add mkdocs automated deployment + +## 0.4.0 (May 22, 2023) + +- Updated call to `spikeinterface.preprocessing.whiten` to use dtype np.float16. + #446, - Updated default spike sorting metric parameters. #447 -- Updated whitening to be compatible with recent changes in spikeinterface when using mountainsort. #449 -- Moved LFP pipeline to `src/spyglass/lfp/v1` and addressed related usability issues. #468, #478, #482, #484, #504 +- Updated whitening to be compatible with recent changes in spikeinterface when + using mountainsort. #449 +- Moved LFP pipeline to `src/spyglass/lfp/v1` and addressed related usability + issues. #468, #478, #482, #484, #504 - Removed whiten parameter for clusterless thresholder. #454 - Added plot to plot all DIO events in a session. #457 - Added file sharing functionality through kachery_cloud. #458, #460 - Pinned numpy version to `numpy<1.24` - Added scripts to add guests and collaborators as users. #463 - Cleaned up installation instructions in repo README. #467 -- Added checks in decoding visualization to ensure time dimensions are the correct length. +- Added checks in decoding visualization to ensure time dimensions are the + correct length. - Fixed artifact removed valid times. #472 - Added codespell workflow for spell checking and fixed typos. #471 - Updated LFP code to save LFP as `pynwb.ecephys.LFP` type. #475 - Added artifact detection to LFP pipeline. #473 -- Replaced calls to `spikeinterface.sorters.get_default_params` with `spikeinterface.sorters.get_default_sorter_params`. #486 -- Updated position pipeline and added functionality to handle pose estimation through DeepLabCut. #367, #505 +- Replaced calls to `spikeinterface.sorters.get_default_params` with + `spikeinterface.sorters.get_default_sorter_params`. 
#486 +- Updated position pipeline and added functionality to handle pose estimation + through DeepLabCut. #367, #505 - Updated `environment_position.yml`. #502 - Renamed `FirFilter` class to `FirFilterParameters`. #512 -# 0.3.4 (March 30, 2023) -- Fixed error in spike sorting pipeline referencing the "probe_type" column which is no longer accessible from the `Electrode` table. #437 -- Fixed error when inserting an NWB file that does not have a probe manufacturer. #433, #436 -- Fixed error when adding a new `DataAcquisitionDevice` and a new `ProbeType`. #436 -- Fixed inconsistency between capitalized/uncapitalized versions of "Intan" for DataAcquisitionAmplifier and DataAcquisitionDevice.adc_circuit. #430, #438 +## 0.3.4 (March 30, 2023) + +- Fixed error in spike sorting pipeline referencing the "probe_type" column + which is no longer accessible from the `Electrode` table. #437 +- Fixed error when inserting an NWB file that does not have a probe + manufacturer. #433, #436 +- Fixed error when adding a new `DataAcquisitionDevice` and a new `ProbeType`. + #436 +- Fixed inconsistency between capitalized/uncapitalized versions of "Intan" for + DataAcquisitionAmplifier and DataAcquisitionDevice.adc_circuit. #430, #438 + +## 0.3.3 (March 29, 2023) -# 0.3.3 (March 29, 2023) - Fixed errors from referencing the changed primary key for `Probe`. #429 -# 0.3.2 (March 28, 2023) +## 0.3.2 (March 28, 2023) + - Fixed import of `common_nwbfile`. #424 -# 0.3.1 (March 24, 2023) +## 0.3.1 (March 24, 2023) + - Fixed import error due to `sortingview.Workspace`. #421 -# 0.3.0 (March 24, 2023) -To be added. \ No newline at end of file +## 0.3.0 (March 24, 2023) + +To be added. diff --git a/LICENSE b/LICENSE index 3f72a56e4..5558e5c95 100644 --- a/LICENSE +++ b/LICENSE @@ -1,7 +1,18 @@ Copyright (c) 2020-present Loren Frank -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/README.md b/README.md index 8d220834b..6ccb6f75b 100644 --- a/README.md +++ b/README.md @@ -1,27 +1,44 @@ -[![Import test](https://github.com/LorenFrankLab/spyglass/actions/workflows/workflow.yml/badge.svg)](https://github.com/LorenFrankLab/spyglass/actions/workflows/workflow.yml) - # spyglass -`spyglass` is a data analysis framework that facilitates the storage, analysis, visualization, and sharing of neuroscience data to support reproducible research. It is designed to be interoperable with the NWB format and integrates open-source tools into a coherent framework. +[![Import test](https://github.com/LorenFrankLab/spyglass/actions/workflows/workflow.yml/badge.svg)](https://github.com/LorenFrankLab/spyglass/actions/workflows/workflow.yml) + +`spyglass` is a data analysis framework that facilitates the storage, analysis, +visualization, and sharing of neuroscience data to support reproducible +research. It is designed to be interoperable with the NWB format and integrates +open-source tools into a coherent framework. -Documentation can be found at - [https://lorenfranklab.github.io/spyglass/](https://lorenfranklab.github.io/spyglass/) +Documentation can be found at - +[https://lorenfranklab.github.io/spyglass/](https://lorenfranklab.github.io/spyglass/) ## Installation -For installation instructions see - [https://lorenfranklab.github.io/spyglass/type/html/installation.html](https://lorenfranklab.github.io/spyglass/type/html/installation.html) +For installation instructions see - +[https://lorenfranklab.github.io/spyglass/type/html/installation.html](https://lorenfranklab.github.io/spyglass/type/html/installation.html) ## Tutorials -The tutorials for `spyglass` is currently in the form of Jupyter Notebooks and can be found in the [notebooks](https://github.com/LorenFrankLab/spyglass/tree/master/notebooks) directory. We strongly recommend opening them in the context of `jupyterlab`. +The tutorials for `spyglass` are currently in the form of Jupyter Notebooks and +can be found in the +[notebooks](https://github.com/LorenFrankLab/spyglass/tree/master/notebooks) +directory. We strongly recommend opening them in the context of `jupyterlab`.
## Contributing -See the [Developer's Note](https://lorenfranklab.github.io/spyglass/type/html/developer_notes.html) for contributing instructions found at - [https://lorenfranklab.github.io/spyglass/type/html/how_to_contribute.html](https://lorenfranklab.github.io/spyglass/type/html/how_to_contribute.html) +See the [Developer's Note](https://lorenfranklab.github.io/spyglass/type/html/developer_notes.html) +for contributing instructions found at - +[https://lorenfranklab.github.io/spyglass/type/html/how_to_contribute.html](https://lorenfranklab.github.io/spyglass/type/html/how_to_contribute.html) ## License/Copyright -License and Copyright notice can be found at [https://lorenfranklab.github.io/spyglass/type/html/copyright.html](https://lorenfranklab.github.io/spyglass/type/html/copyright.html) +License and Copyright notice can be found at +[https://lorenfranklab.github.io/spyglass/type/html/copyright.html](https://lorenfranklab.github.io/spyglass/type/html/copyright.html) ## Citation -Kyu Hyun Lee, Eric Denovellis, Ryan Ly, Jeremy Magland, Jeff Soules, Alison Comrie, Jennifer Guidera, Rhino Nevers, Daniel Gramling, Philip Adenekan, Ji Hyun Bak, Emily Monroe, Andrew Tritt, Oliver Rübel, Thinh Nguyen, Dimitri Yatsenko, Joshua Chu, Caleb Kemere, Samuel Garcia, Alessio Buccino, Emily Aery Jones, Lisa Giocomo, and Loren Frank. Spyglass: A Data Analysis Framework for Reproducible and Shareable Neuroscience Research. Neuroscience Meeting Planner. San Diego, CA: Society for Neuroscience, 2022. +Kyu Hyun Lee, Eric Denovellis, Ryan Ly, Jeremy Magland, Jeff Soules, Alison +Comrie, Jennifer Guidera, Rhino Nevers, Daniel Gramling, Philip Adenekan, Ji +Hyun Bak, Emily Monroe, Andrew Tritt, Oliver Rübel, Thinh Nguyen, Dimitri +Yatsenko, Joshua Chu, Caleb Kemere, Samuel Garcia, Alessio Buccino, Emily Aery +Jones, Lisa Giocomo, and Loren Frank. Spyglass: A Data Analysis Framework for +Reproducible and Shareable Neuroscience Research. Neuroscience Meeting Planner. +San Diego, CA: Society for Neuroscience, 2022. diff --git a/config/add_dj_module.py b/config/add_dj_module.py index 502b52b7c..d5e24e793 100644 --- a/config/add_dj_module.py +++ b/config/add_dj_module.py @@ -21,7 +21,9 @@ def add_module(module_name): # get a list of usernames for user in group.gr_mem: - file.write(f"GRANT ALL PRIVILEGES ON `{module_name}\_%`.* TO `{user}`@'%';\n") + file.write( + f"GRANT ALL PRIVILEGES ON `{module_name}\_%`.* TO `{user}`@'%';\n" + ) file.flush() # run those commands in sql diff --git a/config/add_dj_user.py b/config/add_dj_user.py index b9e3995e0..4d88dc82c 100755 --- a/config/add_dj_user.py +++ b/config/add_dj_user.py @@ -19,7 +19,9 @@ def add_user(user_name): f"GRANT ALL PRIVILEGES ON `{user_name}\_%`.* TO `{user_name}`@'%' IDENTIFIED BY 'temppass';\n" ) for module in shared_modules: - file.write(f"GRANT ALL PRIVILEGES ON `{module}`.* TO `{user_name}`@'%';\n") + file.write( + f"GRANT ALL PRIVILEGES ON `{module}`.* TO `{user_name}`@'%';\n" + ) file.write(f"GRANT SELECT ON `%`.* TO `{user_name}`@'%';\n") file.flush() diff --git a/config/dj_config.py b/config/dj_config.py index 8b81f70ae..59e2b43bb 100644 --- a/config/dj_config.py +++ b/config/dj_config.py @@ -16,6 +16,8 @@ def generate_config_yaml(filename: str, **kwargs): ---------- filename : str The name of the file to generate + output : str + File type to generate. 
Either yaml or json **kwargs: list of parameters names and values that can include database_host : host name of system running mysql (default lmf-db.cin.ucsf.edu) database_port : port number for mysql server (default 3306) @@ -27,7 +29,9 @@ def generate_config_yaml(filename: str, **kwargs): print("printing kwargs") print(kwargs) config["database.host"] = ( - kwargs["database_host"] if "database_host" in kwargs else "lmf-db.cin.ucsf.edu" + kwargs["database_host"] + if "database_host" in kwargs + else "lmf-db.cin.ucsf.edu" ) config["database.port"] = ( kwargs["database_port"] if "database_port" in kwargs else 3306 @@ -50,7 +54,11 @@ def generate_config_yaml(filename: str, **kwargs): analysis_dir = data_dir / "analysis" config["stores"] = { - "raw": {"protocol": "file", "location": str(raw_dir), "stage": str(raw_dir)}, + "raw": { + "protocol": "file", + "location": str(raw_dir), + "stage": str(raw_dir), + }, "analysis": { "protocol": "file", "location": str(analysis_dir), @@ -58,7 +66,12 @@ def generate_config_yaml(filename: str, **kwargs): }, } with open(filename, "w") as outfile: - yaml.dump(config, outfile, default_flow_style=False) + if filename.endswith("json"): + import json # noqa: F821 + + json.dump(config, outfile, indent=2) + else: + yaml.dump(config, outfile, default_flow_style=False) def set_configuration(user_name: str, file_name: str = None): diff --git a/docs/build-docs.sh b/docs/build-docs.sh new file mode 100644 index 000000000..3f01efd02 --- /dev/null +++ b/docs/build-docs.sh @@ -0,0 +1,40 @@ +#!/bin/bash +# Run this script from repo root to serve site: > bash ./docs/build-docs.sh serve +# Then, navigate to localhost:8000/ to inspect site, then ctrl+c to exit +# For auto-reload during dev, use `mkdocs serve -f ./docs/mkdocs.yml` + + +# Copy top-level repo files for docs display +cp ./CHANGELOG.md ./docs/src/ +cp ./LICENSE ./docs/src/LICENSE.md +cp -r ./notebooks/ ./docs/src/ + +# Get major version +FULL_VERSION=$(grep -m 1 version pyproject.toml | tr -s ' ' | tr -d '"' | tr -d "'" | cut -d' ' -f3) +export MAJOR_VERSION="${FULL_VERSION%.*}" +echo "$MAJOR_VERSION" + +# Generate site docs +mike deploy "$MAJOR_VERSION" --config ./docs/mkdocs.yml -b documentation + +# Label this version as latest, set as default +mike alias "$MAJOR_VERSION" latest --config ./docs/mkdocs.yml -b documentation +mike set-default latest --config ./docs/mkdocs.yml -b documentation + +# # Serve site to localhost +if [ "$1" == "serve" ]; then # If first arg is serve, serve docs + mike serve --config ./docs/mkdocs.yml -b documentation +elif [ "$1" == "push" ]; then # if first arg is push + if [ -z "$2" ]; then # When no second arg, use local git user + git_user=$(git config user.name) + else # Otherwise, accept second arg as git user + git_user="${2}" + fi # Push mike results to relevant branch + export url="https://github.com/${git_user}/spyglass.git" + git push "$url" documentation +else + echo "Docs built. " + echo " Add 'serve' as script arg to serve. " + echo " Add 'push' to push to your fork." + echo " Use additional arg to dictate push-to fork" +fi diff --git a/docs/config_yaml.md b/docs/config_yaml.md deleted file mode 100644 index f715bc53b..000000000 --- a/docs/config_yaml.md +++ /dev/null @@ -1,58 +0,0 @@ -# How to insert data into `spyglass` - -In `spyglass`, every table corresponds to an object. An experimental session is defined as a collection of such objects.
When an NWB file is ingested into `spyglass`, the information about these objects is first read and inserted into tables in the `common` module (e.g. `Institution`, `Lab`, `Electrode`, etc). However, not every NWB file has all the information required by `spyglass`. For example, many NWB files do not contain any information about the `DataAcquisitionDevice` or `Probe` because NWB does not yet have an official standard for specifying them. In addition, one might find that the information contained in the NWB file is incorrect and would like to modify it before inserting it into `spyglass` without having to go through the time-consuming process of re-generating the NWB file. For these cases, we provide an alternative approach to inserting data to `spyglass`. - -This alternate approach consists of two steps. First, the user must identify entries that they would like to add to the `spyglass` database that exist independently of any particular NWB file. For example, information about a particular probe is stored in the `ProbeType` and `Probe` tables of `spyglass.common`. The user can either: - -1. create these entries programmatically using DataJoint `insert` commands, for example: - ``` - sgc.ProbeType.insert1({ - "probe_type": "128c-4s6mm6cm-15um-26um-sl", - "probe_description": "A Livermore flexible probe with 128 channels, 4 shanks, 6 mm shank length, 6 cm ribbon length. 15 um contact diameter, 26 um center-to-center distance (pitch), single-line configuration.", - "manufacturer": "Lawrence Livermore National Lab", - "num_shanks": 4, - }, skip_duplicates=True) - ``` - -2. define these entries in a special YAML file called `entries.yaml` that is processed when `spyglass` is imported. One can think of `entries.yaml` as a place to define information that the database should come pre-equipped prior to ingesting any NWB files. The `entries.yaml` file should be placed in the `spyglass` base directory. An example can be found in `examples/config_yaml/entries.yaml`. It has the following structure: - ``` - TableName: - - TableEntry1Field1: Value - TableEntry1Field2: Value - - TableEntry2Field1: Value - TableEntry2Field2: Value - ``` - - For example, - ``` - ProbeType: - - probe_type: 128c-4s6mm6cm-15um-26um-sl - probe_description: A Livermore flexible probe with 128 channels, 4 shanks, 6 mm shank length, 6 cm ribbon length. 15 um contact diameter, 26 um center-to-center distance (pitch), single-line configuration. - manufacturer: Lawrence Livermore National Lab - num_shanks: 4 - ``` - -Using a YAML file over programmatically creating these entries in a notebook or script has the advantages that the YAML file maintains a record of what entries have been added that is easy to access, and the file is portable and can be shared alongside an NWB file or set of NWB files from a given experiment. - -Next, the user must associate the NWB file with entries defined in the database. This is done by creating a _configuration file_, which must: -be in the same directory as the NWB file that it configures -be in YAML format -have the following naming convention: `_spyglass_config.yaml`. - -Users can programmatically generate this configuration file. It is then read by spyglass when calling `insert_session` on the associated NWB file. - -An example of this can be found at `examples/config_yaml/​​sub-AppleBottom_ses-AppleBottom-DY20-g3_behavior+ecephys_spyglass_config.yaml`. This file is associated with the NWB file `sub-AppleBottom_ses-AppleBottom-DY20-g3_behavior+ecephys.nwb`. 
- -This is the general format for the config entry: -``` -TableName: -- primary_key1: value1 -``` - -For example: -``` -DataAcquisitionDevice: -- data_acquisition_device_name: Neuropixels Recording Device -``` - -In this example, the NWB file that corresponds to this config YAML will become associated with the DataAcquisitionDevice with primary key data_acquisition_device_name: Neuropixels Recording Device. This entry must exist. diff --git a/docs/developer_notes.md b/docs/developer_notes.md deleted file mode 100644 index 431c38460..000000000 --- a/docs/developer_notes.md +++ /dev/null @@ -1,106 +0,0 @@ -## Developer notes -Notes on how the repo / database is organized, intended for a new developer. - -### Overall organization -* Tables that are about similar things are grouped into a schema. Each schema is defined in a `.py` file. Example: all the tables related to quality metrics are part of the `common_metrics` schema and are defined in `common_metrics.py` in `common` module. -* The `common` module only contains schema that will be useful for everyone in the lab. If you want to add things to `common`, first check with Loren. -* For analysis that will be only useful to you, create your own schema. -### Types of tables -__NWB-related__ - * Data tier: `dj.Imported` - * Primary key: foreign key from `Session` - * Non-primary key: `object_id` - * Each NWB-related table has a corresponding data object in the NWB file. This object can be referred by a unique hash called an *object ID*. - * These tables are automatically populated when an NWB file is first ingested into the database. To enable this, include the `populate` call in the `make` method of `Session`. - * Required methods: - * `make`: must read information from an NWB file and insert it to the table. - * `fetch_nwb`: retrieve the data specified by the object ID; search the repo for examples. - * Example: `Raw`, `Institution` etc - -__Pipeline__ -* Each analysis pipeline defined by a schema. A typical pipeline has at least three tables: - * _Parameters_ table - * Naming convention: should end with `Parameters` (e.g. `MetricParameters`) - * Data tier: `dj.Manual` - * Function: holds a set of parameters for the particular analysis. - * Primary key: `x_params_name` (str); x is the name of the pipeline (e.g. `metric_params_name`). - * Non-primary key: `x_params` (dict; use `blob` in the definition); holds the parameters as key-value pairs. - * Required method: `insert_default` to insert a reasonable default parameter into the table. - * _Selection_ table - * Naming convention: should end with `Selection` (e.g. `MetricSelection`) - * Data tier: `dj.Manual` - * Function: associates a set of parameters to the data to be applied. For example, in the case of computing quality metrics, one might put extracted waveforms and a set of metrics parameters as a single entry in this table. - * Primary key: foreign key from a table containing data and the Parameters table (i.e. Selection tables are downstream of these two tables). - * Of course, it is possible for a Selection table to collect information from more than one Parameter table. For example, the Selection table for spike sorting holds information about both the interval (`SortInterval`) and the group of electrodes (`SortGroup`) to be sorted. - * Usually no other key needs to be defined - * _Data_ table - * Data tier: `dj.Computed` - * carries out the computation specified in the Selection table when `populate` is called. 
- * The primary key should be foreign key inherited from the Selection table. The non-primary key should be `analysis_file_name` inherited from `AnalysisNwbfile` table (i.e. name of the analysis NWB file that will hold the output of the computation). - * Required methods: - * `make`: carries out the computation and insert a new entry; must also create an analysis NWB file and insert it to the `AnalysisNwbfile` table. Note that this method is never called directly; it is called via `populate`. - * `delete`: extension of the `delete` method that checks user privilege before deleting entries as a way to prevent accidental deletion of computations that take a long time (see below). - * Example: `QualityMetrics` -* *Why have the Parameters table?* Because one might want to repeat an analysis with different sets of parameters. This way we keep track of everything. Also encourages sharing of parameters. -* *Why have the Selection table instead of going directly from Parameter table to Data table?* one still has to manually pass in the data and the parameters to use for the computation (e.g. as an argument to `populate`. Since this is required, defining it first in the Selection table is no less efficient. In addition, multiple entries in Selection table can be run in parallel when `populate` is called with `reserve_jobs=True` option. - -__Multi-pipeline__ -* These are tables that are part of many pipelines. -* Examples: `IntervalList` (whose entries define time interval for any analysis), `AnalysisNwbfile` (whose entries define analysis NWB files created for any analysis), `Sortings` (whose entries include many types of spike sorting, such as uncurated, automatically curated, manually curated etc) -* Data tier: `dj.Manual` -* Note that because these are stand-alone manual tables, they are not part of the dependency structure. This means one should try to include enough information such that they can be linked back to the pipelines. - -### Integration with NWB -__NWB files__ -* NWB files contain everything about the experiment and form the starting point of all analysis -* stored in `/stelmo/nwb/raw` -* A copy of the NWB file that only contains pointers to objects in original file is made in the same directory; the name has an extra `_` at the end, e.g. `beans20190718_.nwb`; this file is made because we want to create object IDs to refer to parts of the NWB file, but don't want to store these object IDs in the original file to avoid file corruption -* Listed in the `Nwbfile` table - -__Analysis NWB files__ -* These are NWB files that hold the results of intermediate steps in the analysis. -* Examples of data stored: filtered recordings, spike times of putative units after sorting, or waveform snippets. -* Stored in `/stelmo/nwb/analysis` -* Listed as an entry in the `AnalysisNwbfile` table. - -Note: for both types of NWB files, the fact that a file is not listed in the table doesn't mean the file does not exist in the directory. You can 'equalize' the list of NWB files and the list of actual files on disk by running `cleanup` method (i.e. it deletes any files not listed in the table from disk). - -### Reading and writing recordings -* Right now the recording starts out as an NWB file. This is opened as a `NwbRecordingExtractor`, a class in `spikeinterface`. When using `sortingview` for visualizing the results of spike sorting, this recording is saved again in HDF5 format. This duplication should be resolved in the future. 
- -### Naming convention -There are a few places where a name needs to be given to objects. Follow these rules: -* _Recordings_: should be given unique names. As such we have decided to simply concatenate everything that went into defining it separated by underscore, i.e. `NWBFileName_IntervalName_ElectrodeGroupName_PreprocessingParamsName`. -* _Sortings_: should be unique. Simply concatenates `SpikeSorter_SorterParamName` to the name of the recording. -* _Waveforms_: should be unique. Concatenates `WaveformParamName` to the name of the sorting. -* _Quality metrics_: should be unique. concatenates `MetricParamName` to the name of the waveform. -* _Analysis NWB files_: same as the objects, i.e. the analysis NWB file that holds recording is named `NWBFileName_IntervalName_ElectrodeGroupName_PreprocessingParamsName.nwb` -* An alternative way to get unique names that are not as long is to generate a UUID for each file. Currently each recording and sorting are given such IDs. -* A method that will not be explicitly called by the user should start with `_` - -### Time -* All valid intervals of any kind must be inserted into the `IntervalList` table prior to being used. -* Store an interval as `[start_time, stop_time]`. The list can be nested for a set of disjoint intervals. -* Some recordings have explicit timestamps associated with each sample. This is obtained by a system called PTP. In this system, time 0 is defined as 1 Jan 1970. Other (typically older) recordings do not and their times must be inferred from the TTL pulses from the camera (ask if this doesn't make sense). -* What is a valid interval? Because our experiments can be long, sometimes there are missing samples. This can be due to many reasons, such as the commutator connection being faulty for a few milliseconds. As a result we have 'holes' in our data. A valid interval is a start time and an end time between which there are no holes. - -### Misc -* You may want to create a development/testing environment independent of the lab datajoint server. To do so, run your own datajoint server with Docker. See [example](../notebook/docker_mysql_tutorial.ipynb). -* Datajoint is unable to set delete permissions on a per-table basis. In other words, if a user is able to delete entries in a given table, she can delete entries in any table in the schema. Some tables that hold important data extends the `delete` method to check if the datajoint username matches a list of allowed users when `delete` is called. If you think this would be useful for your table, see examples in `common_spikesorting.py`. -* In general, use `numpy` style docstring. -* Don't overload a single `.py` file. For each pipeline make a new `.py` file and define your schema / tables. -* Some of the 'rules' above may need to change or be inappropriate for some cases. If you want to start a discussion, talk to Loren. - -### Making a release -1. In `pyproject.toml`, under `[project]`, update the `version` key to the new version string. -2. In `CITATION.cff`, update the `version` key to the new version string. -3. Make a pull request with these changes. -4. After merging these changes, run `git tag --sign -m "spyglass ${release}" ${release} origin/master` where `${release}` is replaced with the new version string. - - This step requires a [GPG signing key](https://docs.github.com/en/authentication/managing-commit-signature-verification/generating-a-new-gpg-key). -5. Publish the new release tag. Run `git push origin ${release}`. -6. 
Generate distribution packages and upload them to PyPI following [these instructions](https://packaging.python.org/en/latest/tutorials/packaging-projects/#generating-distribution-archives). -7. Make a new release on GitHub with the new release tag: https://docs.github.com/en/repositories/releasing-projects-on-github/managing-releases-in-a-repository - - -### TODO -* Fetch nwb method is currently implemented for each table. This is unnecessary because (1) what matters is the query, not the table the method is attached to; and (2) you either look up the Nwbfile or the AnalysisNwbfile table for it, so really there are only two versions. It would be better to just have two standalone functions. Or just one that figures out which NWB file to look up. diff --git a/docs/mkdocs.yml b/docs/mkdocs.yml new file mode 100644 index 000000000..9862f489d --- /dev/null +++ b/docs/mkdocs.yml @@ -0,0 +1,122 @@ +site_name: Spyglass +site_url: https://lorenfranklab.github.io/spyglass +site_description: Spyglass Documentation +site_author: CBroz1 + +repo_url: https://github.com/LorenFrankLab/spyglass +docs_dir: ./src/ +edit_uri: blob/main/docs/src/ + +theme: + name: material + custom_dir: overrides + logo: images/FrankLab.png + favicon: images/Spyglass.svg + features: + - toc.integrate + - navigation.sections + - navigation.expand + - navigation.top + palette: + - media: "(prefers-color-scheme: light)" + scheme: auto + toggle: + icon: material/brightness-7 + name: Switch to dark mode + - media: "(prefers-color-scheme: slate)" + scheme: slate + toggle: + icon: material/brightness-4 + name: Switch to light mode + disable_nav_previous_next: true + disable_nav_search: false + navigation_depth: 2 + locale: en + features: + - navigation.instant # saves loading time - 1 browser page + - navigation.tracking # even with above, changes URL by section + - navigation.top + - search.suggest + - search.share + +nav: + - Home: index.md + - Installation: + - Home: installation/index.md + - Local: installation/local.md + - Production: installation/production.md + - Tutorials: + - Introduction: notebooks/00_intro.ipynb + - Spike Sorting: notebooks/01_spikesorting.ipynb + - Curation: notebooks/02_curation.ipynb + - LFP: notebooks/03_lfp.ipynb + - Position: + - Information: notebooks/4_position_info.ipynb + - Pipeline: notebooks/04_Trodes_position.ipynb + - From Scratch: notebooks/05_DLC_from_scratch.ipynb + - From Pre-Trained: notebooks/06_DLC_from_dir.ipynb + - Linearization Pipeline: notebooks/07_linearization.ipynb + - Mark Indicators: notebooks/08_Extract_Mark_indicators.ipynb + - GPU: notebooks/09_Decoding_with_GPUs_on_the_GPU_cluster.ipynb + - 1D Clusterless Decoding: notebooks/10_1D_Clusterless_Decoding.ipynb + - 2D Clusterless Decoding: notebooks/11_2D_Clusterless_Decoding.ipynb + - Ripple Detection: notebooks/12_Ripple_Detection.ipynb + - Docker: notebooks/docker_mysql_tutorial.ipynb + - Spyglass Kachery Setup: notebooks/Spyglass_kachery_setup.ipynb + - API Reference: api/ # defer to gen-files + literate-nav + - How to Contribute: contribute.md + - Miscellaneous: + - FigURL: misc/figurl_views.md + - Session Groups: misc/session_groups.md + - Insert Data: misc/insert_data.md + - Change Log: CHANGELOG.md + - Copyright: LICENSE.md + +extra_css: + - stylesheets/extra.css + +plugins: + - search + - exclude: + glob: + - "temp*" + - "0*yaml" + - mike: + canonical_version: latest + css_dir: stylesheets + - mkdocstrings: + default_handler: python + handlers: + python: + options: + members_order: source + group_by_category: false
+ line_length: 80 + docstring_style: numpy + selection: + docstring_style: numpy + enabled: true # Set to false to reduce build time + watch: + - src/spyglass/ + - literate-nav: + nav_file: navigation.md + - exclude-search: + exclude: + - "*/navigation.md" + - gen-files: + scripts: + - ./src/api/make_pages.py + - mkdocs-jupyter: # Comment this block during dev to reduce build time + ignore_h1_titles: True + ignore: ["*make_pages.py", "**checkpoints**"] + +markdown_extensions: + - attr_list + - tables + - toc: + permalink: true + +extra: + generator: false # Disable watermark + version: + provider: mike diff --git a/docs/overrides/nav.html b/docs/overrides/nav.html new file mode 100644 index 000000000..216e139cf --- /dev/null +++ b/docs/overrides/nav.html @@ -0,0 +1,9 @@ +{% set class = "md-nav md-nav--primary" %} +{% if "navigation.tabs" in features %} +{% set class = class ~ " md-nav--lifted" %} +{% endif %} +{% if "toc.integrate" in features %} +{% set class = class ~ " md-nav--integrated" %} +{% endif %} + + diff --git a/docs/src/api/make_pages.py b/docs/src/api/make_pages.py new file mode 100644 index 000000000..d4d8f83dc --- /dev/null +++ b/docs/src/api/make_pages.py @@ -0,0 +1,17 @@ +"""Generate the api pages and navigation. +""" + +import mkdocs_gen_files +from pathlib import Path + +nav = mkdocs_gen_files.Nav() +for path in sorted(Path("src").glob("**/*.py")): + if path.stem == "__init__": + continue + with mkdocs_gen_files.open(f"api/{path.with_suffix('')}.md", "w") as f: + module_path = ".".join([p for p in path.with_suffix("").parts]) + print(f"::: {module_path}", file=f) + nav[path.parts] = f"{path.with_suffix('')}.md" + +with mkdocs_gen_files.open("api/navigation.md", "w") as nav_file: + nav_file.writelines(nav.build_literate_nav()) diff --git a/docs/src/contribute.md b/docs/src/contribute.md new file mode 100644 index 000000000..b6ea027f2 --- /dev/null +++ b/docs/src/contribute.md @@ -0,0 +1,211 @@ +# Developer notes + +Notes on how the repo / database is organized, intended for a new developer. + +## Overall organization + +- Tables that are about similar things are grouped into a schema. Each schema is + defined in a `.py` file. Example: all the tables related to quality metrics + are part of the `common_metrics` schema and are defined in `common_metrics.py` + in `common` module. +- The `common` module only contains schema that will be useful for everyone in + the lab. If you want to add things to `common`, first check with Loren. +- For analysis that will be only useful to you, create your own schema. + +## Types of tables + +### NWB-related + +- Data tier: `dj.Imported` +- Primary key: foreign key from `Session` +- Non-primary key: `object_id` +- Each NWB-related table has a corresponding data object in the NWB file. This + object can be referred by a unique hash called an _object ID_. +- These tables are automatically populated when an NWB file is first ingested + into the database. To enable this, include the `populate` call in the `make` + method of `Session`. +- Required methods: + - `make`: must read information from an NWB file and insert it to the table. + - `fetch_nwb`: retrieve the data specified by the object ID; search the repo + for examples. +- Example: `Raw`, `Institution` etc + +### Pipeline + +- Each analysis pipeline defined by a schema. A typical pipeline has at least + three tables: + - _Parameters_ table + - Naming convention: should end with `Parameters` (e.g. 
`MetricParameters`) + - Data tier: `dj.Manual` + - Function: holds a set of parameters for the particular analysis. + - Primary key: `x_params_name` (str); x is the name of the pipeline (e.g. + `metric_params_name`). + - Non-primary key: `x_params` (dict; use `blob` in the definition); holds + the parameters as key-value pairs. + - Required method: `insert_default` to insert a reasonable default parameter + into the table. + - _Selection_ table + - Naming convention: should end with `Selection` (e.g. `MetricSelection`) + - Data tier: `dj.Manual` + - Function: associates a set of parameters to the data to be applied. For + example, in the case of computing quality metrics, one might put extracted + waveforms and a set of metrics parameters as a single entry in this table. + - Primary key: foreign key from a table containing data and the Parameters + table (i.e. Selection tables are downstream of these two tables). + - Of course, it is possible for a Selection table to collect information + from more than one Parameter table. For example, the Selection table for + spike sorting holds information about both the interval (`SortInterval`) + and the group of electrodes (`SortGroup`) to be sorted. + - Usually no other key needs to be defined + - _Data_ table + - Data tier: `dj.Computed` + - carries out the computation specified in the Selection table when + `populate` is called. + - The primary key should be foreign key inherited from the Selection table. + The non-primary key should be `analysis_file_name` inherited from + `AnalysisNwbfile` table (i.e. name of the analysis NWB file that will hold + the output of the computation). + - Required methods: + - `make`: carries out the computation and insert a new entry; must also + create an analysis NWB file and insert it to the `AnalysisNwbfile` + table. Note that this method is never called directly; it is called via + `populate`. + - `delete`: extension of the `delete` method that checks user privilege + before deleting entries as a way to prevent accidental deletion of + computations that take a long time (see below). + - Example: `QualityMetrics` +- _Why have the Parameters table?_ Because one might want to repeat an analysis + with different sets of parameters. This way we keep track of everything. Also + encourages sharing of parameters. +- _Why have the Selection table instead of going directly from Parameter table + to Data table?_ one still has to manually pass in the data and the parameters + to use for the computation (e.g. as an argument to `populate`. Since this is + required, defining it first in the Selection table is no less efficient. In + addition, multiple entries in Selection table can be run in parallel when + `populate` is called with `reserve_jobs=True` option. + +### Multi-pipeline + +- These are tables that are part of many pipelines. +- Examples: `IntervalList` (whose entries define time interval for any + analysis), `AnalysisNwbfile` (whose entries define analysis NWB files created + for any analysis), `Sortings` (whose entries include many types of spike + sorting, such as uncurated, automatically curated, manually curated etc) +- Data tier: `dj.Manual` +- Note that because these are stand-alone manual tables, they are not part of + the dependency structure. This means one should try to include enough + information such that they can be linked back to the pipelines. 
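The Parameters / Selection / Data pattern described under Pipeline above may be easier to see in code. Below is a minimal sketch of that pattern in DataJoint; the schema name, table names, and fields are illustrative stand-ins rather than actual Spyglass tables, and a real Data table would also write an analysis NWB file as described above.

```python
import datajoint as dj

schema = dj.schema("tutorial_pipeline")  # hypothetical schema name


@schema
class MetricParameters(dj.Manual):
    definition = """
    metric_params_name: varchar(80)  # name of this parameter set
    ---
    metric_params: blob              # parameters as key-value pairs
    """

    @classmethod
    def insert_default(cls):
        cls.insert1(
            {
                "metric_params_name": "default",
                "metric_params": {"isi_threshold_ms": 1.5},
            },
            skip_duplicates=True,
        )


@schema
class RecordingData(dj.Manual):
    """Stand-in for an upstream table holding the data to be analyzed."""

    definition = """
    recording_id: varchar(80)
    """


@schema
class MetricSelection(dj.Manual):
    definition = """
    -> RecordingData
    -> MetricParameters
    """


@schema
class QualityMetrics(dj.Computed):
    definition = """
    -> MetricSelection
    ---
    metrics: blob  # in a real pipeline: analysis_file_name from AnalysisNwbfile
    """

    def make(self, key):
        # Load the data and parameters named by `key`, compute, then insert.
        params = (MetricParameters & key).fetch1("metric_params")
        self.insert1(dict(key, metrics={"params_used": params}))
```

With these definitions, pairing a `RecordingData` entry with a parameter set in `MetricSelection` and then calling `QualityMetrics.populate()` runs `make` once per selection entry (or in parallel with `reserve_jobs=True`).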
+ +## Integration with NWB + +### NWB files + +- NWB files contain everything about the experiment and form the starting point + of all analysis +- stored in `/stelmo/nwb/raw` +- A copy of the NWB file that only contains pointers to objects in original file + is made in the same directory; the name has an extra `_` at the end, e.g. + `beans20190718_.nwb`; this file is made because we want to create object IDs + to refer to parts of the NWB file, but don't want to store these object IDs in + the original file to avoid file corruption +- Listed in the `Nwbfile` table + +### Analysis NWB files + +- These are NWB files that hold the results of intermediate steps in the analysis. +- Examples of data stored: filtered recordings, spike times of putative units + after sorting, or waveform snippets. +- Stored in `/stelmo/nwb/analysis` +- Listed as an entry in the `AnalysisNwbfile` table. + +Note: for both types of NWB files, the fact that a file is not listed in the +table doesn't mean the file does not exist in the directory. You can 'equalize' +the list of NWB files and the list of actual files on disk by running `cleanup` +method (i.e. it deletes any files not listed in the table from disk). + +## Reading and writing recordings + +- Right now the recording starts out as an NWB file. This is opened as a + `NwbRecordingExtractor`, a class in `spikeinterface`. When using `sortingview` + for visualizing the results of spike sorting, this recording is saved again in + HDF5 format. This duplication should be resolved in the future. + +## Naming convention + +There are a few places where a name needs to be given to objects. Follow these rules: + +- _Recordings_: should be given unique names. As such we have decided to simply + concatenate everything that went into defining it separated by underscore, + i.e. `NWBFileName_IntervalName_ElectrodeGroupName_PreprocessingParamsName`. +- _Sortings_: should be unique. Simply concatenates + `SpikeSorter_SorterParamName` to the name of the recording. +- _Waveforms_: should be unique. Concatenates `WaveformParamName` to the name of + the sorting. +- _Quality metrics_: should be unique. concatenates `MetricParamName` to the + name of the waveform. +- _Analysis NWB files_: same as the objects, i.e. the analysis NWB file that + holds recording is named + `NWBFileName_IntervalName_ElectrodeGroupName_PreprocessingParamsName.nwb` +- An alternative way to get unique names that are not as long is to generate a + UUID for each file. Currently each recording and sorting are given such IDs. +- A method that will not be explicitly called by the user should start with `_` + +## Time + +- All valid intervals of any kind must be inserted into the `IntervalList` table + prior to being used. +- Store an interval as `[start_time, stop_time]`. The list can be nested for a + set of disjoint intervals. +- Some recordings have explicit timestamps associated with each sample. This is + obtained by a system called PTP. In this system, time 0 is defined as 1 Jan + 1970. Other (typically older) recordings do not and their times must be + inferred from the TTL pulses from the camera (ask if this doesn't make sense). +- What is a valid interval? Because our experiments can be long, sometimes there + are missing samples. This can be due to many reasons, such as the commutator + connection being faulty for a few milliseconds. As a result we have 'holes' in + our data. A valid interval is a start time and an end time between which there + are no holes. 
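To make the interval convention above concrete, here is a small, self-contained sketch of interval lists as `[start_time, stop_time]` pairs. The `intersect_intervals` helper is hypothetical, written only for illustration, not one of the utilities Spyglass itself provides in `common_interval.py`.

```python
import numpy as np

# Disjoint valid intervals for a recording with a 0.7 s hole (times in seconds;
# for PTP recordings these are seconds since 1 Jan 1970).
valid_times = np.array([[10.0, 55.2], [55.9, 120.0]])


def intersect_intervals(a, b):
    """Return the overlap of two lists of disjoint [start, stop] intervals."""
    out = []
    for a_start, a_stop in a:
        for b_start, b_stop in b:
            start, stop = max(a_start, b_start), min(a_stop, b_stop)
            if start < stop:
                out.append([start, stop])
    return np.array(out)


# Restrict the valid times to a 60 s epoch of interest.
epoch = np.array([[0.0, 60.0]])
print(intersect_intervals(valid_times, epoch))  # [[10.  55.2] [55.9 60. ]]
```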
+ +## Misc + +- You may want to create a development/testing environment independent of the + lab datajoint server. To do so, run your own datajoint server with Docker. See + [example](./notebooks/docker_mysql_tutorial.ipynb). +- Datajoint is unable to set delete permissions on a per-table basis. In other + words, if a user is able to delete entries in a given table, she can delete + entries in any table in the schema. Some tables that hold important data + extends the `delete` method to check if the datajoint username matches a list + of allowed users when `delete` is called. If you think this would be useful + for your table, see examples in `common_spikesorting.py`. +- In general, use `numpy` style docstring. +- Don't overload a single `.py` file. For each pipeline make a new `.py` file + and define your schema / tables. +- Some of the 'rules' above may need to change or be inappropriate for some + cases. If you want to start a discussion, talk to Loren. + +## Making a release + +1. In `pyproject.toml`, under `[project]`, update the `version` key to the new + version string. +2. In `CITATION.cff`, update the `version` key to the new version string. +3. Make a pull request with these changes. +4. After merging these changes, run `git tag --sign -m "spyglass ${release}" + ${release} origin/master` where `${release}` is replaced with the new version + string. + + - This step requires a + [GPG signing key](https://docs.github.com/en/authentication/managing-commit-signature-verification/generating-a-new-gpg-key). + +1. Publish the new release tag. Run `git push origin ${release}`. +2. Generate distribution packages and upload them to PyPI following [these + instructions](https://packaging.python.org/en/latest/tutorials/packaging-projects/#generating-distribution-archives). +3. Make a new release on GitHub with the new release tag: + + +## TODO + +- Fetch nwb method is currently implemented for each table. This is unnecessary + because (1) what matters is the query, not the table the method is attached + to; and (2) you either look up the Nwbfile or the AnalysisNwbfile table for + it, so really there are only two versions. It would be better to just have two + standalone functions. Or just one that figures out which NWB file to look up. 
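As a rough illustration of the `delete` note under Misc above, here is a minimal sketch of a table that guards deletion behind an allow-list of database users. The schema name, table, and allow-list are hypothetical; the tables in `common_spikesorting.py` are the actual reference implementations.

```python
import datajoint as dj

schema = dj.schema("example_guarded")  # hypothetical schema name


@schema
class ExpensiveResult(dj.Manual):
    """Stand-in for a table whose entries are costly to recompute."""

    definition = """
    result_id: int
    ---
    result: blob
    """

    # Hypothetical allow-list; adjust to however your lab tracks privileges.
    _allowed_deleters = ("loren", "admin")

    def delete(self, *args, **kwargs):
        # Refuse to delete unless the connected database user is on the list.
        user = dj.config["database.user"]
        if user not in self._allowed_deleters:
            raise PermissionError(
                f"User '{user}' is not permitted to delete from {self.table_name}"
            )
        return super().delete(*args, **kwargs)
```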
diff --git a/docs/src/images/FrankLab.png b/docs/src/images/FrankLab.png
new file mode 100644
index 0000000000000000000000000000000000000000..131520e7867a4c56d5c6fe8944bf8c5da908e6e3
GIT binary patch
literal 10259
[base85-encoded PNG payload omitted]
diff --git a/docs/src/images/Spyglass.png b/docs/src/images/Spyglass.png
new file mode 100644
index 0000000000000000000000000000000000000000..97addbd52e98d40126b5582625eca77004f9e4c7
GIT binary patch
literal 75394
[base85-encoded PNG payload omitted]
[GIT binary patch data omitted]
[SVG image diff omitted: the markup was lost in extraction; only the title text "Spyglass" survives]
diff --git a/docs/src/index.md b/docs/src/index.md new file mode 100644 index 000000000..25d84ca47 --- /dev/null +++ b/docs/src/index.md @@ -0,0 +1,24 @@ +# Spyglass + +**Spyglass** is a data analysis framework that facilitates the +storage, analysis, and sharing of neuroscience data to support +reproducible research. It is designed to be interoperable with the NWB +format and integrates open-source tools into a coherent framework. + +## Installation + +To install this project, see [Installation](./installation/). + +## Contributing + +For contribution instructions, see [How to Contribute](./contribute.md). + +## Citing Spyglass + +Kyu Hyun Lee, Eric Denovellis, Ryan Ly, Jeremy Magland, Jeff Soules, +Alison Comrie, Jennifer Guidera, Rhino Nevers, Daniel Gramling, Philip +Adenekan, Ji Hyun Bak, Emily Monroe, Andrew Tritt, Oliver Rübel, Thinh +Nguyen, Dimitri Yatsenko, Joshua Chu, Caleb Kemere, Samuel Garcia, +Alessio Buccino, Emily Aery Jones, Lisa Giocomo, and Loren Frank. +\'Spyglass: A Data Analysis Framework for Reproducible and Shareable +Neuroscience Research.\' (2022) Society for Neuroscience, San Diego, CA.
diff --git a/docs/src/installation/index.md b/docs/src/installation/index.md new file mode 100644 index 000000000..36c1ee1d6 --- /dev/null +++ b/docs/src/installation/index.md @@ -0,0 +1,15 @@ +# Installation + +## Production + +Choose [Production](./production.md) if you want to use +the officially released version of spyglass. This is meant for regular +end-users. Click here for instructions - +[Production](./production.md) + +## Local + +Choose [Local](./local.md) if you want to work on the +development version in order to add features. This is meant for anyone +interested in making updates to the code base. Click here for +instructions - [Local](./local.md)
diff --git a/docs/src/installation/local.md b/docs/src/installation/local.md new file mode 100644 index 000000000..3b7cab562 --- /dev/null +++ b/docs/src/installation/local.md @@ -0,0 +1,122 @@ +# Local Installation + +## Clone Repository + +For local development, first pull down the code base - + +```bash +git clone https://github.com/LorenFrankLab/spyglass.git +``` + +Set up and activate a conda environment from `environment.yml`: + +```bash +cd spyglass +conda env create -f environment.yml +conda activate spyglass +``` + +Install this repository: + +```bash +pip install -e . +``` + +## Additional Packages + +Some pipelines require installation of additional packages. For example, +the spike sorting pipeline relies on `spikeinterface`.
We recommend installing +it directly from the GitHub repo: + +```bash +pip install spikeinterface[full,widgets] +``` + +You may also need to install individual sorting algorithms. For example, Loren +Frank's lab at UCSF typically uses `mountainsort4`: + +```bash +pip install mountainsort4 +``` + +WARNING: If you are on an M1 Mac, you need to install `pyfftw` via `conda` +BEFORE installing `ghostipy`: + +```bash +conda install -c conda-forge pyfftw +``` + +The LFP pipeline uses `ghostipy`: + +```bash +pip install ghostipy +``` + +## Setting up database access + +1. To use `spyglass`, you need to have access to a MySQL database. If your lab + already administers a database, connect to it by setting + [DataJoint](https://www.datajoint.org/) configurations. If you want to run + your own database, consult instructions in [datajoint tutorial](https://tutorials.datajoint.org/setting-up/get-database.html) + and/or [our tutorial notebook](../notebooks/docker_mysql_tutorial.ipynb). + +2. Add the following environment variables (e.g. in `~/.bashrc`). The following + are specific to Frank lab so you may want to change `SPYGLASS_BASE_DIR`. + + ```bash + export SPYGLASS_BASE_DIR="/stelmo/nwb" + export SPYGLASS_RECORDING_DIR="$SPYGLASS_BASE_DIR/recording" + export SPYGLASS_SORTING_DIR="$SPYGLASS_BASE_DIR/sorting" + export SPYGLASS_VIDEO_DIR="$SPYGLASS_BASE_DIR/video" + export SPYGLASS_WAVEFORMS_DIR="$SPYGLASS_BASE_DIR/waveforms" + export SPYGLASS_TEMP_DIR="$SPYGLASS_BASE_DIR/tmp/spyglass" + export DJ_SUPPORT_FILEPATH_MANAGEMENT="TRUE" + ``` + + Note that a local `SPYGLASS_TEMP_DIR` (e.g. one on your machine) will speed + up spike sorting, but make sure it has enough free space (ideally at least + 500GB). + + Before proceeding, run - + + ```bash + source ~/.bashrc + ``` + + in order to persist the changes. + +3. Set up [`kachery-cloud`](https://github.com/flatironinstitute/kachery-cloud) + (Frank Lab members only). Once you have initialized a `kachery-cloud` + directory, add the following environment variables (again, shown for Frank + lab). + + ```bash + export KACHERY_CLOUD_DIR="$SPYGLASS_BASE_DIR/.kachery-cloud" + export KACHERY_TEMP_DIR="$SPYGLASS_BASE_DIR/tmp" + ``` + + Before proceeding, run - + + ```bash + source ~/.bashrc + ``` + + in order to persist the changes. + +4. Configure DataJoint. To connect to the + [DataJoint](https://www.datajoint.org/) database, we have to specify + information about it such as the hostname and the port. You should also + change your password from the temporary one you were given. Go to the config + directory and run + [`dj_config.py`](https://github.com/LorenFrankLab/spyglass/blob/master/config/dj_config.py) + in a terminal with your username: + + ```bash + cd config # change to the config directory + python dj_config.py # run the configuration script + ``` + +Finally, open up a python console (e.g. run `ipython` from terminal) and import +`spyglass` to check that the installation has worked.
diff --git a/docs/src/installation/production.md b/docs/src/installation/production.md new file mode 100644 index 000000000..d62b7a778 --- /dev/null +++ b/docs/src/installation/production.md @@ -0,0 +1,148 @@ +# Production Installation + +## Virtual Environment + +It is recommended you install a [virtual +environment](https://en.wikipedia.org/wiki/Virtual_environment_software).
There +are many options like +[conda](https://docs.conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html) +and [venv](https://docs.python.org/3/library/venv.html). These installation +instructions will use +[conda](https://docs.conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html). + +The instructions to install +[conda](https://docs.conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html) +can be found at +[https://docs.conda.io/projects/conda/en/latest/user-guide/install/index.html](https://docs.conda.io/projects/conda/en/latest/user-guide/install/index.html). +[Conda](https://docs.conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html) +can be used both for installing packages and for creating a +[virtual environment](https://towardsdatascience.com/introduction-to-conda-virtual-environments-eaea4ac84e28). + +To create the environment after +[conda](https://docs.conda.io/projects/conda/en/latest/user-guide/tasks/manage-environments.html) +is installed, run - + +```bash +conda create -n spyglass-env python=3.9.0 +``` + +The name selected here is `spyglass-env`. However, a different name can be used +if desired or necessary. + +Now, it is time to activate the virtual environment. To start, run - + +```bash +conda activate spyglass-env +``` + +## Installing Spyglass + +`spyglass` can be installed via +`pip` from +[pypi](https://pypi.org/project/spyglass-neuro/): + +```bash +pip install spyglass-neuro +``` + +## Additional Packages + +Some pipelines require installation of additional packages. For example, the +spike sorting pipeline relies on `spikeinterface`. We recommend installing it +directly from the GitHub repo: + +```bash +pip install spikeinterface[full,widgets] +``` + +You may also need to install individual sorting algorithms. For example, Loren +Frank's lab at [UCSF](https://www.ucsf.edu/) typically uses `mountainsort4`: + +```bash +pip install mountainsort4 +``` + +WARNING: If you are on an M1 Mac, you need to install `pyfftw` via `conda` +BEFORE installing `ghostipy`: + +```bash +conda install -c conda-forge pyfftw +``` + +The LFP pipeline uses `ghostipy`: + +```bash +pip install ghostipy +``` + +## Setting up database access + +1. To use `spyglass`, you need to have access to a MySQL database. If your lab + already administers a database, connect to it by setting + [DataJoint](https://www.datajoint.org/) configurations. If you want to run + your own database, consult instructions in + [datajoint tutorial](https://tutorials.datajoint.org/setting-up/get-database.html) + and/or [our tutorial notebook](../notebooks/docker_mysql_tutorial.ipynb). + +2. Add the following environment variables (e.g. in `~/.bashrc`). The following + are specific to Frank lab so you may want to change `SPYGLASS_BASE_DIR`. + + ```bash + export SPYGLASS_BASE_DIR="/stelmo/nwb" + export SPYGLASS_RECORDING_DIR="$SPYGLASS_BASE_DIR/recording" + export SPYGLASS_SORTING_DIR="$SPYGLASS_BASE_DIR/sorting" + export SPYGLASS_VIDEO_DIR="$SPYGLASS_BASE_DIR/video" + export SPYGLASS_WAVEFORMS_DIR="$SPYGLASS_BASE_DIR/waveforms" + export SPYGLASS_TEMP_DIR="$SPYGLASS_BASE_DIR/tmp/spyglass" + export DJ_SUPPORT_FILEPATH_MANAGEMENT="TRUE" + ``` + + Note that a local `SPYGLASS_TEMP_DIR` (e.g. one on your machine) will speed + up spike sorting, but make sure it has enough free space (ideally at least + 500GB). + + Before proceeding, run - + + ```bash + source ~/.bashrc + ``` + + in order to persist the changes. + +3.
Set up [`kachery-cloud`](https://github.com/flatironinstitute/kachery-cloud) + (if you are in Frank lab, skip this step). Once you have initialized a + `kachery-cloud` directory, add the following environment variables (again, + shown for Frank lab). + + ```bash + export KACHERY_CLOUD_DIR="$SPYGLASS_BASE_DIR/.kachery-cloud" + export KACHERY_TEMP_DIR="$SPYGLASS_BASE_DIR/tmp" + ``` + + Before proceeding, run - + + ```bash + source ~/.bashrc + ``` + + in order to persist the changes. + +4. Configure DataJoint. To connect to the + [DataJoint](https://www.datajoint.org/) database, we have to specify + information about it such as the hostname and the port. You should also + change your password from the temporary one you were given. Download + [`dj_config.py`](https://github.com/LorenFrankLab/spyglass/blob/master/config/dj_config.py) + from + [https://github.com/LorenFrankLab/spyglass/blob/master/config/dj_config.py](https://github.com/LorenFrankLab/spyglass/blob/master/config/dj_config.py) + and save locally as `dj_config.py`, to any folder. Instructions on how to + download a single file from github can be found at + [https://stackoverflow.com/a/13593430/178550](https://stackoverflow.com/a/13593430/178550). + Then run `dj_config.py` in a terminal with your username - + + ```bash + cd config # change to the config directory + python dj_config.py # run the configuration script + ``` + +Finally, open up a python console (e.g. run `ipython` from terminal) and import +`spyglass` to check that the installation has worked. diff --git a/docs/figurl_views.md b/docs/src/misc/figurl_views.md similarity index 89% rename from docs/figurl_views.md rename to docs/src/misc/figurl_views.md index 824e874a9..551019f69 100644 --- a/docs/figurl_views.md +++ b/docs/src/misc/figurl_views.md @@ -1,6 +1,6 @@ # Creating figurl views -### Spike sorting recording view +## Spike sorting recording view ```python import spyglass.common as ndc @@ -14,7 +14,7 @@ query = ... ndf.SpikeSortingRecordingView.populate([(ndc.SpikeSortingRecording & query).proj()]) ``` -### Spike sorting view +## Spike sorting view ```python import spyglass.common as ndc @@ -26,4 +26,4 @@ query = ... # (ndf.SpikeSortingView & query).delete() ndf.SpikeSortingView.populate([(ndc.SpikeSorting & query).proj()]) -``` \ No newline at end of file +``` diff --git a/docs/src/misc/insert_data.md b/docs/src/misc/insert_data.md new file mode 100644 index 000000000..dcf80f173 --- /dev/null +++ b/docs/src/misc/insert_data.md @@ -0,0 +1,94 @@ +# How to insert data into `spyglass` + +In `spyglass`, every table corresponds to an object. An experimental session is +defined as a collection of such objects. When an NWB file is ingested into +`spyglass`, the information about these objects is first read and inserted into +tables in the `common` module (e.g. `Institution`, `Lab`, `Electrode`, etc). +However, not every NWB file has all the information required by `spyglass`. For +example, many NWB files do not contain any information about the +`DataAcquisitionDevice` or `Probe` because NWB does not yet have an official +standard for specifying them. In addition, one might find that the information +contained in the NWB file is incorrect and would like to modify it before +inserting it into `spyglass` without having to go through the time-consuming +process of re-generating the NWB file. For these cases, we provide an +alternative approach to inserting data to `spyglass`. + +This alternate approach consists of two steps. 
First, the user must identify +entries that they would like to add to the `spyglass` database that exist +independently of any particular NWB file. For example, information about a +particular probe is stored in the `ProbeType` and `Probe` tables of +`spyglass.common`. The user can either: + +1. create these entries programmatically using DataJoint `insert` commands, for + example: + + ```python + sgc.ProbeType.insert1({ + "probe_type": "128c-4s6mm6cm-15um-26um-sl", + "probe_description": "A Livermore flexible probe with 128 channels, 4 shanks, 6 mm shank length, 6 cm ribbon length. 15 um contact diameter, 26 um center-to-center distance (pitch), single-line configuration.", + "manufacturer": "Lawrence Livermore National Lab", + "num_shanks": 4, + }, skip_duplicates=True) + ``` + +2. define these entries in a special YAML file called `entries.yaml` that is + processed when `spyglass` is imported. One can think of `entries.yaml` as a + place to define information that the database should come pre-equipped with + prior to ingesting any NWB files. The `entries.yaml` file should be placed in the + `spyglass` base directory. An example can be found in + `examples/config_yaml/entries.yaml`. It has the following structure: + + ```yaml + TableName: + - TableEntry1Field1: Value + TableEntry1Field2: Value + - TableEntry2Field1: Value + TableEntry2Field2: Value + ``` + + For example, + + ```yaml + ProbeType: + - probe_type: 128c-4s6mm6cm-15um-26um-sl + probe_description: A Livermore flexible probe with 128 channels, 4 shanks, 6 mm shank length, 6 cm ribbon length. 15 um contact diameter, 26 um center-to-center distance (pitch), single-line configuration. + manufacturer: Lawrence Livermore National Lab + num_shanks: 4 + ``` + +Using a YAML file rather than creating these entries programmatically in a notebook or +script has two advantages: the YAML file maintains an easily accessible record of what +entries have been added, and the file is portable and can be +shared alongside an NWB file or set of NWB files from a given experiment. + +Next, the user must associate the NWB file with entries defined in the database. +This is done by creating a _configuration file_, which must: (1) be in the same +directory as the NWB file that it configures, (2) be in YAML format, and (3) have the +following naming convention: `_spyglass_config.yaml`. + +Users can programmatically generate this configuration file. It is then read by +spyglass when calling `insert_session` on the associated NWB file. + +An example of this can be found at +`examples/config_yaml/sub-AppleBottom_ses-AppleBottom-DY20-g3_behavior+ecephys_spyglass_config.yaml`. +This file is associated with the NWB file +`sub-AppleBottom_ses-AppleBottom-DY20-g3_behavior+ecephys.nwb`. + +This is the general format for the config entry: + +```yaml +TableName: +- primary_key1: value1 +``` + +For example: + +```yaml +DataAcquisitionDevice: +- data_acquisition_device_name: Neuropixels Recording Device +``` + +In this example, the NWB file that corresponds to this config YAML will become +associated with the DataAcquisitionDevice with primary key +data_acquisition_device_name: Neuropixels Recording Device. This entry must +exist. diff --git a/docs/session_groups.md b/docs/src/misc/session_groups.md similarity index 94% rename from docs/session_groups.md rename to docs/src/misc/session_groups.md index a243e198a..d2e0c1fb8 100644 --- a/docs/session_groups.md +++ b/docs/src/misc/session_groups.md @@ -1,6 +1,7 @@ # Session groups -A session group is a collection of sessions.
Each group has a name (primary key) and a description. +A session group is a collection of sessions. Each group has a name (primary key) +and a description. ```python from spyglass.common import SessionGroup @@ -22,4 +23,4 @@ SessionGroup.get_group_sessions('test_group_1') # Update the description of a session group SessionGroup.update_session_group_description('test_group_1', 'Test description') -``` \ No newline at end of file +``` diff --git a/docs/src/stylesheets/extra.css b/docs/src/stylesheets/extra.css new file mode 100644 index 000000000..5f4229684 --- /dev/null +++ b/docs/src/stylesheets/extra.css @@ -0,0 +1,127 @@ + +/*https://github.com/squidfunk/mkdocs-material/blob/master/src/assets/stylesheets/main/_colors.scss*/ +/* Color palette of lab logo */ +:root { + --green-: #2A4344; + --blue--: #40555E; + --brown-: #5C3829; + --orange: #B45C3C; + --yellow: #ECD4A3; + --white-: #DBDCDA; +} + +[data-md-color-scheme="auto"] { + /* Primary Bar */ + --md-primary-fg-color: var(--brown-); + --md-primary-bg-color: var(--yellow); + --md-accent-fg-color: var(--blue--); + --md-accent-bg-color: var(--orange); + + /* // Code color shades */ + --md-code-fg-color: var(--brown-); + --md-code-bg-color: var(--syellow); + + /* Typeset `a` color shades */ + --md-typeset-a-color: var(--orange); +} + +[data-md-color-scheme="slate"] { + /* Primary Bar */ + --md-primary-fg-color: var(--brown-); + --md-primary-bg-color: var(--yellow); + --md-accent-fg-color: var(--orange); + --md-accent-bg-color: var(--blue--); + --md-typeset-a-color: var(--yellow); + --md-hue: 30; +} + +.table { + table-layout: auto; + width: auto; +} +td { + overflow: hidden; + white-space: nowrap; + text-overflow: ellipsis; +} +blockquote +{ +margin-left: 10px; +padding-left: 10px; +padding-top: 0px; +padding-bottom: 0px; +border-left: 3px solid #80808080; +} + +/* DataJoint Header */ +.py.class dd > p:first-child { + margin-top: 0.8rem; + background-color: #F8F9FB; + margin-bottom: 0.9rem; + font-weight: bold; + margin-left: 0; + font-size: 1rem; +} + +[data-theme="slate"] .py.class dd > p:first-child { + background-color: #000; +} + +@media (prefers-color-scheme: dark) { + [data-theme="auto"] .py.class dd > p:first-child { + background-color: #000; + } +} + +@media (prefers-color-scheme: light) { + [data-theme="auto"] .py.class dd > p:first-child { + background-color: #F8F9FB; + } +} + +.py.class dd > p { + margin-bottom: 1px; + margin-top: 1px; + background-color: #F8F9FB; + margin-left: 20px; + font-size: 0.9rem; +} + +[data-theme="slate"] .py.class dd > p { + background-color: #000; +} + +@media (prefers-color-scheme: dark) { + [data-theme="auto"] .py.class dd > p { + background-color: #000; + } +} + +@media (prefers-color-scheme: light) { + [data-theme="auto"] .py.class dd > p { + background-color: #F8F9FB; + } +} + +.py.class dd > p.rubric { + background-color: white; + margin-top: 15px; + margin-left: 0; + font-size: 1rem; +} + +[data-theme="slate"] .py.class dd > p.rubric { + background-color: #000; +} + +@media (prefers-color-scheme: dark) { + [data-theme="auto"] .py.class dd > p.rubric { + background-color: #000; + } +} + +@media (prefers-color-scheme: light) { + [data-theme="auto"] .py.class dd > p.rubric { + background-color: #F8F9FB; + } +} diff --git a/examples/cli_examples/create_session_group.py b/examples/cli_examples/create_session_group.py index 0811842fb..7218a9174 100755 --- a/examples/cli_examples/create_session_group.py +++ b/examples/cli_examples/create_session_group.py @@ -5,5 +5,7 @@ nwb_file_name = 
"RN2_20191110_.nwb" sgc.SessionGroup.add_group("group1", "Group1", skip_duplicates=True) -sgc.SessionGroup.add_session_to_group(nwb_file_name, "group1", skip_duplicates=True) +sgc.SessionGroup.add_session_to_group( + nwb_file_name, "group1", skip_duplicates=True +) print(sgc.SessionGroup.get_group_sessions("group1")) diff --git a/examples/cli_examples/readme.md b/examples/cli_examples/readme.md index 0436a4f08..8c4b7340c 100644 --- a/examples/cli_examples/readme.md +++ b/examples/cli_examples/readme.md @@ -1,4 +1,6 @@ -### Insert a lab team +# Using the spyglass CLI + +## Insert a lab team Create a lab team. @@ -9,7 +11,7 @@ spyglass insert-lab-team team.yaml spyglass list-lab-teams ``` -### Insert a lab member +## Insert a lab member Create a lab member. @@ -20,7 +22,7 @@ spyglass insert-lab-member labmember.yaml spyglass list-lab-members ``` -### Insert a lab team member +## Insert a lab team member Add a lab member to a lab team. @@ -31,7 +33,7 @@ spyglass insert-lab-team-member labteammember.yaml spyglass list-lab-team-members ``` -### Insert a session +## Insert a session Insert a session from a raw .nwb file. @@ -47,19 +49,19 @@ spyglass list-sessions spyglass list-interval-lists RN2_20191110_.nwb # Note the trailing underscore here ``` -### Set up the sort groups +## Set up the sort groups Set up the electrode sort groups. See [set_sort_groups_by_shank.py](./set_sort_groups_by_shank.py) for an example. -### Insert a sort interval +## Insert a sort interval Create a sort interval (time interval) for spike sorting. See [insert_sort_interval.py](./insert_sort_interval.py) for an example. -### Insert spike sorting preprocessing parameters +## Insert spike sorting preprocessing parameters Define spike sorting preprocessing parameters. @@ -70,7 +72,7 @@ spyglass insert-spike-sorting-preprocessing-parameters parameters.yaml spyglass list-spike-sorting-preprocessing-parameters ``` -### Create spike sorting recording +## Create spike sorting recording Create a spike sorting recording. @@ -85,7 +87,7 @@ spyglass create-spike-sorting-recording-view recording.yaml # Use --replace to force recreate the view ``` -### Insert artifact detection parameters +## Insert artifact detection parameters Define artifact detection parameters. @@ -96,7 +98,7 @@ spyglass insert-artifact-detection-parameters parameters.yaml spyglass list-artifact-detection-parameters ``` -### Insert spike sorter parameters +## Insert spike sorter parameters Define spike sorter parameters. @@ -107,15 +109,15 @@ spyglass insert-spike-sorter-parameters parameters.yaml spyglass list-spike-sorter-parameters ``` -### Create a session group +## Create a session group A session group is a collection of sessions that can be viewed via spyglassview. See [session_groups.md](../../docs/session_groups.md) and [create_session_group.py](./create_session_group.py). 
-### Create spyglass view +## Create spyglass view ```bash # This will print a URL pointing to the view -create-spyglass-view -``` \ No newline at end of file +create-spyglass-view +``` diff --git a/franklab_scripts/alter_tables.py b/franklab_scripts/alter_tables.py index 1e3c17a3c..f3f8138bd 100644 --- a/franklab_scripts/alter_tables.py +++ b/franklab_scripts/alter_tables.py @@ -24,7 +24,9 @@ def main(): def update_cls(cls): - if issubclass(cls, (dj.Manual, dj.Lookup, dj.Imported, dj.Computed, dj.Part)): + if issubclass( + cls, (dj.Manual, dj.Lookup, dj.Imported, dj.Computed, dj.Part) + ): print("Updating", cls) try: # NOTE: datajoint does not allow altering indexes yet diff --git a/franklab_scripts/nightly_cleanup.py b/franklab_scripts/nightly_cleanup.py index bbcca4084..abac74a38 100755 --- a/franklab_scripts/nightly_cleanup.py +++ b/franklab_scripts/nightly_cleanup.py @@ -7,7 +7,11 @@ import numpy as np -from spyglass.decoding.clusterless import MarkParameters, UnitMarkParameters, UnitMarks +from spyglass.decoding.clusterless import ( + MarkParameters, + UnitMarkParameters, + UnitMarks, +) warnings.simplefilter("ignore", category=DeprecationWarning) warnings.simplefilter("ignore", category=ResourceWarning) diff --git a/notebooks/00_intro.ipynb b/notebooks/00_intro.ipynb index 0ff7332ab..374dd5d86 100644 --- a/notebooks/00_intro.ipynb +++ b/notebooks/00_intro.ipynb @@ -34,7 +34,7 @@ "\n", "warnings.simplefilter(\"ignore\", category=DeprecationWarning)\n", "warnings.simplefilter(\"ignore\", category=ResourceWarning)\n", - "warnings.simplefilter(\"ignore\", category=UserWarning)\n" + "warnings.simplefilter(\"ignore\", category=UserWarning)" ] }, { @@ -47,7 +47,7 @@ "import spyglass.common as sgc\n", "\n", "# We import spyglass.data_import to allow for inserting an NWB file into the database\n", - "import spyglass.data_import as sdi\n" + "import spyglass.data_import as sdi" ] }, { @@ -83,7 +83,7 @@ "outputs": [], "source": [ "# Draw tables that are three levels below and one level above Session\n", - "dj.ERD(sgc.Session) - 1 + 3\n" + "dj.ERD(sgc.Session) - 1 + 3" ] }, { @@ -113,7 +113,7 @@ "nwb_file_name = \"montague20200802_tutorial.nwb\"\n", "filename, file_extension = os.path.splitext(nwb_file_name)\n", "# This is a copy of the original nwb file, except it doesn't contain the raw data (for storage reasons)\n", - "nwb_copy_file_name = filename + \"_\" + file_extension\n" + "nwb_copy_file_name = filename + \"_\" + file_extension" ] }, { @@ -122,7 +122,7 @@ "metadata": {}, "outputs": [], "source": [ - "nwb_file_name\n" + "nwb_file_name" ] }, { @@ -144,7 +144,7 @@ }, "outputs": [], "source": [ - "sdi.insert_sessions(nwb_file_name)\n" + "sdi.insert_sessions(nwb_file_name)" ] }, { @@ -164,7 +164,7 @@ "metadata": {}, "outputs": [], "source": [ - "sgc.Lab()\n" + "sgc.Lab()" ] }, { @@ -187,7 +187,7 @@ "metadata": {}, "outputs": [], "source": [ - "sgc.Session()\n" + "sgc.Session()" ] }, { @@ -204,7 +204,7 @@ "outputs": [], "source": [ "# can also look at the docstring\n", - "sgc.Session.describe()\n" + "sgc.Session.describe()" ] }, { @@ -220,7 +220,7 @@ "metadata": {}, "outputs": [], "source": [ - "sgc.Session & {\"nwb_file_name\": nwb_copy_file_name}\n" + "sgc.Session & {\"nwb_file_name\": nwb_copy_file_name}" ] }, { @@ -236,7 +236,7 @@ "metadata": {}, "outputs": [], "source": [ - "sgc.Raw & {\"nwb_file_name\": nwb_copy_file_name}\n" + "sgc.Raw & {\"nwb_file_name\": nwb_copy_file_name}" ] }, { @@ -252,7 +252,7 @@ "metadata": {}, "outputs": [], "source": [ - "sgc.IntervalList & 
{\"nwb_file_name\": nwb_copy_file_name}\n" + "sgc.IntervalList & {\"nwb_file_name\": nwb_copy_file_name}" ] }, { @@ -271,7 +271,7 @@ "(\n", " sgc.IntervalList\n", " & {\"nwb_file_name\": nwb_copy_file_name, \"interval_list_name\": \"04_r2\"}\n", - ").fetch1(\"valid_times\")\n" + ").fetch1(\"valid_times\")" ] }, { @@ -293,7 +293,7 @@ " - {\"interval_list_name\": \"01_s1\"}\n", " )\n", " - {\"interval_list_name\": \"04_r2\"}\n", - ").fetch(\"interval_list_name\")\n" + ").fetch(\"interval_list_name\")" ] }, { @@ -319,7 +319,7 @@ "outputs": [], "source": [ "# our data is currently in Session table\n", - "sgc.Session & {\"nwb_file_name\": nwb_copy_file_name}\n" + "sgc.Session & {\"nwb_file_name\": nwb_copy_file_name}" ] }, { @@ -329,7 +329,7 @@ "outputs": [], "source": [ "# Type `yes` when prompted to delete\n", - "(sgc.Session & {\"nwb_file_name\": nwb_copy_file_name}).delete()\n" + "(sgc.Session & {\"nwb_file_name\": nwb_copy_file_name}).delete()" ] }, { @@ -339,7 +339,7 @@ "outputs": [], "source": [ "# Check that delete worked\n", - "sgc.Session & {\"nwb_file_name\": nwb_copy_file_name}\n" + "sgc.Session & {\"nwb_file_name\": nwb_copy_file_name}" ] }, { @@ -356,7 +356,7 @@ "outputs": [], "source": [ "# Entries are also gone from downstream tables, e.g. IntervalList\n", - "sgc.IntervalList & {\"nwb_file_name\": nwb_copy_file_name}\n" + "sgc.IntervalList & {\"nwb_file_name\": nwb_copy_file_name}" ] }, { @@ -375,7 +375,7 @@ "outputs": [], "source": [ "# Check out the Nwb file\n", - "sgc.Nwbfile & {\"nwb_file_name\": nwb_copy_file_name}\n" + "sgc.Nwbfile & {\"nwb_file_name\": nwb_copy_file_name}" ] }, { @@ -385,7 +385,7 @@ "outputs": [], "source": [ "# Let's delete the entry\n", - "(sgc.Nwbfile & {\"nwb_file_name\": nwb_copy_file_name}).delete()\n" + "(sgc.Nwbfile & {\"nwb_file_name\": nwb_copy_file_name}).delete()" ] }, { @@ -405,7 +405,7 @@ "outputs": [], "source": [ "# We clean it up\n", - "sgc.Nwbfile().cleanup(delete_files=True)\n" + "sgc.Nwbfile().cleanup(delete_files=True)" ] }, { @@ -438,7 +438,7 @@ "outputs": [], "source": [ "# take a look at the lab members\n", - "sgc.LabMember()\n" + "sgc.LabMember()" ] }, { @@ -448,7 +448,7 @@ "outputs": [], "source": [ "# LabMember also has a Parts table called LabMemberInfo\n", - "sgc.LabMember.LabMemberInfo()\n" + "sgc.LabMember.LabMemberInfo()" ] }, { @@ -458,7 +458,7 @@ "outputs": [], "source": [ "# these are the existing lab teams\n", - "sgc.LabTeam()\n" + "sgc.LabTeam()" ] }, { @@ -473,7 +473,7 @@ "# team_description is optional\n", "sgc.LabTeam().create_new_team(\n", " team_name=\"Beans\", team_members=[\"Alison Comrie\"], team_description=\"test\"\n", - ")\n" + ")" ] }, { @@ -485,8 +485,9 @@ "# add info about the team members\n", "# add your name and your google account\n", "sgc.LabMember.LabMemberInfo.insert(\n", - " [[\"Alison Comrie\", \"comrie.alison@gmail.com\", \"alison\"]], skip_duplicates=True\n", - ")\n" + " [[\"Alison Comrie\", \"comrie.alison@gmail.com\", \"alison\"]],\n", + " skip_duplicates=True,\n", + ")" ] }, { @@ -495,7 +496,7 @@ "metadata": {}, "outputs": [], "source": [ - "sgc.LabMember.LabMemberInfo()\n" + "sgc.LabMember.LabMemberInfo()" ] } ], diff --git a/notebooks/01_spikesorting.ipynb b/notebooks/01_spikesorting.ipynb index 96dcdbe26..4dd320282 100644 --- a/notebooks/01_spikesorting.ipynb +++ b/notebooks/01_spikesorting.ipynb @@ -64,7 +64,7 @@ "import warnings\n", "\n", "warnings.simplefilter(\"ignore\", category=DeprecationWarning)\n", - "warnings.simplefilter(\"ignore\", category=ResourceWarning)\n" + 
"warnings.simplefilter(\"ignore\", category=ResourceWarning)" ] }, { @@ -82,7 +82,7 @@ "source": [ "your_name = \"FirstName LastName\"\n", "your_email = \"gmail@gmail.com\"\n", - "datajoint_username = \"user\"\n" + "datajoint_username = \"user\"" ] }, { @@ -101,8 +101,9 @@ " [your_name, your_email, datajoint_username], skip_duplicates=True\n", ")\n", "sgc.LabTeam.LabTeamMember.insert1(\n", - " {\"team_name\": \"LorenLab\", \"lab_member_name\": your_name}, skip_duplicates=True\n", - ")\n" + " {\"team_name\": \"LorenLab\", \"lab_member_name\": your_name},\n", + " skip_duplicates=True,\n", + ")" ] }, { @@ -119,7 +120,7 @@ "outputs": [], "source": [ "if your_name in (sgc.LabTeam._____() & {___: ____}).fetch(______).tolist():\n", - " print(\"You made it in!\")\n" + " print(\"You made it in!\")" ] }, { @@ -139,7 +140,7 @@ "import spyglass.data_import as sdi\n", "\n", "sdi.insert_sessions(\"montague20200802_tutorial.nwb\")\n", - "nwb_file_name = \"montague20200802_tutorial_.nwb\"\n" + "nwb_file_name = \"montague20200802_tutorial_.nwb\"" ] }, { @@ -178,7 +179,7 @@ "outputs": [], "source": [ "# answer 'yes' when prompted\n", - "sgss.SortGroup().set_group_by_shank(nwb_file_name)\n" + "sgss.SortGroup().set_group_by_shank(nwb_file_name)" ] }, { @@ -194,7 +195,7 @@ "metadata": {}, "outputs": [], "source": [ - "sgss.SortGroup.SortGroupElectrode & {\"nwb_file_name\": nwb_file_name}\n" + "sgss.SortGroup.SortGroupElectrode & {\"nwb_file_name\": nwb_file_name}" ] }, { @@ -215,7 +216,7 @@ }, "outputs": [], "source": [ - "sgc.IntervalList & {\"nwb_file_name\": nwb_file_name}\n" + "sgc.IntervalList & {\"nwb_file_name\": nwb_file_name}" ] }, { @@ -238,7 +239,7 @@ ").fetch1(\"valid_times\")\n", "print(\n", " f\"IntervalList begins as a {np.round((interval_list[0][1] - interval_list[0][0]) / 60,0):g} min long epoch\"\n", - ")\n" + ")" ] }, { @@ -258,7 +259,7 @@ "sort_interval = interval_list[0]\n", "sort_interval_name = interval_list_name + \"_first180\"\n", "sort_interval = np.copy(interval_list[0])\n", - "sort_interval[1] = sort_interval[0] + 180\n" + "sort_interval[1] = sort_interval[0] + 180" ] }, { @@ -281,7 +282,7 @@ " \"sort_interval\": sort_interval,\n", " },\n", " skip_duplicates=True,\n", - ")\n" + ")" ] }, { @@ -300,7 +301,7 @@ "sgss.SortInterval & {\n", " \"nwb_file_name\": nwb_file_name,\n", " \"sort_interval_name\": sort_interval_name,\n", - "}\n" + "}" ] }, { @@ -323,7 +324,7 @@ "print(\n", " f\"The sort interval goes from {fetched_sort_interval[0]} to {fetched_sort_interval[1]}, \\\n", "which is {(fetched_sort_interval[1] - fetched_sort_interval[0])} seconds. 
COOL!\"\n", - ")\n" + ")" ] }, { @@ -340,7 +341,7 @@ "metadata": {}, "outputs": [], "source": [ - "sgss.SpikeSortingPreprocessingParameters()\n" + "sgss.SpikeSortingPreprocessingParameters()" ] }, { @@ -358,9 +359,10 @@ "source": [ "sgss.SpikeSortingPreprocessingParameters().insert_default()\n", "preproc_params = (\n", - " sgss.SpikeSortingPreprocessingParameters() & {\"preproc_params_name\": \"default\"}\n", + " sgss.SpikeSortingPreprocessingParameters()\n", + " & {\"preproc_params_name\": \"default\"}\n", ").fetch1(\"preproc_params\")\n", - "print(preproc_params)\n" + "print(preproc_params)" ] }, { @@ -383,7 +385,7 @@ " \"preproc_params\": preproc_params,\n", " },\n", " skip_duplicates=True,\n", - ")\n" + ")" ] }, { @@ -408,7 +410,7 @@ "key[\"interval_list_name\"] = \"02_r1\"\n", "key[\"team_name\"] = \"LorenLab\"\n", "\n", - "ssr_key = key\n" + "ssr_key = key" ] }, { @@ -426,7 +428,7 @@ "outputs": [], "source": [ "sgss.SpikeSortingRecordingSelection.insert1(ssr_key, skip_duplicates=True)\n", - "sgss.SpikeSortingRecordingSelection() & ssr_key\n" + "sgss.SpikeSortingRecordingSelection() & ssr_key" ] }, { @@ -445,7 +447,7 @@ "source": [ "sgss.SpikeSortingRecording.populate(\n", " [(sgss.SpikeSortingRecordingSelection & ssr_key).proj()]\n", - ")\n" + ")" ] }, { @@ -461,7 +463,7 @@ "metadata": {}, "outputs": [], "source": [ - "sgss.SpikeSortingRecording() & ssr_key\n" + "sgss.SpikeSortingRecording() & ssr_key" ] }, { @@ -478,7 +480,7 @@ "metadata": {}, "outputs": [], "source": [ - "sgss.ArtifactDetectionParameters().insert_default()\n" + "sgss.ArtifactDetectionParameters().insert_default()" ] }, { @@ -488,7 +490,7 @@ "outputs": [], "source": [ "artifact_key = (sgss.SpikeSortingRecording() & ssr_key).fetch1(\"KEY\")\n", - "artifact_key[\"artifact_params_name\"] = \"none\"\n" + "artifact_key[\"artifact_params_name\"] = \"none\"" ] }, { @@ -504,7 +506,7 @@ "metadata": {}, "outputs": [], "source": [ - "sgss.ArtifactDetectionSelection().insert1(artifact_key)\n" + "sgss.ArtifactDetectionSelection().insert1(artifact_key)" ] }, { @@ -513,7 +515,7 @@ "metadata": {}, "outputs": [], "source": [ - "sgss.ArtifactDetectionSelection() & artifact_key\n" + "sgss.ArtifactDetectionSelection() & artifact_key" ] }, { @@ -529,7 +531,7 @@ "metadata": {}, "outputs": [], "source": [ - "sgss.ArtifactDetection.populate(artifact_key)\n" + "sgss.ArtifactDetection.populate(artifact_key)" ] }, { @@ -545,7 +547,7 @@ "metadata": {}, "outputs": [], "source": [ - "sgss.ArtifactRemovedIntervalList() & artifact_key\n" + "sgss.ArtifactRemovedIntervalList() & artifact_key" ] }, { @@ -571,7 +573,7 @@ }, "outputs": [], "source": [ - "sgss.SpikeSorterParameters().insert_default()\n" + "sgss.SpikeSorterParameters().insert_default()" ] }, { @@ -586,7 +588,7 @@ " sgss.SpikeSorterParameters\n", " & {\"sorter\": sorter_name, \"sorter_params_name\": \"default\"}\n", ").fetch1()\n", - "print(ms4_default_params)\n" + "print(ms4_default_params)" ] }, { @@ -620,9 +622,10 @@ "param_dict[\"verbose\"] = True\n", "# set clip size as number of samples for 1.33 millisecond based on the sampling rate\n", "param_dict[\"clip_size\"] = np.int(\n", - " 1.33e-3 * (sgc.Raw & {\"nwb_file_name\": nwb_file_name}).fetch1(\"sampling_rate\")\n", + " 1.33e-3\n", + " * (sgc.Raw & {\"nwb_file_name\": nwb_file_name}).fetch1(\"sampling_rate\")\n", ")\n", - "param_dict\n" + "param_dict" ] }, { @@ -640,7 +643,7 @@ }, "outputs": [], "source": [ - "parameter_set_name = \"franklab_hippocampus_tutorial\"\n" + "parameter_set_name = \"franklab_hippocampus_tutorial\"" ] }, { 
@@ -671,7 +674,7 @@ " sgss.SpikeSorterParameters\n", " & {\"sorter\": sorter_name, \"sorter_params_name\": parameter_set_name}\n", ").fetch1()\n", - "p\n" + "p" ] }, { @@ -695,7 +698,7 @@ "ss_key[\"sorter_params_name\"] = parameter_set_name\n", "del ss_key[\"artifact_params_name\"]\n", "sgss.SpikeSortingSelection.insert1(ss_key, skip_duplicates=True)\n", - "(sgss.SpikeSortingSelection & ss_key)\n" + "(sgss.SpikeSortingSelection & ss_key)" ] }, { @@ -714,7 +717,7 @@ }, "outputs": [], "source": [ - "sgss.SpikeSorting.populate([(sgss.SpikeSortingSelection & ss_key).proj()])\n" + "sgss.SpikeSorting.populate([(sgss.SpikeSortingSelection & ss_key).proj()])" ] }, { @@ -730,7 +733,7 @@ "metadata": {}, "outputs": [], "source": [ - "sgss.SpikeSorting() & ss_key\n" + "sgss.SpikeSorting() & ss_key" ] }, { diff --git a/notebooks/02_curation.ipynb b/notebooks/02_curation.ipynb index f2a47434c..d3984409b 100644 --- a/notebooks/02_curation.ipynb +++ b/notebooks/02_curation.ipynb @@ -77,7 +77,7 @@ "import warnings\n", "\n", "warnings.simplefilter(\"ignore\", category=DeprecationWarning)\n", - "warnings.simplefilter(\"ignore\", category=ResourceWarning)\n" + "warnings.simplefilter(\"ignore\", category=ResourceWarning)" ] }, { @@ -153,7 +153,7 @@ " WaveformParameters,\n", " Waveforms,\n", " WaveformSelection,\n", - ")\n" + ")" ] }, { @@ -165,7 +165,7 @@ "# Define the name of the file that you copied and renamed from previous tutorials\n", "nwb_file_name = \"beans20190718.nwb\"\n", "filename, file_extension = os.path.splitext(nwb_file_name)\n", - "nwb_file_name2 = filename + \"_\" + file_extension\n" + "nwb_file_name2 = filename + \"_\" + file_extension" ] }, { @@ -407,7 +407,7 @@ } ], "source": [ - "SpikeSorting & {\"nwb_file_name\": nwb_file_name2}\n" + "SpikeSorting & {\"nwb_file_name\": nwb_file_name2}" ] }, { @@ -433,7 +433,7 @@ "outputs": [], "source": [ "# workspace_uri = (SpikeSorting & {'nwb_file_name': nwb_file_name2}).fetch1('curation_feed_uri')\n", - "# print(f'https://sortingview.vercel.app/workspace?workspace={workspace_uri}&channel=franklab')\n" + "# print(f'https://sortingview.vercel.app/workspace?workspace={workspace_uri}&channel=franklab')" ] }, { diff --git a/notebooks/03_lfp.ipynb b/notebooks/03_lfp.ipynb index f59b2aa67..999483e7a 100644 --- a/notebooks/03_lfp.ipynb +++ b/notebooks/03_lfp.ipynb @@ -14,13 +14,14 @@ "metadata": {}, "outputs": [], "source": [ - "\n", "import pynwb\n", "import os\n", "import numpy as np\n", + "\n", "# DataJoint and DataJoint schema\n", "import datajoint as dj\n", - "dj.config['filepath_checksum_size_limit'] = 1 * 1024**2\n", + "\n", + "dj.config[\"filepath_checksum_size_limit\"] = 1 * 1024**2\n", "\n", "## We also import a bunch of tables so that we can call them easily\n", "from spyglass.common import (\n", @@ -64,14 +65,14 @@ " LFPSelection,\n", " LFP,\n", " LFPOutput,\n", - " LFPBandSelection, \n", - " LFPBand\n", + " LFPBandSelection,\n", + " LFPBand,\n", ")\n", "\n", "import warnings\n", "\n", "warnings.simplefilter(\"ignore\", category=DeprecationWarning)\n", - "warnings.simplefilter(\"ignore\", category=ResourceWarning)\n" + "warnings.simplefilter(\"ignore\", category=ResourceWarning)" ] }, { @@ -95,8 +96,8 @@ "nwb_file_name = nwb_file_names[0]\n", "print(nwb_file_name)\n", "\n", - "# test: \n", - "nwb_file_name = 'tonks20211103_.nwb'\n" + "# test:\n", + "nwb_file_name = \"tonks20211103_.nwb\"" ] }, { @@ -115,7 +116,7 @@ "metadata": {}, "outputs": [], "source": [ - "FirFilterParameters().create_standard_filters()\n" + 
"FirFilterParameters().create_standard_filters()" ] }, { @@ -142,17 +143,26 @@ "metadata": {}, "outputs": [], "source": [ - "electrode_ids = (Electrode & {\"nwb_file_name\": nwb_file_name}).fetch(\"electrode_id\")\n", - "#electrode_indexes = [0, 4, 8]\n", - "#lfp_electrode_ids = electrode_ids[electrode_indexes]\n", - "lfp_electrode_ids = [28, 32, 40]\n", + "electrode_ids = (Electrode & {\"nwb_file_name\": nwb_file_name}).fetch(\n", + " \"electrode_id\"\n", + ")\n", + "# electrode_indexes = [0, 4, 8]\n", + "# lfp_electrode_ids = electrode_ids[electrode_indexes]\n", + "lfp_electrode_ids = [28, 32, 40]\n", "lfp_electrode_group_name = \"test_group\"\n", "\n", "\n", - "lfp_eg_key = {\"nwb_file_name\" : nwb_file_name, \"lfp_electrode_group_name\" : lfp_electrode_group_name}\n", - "#Delete the old test group if it exists (uncomment the line below if so) and then insert the new one\n", - "#(LFPElectrodeGroup & lfp_eg_key).delete(force_parts=True)\n", - "LFPElectrodeGroup.create_lfp_electrode_group(nwb_file_name=nwb_file_name, group_name=lfp_electrode_group_name, electrode_list=lfp_electrode_ids)\n" + "lfp_eg_key = {\n", + " \"nwb_file_name\": nwb_file_name,\n", + " \"lfp_electrode_group_name\": lfp_electrode_group_name,\n", + "}\n", + "# Delete the old test group if it exists (uncomment the line below if so) and then insert the new one\n", + "# (LFPElectrodeGroup & lfp_eg_key).delete(force_parts=True)\n", + "LFPElectrodeGroup.create_lfp_electrode_group(\n", + " nwb_file_name=nwb_file_name,\n", + " group_name=lfp_electrode_group_name,\n", + " electrode_list=lfp_electrode_ids,\n", + ")" ] }, { @@ -193,13 +203,26 @@ "source": [ "# we choose the first run period and the standard LFP filter for 30KHz data and add a new short interval for this demonstration\n", "orig_interval_list_name = \"02_r1\"\n", - "valid_times = (IntervalList & {\"nwb_file_name\" : nwb_file_name, \"interval_list_name\" : orig_interval_list_name}).fetch1(\"valid_times\")\n", - "new_valid_times = np.asarray([[valid_times[0,0], valid_times[0,0]+100]])\n", + "valid_times = (\n", + " IntervalList\n", + " & {\n", + " \"nwb_file_name\": nwb_file_name,\n", + " \"interval_list_name\": orig_interval_list_name,\n", + " }\n", + ").fetch1(\"valid_times\")\n", + "new_valid_times = np.asarray([[valid_times[0, 0], valid_times[0, 0] + 100]])\n", "interval_list_name = \"test interval\"\n", - "IntervalList.insert1({\"nwb_file_name\":nwb_file_name, \"interval_list_name\":interval_list_name, \"valid_times\":new_valid_times}, skip_duplicates=True)\n", + "IntervalList.insert1(\n", + " {\n", + " \"nwb_file_name\": nwb_file_name,\n", + " \"interval_list_name\": interval_list_name,\n", + " \"valid_times\": new_valid_times,\n", + " },\n", + " skip_duplicates=True,\n", + ")\n", "\n", "filter_name = \"LFP 0-400 Hz\"\n", - "filter_sampling_rate = 30000\n" + "filter_sampling_rate = 30000" ] }, { @@ -270,7 +293,7 @@ " \"bandpass\",\n", " [4, 5, 11, 12],\n", " \"theta filter for 1 Khz data\",\n", - ")\n" + ")" ] }, { @@ -289,10 +312,8 @@ }, "outputs": [], "source": [ - "\n", - "\n", "# assume that we've filtered these electrodes; change this if not\n", - "lfp_band_electrode_ids = [28, 32]\n", + "lfp_band_electrode_ids = [28, 32]\n", "\n", "# set the interval list name for this band; here we use the same interval as above\n", "interval_list_name = \"test interval\"\n", @@ -322,9 +343,11 @@ " filter_name=filter_name,\n", " interval_list_name=interval_list_name,\n", " reference_electrode_list=ref_elect,\n", - " lfp_band_sampling_rate=lfp_band_sampling_rate\n", + " 
lfp_band_sampling_rate=lfp_band_sampling_rate,\n", ")\n", - "lfp_b_key = (LFPBandSelection & {\"lfp_id\": lfp_id, \"filter_name\" : filter_name}).fetch1(\"KEY\")" + "lfp_b_key = (\n", + " LFPBandSelection & {\"lfp_id\": lfp_id, \"filter_name\": filter_name}\n", + ").fetch1(\"KEY\")" ] }, { @@ -343,7 +366,7 @@ }, "outputs": [], "source": [ - "(LFPBandSelection() & {\"nwb_file_name\": nwb_file_name})\n" + "(LFPBandSelection() & {\"nwb_file_name\": nwb_file_name})" ] }, { @@ -355,7 +378,7 @@ "outputs": [], "source": [ "LFPBand().populate(LFPBandSelection() & {\"nwb_file_name\": nwb_file_name})\n", - "LFPBand()\n" + "LFPBand()" ] }, { @@ -374,7 +397,7 @@ "outputs": [], "source": [ "import matplotlib.pyplot as plt\n", - "import numpy as np\n" + "import numpy as np" ] }, { @@ -392,10 +415,10 @@ "lfp_elect_indeces = get_electrode_indices(lfp_eseries, lfp_band_electrode_ids)\n", "lfp_timestamps = np.asarray(lfp_eseries.timestamps)\n", "\n", - "lfp_band_eseries = (LFPBand() & lfp_b_key).fetch_nwb()[0][\n", - " \"filtered_data\"\n", - "]\n", - "lfp_band_elect_indeces = get_electrode_indices(lfp_band_eseries, lfp_band_electrode_ids)\n", + "lfp_band_eseries = (LFPBand() & lfp_b_key).fetch_nwb()[0][\"filtered_data\"]\n", + "lfp_band_elect_indeces = get_electrode_indices(\n", + " lfp_band_eseries, lfp_band_electrode_ids\n", + ")\n", "lfp_band_timestamps = np.asarray(lfp_band_eseries.timestamps)" ] }, @@ -406,10 +429,10 @@ "outputs": [], "source": [ "# get a list of times for the first run epoch and then select a 2 second interval 100 seconds from the beginning\n", - "#run1times = (\n", + "# run1times = (\n", "# IntervalList & {\"nwb_file_name\": nwb_file_name, \"interval_list_name\": \"02_r1\"}\n", - "#).fetch1(\"valid_times\")\n", - "plottimes = [new_valid_times[0][0] + 10, new_valid_times[0][0] + 12]\n" + "# ).fetch1(\"valid_times\")\n", + "plottimes = [new_valid_times[0][0] + 10, new_valid_times[0][0] + 12]" ] }, { @@ -426,16 +449,14 @@ ")[0]\n", "\n", "lfp_time_ind = np.where(\n", - " np.logical_and(\n", - " lfp_timestamps > plottimes[0], lfp_timestamps < plottimes[1]\n", - " )\n", + " np.logical_and(lfp_timestamps > plottimes[0], lfp_timestamps < plottimes[1])\n", ")[0]\n", "lfp_band_time_ind = np.where(\n", " np.logical_and(\n", " lfp_band_timestamps > plottimes[0],\n", " lfp_band_timestamps < plottimes[1],\n", " )\n", - ")[0]\n" + ")[0]" ] }, { @@ -462,7 +483,7 @@ "plt.xlabel(\"Time (sec)\")\n", "plt.ylabel(\"Amplitude (AD units)\")\n", "\n", - "plt.show()\n" + "plt.show()" ] }, { diff --git a/notebooks/04_Trodes_position.ipynb b/notebooks/04_Trodes_position.ipynb index 80e0c4cb9..b0e24468a 100644 --- a/notebooks/04_Trodes_position.ipynb +++ b/notebooks/04_Trodes_position.ipynb @@ -42,7 +42,7 @@ }, "outputs": [], "source": [ - "dj.config['filepath_checksum_size_limit'] = 1 * 1024**3\n", + "dj.config[\"filepath_checksum_size_limit\"] = 1 * 1024**3\n", "dj.config.save_global()" ] }, @@ -190,7 +190,7 @@ "source": [ "nwb_file_name = \"chimi20200216_new.nwb\"\n", "nwb_copy_file_name = sgc.nwb_helper_fn.get_nwb_copy_filename(nwb_file_name)\n", - "sgc.RawPosition() & {\"nwb_file_name\":nwb_copy_file_name}" + "sgc.RawPosition() & {\"nwb_file_name\": nwb_copy_file_name}" ] }, { @@ -422,8 +422,11 @@ "source": [ "interval_list_name = \"pos 1 valid times\"\n", "raw_position_df = (\n", - " sgc.RawPosition() & {\"nwb_file_name\": nwb_copy_file_name,\n", - " \"interval_list_name\": interval_list_name}\n", + " sgc.RawPosition()\n", + " & {\n", + " \"nwb_file_name\": nwb_copy_file_name,\n", + " 
\"interval_list_name\": interval_list_name,\n", + " }\n", ").fetch1_dataframe()\n", "raw_position_df" ] @@ -475,7 +478,7 @@ "ax.plot(raw_position_df.xloc2, raw_position_df.yloc2, color=\"red\")\n", "ax.set_xlabel(\"x-position [pixels]\", fontsize=18)\n", "ax.set_ylabel(\"y-position [pixels]\", fontsize=18)\n", - "ax.set_title(\"Raw Position\", fontsize=28)\n" + "ax.set_title(\"Raw Position\", fontsize=28)" ] }, { @@ -506,7 +509,7 @@ " \"trodes_pos_params_name\": \"default\",\n", " },\n", " skip_duplicates=True,\n", - ")\n" + ")" ] }, { @@ -529,11 +532,14 @@ "outputs": [], "source": [ "sgp.TrodesPosSelection()\n", - "trodes_key =(sgp.TrodesPosSelection() & {\n", + "trodes_key = (\n", + " sgp.TrodesPosSelection()\n", + " & {\n", " \"nwb_file_name\": nwb_copy_file_name,\n", " \"interval_list_name\": \"pos 1 valid times\",\n", " \"trodes_pos_params_name\": \"default\",\n", - " }).fetch1(\"KEY\")" + " }\n", + ").fetch1(\"KEY\")" ] }, { @@ -595,7 +601,7 @@ } ], "source": [ - "sgp.TrodesPos.populate(trodes_key)\n" + "sgp.TrodesPos.populate(trodes_key)" ] }, { @@ -938,7 +944,7 @@ " \"position_info_param_name\": \"default\",\n", " }\n", ").fetch1_dataframe()\n", - "position_info\n" + "position_info" ] }, { @@ -980,7 +986,7 @@ } ], "source": [ - "position_info.index\n" + "position_info.index" ] }, { @@ -1034,7 +1040,7 @@ "ax.plot(position_info.head_position_x, position_info.head_position_y)\n", "ax.set_xlabel(\"x-position [cm]\", fontsize=18)\n", "ax.set_ylabel(\"y-position [cm]\", fontsize=18)\n", - "ax.set_title(\"Head Position\", fontsize=28)\n" + "ax.set_title(\"Head Position\", fontsize=28)" ] }, { @@ -1075,7 +1081,7 @@ "ax.plot(position_info.head_velocity_x, position_info.head_velocity_y)\n", "ax.set_xlabel(\"x-velocity [cm/s]\", fontsize=18)\n", "ax.set_ylabel(\"y-velocity [cm/s]\", fontsize=18)\n", - "ax.set_title(\"Head Velocity\", fontsize=28)\n" + "ax.set_title(\"Head Velocity\", fontsize=28)" ] }, { @@ -1117,7 +1123,7 @@ "ax.set_xlabel(\"Time\", fontsize=18)\n", "ax.set_ylabel(\"Speed [cm/s]\", fontsize=18)\n", "ax.set_title(\"Head Speed\", fontsize=28)\n", - "ax.set_xlim((position_info.index.min(), position_info.index.max()))\n" + "ax.set_xlim((position_info.index.min(), position_info.index.max()))" ] }, { @@ -1180,7 +1186,7 @@ " \"interval_list_name\": \"pos 1 valid times\",\n", " \"position_info_param_name\": \"default\",\n", " }\n", - ")\n" + ")" ] }, { @@ -1350,7 +1356,7 @@ " skip_duplicates=True,\n", ")\n", "\n", - "PositionInfoParameters()\n" + "PositionInfoParameters()" ] }, { @@ -1493,7 +1499,7 @@ " skip_duplicates=True,\n", ")\n", "\n", - "IntervalPositionInfoSelection()\n" + "IntervalPositionInfoSelection()" ] }, { @@ -1507,7 +1513,7 @@ }, "outputs": [], "source": [ - "IntervalPositionInfo.populate()\n" + "IntervalPositionInfo.populate()" ] }, { @@ -1717,7 +1723,7 @@ " }\n", ").fetch1_dataframe()\n", "\n", - "upsampled_position_info\n" + "upsampled_position_info" ] }, { @@ -1763,11 +1769,12 @@ "axes[0].set_title(\"Head Position\", fontsize=28)\n", "\n", "axes[1].plot(\n", - " upsampled_position_info.head_position_x, upsampled_position_info.head_position_y\n", + " upsampled_position_info.head_position_x,\n", + " upsampled_position_info.head_position_y,\n", ")\n", "axes[1].set_xlabel(\"x-position [cm]\", fontsize=18)\n", "axes[1].set_ylabel(\"y-position [cm]\", fontsize=18)\n", - "axes[1].set_title(\"Upsampled Head Position\", fontsize=28)\n" + "axes[1].set_title(\"Upsampled Head Position\", fontsize=28)" ] }, { @@ -1816,7 +1823,7 @@ "axes[1].plot(upsampled_position_info.index, 
upsampled_position_info.head_speed)\n", "axes[1].set_xlabel(\"Time\", fontsize=18)\n", "axes[1].set_ylabel(\"Speed [cm/s]\", fontsize=18)\n", - "axes[1].set_title(\"Upsampled Head Speed\", fontsize=28)\n" + "axes[1].set_title(\"Upsampled Head Speed\", fontsize=28)" ] }, { @@ -1862,11 +1869,12 @@ "axes[0].set_title(\"Head Velocity\", fontsize=28)\n", "\n", "axes[1].plot(\n", - " upsampled_position_info.head_velocity_x, upsampled_position_info.head_velocity_y\n", + " upsampled_position_info.head_velocity_x,\n", + " upsampled_position_info.head_velocity_y,\n", ")\n", "axes[1].set_xlabel(\"x-velocity [cm/s]\", fontsize=18)\n", "axes[1].set_ylabel(\"y-velocity [cm/s]\", fontsize=18)\n", - "axes[1].set_title(\"Upsampled Head Velocity\", fontsize=28)\n" + "axes[1].set_title(\"Upsampled Head Velocity\", fontsize=28)" ] }, { diff --git a/notebooks/05_DLC_from_scratch.ipynb b/notebooks/05_DLC_from_scratch.ipynb index 9c339ca3b..23b85958a 100644 --- a/notebooks/05_DLC_from_scratch.ipynb +++ b/notebooks/05_DLC_from_scratch.ipynb @@ -232,8 +232,10 @@ "metadata": {}, "outputs": [], "source": [ - "video_list = [{'nwb_file_name': 'J1620210529_.nwb', \"epoch\": 2},\n", - " {'nwb_file_name': 'peanut20201103_.nwb', \"epoch\": 4},]" + "video_list = [\n", + " {\"nwb_file_name\": \"J1620210529_.nwb\", \"epoch\": 2},\n", + " {\"nwb_file_name\": \"peanut20201103_.nwb\", \"epoch\": 4},\n", + "]" ] }, { @@ -264,7 +266,7 @@ "team_name = \"LorenLab\"\n", "project_name = \"tutorial_scratch_DG\"\n", "frames_per_video = 100\n", - "bodyparts = ['redLED_C', 'greenLED', 'redLED_L', 'redLED_R', 'tailBase']\n", + "bodyparts = [\"redLED_C\", \"greenLED\", \"redLED_L\", \"redLED_R\", \"tailBase\"]\n", "project_key = sgp.DLCProject.insert_new_project(\n", " project_name=project_name,\n", " bodyparts=bodyparts,\n", @@ -397,7 +399,7 @@ "metadata": {}, "outputs": [], "source": [ - "gputouse = 1 ## 1-9" + "gputouse = 1 ## 1-9" ] }, { @@ -429,14 +431,17 @@ ], "source": [ "training_params_name = \"tutorial\"\n", - "sgp.DLCModelTrainingParams.insert_new_params(paramset_name=training_params_name,\n", - " params={'trainingsetindex': 0,\n", - " 'shuffle': 1,\n", - " 'gputouse': gputouse,\n", - " \"net_type\": 'resnet_50',\n", - " 'augmenter_type': 'imgaug',\n", - " },\n", - " skip_duplicates=True)" + "sgp.DLCModelTrainingParams.insert_new_params(\n", + " paramset_name=training_params_name,\n", + " params={\n", + " \"trainingsetindex\": 0,\n", + " \"shuffle\": 1,\n", + " \"gputouse\": gputouse,\n", + " \"net_type\": \"resnet_50\",\n", + " \"augmenter_type\": \"imgaug\",\n", + " },\n", + " skip_duplicates=True,\n", + ")" ] }, { @@ -536,12 +541,21 @@ } ], "source": [ - "sgp.DLCModelTrainingSelection().insert1({**project_key,\n", - " \"dlc_training_params_name\": training_params_name,\n", - " \"training_id\": 0,\n", - " \"model_prefix\": '',})\n", - "model_training_key = (sgp.DLCModelTrainingSelection & {**project_key,\n", - " \"dlc_training_params_name\":training_params_name,}).fetch1(\"KEY\")\n", + "sgp.DLCModelTrainingSelection().insert1(\n", + " {\n", + " **project_key,\n", + " \"dlc_training_params_name\": training_params_name,\n", + " \"training_id\": 0,\n", + " \"model_prefix\": \"\",\n", + " }\n", + ")\n", + "model_training_key = (\n", + " sgp.DLCModelTrainingSelection\n", + " & {\n", + " **project_key,\n", + " \"dlc_training_params_name\": training_params_name,\n", + " }\n", + ").fetch1(\"KEY\")\n", "sgp.DLCModelTraining.populate(model_training_key)" ] }, @@ -701,7 +715,7 @@ }, "outputs": [], "source": [ - "nwb_file_name = 
'J1620210604_.nwb'\n", + "nwb_file_name = \"J1620210604_.nwb\"\n", "epoch = 14" ] }, @@ -716,8 +730,7 @@ }, "outputs": [], "source": [ - "sgc.VideoFile() & {\"nwb_file_name\": nwb_file_name,\n", - " \"epoch\": epoch}" + "sgc.VideoFile() & {\"nwb_file_name\": nwb_file_name, \"epoch\": epoch}" ] }, { @@ -745,10 +758,10 @@ " \"nwb_file_name\": nwb_file_name,\n", " \"epoch\": epoch,\n", " \"video_file_num\": 0,\n", - " **model_key\n", + " **model_key,\n", " },\n", " task_mode=\"trigger\",\n", - " params={\"gputouse\": gputouse, \"videotype\": \"mp4\"}\n", + " params={\"gputouse\": gputouse, \"videotype\": \"mp4\"},\n", ")" ] }, @@ -876,7 +889,7 @@ "source": [ "si_key = pose_estimation_key.copy()\n", "fields = list(sgp.DLCSmoothInterpSelection.fetch().dtype.fields.keys())\n", - "si_key = {key: val for key,val in si_key.items() if key in fields}\n", + "si_key = {key: val for key, val in si_key.items() if key in fields}\n", "si_key" ] }, @@ -900,7 +913,7 @@ }, "outputs": [], "source": [ - "print((sgp.DLCPoseEstimation.BodyPart & pose_estimation_key).fetch('bodypart'))" + "print((sgp.DLCPoseEstimation.BodyPart & pose_estimation_key).fetch(\"bodypart\"))" ] }, { @@ -947,17 +960,18 @@ }, "outputs": [], "source": [ - "bodyparts = ['greenLED', 'redLED_C']\n", + "bodyparts = [\"greenLED\", \"redLED_C\"]\n", "sgp.DLCSmoothInterpSelection.insert(\n", " [\n", " {\n", " **si_key,\n", - " 'bodypart': bodypart,\n", - " 'dlc_si_params_name': si_params_name,\n", + " \"bodypart\": bodypart,\n", + " \"dlc_si_params_name\": si_params_name,\n", " }\n", " for bodypart in bodyparts\n", " ],\n", - " skip_duplicates=True)" + " skip_duplicates=True,\n", + ")" ] }, { @@ -1023,11 +1037,9 @@ }, "outputs": [], "source": [ - "(sgp.DLCSmoothInterp() & {**si_key,'bodypart': bodyparts[0]}).fetch1_dataframe().plot.scatter(\n", - " x='x',\n", - " y='y',\n", - " s=1,\n", - " figsize=(5,5))" + "(\n", + " sgp.DLCSmoothInterp() & {**si_key, \"bodypart\": bodyparts[0]}\n", + ").fetch1_dataframe().plot.scatter(x=\"x\", y=\"y\", s=1, figsize=(5, 5))" ] }, { @@ -1072,8 +1084,10 @@ "if \"dlc_si_params_name\" in cohort_key:\n", " del cohort_key[\"dlc_si_params_name\"]\n", "cohort_key[\"dlc_si_cohort_selection_name\"] = \"green_red_led\"\n", - "cohort_key[\"bodyparts_params_dict\"] = {\"greenLED\": si_params_name,\n", - " \"redLED_C\": si_params_name,}\n", + "cohort_key[\"bodyparts_params_dict\"] = {\n", + " \"greenLED\": si_params_name,\n", + " \"redLED_C\": si_params_name,\n", + "}\n", "print(cohort_key)" ] }, @@ -1196,8 +1210,8 @@ "source": [ "centroid_key = cohort_key.copy()\n", "fields = list(sgp.DLCCentroidSelection.fetch().dtype.fields.keys())\n", - "centroid_key = {key: val for key,val in centroid_key.items() if key in fields}\n", - "centroid_key['dlc_centroid_params_name'] = centroid_params_name\n", + "centroid_key = {key: val for key, val in centroid_key.items() if key in fields}\n", + "centroid_key[\"dlc_centroid_params_name\"] = centroid_params_name\n", "print(centroid_key)" ] }, @@ -1244,13 +1258,14 @@ "outputs": [], "source": [ "(sgp.DLCCentroid() & centroid_key).fetch1_dataframe().plot.scatter(\n", - " x='position_x',\n", - " y='position_y',\n", - " c='speed',\n", - " colormap='viridis',\n", + " x=\"position_x\",\n", + " y=\"position_y\",\n", + " c=\"speed\",\n", + " colormap=\"viridis\",\n", " alpha=0.5,\n", " s=0.5,\n", - " figsize=(10,10))" + " figsize=(10, 10),\n", + ")" ] }, { @@ -1304,7 +1319,7 @@ "outputs": [], "source": [ "fields = list(sgp.DLCOrientationSelection.fetch().dtype.fields.keys())\n", - "orient_key = 
{key: val for key,val in cohort_key.items() if key in fields}\n", + "orient_key = {key: val for key, val in cohort_key.items() if key in fields}\n", "orient_key[\"dlc_orientation_params_name\"] = dlc_orientation_params_name\n", "print(orient_key)" ] @@ -1385,10 +1400,14 @@ "outputs": [], "source": [ "fields = list(sgp.DLCPos.fetch().dtype.fields.keys())\n", - "dlc_key = {key: val for key,val in centroid_key.items() if key in fields}\n", + "dlc_key = {key: val for key, val in centroid_key.items() if key in fields}\n", "dlc_key[\"dlc_si_cohort_centroid\"] = centroid_key[\"dlc_si_cohort_selection_name\"]\n", - "dlc_key[\"dlc_si_cohort_orientation\"] = orient_key[\"dlc_si_cohort_selection_name\"]\n", - "dlc_key[\"dlc_orientation_params_name\"] = orient_key[\"dlc_orientation_params_name\"]\n", + "dlc_key[\"dlc_si_cohort_orientation\"] = orient_key[\n", + " \"dlc_si_cohort_selection_name\"\n", + "]\n", + "dlc_key[\"dlc_orientation_params_name\"] = orient_key[\n", + " \"dlc_orientation_params_name\"\n", + "]\n", "print(dlc_key)" ] }, @@ -1507,7 +1526,8 @@ "}\n", "sgp.DLCPosVideoParams.insert1(\n", " {\"dlc_pos_video_params_name\": \"five_percent\", \"params\": params},\n", - " skip_duplicates=True)" + " skip_duplicates=True,\n", + ")" ] }, { @@ -1522,11 +1542,9 @@ "outputs": [], "source": [ "sgp.DLCPosVideoSelection.insert1(\n", - " {\n", - " **dlc_key,\n", - " \"dlc_pos_video_params_name\": \"five_percent\"\n", - " },\n", - " skip_duplicates=True)" + " {**dlc_key, \"dlc_pos_video_params_name\": \"five_percent\"},\n", + " skip_duplicates=True,\n", + ")" ] }, { @@ -1634,12 +1652,12 @@ "source": [ "sgp.PositionVideoSelection().insert1(\n", " {\n", - " 'nwb_file_name': 'J1620210604_.nwb',\n", - " 'interval_list_name': 'pos 13 valid times',\n", - " 'trodes_position_id': 0,\n", - " 'dlc_position_id': 1,\n", - " 'plot': 'DLC',\n", - " 'output_dir': '/home/dgramling/Src/'\n", + " \"nwb_file_name\": \"J1620210604_.nwb\",\n", + " \"interval_list_name\": \"pos 13 valid times\",\n", + " \"trodes_position_id\": 0,\n", + " \"dlc_position_id\": 1,\n", + " \"plot\": \"DLC\",\n", + " \"output_dir\": \"/home/dgramling/Src/\",\n", " }\n", ")" ] @@ -1655,7 +1673,7 @@ }, "outputs": [], "source": [ - "sgp.PositionVideo.populate({'plot': 'DLC'})" + "sgp.PositionVideo.populate({\"plot\": \"DLC\"})" ] }, { diff --git a/notebooks/06_DLC_from_dir.ipynb b/notebooks/06_DLC_from_dir.ipynb index fcf3979f4..bcfd472f1 100644 --- a/notebooks/06_DLC_from_dir.ipynb +++ b/notebooks/06_DLC_from_dir.ipynb @@ -119,10 +119,11 @@ "project_key = sgp.DLCProject.insert_existing_project(\n", " project_name=project_name,\n", " lab_team=lab_team,\n", - " config_path='/nimbus/deeplabcut/projects/tutorial_model-LorenLab-2022-07-15/config.yaml',\n", - " bodyparts=['redLED_C', 'greenLED', 'redLED_L', 'redLED_R', 'tailBase'],\n", + " config_path=\"/nimbus/deeplabcut/projects/tutorial_model-LorenLab-2022-07-15/config.yaml\",\n", + " bodyparts=[\"redLED_C\", \"greenLED\", \"redLED_L\", \"redLED_R\", \"tailBase\"],\n", " frames_per_video=200,\n", - " skip_duplicates=True)" + " skip_duplicates=True,\n", + ")" ] }, { @@ -215,9 +216,9 @@ "outputs": [], "source": [ "dlc_model_name = \"tutorial_model_DG\"\n", - "sgp.DLCModelInput().insert1({\"dlc_model_name\" : dlc_model_name,\n", - " **project_key},\n", - " skip_duplicates=True)\n", + "sgp.DLCModelInput().insert1(\n", + " {\"dlc_model_name\": dlc_model_name, **project_key}, skip_duplicates=True\n", + ")\n", "sgp.DLCModelInput()" ] }, @@ -346,10 +347,9 @@ }, "outputs": [], "source": [ - 
"sgp.DLCModelSelection().insert1({\n", - " **temp_model_key,\n", - " \"dlc_model_params_name\": \"default\"},\n", - " skip_duplicates=True)" + "sgp.DLCModelSelection().insert1(\n", + " {**temp_model_key, \"dlc_model_params_name\": \"default\"}, skip_duplicates=True\n", + ")" ] }, { @@ -434,7 +434,7 @@ }, "outputs": [], "source": [ - "nwb_file_name = 'J1620210529_.nwb'\n", + "nwb_file_name = \"J1620210529_.nwb\"\n", "epoch = 2" ] }, @@ -449,8 +449,7 @@ }, "outputs": [], "source": [ - "sgc.VideoFile() & {\"nwb_file_name\": nwb_file_name,\n", - " \"epoch\": epoch}" + "sgc.VideoFile() & {\"nwb_file_name\": nwb_file_name, \"epoch\": epoch}" ] }, { @@ -497,7 +496,7 @@ }, "outputs": [], "source": [ - "gputouse = 0## 0-9" + "gputouse = 0 ## 0-9" ] }, { @@ -676,7 +675,7 @@ "source": [ "si_key = pose_estimation_key.copy()\n", "fields = list(sgp.DLCSmoothInterpSelection.fetch().dtype.fields.keys())\n", - "si_key = {key: val for key,val in si_key.items() if key in fields}\n", + "si_key = {key: val for key, val in si_key.items() if key in fields}\n", "si_key" ] }, @@ -700,7 +699,7 @@ }, "outputs": [], "source": [ - "print((sgp.DLCPoseEstimation.BodyPart & pose_estimation_key).fetch('bodypart'))" + "print((sgp.DLCPoseEstimation.BodyPart & pose_estimation_key).fetch(\"bodypart\"))" ] }, { @@ -747,17 +746,18 @@ }, "outputs": [], "source": [ - "bodyparts = ['greenLED', 'redLED_C']\n", + "bodyparts = [\"greenLED\", \"redLED_C\"]\n", "sgp.DLCSmoothInterpSelection.insert(\n", " [\n", " {\n", " **si_key,\n", - " 'bodypart': bodypart,\n", - " 'dlc_si_params_name': si_params_name,\n", + " \"bodypart\": bodypart,\n", + " \"dlc_si_params_name\": si_params_name,\n", " }\n", " for bodypart in bodyparts\n", " ],\n", - " skip_duplicates=True)" + " skip_duplicates=True,\n", + ")" ] }, { @@ -823,11 +823,9 @@ }, "outputs": [], "source": [ - "(sgp.DLCSmoothInterp() & {**si_key,'bodypart': bodyparts[0]}).fetch1_dataframe().plot.scatter(\n", - " x='x',\n", - " y='y',\n", - " s=1,\n", - " figsize=(5,5))" + "(\n", + " sgp.DLCSmoothInterp() & {**si_key, \"bodypart\": bodyparts[0]}\n", + ").fetch1_dataframe().plot.scatter(x=\"x\", y=\"y\", s=1, figsize=(5, 5))" ] }, { @@ -872,8 +870,10 @@ "if \"dlc_si_params_name\" in cohort_key:\n", " del cohort_key[\"dlc_si_params_name\"]\n", "cohort_key[\"dlc_si_cohort_selection_name\"] = \"green_red_led\"\n", - "cohort_key[\"bodyparts_params_dict\"] = {\"greenLED\": si_params_name,\n", - " \"redLED_C\": si_params_name,}\n", + "cohort_key[\"bodyparts_params_dict\"] = {\n", + " \"greenLED\": si_params_name,\n", + " \"redLED_C\": si_params_name,\n", + "}\n", "print(cohort_key)" ] }, @@ -996,8 +996,8 @@ "source": [ "centroid_key = cohort_key.copy()\n", "fields = list(sgp.DLCCentroidSelection.fetch().dtype.fields.keys())\n", - "centroid_key = {key: val for key,val in centroid_key.items() if key in fields}\n", - "centroid_key['dlc_centroid_params_name'] = centroid_params_name\n", + "centroid_key = {key: val for key, val in centroid_key.items() if key in fields}\n", + "centroid_key[\"dlc_centroid_params_name\"] = centroid_params_name\n", "print(centroid_key)" ] }, @@ -1044,13 +1044,14 @@ "outputs": [], "source": [ "(sgp.DLCCentroid() & centroid_key).fetch1_dataframe().plot.scatter(\n", - " x='position_x',\n", - " y='position_y',\n", - " c='speed',\n", - " colormap='viridis',\n", + " x=\"position_x\",\n", + " y=\"position_y\",\n", + " c=\"speed\",\n", + " colormap=\"viridis\",\n", " alpha=0.5,\n", " s=0.5,\n", - " figsize=(10,10))" + " figsize=(10, 10),\n", + ")" ] }, { @@ -1104,7 +1105,7 @@ 
"outputs": [], "source": [ "fields = list(sgp.DLCOrientationSelection.fetch().dtype.fields.keys())\n", - "orient_key = {key: val for key,val in cohort_key.items() if key in fields}\n", + "orient_key = {key: val for key, val in cohort_key.items() if key in fields}\n", "orient_key[\"dlc_orientation_params_name\"] = dlc_orientation_params_name\n", "print(orient_key)" ] @@ -1185,10 +1186,14 @@ "outputs": [], "source": [ "fields = list(sgp.DLCPosV1.fetch().dtype.fields.keys())\n", - "dlc_key = {key: val for key,val in centroid_key.items() if key in fields}\n", + "dlc_key = {key: val for key, val in centroid_key.items() if key in fields}\n", "dlc_key[\"dlc_si_cohort_centroid\"] = centroid_key[\"dlc_si_cohort_selection_name\"]\n", - "dlc_key[\"dlc_si_cohort_orientation\"] = orient_key[\"dlc_si_cohort_selection_name\"]\n", - "dlc_key[\"dlc_orientation_params_name\"] = orient_key[\"dlc_orientation_params_name\"]\n", + "dlc_key[\"dlc_si_cohort_orientation\"] = orient_key[\n", + " \"dlc_si_cohort_selection_name\"\n", + "]\n", + "dlc_key[\"dlc_orientation_params_name\"] = orient_key[\n", + " \"dlc_orientation_params_name\"\n", + "]\n", "print(dlc_key)" ] }, @@ -1307,7 +1312,8 @@ "}\n", "sgp.DLCPosVideoParams.insert1(\n", " {\"dlc_pos_video_params_name\": \"five_percent\", \"params\": params},\n", - " skip_duplicates=True)" + " skip_duplicates=True,\n", + ")" ] }, { @@ -1322,11 +1328,9 @@ "outputs": [], "source": [ "sgp.DLCPosVideoSelection.insert1(\n", - " {\n", - " **dlc_key,\n", - " \"dlc_pos_video_params_name\": \"five_percent\"\n", - " },\n", - " skip_duplicates=True)" + " {**dlc_key, \"dlc_pos_video_params_name\": \"five_percent\"},\n", + " skip_duplicates=True,\n", + ")" ] }, { @@ -1424,12 +1428,12 @@ "source": [ "sgp.PositionVideoSelection().insert1(\n", " {\n", - " 'nwb_file_name': 'J1620210604_.nwb',\n", - " 'interval_list_name': 'pos 13 valid times',\n", - " 'trodes_position_id': 0,\n", - " 'dlc_position_id': 1,\n", - " 'plot': 'DLC',\n", - " 'output_dir': '/home/dgramling/Src/'\n", + " \"nwb_file_name\": \"J1620210604_.nwb\",\n", + " \"interval_list_name\": \"pos 13 valid times\",\n", + " \"trodes_position_id\": 0,\n", + " \"dlc_position_id\": 1,\n", + " \"plot\": \"DLC\",\n", + " \"output_dir\": \"/home/dgramling/Src/\",\n", " }\n", ")" ] @@ -1445,7 +1449,7 @@ }, "outputs": [], "source": [ - "sgp.PositionVideo.populate({'plot': 'DLC'})" + "sgp.PositionVideo.populate({\"plot\": \"DLC\"})" ] }, { diff --git a/notebooks/07_linearization.ipynb b/notebooks/07_linearization.ipynb index 420b87da0..a227ac33b 100644 --- a/notebooks/07_linearization.ipynb +++ b/notebooks/07_linearization.ipynb @@ -104,7 +104,7 @@ "\n", "nwb_file_name = \"chimi20200216_new.nwb\"\n", "nwb_copy_file_name = get_nwb_copy_filename(nwb_file_name)\n", - "nwb_copy_file_name\n" + "nwb_copy_file_name" ] }, { @@ -324,7 +324,7 @@ " \"position_info_param_name\": \"default\",\n", " }\n", ").fetch1_dataframe()\n", - "position_info\n" + "position_info" ] }, { @@ -368,10 +368,14 @@ "import matplotlib.pyplot as plt\n", "\n", "fig, ax = plt.subplots(1, 1, figsize=(10, 10))\n", - "ax.plot(position_info.head_position_x, position_info.head_position_y, color=\"lightgrey\")\n", + "ax.plot(\n", + " position_info.head_position_x,\n", + " position_info.head_position_y,\n", + " color=\"lightgrey\",\n", + ")\n", "ax.set_xlabel(\"x-position [cm]\", fontsize=18)\n", "ax.set_ylabel(\"y-position [cm]\", fontsize=18)\n", - "ax.set_title(\"Head Position\", fontsize=28)\n" + "ax.set_title(\"Head Position\", fontsize=28)" ] }, { @@ -441,7 +445,7 @@ " 
(4, 5),\n", " (4, 7),\n", "]\n", - "linear_edge_spacing = 15\n" + "linear_edge_spacing = 15" ] }, { @@ -576,7 +580,7 @@ ")\n", "\n", "graph = TrackGraph() & {\"track_graph_name\": \"6 arm\"}\n", - "graph\n" + "graph" ] }, { @@ -615,7 +619,7 @@ ")\n", "ax.set_xlabel(\"x-position [cm]\", fontsize=18)\n", "ax.set_ylabel(\"y-position [cm]\", fontsize=18)\n", - "graph.plot_track_graph(ax=ax)\n" + "graph.plot_track_graph(ax=ax)" ] }, { @@ -647,7 +651,7 @@ ], "source": [ "fig, ax = plt.subplots(1, 1, figsize=(20, 1))\n", - "graph.plot_track_graph_as_1D(ax=ax)\n" + "graph.plot_track_graph_as_1D(ax=ax)" ] }, { @@ -770,7 +774,7 @@ "LinearizationParameters.insert1(\n", " {\"linearization_param_name\": \"default\"}, skip_duplicates=True\n", ")\n", - "LinearizationParameters()\n" + "LinearizationParameters()" ] }, { @@ -896,7 +900,7 @@ " skip_duplicates=True,\n", ")\n", "\n", - "IntervalLinearizationSelection()\n" + "IntervalLinearizationSelection()" ] }, { @@ -1028,7 +1032,7 @@ "\n", "\n", "IntervalLinearizedPosition().populate()\n", - "IntervalLinearizedPosition()\n" + "IntervalLinearizedPosition()" ] }, { @@ -1219,7 +1223,7 @@ " \"linearization_param_name\": \"default\",\n", " }\n", ").fetch1_dataframe()\n", - "linear_position_df\n" + "linear_position_df" ] }, { @@ -1273,7 +1277,7 @@ "\n", "ax.set_xlabel(\"Time [s]\", fontsize=18)\n", "ax.set_ylabel(\"Linear Position [cm]\", fontsize=18)\n", - "ax.set_title(\"Linear Position\", fontsize=28)\n" + "ax.set_title(\"Linear Position\", fontsize=28)" ] }, { @@ -1325,8 +1329,9 @@ "ax.set_xlabel(\"x-position [cm]\", fontsize=18)\n", "ax.set_ylabel(\"y-position [cm]\", fontsize=18)\n", "ax.plot(\n", - " linear_position_df.projected_x_position, linear_position_df.projected_y_position\n", - ")\n" + " linear_position_df.projected_x_position,\n", + " linear_position_df.projected_y_position,\n", + ")" ] }, { @@ -1380,22 +1385,26 @@ "\n", "\n", "key = {\n", - " 'nwb_file_name': nwb_copy_file_name,\n", - " 'interval_list_name': 'pos 1 valid times'\n", + " \"nwb_file_name\": nwb_copy_file_name,\n", + " \"interval_list_name\": \"pos 1 valid times\",\n", "}\n", "\n", - "epoch = int(key['interval_list_name']\n", - " .replace('pos ', '')\n", - " .replace(' valid times', '')\n", - " ) + 1\n", - "video_info = (nd.common.common_behav.VideoFile() &\n", - " {'nwb_file_name': key['nwb_file_name'],\n", - " 'epoch': epoch}).fetch1()\n", + "epoch = (\n", + " int(\n", + " key[\"interval_list_name\"]\n", + " .replace(\"pos \", \"\")\n", + " .replace(\" valid times\", \"\")\n", + " )\n", + " + 1\n", + ")\n", + "video_info = (\n", + " nd.common.common_behav.VideoFile()\n", + " & {\"nwb_file_name\": key[\"nwb_file_name\"], \"epoch\": epoch}\n", + ").fetch1()\n", "\n", - "io = pynwb.NWBHDF5IO('/stelmo/nwb/raw/' +\n", - " video_info['nwb_file_name'], 'r')\n", + "io = pynwb.NWBHDF5IO(\"/stelmo/nwb/raw/\" + video_info[\"nwb_file_name\"], \"r\")\n", "nwb_file = io.read()\n", - "nwb_video = nwb_file.objects[video_info['video_file_object_id']]\n", + "nwb_video = nwb_file.objects[video_info[\"video_file_object_id\"]]\n", "video_filename = nwb_video.external_file.value[0]\n", "\n", "fig, ax = plt.subplots(figsize=(8, 8))\n", @@ -1428,7 +1437,7 @@ } ], "source": [ - "picker.node_positions\n" + "picker.node_positions" ] }, { @@ -1449,7 +1458,7 @@ } ], "source": [ - "picker.edges\n" + "picker.edges" ] }, { diff --git a/notebooks/08_Extract_Mark_indicators.ipynb b/notebooks/08_Extract_Mark_indicators.ipynb index c5a752adf..54109bc35 100644 --- a/notebooks/08_Extract_Mark_indicators.ipynb +++ 
b/notebooks/08_Extract_Mark_indicators.ipynb @@ -22,7 +22,7 @@ "warnings.simplefilter(\"ignore\", category=ResourceWarning)\n", "warnings.simplefilter(\"ignore\", category=UserWarning)\n", "\n", - "nwb_copy_file_name = \"J1620210531_.nwb\"\n" + "nwb_copy_file_name = \"J1620210531_.nwb\"" ] }, { @@ -134,7 +134,7 @@ "from spyglass.decoding.clusterless import populate_mark_indicators\n", "\n", "\n", - "populate_mark_indicators?" + "?populate_mark_indicators" ] }, { @@ -172,7 +172,7 @@ "source": [ "from spyglass.spikesorting import SpikeSorting\n", "\n", - "SpikeSorting.primary_key\n" + "SpikeSorting.primary_key" ] }, { @@ -211,7 +211,7 @@ " \"sorter_params_name\": \"clusterless_fixed\",\n", " \"artifact_removed_interval_list_name\": \"J1620210531_.nwb_1_raw data valid times no premaze no home_franklab_tetrode_hippocampus_JG_DG_group_0.8_2000_8_1_artifact_removed_valid_times\",\n", " },\n", - "]\n" + "]" ] }, { @@ -350,7 +350,7 @@ "source": [ "from spyglass.spikesorting import SpikeSortingSelection\n", "\n", - "SpikeSortingSelection & spikesorting_selections\n" + "SpikeSortingSelection & spikesorting_selections" ] }, { @@ -553,7 +553,7 @@ "IntervalPositionInfo & {\n", " \"nwb_file_name\": \"J1620210531_.nwb\",\n", " \"position_info_param_name\": \"default_decoding\",\n", - "}\n" + "}" ] }, { @@ -586,7 +586,7 @@ } ], "source": [ - "populate_mark_indicators(spikesorting_selections)\n" + "populate_mark_indicators(spikesorting_selections)" ] }, { @@ -891,7 +891,7 @@ "source": [ "from spyglass.decoding import UnitMarksIndicator\n", "\n", - "UnitMarksIndicator & spikesorting_selections\n" + "UnitMarksIndicator & spikesorting_selections" ] } ], diff --git a/notebooks/09_Decoding_with_GPUs_on_the_GPU_cluster.ipynb b/notebooks/09_Decoding_with_GPUs_on_the_GPU_cluster.ipynb index 0486ce469..4458f20eb 100644 --- a/notebooks/09_Decoding_with_GPUs_on_the_GPU_cluster.ipynb +++ b/notebooks/09_Decoding_with_GPUs_on_the_GPU_cluster.ipynb @@ -120,7 +120,13 @@ "\n", "\n", "# Create simulated data\n", - "time, position, sampling_frequency, spikes, place_fields = make_simulated_run_data()\n", + "(\n", + " time,\n", + " position,\n", + " sampling_frequency,\n", + " spikes,\n", + " place_fields,\n", + ") = make_simulated_run_data()\n", "\n", "replay_time, test_spikes = make_continuous_replay()\n", "\n", @@ -155,7 +161,7 @@ " time=replay_time,\n", " state_names=state_names,\n", " use_gpu=True, # Also need to specify use of GPU for the computation of the causal and acausal posterior\n", - " )\n" + " )" ] }, { @@ -597,7 +603,7 @@ "cluster = LocalCUDACluster(CUDA_VISIBLE_DEVICES=[4, 5, 6])\n", "client = Client(cluster)\n", "\n", - "client\n" + "client" ] }, { @@ -704,7 +710,9 @@ "@dask.delayed\n", "def test_gpu(x, ind):\n", " # Create a log file for this run of the function\n", - " logger = setup_logger(name_logfile=f\"test_{ind}\", path_logfile=f\"test_{ind}.log\")\n", + " logger = setup_logger(\n", + " name_logfile=f\"test_{ind}\", path_logfile=f\"test_{ind}.log\"\n", + " )\n", "\n", " # Test to see if these go into different log files\n", " logger.info(f\"This is a test of {ind}\")\n", @@ -722,7 +730,7 @@ "results = [test_gpu(x, ind) for ind, x in enumerate(data)]\n", "\n", "# Run `dask.compute` on the results list for the code to run\n", - "dask.compute(*results)\n" + "dask.compute(*results)" ] }, { diff --git a/notebooks/10_1D_Clusterless_Decoding.ipynb b/notebooks/10_1D_Clusterless_Decoding.ipynb index 6a276a7eb..de6e3d181 100644 --- a/notebooks/10_1D_Clusterless_Decoding.ipynb +++ 
b/notebooks/10_1D_Clusterless_Decoding.ipynb @@ -32,7 +32,7 @@ "import warnings\n", "\n", "warnings.simplefilter(\"ignore\", category=DeprecationWarning)\n", - "warnings.simplefilter(\"ignore\", category=ResourceWarning)\n" + "warnings.simplefilter(\"ignore\", category=ResourceWarning)" ] }, { @@ -49,7 +49,7 @@ "\n", "FORMAT = \"%(asctime)s %(message)s\"\n", "\n", - "logging.basicConfig(level=\"INFO\", format=FORMAT, datefmt=\"%d-%b-%y %H:%M:%S\")\n" + "logging.basicConfig(level=\"INFO\", format=FORMAT, datefmt=\"%d-%b-%y %H:%M:%S\")" ] }, { @@ -59,7 +59,7 @@ "metadata": {}, "outputs": [], "source": [ - "nwb_copy_file_name = \"chimi20200216_new_.nwb\"\n" + "nwb_copy_file_name = \"chimi20200216_new_.nwb\"" ] }, { @@ -597,7 +597,7 @@ " }\n", ").fetch_xarray()\n", "\n", - "marks\n" + "marks" ] }, { @@ -838,7 +838,7 @@ } ], "source": [ - "UnitMarksIndicator.plot_all_marks(marks)\n" + "UnitMarksIndicator.plot_all_marks(marks)" ] }, { @@ -1056,7 +1056,7 @@ "\n", "position_info = (IntervalPositionInfo() & position_key).fetch1_dataframe()\n", "\n", - "position_info\n" + "position_info" ] }, { @@ -1098,7 +1098,7 @@ ], "source": [ "plt.figure(figsize=(7, 6))\n", - "plt.plot(position_info.head_position_x, position_info.head_position_y)\n" + "plt.plot(position_info.head_position_x, position_info.head_position_y)" ] }, { @@ -1291,7 +1291,7 @@ " IntervalLinearizedPosition() & linearization_key\n", ").fetch1_dataframe()\n", "\n", - "linear_position_df\n" + "linear_position_df" ] }, { @@ -1337,7 +1337,7 @@ " c=linear_position_df.track_segment_id,\n", " cmap=\"tab20\",\n", " s=1,\n", - ")\n" + ")" ] }, { @@ -1383,7 +1383,7 @@ " s=1,\n", " c=linear_position_df.track_segment_id,\n", " cmap=\"tab20\",\n", - ")\n" + ")" ] }, { @@ -1412,7 +1412,7 @@ } ], "source": [ - "position_info.shape, marks.shape, linear_position_df.shape\n" + "position_info.shape, marks.shape, linear_position_df.shape" ] }, { @@ -1485,7 +1485,7 @@ " interval_list_intersect(interval, valid_ephys_times), valid_pos_times[0]\n", ")\n", "valid_time_slice = slice(intersect_interval[0][0], intersect_interval[0][1])\n", - "valid_time_slice\n" + "valid_time_slice" ] }, { @@ -1497,7 +1497,7 @@ "source": [ "linear_position_df = linear_position_df.loc[valid_time_slice]\n", "marks = marks.sel(time=valid_time_slice)\n", - "position_info = position_info.loc[valid_time_slice]\n" + "position_info = position_info.loc[valid_time_slice]" ] }, { @@ -1518,7 +1518,7 @@ } ], "source": [ - "position_info.shape, marks.shape, linear_position_df.shape\n" + "position_info.shape, marks.shape, linear_position_df.shape" ] }, { @@ -1579,7 +1579,9 @@ " & {\"classifier_param_name\": \"default_decoding_gpu\"}\n", ").fetch1()\n", "\n", - "track_graph = (TrackGraph() & {\"track_graph_name\": \"6 arm\"}).get_networkx_track_graph()\n", + "track_graph = (\n", + " TrackGraph() & {\"track_graph_name\": \"6 arm\"}\n", + ").get_networkx_track_graph()\n", "track_graph_params = (TrackGraph() & {\"track_graph_name\": \"6 arm\"}).fetch1()\n", "\n", "parameters[\"classifier_params\"][\"environments\"] = [\n", @@ -1599,7 +1601,7 @@ " \"block_size\": 2**12,\n", "}\n", "\n", - "pprint.pprint(parameters)\n" + "pprint.pprint(parameters)" ] }, { @@ -1660,14 +1662,14 @@ " classifier.fit(\n", " position=linear_position_df.linear_position.values,\n", " multiunits=marks.values,\n", - " **parameters[\"fit_params\"]\n", + " **parameters[\"fit_params\"],\n", " )\n", " results = classifier.predict(\n", " multiunits=marks.values,\n", " time=linear_position_df.index,\n", - " 
**parameters[\"predict_params\"]\n", + " **parameters[\"predict_params\"],\n", " )\n", - " logging.info(\"Done!\")\n" + " logging.info(\"Done!\")" ] }, { @@ -1729,7 +1731,9 @@ } ], "source": [ - "from spyglass.decoding.visualization import create_interactive_1D_decoding_figurl\n", + "from spyglass.decoding.visualization import (\n", + " create_interactive_1D_decoding_figurl,\n", + ")\n", "\n", "\n", "view = create_interactive_1D_decoding_figurl(\n", diff --git a/notebooks/11_2D_Clusterless_Decoding.ipynb b/notebooks/11_2D_Clusterless_Decoding.ipynb index 8d6b5c708..7f47e4cfa 100644 --- a/notebooks/11_2D_Clusterless_Decoding.ipynb +++ b/notebooks/11_2D_Clusterless_Decoding.ipynb @@ -30,7 +30,7 @@ "import warnings\n", "\n", "warnings.simplefilter(\"ignore\", category=DeprecationWarning)\n", - "warnings.simplefilter(\"ignore\", category=ResourceWarning)\n" + "warnings.simplefilter(\"ignore\", category=ResourceWarning)" ] }, { @@ -47,7 +47,7 @@ "\n", "FORMAT = \"%(asctime)s %(message)s\"\n", "\n", - "logging.basicConfig(level=\"INFO\", format=FORMAT, datefmt=\"%d-%b-%y %H:%M:%S\")\n" + "logging.basicConfig(level=\"INFO\", format=FORMAT, datefmt=\"%d-%b-%y %H:%M:%S\")" ] }, { @@ -57,7 +57,7 @@ "metadata": {}, "outputs": [], "source": [ - "nwb_copy_file_name = \"chimi20200216_new_.nwb\"\n" + "nwb_copy_file_name = \"chimi20200216_new_.nwb\"" ] }, { @@ -635,7 +635,7 @@ " }\n", ").fetch_xarray()\n", "\n", - "marks\n" + "marks" ] }, { @@ -670,7 +670,7 @@ " marks.isel(electrodes=0).dropna(\"time\").isel(marks=0),\n", " marks.isel(electrodes=0).dropna(\"time\").isel(marks=1),\n", " s=1,\n", - ")\n" + ")" ] }, { @@ -878,7 +878,7 @@ "\n", "position_info = (IntervalPositionInfo() & position_key).fetch1_dataframe()\n", "\n", - "position_info\n" + "position_info" ] }, { @@ -909,7 +909,7 @@ } ], "source": [ - "plt.plot(position_info.head_position_x, position_info.head_position_y)\n" + "plt.plot(position_info.head_position_x, position_info.head_position_y)" ] }, { @@ -930,7 +930,7 @@ } ], "source": [ - "position_info.shape, marks.shape\n" + "position_info.shape, marks.shape" ] }, { @@ -995,7 +995,7 @@ " interval_list_intersect(interval, valid_ephys_times), valid_pos_times[0]\n", ")\n", "valid_time_slice = slice(intersect_interval[0][0], intersect_interval[0][1])\n", - "valid_time_slice\n" + "valid_time_slice" ] }, { @@ -1063,7 +1063,9 @@ " \"disable_progress_bar\": False,\n", " \"use_diffusion\": False,\n", "}\n", - "parameters[\"classifier_params\"][\"environments\"][0] = Environment(place_bin_size=3.0)\n", + "parameters[\"classifier_params\"][\"environments\"][0] = Environment(\n", + " place_bin_size=3.0\n", + ")\n", "\n", "\n", "import cupy as cp\n", @@ -1073,14 +1075,14 @@ " classifier.fit(\n", " position=position_info[[\"head_position_x\", \"head_position_y\"]].values,\n", " multiunits=marks.values,\n", - " **parameters[\"fit_params\"]\n", + " **parameters[\"fit_params\"],\n", " )\n", " results = classifier.predict(\n", " multiunits=marks.values,\n", " time=position_info.index,\n", - " **parameters[\"predict_params\"]\n", + " **parameters[\"predict_params\"],\n", " )\n", - " logging.info(\"Done!\")\n" + " logging.info(\"Done!\")" ] }, { @@ -1122,7 +1124,7 @@ "metadata": {}, "outputs": [], "source": [ - "# view\n" + "# view" ] }, { @@ -1151,7 +1153,7 @@ } ], "source": [ - "view.url(label=\"2D Decode Example\")\n" + "view.url(label=\"2D Decode Example\")" ] } ], diff --git a/notebooks/12_Ripple_Detection.ipynb b/notebooks/12_Ripple_Detection.ipynb index c91dfa84c..6a16d6eec 100644 --- 
a/notebooks/12_Ripple_Detection.ipynb +++ b/notebooks/12_Ripple_Detection.ipynb @@ -29,7 +29,11 @@ } ], "source": [ - "from spyglass.common.common_ripple import RippleLFPSelection, RippleParameters, RippleTimes\n", + "from spyglass.common.common_ripple import (\n", + " RippleLFPSelection,\n", + " RippleParameters,\n", + " RippleTimes,\n", + ")\n", "from spyglass.common import IntervalPositionInfo" ] }, @@ -79,7 +83,7 @@ } ], "source": [ - "RippleLFPSelection.set_lfp_electrodes?" + "?RippleLFPSelection.set_lfp_electrodes" ] }, { @@ -105,7 +109,7 @@ "metadata": {}, "outputs": [], "source": [ - "nwb_file_name = 'chimi20200216_new_.nwb'" + "nwb_file_name = \"chimi20200216_new_.nwb\"" ] }, { @@ -694,7 +698,9 @@ "source": [ "from spyglass.common import Electrode, BrainRegion\n", "\n", - "electrodes = ((Electrode() & {'nwb_file_name': nwb_file_name}) * BrainRegion).fetch(format=\"frame\")\n", + "electrodes = (\n", + " (Electrode() & {\"nwb_file_name\": nwb_file_name}) * BrainRegion\n", + ").fetch(format=\"frame\")\n", "electrodes" ] }, @@ -1525,7 +1531,10 @@ } ], "source": [ - "electrodes.loc[(electrodes.region_name == 'Hippocampus') & (electrodes.probe_electrode == 0)]" + "electrodes.loc[\n", + " (electrodes.region_name == \"Hippocampus\")\n", + " & (electrodes.probe_electrode == 0)\n", + "]" ] }, { @@ -1554,11 +1563,14 @@ } ], "source": [ - "electrode_list = (electrodes\n", - " .loc[(electrodes.region_name == 'Hippocampus') &\n", - " (electrodes.probe_electrode == 0)]\n", - " .reset_index()\n", - " .electrode_id).tolist()\n", + "electrode_list = (\n", + " electrodes.loc[\n", + " (electrodes.region_name == \"Hippocampus\")\n", + " & (electrodes.probe_electrode == 0)\n", + " ]\n", + " .reset_index()\n", + " .electrode_id\n", + ").tolist()\n", "\n", "electrode_list" ] @@ -1581,7 +1593,8 @@ "RippleLFPSelection.set_lfp_electrodes(\n", " nwb_file_name,\n", " electrode_list,\n", - " group_name='CA1',)" + " group_name=\"CA1\",\n", + ")" ] }, { @@ -1812,7 +1825,7 @@ } ], "source": [ - "(RippleParameters() & {'ripple_param_name': 'default'}).fetch1()" + "(RippleParameters() & {\"ripple_param_name\": \"default\"}).fetch1()" ] }, { @@ -2029,10 +2042,14 @@ } ], "source": [ - "(IntervalPositionInfo & {'nwb_file_name': nwb_file_name,\n", - " 'position_info_param_name': 'default',\n", - " 'interval_list_name': 'pos 1 valid times',\n", - " }).fetch1_dataframe()" + "(\n", + " IntervalPositionInfo\n", + " & {\n", + " \"nwb_file_name\": nwb_file_name,\n", + " \"position_info_param_name\": \"default\",\n", + " \"interval_list_name\": \"pos 1 valid times\",\n", + " }\n", + ").fetch1_dataframe()" ] }, { @@ -2077,11 +2094,11 @@ ], "source": [ "key = {\n", - " 'ripple_param_name': 'default',\n", - " 'nwb_file_name': nwb_file_name,\n", - " 'group_name': 'CA1',\n", - " 'position_info_param_name': 'default',\n", - " 'interval_list_name': 'pos 1 valid times'\n", + " \"ripple_param_name\": \"default\",\n", + " \"nwb_file_name\": nwb_file_name,\n", + " \"group_name\": \"CA1\",\n", + " \"position_info_param_name\": \"default\",\n", + " \"interval_list_name\": \"pos 1 valid times\",\n", "}\n", "RippleTimes().populate(key)" ] diff --git a/notebooks/13_Theta_phase_and_power.ipynb b/notebooks/13_Theta_phase_and_power.ipynb index e4d023dee..1c42c1926 100644 --- a/notebooks/13_Theta_phase_and_power.ipynb +++ b/notebooks/13_Theta_phase_and_power.ipynb @@ -369,7 +369,9 @@ "# Use the same interval list name as in the LFPBand entry of interest; here for simplicity, we will use a test interval\n", "lfp_key[\"target_interval_list_name\"] 
= \"test_interval_one_session\"\n", "\n", - "LFPBand() & lfp_key # Make sure that this prints out the entry we want to analyze" + "(\n", + " LFPBand() & lfp_key\n", + ") # Make sure that this prints out the entry we want to analyze" ] }, { @@ -588,8 +590,12 @@ "outputs": [], "source": [ "# Compute the theta phase and power for electrodes in the electrode_list we previously defined.\n", - "theta_phase = (LFPBand() & lfp_key).compute_signal_phase(electrode_list=electrode_list)\n", - "theta_power = (LFPBand() & lfp_key).compute_signal_power(electrode_list=electrode_list)" + "theta_phase = (LFPBand() & lfp_key).compute_signal_phase(\n", + " electrode_list=electrode_list\n", + ")\n", + "theta_power = (LFPBand() & lfp_key).compute_signal_power(\n", + " electrode_list=electrode_list\n", + ")" ] }, { diff --git a/notebooks/4_position_info.ipynb b/notebooks/4_position_info.ipynb index b47ab399d..62364c8c2 100644 --- a/notebooks/4_position_info.ipynb +++ b/notebooks/4_position_info.ipynb @@ -1999,7 +1999,7 @@ "\n", "nwb_file_name = \"chimi20200216_new.nwb\"\n", "\n", - "nd.insert_sessions(nwb_file_name)\n" + "nd.insert_sessions(nwb_file_name)" ] }, { @@ -2031,7 +2031,7 @@ "from spyglass.common.nwb_helper_fn import get_nwb_copy_filename\n", "\n", "nwb_copy_file_name = get_nwb_copy_filename(nwb_file_name)\n", - "nwb_copy_file_name\n" + "nwb_copy_file_name" ] }, { @@ -2197,7 +2197,7 @@ "source": [ "from spyglass.common.common_position import PositionInfoParameters\n", "\n", - "PositionInfoParameters()\n" + "PositionInfoParameters()" ] }, { @@ -2344,7 +2344,7 @@ "PositionInfoParameters.insert1(\n", " {\"position_info_param_name\": \"default\"}, skip_duplicates=True\n", ")\n", - "PositionInfoParameters()\n" + "PositionInfoParameters()" ] }, { @@ -2512,7 +2512,7 @@ "source": [ "import pandas as pd\n", "\n", - "pd.DataFrame(nd.common.IntervalList & {\"nwb_file_name\": nwb_copy_file_name})\n" + "pd.DataFrame(nd.common.IntervalList & {\"nwb_file_name\": nwb_copy_file_name})" ] }, { @@ -2694,9 +2694,12 @@ "\n", "raw_position_df = (\n", " RawPosition()\n", - " & {\"nwb_file_name\": nwb_copy_file_name, \"interval_list_name\": \"pos 1 valid times\"}\n", + " & {\n", + " \"nwb_file_name\": nwb_copy_file_name,\n", + " \"interval_list_name\": \"pos 1 valid times\",\n", + " }\n", ").fetch1_dataframe()\n", - "raw_position_df\n" + "raw_position_df" ] }, { @@ -2744,7 +2747,7 @@ "ax.plot(raw_position_df.xloc2, raw_position_df.yloc2, color=\"red\")\n", "ax.set_xlabel(\"x-position [pixels]\", fontsize=18)\n", "ax.set_ylabel(\"y-position [pixels]\", fontsize=18)\n", - "ax.set_title(\"Raw Position\", fontsize=28)\n" + "ax.set_title(\"Raw Position\", fontsize=28)" ] }, { @@ -2775,7 +2778,7 @@ " \"position_info_param_name\": \"default\",\n", " },\n", " skip_duplicates=True,\n", - ")\n" + ")" ] }, { @@ -2913,7 +2916,7 @@ } ], "source": [ - "IntervalPositionInfoSelection()\n" + "IntervalPositionInfoSelection()" ] }, { @@ -2937,7 +2940,7 @@ "source": [ "from spyglass.common.common_position import IntervalPositionInfo\n", "\n", - "IntervalPositionInfo.populate()\n" + "IntervalPositionInfo.populate()" ] }, { @@ -3135,7 +3138,7 @@ } ], "source": [ - "IntervalPositionInfo()\n" + "IntervalPositionInfo()" ] }, { @@ -3356,7 +3359,7 @@ " \"position_info_param_name\": \"default\",\n", " }\n", ").fetch1_dataframe()\n", - "position_info\n" + "position_info" ] }, { @@ -3394,7 +3397,7 @@ } ], "source": [ - "position_info.index\n" + "position_info.index" ] }, { @@ -3444,7 +3447,7 @@ "ax.plot(position_info.head_position_x, 
position_info.head_position_y)\n", "ax.set_xlabel(\"x-position [cm]\", fontsize=18)\n", "ax.set_ylabel(\"y-position [cm]\", fontsize=18)\n", - "ax.set_title(\"Head Position\", fontsize=28)\n" + "ax.set_title(\"Head Position\", fontsize=28)" ] }, { @@ -3481,7 +3484,7 @@ "ax.plot(position_info.head_velocity_x, position_info.head_velocity_y)\n", "ax.set_xlabel(\"x-velocity [cm/s]\", fontsize=18)\n", "ax.set_ylabel(\"y-velocity [cm/s]\", fontsize=18)\n", - "ax.set_title(\"Head Velocity\", fontsize=28)\n" + "ax.set_title(\"Head Velocity\", fontsize=28)" ] }, { @@ -3519,7 +3522,7 @@ "ax.set_xlabel(\"Time\", fontsize=18)\n", "ax.set_ylabel(\"Speed [cm/s]\", fontsize=18)\n", "ax.set_title(\"Head Speed\", fontsize=28)\n", - "ax.set_xlim((position_info.index.min(), position_info.index.max()))\n" + "ax.set_xlim((position_info.index.min(), position_info.index.max()))" ] }, { @@ -3578,7 +3581,7 @@ " \"interval_list_name\": \"pos 1 valid times\",\n", " \"position_info_param_name\": \"default\",\n", " }\n", - ")\n" + ")" ] }, { @@ -3744,7 +3747,7 @@ " skip_duplicates=True,\n", ")\n", "\n", - "PositionInfoParameters()\n" + "PositionInfoParameters()" ] }, { @@ -3883,7 +3886,7 @@ " skip_duplicates=True,\n", ")\n", "\n", - "IntervalPositionInfoSelection()\n" + "IntervalPositionInfoSelection()" ] }, { @@ -3893,7 +3896,7 @@ "metadata": {}, "outputs": [], "source": [ - "IntervalPositionInfo.populate()\n" + "IntervalPositionInfo.populate()" ] }, { @@ -4099,7 +4102,7 @@ " }\n", ").fetch1_dataframe()\n", "\n", - "upsampled_position_info\n" + "upsampled_position_info" ] }, { @@ -4141,11 +4144,12 @@ "axes[0].set_title(\"Head Position\", fontsize=28)\n", "\n", "axes[1].plot(\n", - " upsampled_position_info.head_position_x, upsampled_position_info.head_position_y\n", + " upsampled_position_info.head_position_x,\n", + " upsampled_position_info.head_position_y,\n", ")\n", "axes[1].set_xlabel(\"x-position [cm]\", fontsize=18)\n", "axes[1].set_ylabel(\"y-position [cm]\", fontsize=18)\n", - "axes[1].set_title(\"Upsampled Head Position\", fontsize=28)\n" + "axes[1].set_title(\"Upsampled Head Position\", fontsize=28)" ] }, { @@ -4190,7 +4194,7 @@ "axes[1].plot(upsampled_position_info.index, upsampled_position_info.head_speed)\n", "axes[1].set_xlabel(\"Time\", fontsize=18)\n", "axes[1].set_ylabel(\"Speed [cm/s]\", fontsize=18)\n", - "axes[1].set_title(\"Upsampled Head Speed\", fontsize=28)\n" + "axes[1].set_title(\"Upsampled Head Speed\", fontsize=28)" ] }, { @@ -4232,11 +4236,12 @@ "axes[0].set_title(\"Head Velocity\", fontsize=28)\n", "\n", "axes[1].plot(\n", - " upsampled_position_info.head_velocity_x, upsampled_position_info.head_velocity_y\n", + " upsampled_position_info.head_velocity_x,\n", + " upsampled_position_info.head_velocity_y,\n", ")\n", "axes[1].set_xlabel(\"x-velocity [cm/s]\", fontsize=18)\n", "axes[1].set_ylabel(\"y-velocity [cm/s]\", fontsize=18)\n", - "axes[1].set_title(\"Upsampled Head Velocity\", fontsize=28)\n" + "axes[1].set_title(\"Upsampled Head Velocity\", fontsize=28)" ] }, { diff --git a/pyproject.toml b/pyproject.toml index f9c5bf993..1acd049f1 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,4 @@ -[build-system] -requires = ["setuptools>=61.0", "wheel"] -build-backend = "setuptools.build_meta" + [project] name = "spyglass-neuro" @@ -61,6 +59,13 @@ dependencies = [ "ndx_franklab_novela>=0.1.0", ] +[build-system] +requires = ["setuptools>=61.0", "wheel"] +build-backend = "setuptools.build_meta" + +[tool.setuptools.packages.find] +where = ["src"] + [project.scripts] spyglass_cli = 
"spyglass.cli:cli" @@ -70,8 +75,9 @@ spyglass_cli = "spyglass.cli:cli" [project.optional-dependencies] position = ["ffmpeg", "numba>=0.54", "deeplabcut<2.3.0"] + [tool.black] -# line-length = 120 +line-length = 80 [tool.codespell] skip = '.git,*.pdf,*.svg,*.ipynb' diff --git a/requirements-dev.txt b/requirements-dev.txt index ae9eab4a0..2419ad30a 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -1,5 +1,5 @@ -pytest -pytest-cov -kachery +pytest # unit testing +pytest-cov # code coverage +kachery # database access kachery-client kachery-cloud diff --git a/requirements-docs.txt b/requirements-docs.txt new file mode 100644 index 000000000..185cb565f --- /dev/null +++ b/requirements-docs.txt @@ -0,0 +1,9 @@ +mike # Docs versioning +mkdocs # Docs core +mkdocs-exclude # Docs exclude files +mkdocs-exclude-search # Docs exclude files in search +mkdocs-gen-files # Docs API generator +mkdocs-jupyter # Docs render notebooks +mkdocs-literate-nav # Dynamic page list for API docs +mkdocs-material # Docs theme +mkdocstrings[python] # Docs API docstrings \ No newline at end of file diff --git a/setup.cfg b/setup.cfg index 2455897c8..bf615cc86 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,5 +1,5 @@ [flake8] -max-line-length = 120 +max-line-length = 80 max-complexity = 17 exclude = .git, diff --git a/setup.py b/setup.py index 3e1cbcd3f..7f1a1763c 100644 --- a/setup.py +++ b/setup.py @@ -1,3 +1,4 @@ -from setuptools import find_packages, setup +from setuptools import setup -setup() +if __name__ == "__main__": + setup() diff --git a/src/spyglass/cli/cli.py b/src/spyglass/cli/cli.py index 081aa4733..ba150c766 100644 --- a/src/spyglass/cli/cli.py +++ b/src/spyglass/cli/cli.py @@ -144,7 +144,9 @@ def list_sort_groups(nwb_file_name: str): def list_sort_group_electrodes(nwb_file_name: str): import spyglass.spikesorting as nds - results = nds.SortGroup.SortGroupElectrode & {"nwb_file_name": nwb_file_name} + results = nds.SortGroup.SortGroupElectrode & { + "nwb_file_name": nwb_file_name + } print(results) @@ -180,7 +182,9 @@ def list_sort_intervals(nwb_file_name: str): @click.command(help="Insert spike sorting preprocessing parameters") @click.argument("yaml_file_name", required=False) -def insert_spike_sorting_preprocessing_parameters(yaml_file_name: Union[str, None]): +def insert_spike_sorting_preprocessing_parameters( + yaml_file_name: Union[str, None] +): if yaml_file_name is None: print("You must specify a yaml file. Sample content:") print("==========================================") @@ -225,7 +229,11 @@ def insert_artifact_detection_parameters(yaml_file_name: Union[str, None]): if yaml_file_name is None: print("You must specify a yaml file. 
Sample content:") print("==========================================") - print(yaml.safe_dump(sample_artifact_detection_parameters, sort_keys=False)) + print( + yaml.safe_dump( + sample_artifact_detection_parameters, sort_keys=False + ) + ) return import spyglass.spikesorting as nds @@ -313,7 +321,9 @@ def create_spike_sorting_recording_view( x = {k: x[k] for k in sample_spike_sorting_recording_selection_key.keys()} if replace: (ndf.SpikeSortingRecordingView & x).delete() - ndf.SpikeSortingRecordingView.populate([(nds.SpikeSortingRecording & x).proj()]) + ndf.SpikeSortingRecordingView.populate( + [(nds.SpikeSortingRecording & x).proj()] + ) figurl = (ndf.SpikeSortingRecordingView & x).fetch1("figurl") print(figurl) @@ -383,7 +393,9 @@ def run_spike_sorting(yaml_file_name: Union[str, None]): k: x[k] for k in sample_spike_sorting_recording_selection_key.keys() } spike_sorting_recording_key = ( - (nds.SpikeSortingRecording & spike_sorting_recording_query).proj().fetch1() + (nds.SpikeSortingRecording & spike_sorting_recording_query) + .proj() + .fetch1() ) artifact_key = dict( @@ -394,9 +406,9 @@ def run_spike_sorting(yaml_file_name: Union[str, None]): nds.ArtifactDetection.populate( [(nds.ArtifactDetectionSelection & artifact_key).proj()] ) - artifact_removed_interval_list_name = (nds.ArtifactDetection & artifact_key).fetch1( - "artifact_removed_interval_list_name" - ) + artifact_removed_interval_list_name = ( + nds.ArtifactDetection & artifact_key + ).fetch1("artifact_removed_interval_list_name") sorter_params_name = x["sorter_params_name"] sorter = ( @@ -413,7 +425,9 @@ def run_spike_sorting(yaml_file_name: Union[str, None]): ) nds.SpikeSortingSelection.insert1(sorting_key, skip_duplicates=True) - nds.SpikeSorting.populate([(nds.SpikeSortingSelection & sorting_key).proj()]) + nds.SpikeSorting.populate( + [(nds.SpikeSortingSelection & sorting_key).proj()] + ) @click.command(help="List spike sorting for a session.") diff --git a/src/spyglass/common/__init__.py b/src/spyglass/common/__init__.py index 3f5cbb743..3c1182f10 100644 --- a/src/spyglass/common/__init__.py +++ b/src/spyglass/common/__init__.py @@ -2,7 +2,12 @@ import spyglass as sg from .common_backup import CuratedSpikeSortingBackUp, SpikeSortingBackUp -from .common_behav import PositionSource, RawPosition, StateScriptFile, VideoFile +from .common_behav import ( + PositionSource, + RawPosition, + StateScriptFile, + VideoFile, +) from .common_device import ( CameraDevice, DataAcquisitionDevice, diff --git a/src/spyglass/common/common_behav.py b/src/spyglass/common/common_behav.py index 25ec48b7e..842bdfa4f 100644 --- a/src/spyglass/common/common_behav.py +++ b/src/spyglass/common/common_behav.py @@ -96,7 +96,9 @@ def make(self, key): # TODO refactor this. 
this calculates sampling rate (unused here) and is expensive to do twice pos_dict = get_all_spatial_series(nwbf) for epoch in pos_dict: - if key["interval_list_name"] == PositionSource.get_pos_interval_name(epoch): + if key[ + "interval_list_name" + ] == PositionSource.get_pos_interval_name(epoch): pdict = pos_dict[epoch] key["raw_position_object_id"] = pdict["raw_position_object_id"] self.insert1(key) @@ -139,7 +141,9 @@ def make(self, key): return for associated_file_obj in associated_files.data_interfaces.values(): - if not isinstance(associated_file_obj, ndx_franklab_novela.AssociatedFiles): + if not isinstance( + associated_file_obj, ndx_franklab_novela.AssociatedFiles + ): print( f'Data interface {associated_file_obj.name} within "associated_files" processing module is not ' f"of expected type ndx_franklab_novela.AssociatedFiles\n" @@ -153,8 +157,10 @@ def make(self, key): print(associated_file_obj.description) if ( "statescript".upper() in associated_file_obj.description.upper() - or "state_script".upper() in associated_file_obj.description.upper() - or "state script".upper() in associated_file_obj.description.upper() + or "state_script".upper() + in associated_file_obj.description.upper() + or "state script".upper() + in associated_file_obj.description.upper() ): # find the file associated with this epoch if str(key["epoch"]) in epoch_list: @@ -189,7 +195,9 @@ def make(self, key): nwb_file_name = key["nwb_file_name"] nwb_file_abspath = Nwbfile.get_abs_path(nwb_file_name) nwbf = get_nwb_file(nwb_file_abspath) - video = get_data_interface(nwbf, "video", pynwb.behavior.BehavioralEvents) + video = get_data_interface( + nwbf, "video", pynwb.behavior.BehavioralEvents + ) if video is None: print(f"No video data interface found in {nwb_file_name}\n") diff --git a/src/spyglass/common/common_device.py b/src/spyglass/common/common_device.py index 3b6b4ecbd..7849e6099 100644 --- a/src/spyglass/common/common_device.py +++ b/src/spyglass/common/common_device.py @@ -266,7 +266,9 @@ def _add_amplifier(cls, amplifier): ) if val.lower() in ["y", "yes"]: key = {"data_acquisition_device_amplifier": amplifier} - DataAcquisitionDeviceAmplifier.insert1(key, skip_duplicates=True) + DataAcquisitionDeviceAmplifier.insert1( + key, skip_duplicates=True + ) else: raise PopulateException( f"User chose not to add data acquisition device amplifier '{amplifier}' to the database." 
@@ -461,7 +463,9 @@ def get_all_probe_names(cls, nwbf, config): # make a dict mapping probe type to dict of device metadata from the config YAML if exists if "Probe" in config: - config_probes = [probe_dict["probe_type"] for probe_dict in config["Probe"]] + config_probes = [ + probe_dict["probe_type"] for probe_dict in config["Probe"] + ] else: config_probes = list() @@ -484,7 +488,9 @@ def __read_ndx_probe_data( getattr(nwb_probe_obj, "manufacturer") or "" ) new_probe_type_dict["probe_type"] = nwb_probe_obj.probe_type - new_probe_type_dict["probe_description"] = nwb_probe_obj.probe_description + new_probe_type_dict[ + "probe_description" + ] = nwb_probe_obj.probe_description new_probe_type_dict["num_shanks"] = len(nwb_probe_obj.shanks) cls._add_probe_type(new_probe_type_dict) @@ -505,12 +511,18 @@ def __read_ndx_probe_data( for electrode in shank.shanks_electrodes.values(): # the next line will need to be fixed if we have different sized contacts on a shank elect_dict[electrode.name] = dict() - elect_dict[electrode.name]["probe_id"] = new_probe_dict["probe_type"] - elect_dict[electrode.name]["probe_shank"] = shank_dict[shank.name][ - "probe_shank" + elect_dict[electrode.name]["probe_id"] = new_probe_dict[ + "probe_type" ] - elect_dict[electrode.name]["contact_size"] = nwb_probe_obj.contact_size - elect_dict[electrode.name]["probe_electrode"] = int(electrode.name) + elect_dict[electrode.name]["probe_shank"] = shank_dict[ + shank.name + ]["probe_shank"] + elect_dict[electrode.name][ + "contact_size" + ] = nwb_probe_obj.contact_size + elect_dict[electrode.name]["probe_electrode"] = int( + electrode.name + ) elect_dict[electrode.name]["rel_x"] = electrode.rel_x elect_dict[electrode.name]["rel_y"] = electrode.rel_y elect_dict[electrode.name]["rel_z"] = electrode.rel_z @@ -616,7 +628,9 @@ def create_from_nwbfile( query = ProbeType & {"probe_type": probe_type} if len(query) == 0: - print(f"No ProbeType found with probe_type '{probe_type}'. Aborting.") + print( + f"No ProbeType found with probe_type '{probe_type}'. Aborting." 
+ ) return new_probe_dict = dict() @@ -649,7 +663,9 @@ def create_from_nwbfile( # build the dictionary of Probe.Shank data shank_dict[shank_index] = dict() - shank_dict[shank_index]["probe_id"] = new_probe_dict["probe_id"] + shank_dict[shank_index]["probe_id"] = new_probe_dict[ + "probe_id" + ] shank_dict[shank_index]["probe_shank"] = shank_index # get the probe shank index associated with this Electrode diff --git a/src/spyglass/common/common_dio.py b/src/spyglass/common/common_dio.py index b83045156..f5ad37de1 100644 --- a/src/spyglass/common/common_dio.py +++ b/src/spyglass/common/common_dio.py @@ -39,9 +39,9 @@ def make(self, key): return # the times for these events correspond to the valid times for the raw data - key["interval_list_name"] = (Raw() & {"nwb_file_name": nwb_file_name}).fetch1( - "interval_list_name" - ) + key["interval_list_name"] = ( + Raw() & {"nwb_file_name": nwb_file_name} + ).fetch1("interval_list_name") for event_series in behav_events.time_series.values(): key["dio_event_name"] = event_series.name key["dio_object_id"] = event_series.object_id @@ -66,7 +66,10 @@ def plot_all_dio_events(self): epoch_valid_times = ( pd.DataFrame( IntervalList() - & [{"nwb_file_name": nwb_file_name} for nwb_file_name in nwb_file_names] + & [ + {"nwb_file_name": nwb_file_name} + for nwb_file_name in nwb_file_names + ] ) .set_index("interval_list_name") .filter(regex=r"^[0-9]", axis=0) @@ -102,7 +105,9 @@ def plot_all_dio_events(self): where="post", color="black", ) - ax.set_ylabel(event["dio_event_name"], rotation=0, ha="right", va="center") + ax.set_ylabel( + event["dio_event_name"], rotation=0, ha="right", va="center" + ) ax.set_yticks([]) ax.set_xlabel("Time") diff --git a/src/spyglass/common/common_ephys.py b/src/spyglass/common/common_ephys.py index e993d9a6d..e05acd56e 100644 --- a/src/spyglass/common/common_ephys.py +++ b/src/spyglass/common/common_ephys.py @@ -56,9 +56,13 @@ def make(self, key): if isinstance(electrode_group.device, ndx_franklab_novela.Probe): key["probe_id"] = electrode_group.device.probe_type key["description"] = electrode_group.description - if isinstance(electrode_group, ndx_franklab_novela.NwbElectrodeGroup): + if isinstance( + electrode_group, ndx_franklab_novela.NwbElectrodeGroup + ): # Define target_hemisphere based on targeted x coordinate - if electrode_group.targeted_x >= 0: # if positive or zero x coordinate + if ( + electrode_group.targeted_x >= 0 + ): # if positive or zero x coordinate # define target location as right hemisphere key["target_hemisphere"] = "Right" else: # if negative x coordinate @@ -134,7 +138,9 @@ def make(self, key): key["probe_id"] = elect_data.group.device.probe_type key["probe_shank"] = elect_data.probe_shank key["probe_electrode"] = elect_data.probe_electrode - key["bad_channel"] = "True" if elect_data.bad_channel else "False" + key["bad_channel"] = ( + "True" if elect_data.bad_channel else "False" + ) key["original_reference_electrode"] = elect_data.ref_elect_id # override with information from the config YAML based on primary key (electrode id) @@ -199,7 +205,9 @@ def create_from_config(cls, nwb_file_name: str): cls.update1(key) print(f"Updated Electrode with ID {nwbfile_elect_id}.") else: - cls.insert1(key, skip_duplicates=True, allow_direct_insert=True) + cls.insert1( + key, skip_duplicates=True, allow_direct_insert=True + ) print(f"Inserted Electrode with ID {nwbfile_elect_id}.") else: warnings.warn( @@ -232,7 +240,9 @@ def make(self, key): rawdata = nwbf.get_acquisition() assert isinstance(rawdata, 
pynwb.ecephys.ElectricalSeries) except (ValueError, AssertionError): - warnings.warn(f"Unable to get acquisition object in: {nwb_file_abspath}") + warnings.warn( + f"Unable to get acquisition object in: {nwb_file_abspath}" + ) return if rawdata.rate is not None: sampling_rate = rawdata.rate @@ -266,7 +276,9 @@ def make(self, key): key["raw_object_id"] = rawdata.object_id key["sampling_rate"] = sampling_rate print(f'Importing raw data: Sampling rate:\t{key["sampling_rate"]} Hz') - print(f'Number of valid intervals:\t{len(interval_dict["valid_times"])}') + print( + f'Number of valid intervals:\t{len(interval_dict["valid_times"])}' + ) key["interval_list_name"] = raw_interval_name key["comments"] = rawdata.comments key["description"] = rawdata.description @@ -341,19 +353,26 @@ def set_lfp_electrodes(self, nwb_file_name, electrode_list): # remove the session and then recreate the session and Electrode list (LFPSelection() & {"nwb_file_name": nwb_file_name}).delete() # check to see if the user allowed the deletion - if len((LFPSelection() & {"nwb_file_name": nwb_file_name}).fetch()) == 0: + if ( + len((LFPSelection() & {"nwb_file_name": nwb_file_name}).fetch()) + == 0 + ): LFPSelection().insert1({"nwb_file_name": nwb_file_name}) # TODO: do this in a better way - all_electrodes = (Electrode() & {"nwb_file_name": nwb_file_name}).fetch( - as_dict=True - ) + all_electrodes = ( + Electrode() & {"nwb_file_name": nwb_file_name} + ).fetch(as_dict=True) primary_key = Electrode.primary_key for e in all_electrodes: # create a dictionary so we can insert new elects if e["electrode_id"] in electrode_list: - lfpelectdict = {k: v for k, v in e.items() if k in primary_key} - LFPSelection().LFPElectrode.insert1(lfpelectdict, replace=True) + lfpelectdict = { + k: v for k, v in e.items() if k in primary_key + } + LFPSelection().LFPElectrode.insert1( + lfpelectdict, replace=True + ) @schema @@ -422,7 +441,10 @@ def make(self, key): lfp_file_name = AnalysisNwbfile().create(key["nwb_file_name"]) lfp_file_abspath = AnalysisNwbfile().get_abs_path(lfp_file_name) - lfp_object_id, timestamp_interval = FirFilterParameters().filter_data_nwb( + ( + lfp_object_id, + timestamp_interval, + ) = FirFilterParameters().filter_data_nwb( lfp_file_abspath, rawdata, filter_coeff, @@ -454,9 +476,9 @@ def make(self, key): def nwb_object(self, key): # return the NWB object in the raw NWB file - lfp_file_name = (LFP() & {"nwb_file_name": key["nwb_file_name"]}).fetch1( - "analysis_file_name" - ) + lfp_file_name = ( + LFP() & {"nwb_file_name": key["nwb_file_name"]} + ).fetch1("analysis_file_name") lfp_file_abspath = AnalysisNwbfile().get_abs_path(lfp_file_name) lfp_nwbf = get_nwb_file(lfp_file_abspath) # get the object id @@ -473,7 +495,8 @@ def fetch_nwb(self, *attrs, **kwargs): def fetch1_dataframe(self, *attrs, **kwargs): nwb_lfp = self.fetch_nwb()[0] return pd.DataFrame( - nwb_lfp["lfp"].data, index=pd.Index(nwb_lfp["lfp"].timestamps, name="time") + nwb_lfp["lfp"].data, + index=pd.Index(nwb_lfp["lfp"].timestamps, name="time"), ) @@ -558,9 +581,9 @@ def set_lfp_band_electrodes( "added before this function is called" ) # reference_electrode_list - if len(reference_electrode_list) != 1 and len(reference_electrode_list) != len( - electrode_list - ): + if len(reference_electrode_list) != 1 and len( + reference_electrode_list + ) != len(electrode_list): raise ValueError( "reference_electrode_list must contain either 1 or len(electrode_list) elements" ) @@ -598,7 +621,10 @@ def set_lfp_band_electrodes( # iterate through all of the new elements 
and add them for e, r in zip(electrode_list, ref_list): key["electrode_id"] = e - query = Electrode & {"nwb_file_name": nwb_file_name, "electrode_id": e} + query = Electrode & { + "nwb_file_name": nwb_file_name, + "electrode_id": e, + } key["electrode_group_name"] = query.fetch1("electrode_group_name") key["reference_elect_id"] = r self.LFPBandElectrode().insert1(key, skip_duplicates=True) @@ -616,9 +642,9 @@ class LFPBand(dj.Computed): def make(self, key): # get the NWB object with the lfp data; FIX: change to fetch with additional infrastructure - lfp_object = (LFP() & {"nwb_file_name": key["nwb_file_name"]}).fetch_nwb()[0][ - "lfp" - ] + lfp_object = ( + LFP() & {"nwb_file_name": key["nwb_file_name"]} + ).fetch_nwb()[0]["lfp"] # get the electrodes to be filtered and their references lfp_band_elect_id, lfp_band_ref_id = ( @@ -632,12 +658,12 @@ def make(self, key): lfp_band_elect_id = lfp_band_elect_id[lfp_sort_order] lfp_band_ref_id = lfp_band_ref_id[lfp_sort_order] - lfp_sampling_rate = (LFP() & {"nwb_file_name": key["nwb_file_name"]}).fetch1( - "lfp_sampling_rate" - ) - interval_list_name, lfp_band_sampling_rate = (LFPBandSelection() & key).fetch1( - "target_interval_list_name", "lfp_band_sampling_rate" - ) + lfp_sampling_rate = ( + LFP() & {"nwb_file_name": key["nwb_file_name"]} + ).fetch1("lfp_sampling_rate") + interval_list_name, lfp_band_sampling_rate = ( + LFPBandSelection() & key + ).fetch1("target_interval_list_name", "lfp_band_sampling_rate") valid_times = ( IntervalList() & { @@ -647,9 +673,9 @@ def make(self, key): ).fetch1("valid_times") # the valid_times for this interval may be slightly beyond the valid times for the lfp itself, # so we have to intersect the two - lfp_interval_list = (LFP() & {"nwb_file_name": key["nwb_file_name"]}).fetch1( - "interval_list_name" - ) + lfp_interval_list = ( + LFP() & {"nwb_file_name": key["nwb_file_name"]} + ).fetch1("interval_list_name") lfp_valid_times = ( IntervalList() & { @@ -664,14 +690,18 @@ def make(self, key): filter_name, filter_sampling_rate, lfp_band_sampling_rate = ( LFPBandSelection() & key - ).fetch1("filter_name", "filter_sampling_rate", "lfp_band_sampling_rate") + ).fetch1( + "filter_name", "filter_sampling_rate", "lfp_band_sampling_rate" + ) decimation = int(lfp_sampling_rate) // lfp_band_sampling_rate # load in the timestamps timestamps = np.asarray(lfp_object.timestamps) # get the indices of the first timestamp and the last timestamp that are within the valid times - included_indices = interval_list_contains_ind(lfp_band_valid_times, timestamps) + included_indices = interval_list_contains_ind( + lfp_band_valid_times, timestamps + ) # pad the indices by 1 on each side to avoid message in filter_data if included_indices[0] > 0: included_indices[0] -= 1 @@ -687,14 +717,17 @@ def make(self, key): ) # get the indices of the electrodes to be filtered and the references - lfp_band_elect_index = get_electrode_indices(lfp_object, lfp_band_elect_id) + lfp_band_elect_index = get_electrode_indices( + lfp_object, lfp_band_elect_id + ) lfp_band_ref_index = get_electrode_indices(lfp_object, lfp_band_ref_id) # subtract off the references for the selected channels for index, elect_index in enumerate(lfp_band_elect_index): if lfp_band_ref_id[index] != -1: lfp_data[:, elect_index] = ( - lfp_data[:, elect_index] - lfp_data[:, lfp_band_ref_index[index]] + lfp_data[:, elect_index] + - lfp_data[:, lfp_band_ref_index[index]] ) # get the LFP filter that matches the raw data @@ -718,7 +751,9 @@ def make(self, key): # create the analysis nwb file 
to store the results. lfp_band_file_name = AnalysisNwbfile().create(key["nwb_file_name"]) - lfp_band_file_abspath = AnalysisNwbfile().get_abs_path(lfp_band_file_name) + lfp_band_file_abspath = AnalysisNwbfile().get_abs_path( + lfp_band_file_name + ) # filter the data and write to an the nwb file filtered_data, new_timestamps = FirFilterParameters().filter_data( timestamps, @@ -760,7 +795,10 @@ def make(self, key): # finally, we need to censor the valid times to account for the downsampling if this is the first time we've # downsampled these data key["interval_list_name"] = ( - interval_list_name + " lfp band " + str(lfp_band_sampling_rate) + "Hz" + interval_list_name + + " lfp band " + + str(lfp_band_sampling_rate) + + "Hz" ) tmp_valid_times = ( IntervalList @@ -785,7 +823,9 @@ def make(self, key): # check that the valid times are the same assert np.isclose( tmp_valid_times[0], lfp_band_valid_times - ).all(), "previously saved lfp band times do not match current times" + ).all(), ( + "previously saved lfp band times do not match current times" + ) self.insert1(key) @@ -798,7 +838,9 @@ def fetch1_dataframe(self, *attrs, **kwargs): filtered_nwb = self.fetch_nwb()[0] return pd.DataFrame( filtered_nwb["filtered_data"].data, - index=pd.Index(filtered_nwb["filtered_data"].timestamps, name="time"), + index=pd.Index( + filtered_nwb["filtered_data"].timestamps, name="time" + ), ) diff --git a/src/spyglass/common/common_filter.py b/src/spyglass/common/common_filter.py index 3636eda25..f7f741984 100644 --- a/src/spyglass/common/common_filter.py +++ b/src/spyglass/common/common_filter.py @@ -79,7 +79,8 @@ def add_filter(self, filter_name, fs, filter_type, band_edges, comments=""): return None # the transition width is the mean of the widths of left and right transition regions tw = ( - (band_edges[1] - band_edges[0]) + (band_edges[3] - band_edges[2]) + (band_edges[1] - band_edges[0]) + + (band_edges[3] - band_edges[2]) ) / 2.0 else: @@ -287,12 +288,14 @@ def filter_data_nwb( mem = psutil.virtual_memory() interval_samples = stop - start if ( - interval_samples * (timestamp_size + n_electrodes * data_size) + interval_samples + * (timestamp_size + n_electrodes * data_size) < 0.9 * mem.available ): print(f"Interval {ii}: loading data into memory") timestamps = np.asarray( - timestamps_on_disk[start:stop], dtype=timestamp_dtype + timestamps_on_disk[start:stop], + dtype=timestamp_dtype, ) if time_axis == 0: data = np.asarray( @@ -354,7 +357,13 @@ def filter_data_nwb( return es.object_id, start_end def filter_data( - self, timestamps, data, filter_coeff, valid_times, electrodes, decimation + self, + timestamps, + data, + filter_coeff, + valid_times, + electrodes, + decimation, ): """ :param timestamps: numpy array with list of timestamps for data @@ -414,7 +423,9 @@ def filter_data( # create the dataset and the timestamps array filtered_data = np.empty(tuple(output_shape_list), dtype=data.dtype) - new_timestamps = np.empty((output_shape_list[time_axis],), timestamps.dtype) + new_timestamps = np.empty( + (output_shape_list[time_axis],), timestamps.dtype + ) indices = np.array(indices, ndmin=2) @@ -425,7 +436,9 @@ def filter_data( extracted_ts = timestamps[start:stop:decimation] # print(f"Diffs {np.diff(extracted_ts)}") - new_timestamps[ts_offset : ts_offset + len(extracted_ts)] = extracted_ts + new_timestamps[ + ts_offset : ts_offset + len(extracted_ts) + ] = extracted_ts ts_offset += len(extracted_ts) # finally ready to filter data! 
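The reformatted `FirFilterParameters.filter_data` hunks above keep the same per-interval decimation bookkeeping: within each valid-time index range, the timestamps are sliced with a stride of `decimation` and copied into a preallocated output array at a running offset. A minimal standalone sketch of that step with toy arrays (the names `index_ranges` and `n_out` are illustrative, not Spyglass identifiers):

import numpy as np

# toy inputs: 1 kHz timestamps and two valid-time index ranges (start, stop)
timestamps = np.arange(0.0, 10.0, 0.001)      # seconds
index_ranges = [(0, 4000), (6000, 10000)]     # sample indices per valid interval
decimation = 10                               # e.g. keep every 10th sample

# preallocate the output, then fill it one interval at a time
n_out = sum(len(range(start, stop, decimation)) for start, stop in index_ranges)
new_timestamps = np.empty(n_out, dtype=timestamps.dtype)

ts_offset = 0
for start, stop in index_ranges:
    extracted_ts = timestamps[start:stop:decimation]  # strided slice = decimation
    new_timestamps[ts_offset : ts_offset + len(extracted_ts)] = extracted_ts
    ts_offset += len(extracted_ts)

assert ts_offset == n_out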
diff --git a/src/spyglass/common/common_interval.py b/src/spyglass/common/common_interval.py index 95081679a..9aa184e9e 100644 --- a/src/spyglass/common/common_interval.py +++ b/src/spyglass/common/common_interval.py @@ -50,7 +50,9 @@ def insert_from_nwbfile(cls, nwbf, *, nwb_file_name): if epoch_data.tags[0]: epoch_dict["interval_list_name"] = epoch_data.tags[0] else: - epoch_dict["interval_list_name"] = "interval_" + str(epoch_index) + epoch_dict["interval_list_name"] = "interval_" + str( + epoch_index + ) epoch_dict["valid_times"] = np.asarray( [[epoch_data.start_time, epoch_data.stop_time]] ) @@ -64,7 +66,10 @@ def plot_intervals(self, figsize=(20, 5)): for interval in row.valid_times: ax.plot(interval, [interval_count, interval_count]) ax.scatter( - interval, [interval_count, interval_count], alpha=0.8, zorder=2 + interval, + [interval_count, interval_count], + alpha=0.8, + zorder=2, ) interval_count += 1 ax.set_yticks(np.arange(interval_list.shape[0])) @@ -91,10 +96,14 @@ def plot_epoch_pos_raw_intervals(self, figsize=(20, 5)): .valid_times ) interval_y = 2 - for epoch, valid_times in zip(epoch_valid_times.index, epoch_valid_times): + for epoch, valid_times in zip( + epoch_valid_times.index, epoch_valid_times + ): for interval in valid_times: ax.plot(interval, [interval_y, interval_y]) - ax.scatter(interval, [interval_y, interval_y], alpha=0.8, zorder=2) + ax.scatter( + interval, [interval_y, interval_y], alpha=0.8, zorder=2 + ) ax.text( interval[0] + np.diff(interval)[0] / 2, interval_y, @@ -112,7 +121,9 @@ def plot_epoch_pos_raw_intervals(self, figsize=(20, 5)): for epoch, valid_times in zip(pos_valid_times.index, pos_valid_times): for interval in valid_times: ax.plot(interval, [interval_y, interval_y]) - ax.scatter(interval, [interval_y, interval_y], alpha=0.8, zorder=2) + ax.scatter( + interval, [interval_y, interval_y], alpha=0.8, zorder=2 + ) ax.text( interval[0] + np.diff(interval)[0] / 2, interval_y, @@ -141,7 +152,9 @@ def intervals_by_length(interval_list, min_length=0.0, max_length=1e10): Maximum interval length in seconds. Defaults to 1e10. 
""" lengths = np.ravel(np.diff(interval_list)) - return interval_list[np.logical_and(lengths > min_length, lengths < max_length)] + return interval_list[ + np.logical_and(lengths > min_length, lengths < max_length) + ] def interval_list_contains_ind(interval_list, timestamps): @@ -157,7 +170,9 @@ def interval_list_contains_ind(interval_list, timestamps): for interval in interval_list: ind += np.ravel( np.argwhere( - np.logical_and(timestamps >= interval[0], timestamps <= interval[1]) + np.logical_and( + timestamps >= interval[0], timestamps <= interval[1] + ) ) ).tolist() return np.asarray(ind) @@ -176,7 +191,9 @@ def interval_list_contains(interval_list, timestamps): for interval in interval_list: ind += np.ravel( np.argwhere( - np.logical_and(timestamps >= interval[0], timestamps <= interval[1]) + np.logical_and( + timestamps >= interval[0], timestamps <= interval[1] + ) ) ).tolist() return timestamps[ind] @@ -272,7 +289,9 @@ def interval_list_intersect(interval_list1, interval_list2, min_length=0): for interval2 in interval_list2: for interval1 in interval_list1: if _intersection(interval2, interval1) is not None: - intersecting_intervals.append(_intersection(interval1, interval2)) + intersecting_intervals.append( + _intersection(interval1, interval2) + ) # if no intersection, then return an empty list if not intersecting_intervals: @@ -283,7 +302,9 @@ def interval_list_intersect(interval_list1, interval_list2, min_length=0): np.argsort(intersecting_intervals[:, 0]) ] - return intervals_by_length(intersecting_intervals, min_length=min_length) + return intervals_by_length( + intersecting_intervals, min_length=min_length + ) def _intersection(interval1, interval2): @@ -303,7 +324,10 @@ def _union(interval1, interval2): return np.array([interval1, interval2]) else: return np.array( - [min([interval1[0], interval2[0]]), max([interval1[1], interval2[1]])] + [ + min([interval1[0], interval2[0]]), + max([interval1[1], interval2[1]]), + ] ) @@ -436,7 +460,9 @@ def interval_from_inds(list_frames): """ list_frames = np.unique(list_frames) interval_list = [] - for key, group in itertools.groupby(enumerate(list_frames), lambda t: t[1] - t[0]): + for key, group in itertools.groupby( + enumerate(list_frames), lambda t: t[1] - t[0] + ): group = list(group) interval_list.append([group[0][1], group[-1][1]]) return np.asarray(interval_list) diff --git a/src/spyglass/common/common_lab.py b/src/spyglass/common/common_lab.py index 99c3a9382..95f332b69 100644 --- a/src/spyglass/common/common_lab.py +++ b/src/spyglass/common/common_lab.py @@ -40,7 +40,9 @@ def insert_from_nwbfile(cls, nwbf): for experimenter in nwbf.experimenter: cls.insert_from_name(experimenter) # each person is by default the member of their own LabTeam (same as their name) - LabTeam.create_new_team(team_name=experimenter, team_members=[experimenter]) + LabTeam.create_new_team( + team_name=experimenter, team_members=[experimenter] + ) @classmethod def insert_from_name(cls, full_name): @@ -133,7 +135,9 @@ def insert_from_nwbfile(cls, nwbf): if nwbf.institution is None: print("No institution metadata found.\n") return - cls.insert1(dict(institution_name=nwbf.institution), skip_duplicates=True) + cls.insert1( + dict(institution_name=nwbf.institution), skip_duplicates=True + ) @schema diff --git a/src/spyglass/common/common_nwbfile.py b/src/spyglass/common/common_nwbfile.py index 5e60b3ff0..c86087973 100644 --- a/src/spyglass/common/common_nwbfile.py +++ b/src/spyglass/common/common_nwbfile.py @@ -172,7 +172,9 @@ def create(self, 
nwb_file_name): analysis_file_name = self.__get_new_file_name(nwb_file_name) # write the new file print(f"Writing new NWB file {analysis_file_name}") - analysis_file_abs_path = AnalysisNwbfile.get_abs_path(analysis_file_name) + analysis_file_abs_path = AnalysisNwbfile.get_abs_path( + analysis_file_name + ) # export the new NWB file with pynwb.NWBHDF5IO( path=analysis_file_abs_path, mode="w", manager=io.manager @@ -193,10 +195,14 @@ def __get_new_file_name(cls, nwb_file_name): while file_in_table: analysis_file_name = ( os.path.splitext(nwb_file_name)[0] - + "".join(random.choices(string.ascii_uppercase + string.digits, k=10)) + + "".join( + random.choices(string.ascii_uppercase + string.digits, k=10) + ) + ".nwb" ) - file_in_table = AnalysisNwbfile & {"analysis_file_name": analysis_file_name} + file_in_table = AnalysisNwbfile & { + "analysis_file_name": analysis_file_name + } return analysis_file_name @@ -232,7 +238,9 @@ def copy(cls, nwb_file_name): analysis_file_name = cls.__get_new_file_name(original_nwb_file_name) # write the new file print(f"Writing new NWB file {analysis_file_name}...") - analysis_file_abs_path = AnalysisNwbfile.get_abs_path(analysis_file_name) + analysis_file_abs_path = AnalysisNwbfile.get_abs_path( + analysis_file_name + ) # export the new NWB file with pynwb.NWBHDF5IO( path=analysis_file_abs_path, mode="w", manager=io.manager @@ -255,7 +263,9 @@ def add(self, nwb_file_name, analysis_file_name): key["nwb_file_name"] = nwb_file_name key["analysis_file_name"] = analysis_file_name key["analysis_file_description"] = "" - key["analysis_file_abs_path"] = AnalysisNwbfile.get_abs_path(analysis_file_name) + key["analysis_file_abs_path"] = AnalysisNwbfile.get_abs_path( + analysis_file_name + ) self.insert1(key) @staticmethod @@ -289,13 +299,17 @@ def get_abs_path(analysis_nwb_file_name): analysis_file_base_path = ( base_dir / "analysis" - / AnalysisNwbfile.__get_analysis_file_dir(analysis_nwb_file_name) + / AnalysisNwbfile.__get_analysis_file_dir( + analysis_nwb_file_name + ) ) if not analysis_file_base_path.exists(): os.mkdir(str(analysis_file_base_path)) return str(analysis_file_base_path / analysis_nwb_file_name) - def add_nwb_object(self, analysis_file_name, nwb_object, table_name="pandas_table"): + def add_nwb_object( + self, analysis_file_name, nwb_object, table_name="pandas_table" + ): # TODO: change to add_object with checks for object type and a name parameter, which should be specified if # it is not an NWB container """Add an NWB object to the analysis file in the scratch area and returns the NWB object ID @@ -315,11 +329,15 @@ def add_nwb_object(self, analysis_file_name, nwb_object, table_name="pandas_tabl The NWB object ID of the added object. 
""" with pynwb.NWBHDF5IO( - path=self.get_abs_path(analysis_file_name), mode="a", load_namespaces=True + path=self.get_abs_path(analysis_file_name), + mode="a", + load_namespaces=True, ) as io: nwbf = io.read() if isinstance(nwb_object, pd.DataFrame): - dt_object = DynamicTable.from_dataframe(name=table_name, df=nwb_object) + dt_object = DynamicTable.from_dataframe( + name=table_name, df=nwb_object + ) nwbf.add_scratch(dt_object) io.write(nwbf) return dt_object.object_id @@ -363,7 +381,9 @@ def add_units( The NWB object id of the Units object and the object id of the waveforms object ('' if None) """ with pynwb.NWBHDF5IO( - path=self.get_abs_path(analysis_file_name), mode="a", load_namespaces=True + path=self.get_abs_path(analysis_file_name), + mode="a", + load_namespaces=True, ) as io: nwbf = io.read() sort_intervals = list() @@ -388,7 +408,9 @@ def add_units( for metric in metrics: if metrics[metric]: unit_ids = np.array(list(metrics[metric].keys())) - metric_values = np.array(list(metrics[metric].values())) + metric_values = np.array( + list(metrics[metric].values()) + ) # sort by unit_ids and apply that sorting to values to ensure that things go in the right order metric_values = metric_values[np.argsort(unit_ids)] print(f"Adding metric {metric} : {metric_values}") @@ -421,7 +443,9 @@ def add_units( name="units_waveforms", notes="spike waveforms for each unit", ) - waveforms_object_id = nwbf.scratch["units_waveforms"].object_id + waveforms_object_id = nwbf.scratch[ + "units_waveforms" + ].object_id io.write(nwbf) return nwbf.units.object_id, waveforms_object_id @@ -454,7 +478,9 @@ def add_units_waveforms( """ with pynwb.NWBHDF5IO( - path=self.get_abs_path(analysis_file_name), mode="a", load_namespaces=True + path=self.get_abs_path(analysis_file_name), + mode="a", + load_namespaces=True, ) as io: nwbf = io.read() for id in waveform_extractor.sorting.get_unit_ids(): @@ -503,11 +529,15 @@ def add_units_waveforms( print(f"Adding metric {metric_name} : {metric_dict}") metric_data = metric_dict.values().to_list() nwbf.add_unit_column( - name=metric_name, description=metric_name, data=metric_data + name=metric_name, + description=metric_name, + data=metric_data, ) if labels is not None: nwbf.add_unit_column( - name="label", description="label given during curation", data=labels + name="label", + description="label given during curation", + data=labels, ) io.write(nwbf) @@ -520,11 +550,8 @@ def add_units_metrics(self, analysis_file_name, metrics): ---------- analysis_file_name : str The name of the analysis NWB file. - waveform_extractor : si.WaveformExtractor object metrics : dict, optional Cluster metrics. 
- labels : dict, optional - Curation labels for clusters Returns ------- @@ -534,7 +561,9 @@ def add_units_metrics(self, analysis_file_name, metrics): metric_names = list(metrics.keys()) unit_ids = list(metrics[metric_names[0]].keys()) with pynwb.NWBHDF5IO( - path=self.get_abs_path(analysis_file_name), mode="a", load_namespaces=True + path=self.get_abs_path(analysis_file_name), + mode="a", + load_namespaces=True, ) as io: nwbf = io.read() for id in unit_ids: @@ -605,7 +634,9 @@ class NwbfileKachery(dj.Computed): def make(self, key): print(f'Linking {key["nwb_file_name"]} and storing in kachery...') - key["nwb_file_uri"] = kc.link_file(Nwbfile().get_abs_path(key["nwb_file_name"])) + key["nwb_file_uri"] = kc.link_file( + Nwbfile().get_abs_path(key["nwb_file_name"]) + ) self.insert1(key) diff --git a/src/spyglass/common/common_position.py b/src/spyglass/common/common_position.py index d37e1c22e..75ef28998 100644 --- a/src/spyglass/common/common_position.py +++ b/src/spyglass/common/common_position.py @@ -86,7 +86,9 @@ class IntervalPositionInfo(dj.Computed): def make(self, key): print(f"Computing position for: {key}") - key["analysis_file_name"] = AnalysisNwbfile().create(key["nwb_file_name"]) + key["analysis_file_name"] = AnalysisNwbfile().create( + key["nwb_file_name"] + ) raw_position = ( RawPosition() & { @@ -148,7 +150,10 @@ def make(self, key): conversion=METERS_PER_CM, unit="m/s", data=np.concatenate( - (position_info["velocity"], position_info["speed"][:, np.newaxis]), + ( + position_info["velocity"], + position_info["speed"][:, np.newaxis], + ), axis=1, ), comments=spatial_series.comments, @@ -193,7 +198,8 @@ def calculate_position_info_from_spatial_series( time = np.asarray(spatial_series.timestamps) # seconds position = np.asarray( pd.DataFrame( - spatial_series.data, columns=spatial_series.description.split(", ") + spatial_series.data, + columns=spatial_series.description.split(", "), ).loc[:, ["xloc", "yloc", "xloc2", "yloc2"]] ) # meters @@ -286,7 +292,8 @@ def calculate_position_info_from_spatial_series( upsampling_start_time, upsampling_end_time, n_samples ) new_index = pd.Index( - np.unique(np.concatenate((position_df.index, new_time))), name="time" + np.unique(np.concatenate((position_df.index, new_time))), + name="time", ) position_df = ( position_df.reindex(index=new_index) @@ -295,8 +302,12 @@ def calculate_position_info_from_spatial_series( ) time = np.asarray(position_df.index) - back_LED = np.asarray(position_df.loc[:, ["back_LED_x", "back_LED_y"]]) - front_LED = np.asarray(position_df.loc[:, ["front_LED_x", "front_LED_y"]]) + back_LED = np.asarray( + position_df.loc[:, ["back_LED_x", "back_LED_y"]] + ) + front_LED = np.asarray( + position_df.loc[:, ["front_LED_x", "front_LED_y"]] + ) sampling_rate = upsampling_sampling_rate @@ -316,7 +327,9 @@ def calculate_position_info_from_spatial_series( truncate=8, ) # convert back to between -pi and pi - head_orientation[~is_nan] = np.angle(np.exp(1j * head_orientation[~is_nan])) + head_orientation[~is_nan] = np.angle( + np.exp(1j * head_orientation[~is_nan]) + ) velocity = get_velocity( head_position, @@ -342,7 +355,9 @@ def fetch_nwb(self, *attrs, **kwargs): def fetch1_dataframe(self): nwb_data = self.fetch_nwb()[0] index = pd.Index( - np.asarray(nwb_data["head_position"].get_spatial_series().timestamps), + np.asarray( + nwb_data["head_position"].get_spatial_series().timestamps + ), name="time", ) COLUMNS = [ @@ -356,12 +371,16 @@ def fetch1_dataframe(self): return pd.DataFrame( np.concatenate( ( - 
np.asarray(nwb_data["head_position"].get_spatial_series().data), - np.asarray(nwb_data["head_orientation"].get_spatial_series().data)[ - :, np.newaxis - ], np.asarray( - nwb_data["head_velocity"].time_series["head_velocity"].data + nwb_data["head_position"].get_spatial_series().data + ), + np.asarray( + nwb_data["head_orientation"].get_spatial_series().data + )[:, np.newaxis], + np.asarray( + nwb_data["head_velocity"] + .time_series["head_velocity"] + .data ), ), axis=1, @@ -415,7 +434,9 @@ def get_networkx_track_graph(self, track_graph_parameters=None): def plot_track_graph(self, ax=None, draw_edge_labels=False, **kwds): """Plot the track graph in 2D position space.""" track_graph = self.get_networkx_track_graph() - plot_track_graph(track_graph, ax=ax, draw_edge_labels=draw_edge_labels, **kwds) + plot_track_graph( + track_graph, ax=ax, draw_edge_labels=draw_edge_labels, **kwds + ) def plot_track_graph_as_1D( self, @@ -468,7 +489,9 @@ class IntervalLinearizedPosition(dj.Computed): def make(self, key): print(f"Computing linear position for: {key}") - key["analysis_file_name"] = AnalysisNwbfile().create(key["nwb_file_name"]) + key["analysis_file_name"] = AnalysisNwbfile().create( + key["nwb_file_name"] + ) position_nwb = ( IntervalPositionInfo @@ -479,8 +502,12 @@ def make(self, key): } ).fetch_nwb()[0] - position = np.asarray(position_nwb["head_position"].get_spatial_series().data) - time = np.asarray(position_nwb["head_position"].get_spatial_series().timestamps) + position = np.asarray( + position_nwb["head_position"].get_spatial_series().data + ) + time = np.asarray( + position_nwb["head_position"].get_spatial_series().timestamps + ) linearization_parameters = ( LinearizationParameters() @@ -547,7 +574,9 @@ def __init__( self.cid = None self._nodes = [] self.node_color = node_color - self._nodes_plot = ax.scatter([], [], zorder=5, s=node_size, color=node_color) + self._nodes_plot = ax.scatter( + [], [], zorder=5, s=node_size, color=node_color + ) self.edges = [[]] self.video_filename = video_filename @@ -568,7 +597,9 @@ def node_positions(self): def connect(self): if self.cid is None: - self.cid = self.canvas.mpl_connect("button_press_event", self.click_event) + self.cid = self.canvas.mpl_connect( + "button_press_event", self.click_event + ) def disconnect(self): if self.cid is not None: @@ -578,15 +609,21 @@ def disconnect(self): def click_event(self, event): if not event.inaxes: return - if (event.key not in ["control", "shift"]) & (event.button == 1): # left click + if (event.key not in ["control", "shift"]) & ( + event.button == 1 + ): # left click self._nodes.append((event.xdata, event.ydata)) - if (event.key not in ["control", "shift"]) & (event.button == 3): # right click + if (event.key not in ["control", "shift"]) & ( + event.button == 3 + ): # right click self.remove_point((event.xdata, event.ydata)) if (event.key == "shift") & (event.button == 1): self.clear() if (event.key == "control") & (event.button == 1): point = (event.xdata, event.ydata) - distance_to_nodes = np.linalg.norm(self.node_positions - point, axis=1) + distance_to_nodes = np.linalg.norm( + self.node_positions - point, axis=1 + ) closest_node_ind = np.argmin(distance_to_nodes) if len(self.edges[-1]) < 2: self.edges[-1].append(closest_node_ind) @@ -623,13 +660,17 @@ def redraw(self): if len(edge) > 1: x1, y1 = self.node_positions[edge[0]] x2, y2 = self.node_positions[edge[1]] - self.ax.plot([x1, x2], [y1, y2], color=self.node_color, linewidth=2) + self.ax.plot( + [x1, x2], [y1, y2], color=self.node_color, 
linewidth=2 + ) self.canvas.draw_idle() def remove_point(self, point): if len(self._nodes) > 0: - distance_to_nodes = np.linalg.norm(self.node_positions - point, axis=1) + distance_to_nodes = np.linalg.norm( + self.node_positions - point, axis=1 + ) closest_node_ind = np.argmin(distance_to_nodes) self._nodes.pop(closest_node_ind) @@ -686,16 +727,20 @@ def make(self, key): + 1 ) video_info = ( - VideoFile() & {"nwb_file_name": key["nwb_file_name"], "epoch": epoch} + VideoFile() + & {"nwb_file_name": key["nwb_file_name"], "epoch": epoch} ).fetch1() - io = pynwb.NWBHDF5IO("/stelmo/nwb/raw/" + video_info["nwb_file_name"], "r") + io = pynwb.NWBHDF5IO( + "/stelmo/nwb/raw/" + video_info["nwb_file_name"], "r" + ) nwb_file = io.read() nwb_video = nwb_file.objects[video_info["video_file_object_id"]] video_filename = nwb_video.external_file.value[0] nwb_base_filename = key["nwb_file_name"].replace(".nwb", "") output_video_filename = ( - f"{nwb_base_filename}_{epoch:02d}_" f'{key["position_info_param_name"]}.mp4' + f"{nwb_base_filename}_{epoch:02d}_" + f'{key["position_info_param_name"]}.mp4' ) centroids = { @@ -705,7 +750,9 @@ def make(self, key): head_position_mean = np.asarray( position_info_df[["head_position_x", "head_position_y"]] ) - head_orientation_mean = np.asarray(position_info_df[["head_orientation"]]) + head_orientation_mean = np.asarray( + position_info_df[["head_orientation"]] + ) video_time = np.asarray(nwb_video.timestamps) position_time = np.asarray(position_info_df.index) cm_per_pixel = nwb_video.device.meters_per_pixel * M_TO_CM @@ -731,6 +778,7 @@ def convert_to_pixels(data, frame_size, cm_to_pixels=1.0): data : ndarray, shape (n_time, 2) frame_size : array_like, shape (2,) cm_to_pixels : float + Returns ------- converted_data : ndarray, shape (n_time, 2) @@ -830,8 +878,14 @@ def make_video( ~np.isnan(head_orientation) ): arrow_tip = ( - int(head_position[0] + arrow_radius * np.cos(head_orientation)), - int(head_position[1] + arrow_radius * np.sin(head_orientation)), + int( + head_position[0] + + arrow_radius * np.cos(head_orientation) + ), + int( + head_position[1] + + arrow_radius * np.sin(head_orientation) + ), ) cv2.arrowedLine( img=frame, @@ -876,7 +930,7 @@ class SelectFromCollection: Note that this tool selects collection objects based on their *origins* (i.e., `offsets`). - Parameters + Attributes ---------- ax : `~matplotlib.axes.Axes` Axes to interact with. 
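The `NodePicker` hunks earlier in this file wrap, without changing, the nearest-node lookup used for click handling: the Euclidean distance from the clicked point to every node is computed and the argmin picks the node to connect or remove. A self-contained sketch of that step, with made-up coordinates:

import numpy as np

node_positions = np.array([[10.0, 5.0], [3.0, 8.0], [7.5, 2.0]])  # (n_nodes, 2)
point = (7.0, 3.0)                                                # clicked (x, y)

# distance from the click to every node; the closest node is selected
distance_to_nodes = np.linalg.norm(node_positions - point, axis=1)
closest_node_ind = int(np.argmin(distance_to_nodes))
print(closest_node_ind, distance_to_nodes[closest_node_ind])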
@@ -897,7 +951,12 @@ def __init__(self, ax, video_filename, alpha_other=0.3): color="white", linestyle="-", linewidth=2, alpha=0.5, zorder=10 ), markerprops=dict( - marker="o", markersize=7, mec="white", mfc="white", alpha=0.5, zorder=10 + marker="o", + markersize=7, + mec="white", + mfc="white", + alpha=0.5, + zorder=10, ), ) self.ind = [] @@ -958,11 +1017,15 @@ def get_graph(self): skeleton = skimage.morphology.skeletonize(mask) csr_graph, coordinates, _ = skeleton_to_csgraph(skeleton) nx_graph = nx.from_scipy_sparse_matrix(csr_graph) - node_positions = dict(zip(range(coordinates.shape[0]), coordinates[:, ::-1])) + node_positions = dict( + zip(range(coordinates.shape[0]), coordinates[:, ::-1]) + ) _clean_positions_dict(node_positions, nx_graph) node_ind = np.asarray(list(node_positions.keys())) temp_node_positions = np.full((node_ind.max() + 1, 2), np.nan) - temp_node_positions[node_ind] = np.asarray(list(node_positions.values())) + temp_node_positions[node_ind] = np.asarray( + list(node_positions.values()) + ) node_positions = temp_node_positions edges = list(nx_graph.edges) diff --git a/src/spyglass/common/common_region.py b/src/spyglass/common/common_region.py index 4326d16f3..b21e99cfa 100644 --- a/src/spyglass/common/common_region.py +++ b/src/spyglass/common/common_region.py @@ -17,7 +17,9 @@ class BrainRegion(dj.Lookup): # subregion_name='' and subsubregion_name='' will be necessary but that seems OK @classmethod - def fetch_add(cls, region_name, subregion_name=None, subsubregion_name=None): + def fetch_add( + cls, region_name, subregion_name=None, subsubregion_name=None + ): """Return the region ID for the given names, and if no match exists, first add it to the BrainRegion table. The combination of (region_name, subregion_name, subsubregion_name) is effectively unique, then. 
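`BrainRegion.fetch_add`, whose signature is wrapped above, resolves a (region_name, subregion_name, subsubregion_name) combination to a region ID, first inserting the row if no match exists. A hedged usage sketch (it assumes a configured DataJoint connection; the region names are examples only):

from spyglass.common.common_region import BrainRegion

# look up -- or insert, then look up -- a region and get its ID back;
# subregion_name and subsubregion_name default to None
region_id = BrainRegion.fetch_add(
    region_name="hippocampus",
    subregion_name="CA1",
)
print(region_id)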
diff --git a/src/spyglass/common/common_ripple.py b/src/spyglass/common/common_ripple.py index dce0c0479..431d374d8 100644 --- a/src/spyglass/common/common_ripple.py +++ b/src/spyglass/common/common_ripple.py @@ -20,9 +20,13 @@ } -def interpolate_to_new_time(df, new_time, upsampling_interpolation_method="linear"): +def interpolate_to_new_time( + df, new_time, upsampling_interpolation_method="linear" +): old_time = df.index - new_index = pd.Index(np.unique(np.concatenate((old_time, new_time))), name="time") + new_index = pd.Index( + np.unique(np.concatenate((old_time, new_time))), name="time" + ) return ( df.reindex(index=new_index) .interpolate(method=upsampling_interpolation_method) @@ -160,7 +164,9 @@ def make(self, key): # Insert into analysis nwb file nwb_analysis_file = AnalysisNwbfile() - key["analysis_file_name"] = nwb_analysis_file.create(key["nwb_file_name"]) + key["analysis_file_name"] = nwb_analysis_file.create( + key["nwb_file_name"] + ) key["ripple_times_object_id"] = nwb_analysis_file.add_nwb_object( analysis_file_name=key["analysis_file_name"], nwb_object=ripple_times, @@ -203,7 +209,9 @@ def get_ripple_lfps_and_position_info(key): lfp_key = key.copy() del lfp_key["interval_list_name"] ripple_lfp_nwb = (LFPBand & lfp_key).fetch_nwb()[0] - ripple_lfp_electrodes = ripple_lfp_nwb["filtered_data"].electrodes.data[:] + ripple_lfp_electrodes = ripple_lfp_nwb["filtered_data"].electrodes.data[ + : + ] elec_mask = np.full_like(ripple_lfp_electrodes, 0, dtype=bool) elec_mask[ [ @@ -214,7 +222,9 @@ def get_ripple_lfps_and_position_info(key): ] = True ripple_lfp = pd.DataFrame( ripple_lfp_nwb["filtered_data"].data, - index=pd.Index(ripple_lfp_nwb["filtered_data"].timestamps, name="time"), + index=pd.Index( + ripple_lfp_nwb["filtered_data"].timestamps, name="time" + ), ) sampling_frequency = ripple_lfp_nwb["lfp_band_sampling_rate"] @@ -222,7 +232,10 @@ def get_ripple_lfps_and_position_info(key): position_valid_times = ( IntervalList - & {"nwb_file_name": nwb_file_name, "interval_list_name": interval_list_name} + & { + "nwb_file_name": nwb_file_name, + "interval_list_name": interval_list_name, + } ).fetch1("valid_times") position_info = ( @@ -271,7 +284,9 @@ def get_Kay_ripple_consensus_trace( ) ripple_consensus_trace = np.sum(ripple_consensus_trace**2, axis=1) ripple_consensus_trace[not_null] = gaussian_smooth( - ripple_consensus_trace[not_null], smoothing_sigma, sampling_frequency + ripple_consensus_trace[not_null], + smoothing_sigma, + sampling_frequency, ) return pd.DataFrame( np.sqrt(ripple_consensus_trace), index=ripple_filtered_lfps.index @@ -305,7 +320,9 @@ def plot_ripple_consensus_trace( color="lightgrey", ) ax.set_xlabel("Time [s]") - ax.set_xlim((time_slice.start - start_offset, time_slice.stop - start_offset)) + ax.set_xlim( + (time_slice.start - start_offset, time_slice.stop - start_offset) + ) @staticmethod def plot_ripple( @@ -337,6 +354,8 @@ def plot_ripple( color="lightgrey", ) ax.set_ylim((-1, n_lfps)) - ax.set_xlim((time_slice.start - start_offset, time_slice.stop - start_offset)) + ax.set_xlim( + (time_slice.start - start_offset, time_slice.stop - start_offset) + ) ax.set_ylabel("LFPs") ax.set_xlabel("Time [s]") diff --git a/src/spyglass/common/common_sensors.py b/src/spyglass/common/common_sensors.py index 7ca2993d5..2507abb82 100644 --- a/src/spyglass/common/common_sensors.py +++ b/src/spyglass/common/common_sensors.py @@ -27,16 +27,18 @@ def make(self, key): nwb_file_abspath = Nwbfile().get_abs_path(nwb_file_name) nwbf = get_nwb_file(nwb_file_abspath) - sensor = 
get_data_interface(nwbf, "analog", pynwb.behavior.BehavioralEvents) + sensor = get_data_interface( + nwbf, "analog", pynwb.behavior.BehavioralEvents + ) if sensor is None: print(f"No conforming sensor data found in {nwb_file_name}\n") return key["sensor_data_object_id"] = sensor.time_series["analog"].object_id # the valid times for these data are the same as the valid times for the raw ephys data - key["interval_list_name"] = (Raw & {"nwb_file_name": nwb_file_name}).fetch1( - "interval_list_name" - ) + key["interval_list_name"] = ( + Raw & {"nwb_file_name": nwb_file_name} + ).fetch1("interval_list_name") self.insert1(key) def fetch_nwb(self, *attrs, **kwargs): diff --git a/src/spyglass/common/common_session.py b/src/spyglass/common/common_session.py index 031103630..4054de52e 100644 --- a/src/spyglass/common/common_session.py +++ b/src/spyglass/common/common_session.py @@ -128,7 +128,9 @@ def make(self, key): def _add_data_acquisition_device_part(self, nwb_file_name, nwbf, config): # get device names from both the NWB file and the associated config file - device_names, _, _ = DataAcquisitionDevice.get_all_device_names(nwbf, config) + device_names, _, _ = DataAcquisitionDevice.get_all_device_names( + nwbf, config + ) for device_name in device_names: # ensure that the foreign key exists and do nothing if not @@ -202,10 +204,16 @@ def update_session_group_description( @staticmethod def add_session_to_group( - nwb_file_name: str, session_group_name: str, *, skip_duplicates: bool = False + nwb_file_name: str, + session_group_name: str, + *, + skip_duplicates: bool = False, ): SessionGroupSession.insert1( - {"session_group_name": session_group_name, "nwb_file_name": nwb_file_name}, + { + "session_group_name": session_group_name, + "nwb_file_name": nwb_file_name, + }, skip_duplicates=skip_duplicates, ) @@ -227,7 +235,9 @@ def get_group_sessions(session_group_name: str): results = ( SessionGroupSession & {"session_group_name": session_group_name} ).fetch(as_dict=True) - return [{"nwb_file_name": result["nwb_file_name"]} for result in results] + return [ + {"nwb_file_name": result["nwb_file_name"]} for result in results + ] @staticmethod def create_spyglass_view(session_group_name: str): diff --git a/src/spyglass/common/populate_all_common.py b/src/spyglass/common/populate_all_common.py index 5c20ce1b3..da563ab23 100644 --- a/src/spyglass/common/populate_all_common.py +++ b/src/spyglass/common/populate_all_common.py @@ -1,4 +1,9 @@ -from .common_behav import PositionSource, RawPosition, StateScriptFile, VideoFile +from .common_behav import ( + PositionSource, + RawPosition, + StateScriptFile, + VideoFile, +) from .common_dio import DIOEvents from .common_ephys import Electrode, ElectrodeGroup, Raw, SampleCount from .common_nwbfile import Nwbfile diff --git a/src/spyglass/common/prepopulate/prepopulate.py b/src/spyglass/common/prepopulate/prepopulate.py index 3f168e525..62b292872 100644 --- a/src/spyglass/common/prepopulate/prepopulate.py +++ b/src/spyglass/common/prepopulate/prepopulate.py @@ -47,7 +47,9 @@ def populate_from_yaml(yaml_path: str): continue primary_key_values = { - k: v for k, v in entry_dict.items() if k in table_cls.primary_key + k: v + for k, v in entry_dict.items() + if k in table_cls.primary_key } if not primary_key_values: print( @@ -72,7 +74,9 @@ def _get_table_cls(table_name): if "." 
in table_name: # part table master_table_name = table_name[0 : table_name.index(".")] part_table_name = table_name[table_name.index(".") + 1 :] - master_table_cls = getattr(sys.modules["spyglass.common"], master_table_name) + master_table_cls = getattr( + sys.modules["spyglass.common"], master_table_name + ) part_table_cls = getattr(master_table_cls, part_table_name) return part_table_cls else: diff --git a/src/spyglass/data_import/storage_dirs.py b/src/spyglass/data_import/storage_dirs.py index bd7e8df3a..0a1d4da9c 100644 --- a/src/spyglass/data_import/storage_dirs.py +++ b/src/spyglass/data_import/storage_dirs.py @@ -11,8 +11,10 @@ def check_env(): def base_dir(): """Get the base directory from $SPYGLASS_BASE_DIR - Returns: - str: The base directory + Returns + ------- + str + base directory """ p = os.getenv("SPYGLASS_BASE_DIR", None) assert ( diff --git a/src/spyglass/decoding/clusterless.py b/src/spyglass/decoding/clusterless.py index d5a2b858c..32f21bd1d 100644 --- a/src/spyglass/decoding/clusterless.py +++ b/src/spyglass/decoding/clusterless.py @@ -26,8 +26,12 @@ _DEFAULT_CONTINUOUS_TRANSITIONS, _DEFAULT_ENVIRONMENT, ) -from replay_trajectory_classification.discrete_state_transitions import DiagonalDiscrete -from replay_trajectory_classification.initial_conditions import UniformInitialConditions +from replay_trajectory_classification.discrete_state_transitions import ( + DiagonalDiscrete, +) +from replay_trajectory_classification.initial_conditions import ( + UniformInitialConditions, +) from ripple_detection import ( get_multiunit_population_firing_rate, @@ -130,7 +134,9 @@ def make(self, key): # check that the mark type is supported if not MarkParameters().supported_mark_type(mark_param["mark_type"]): - Warning(f'Mark type {mark_param["mark_type"]} not supported; skipping') + Warning( + f'Mark type {mark_param["mark_type"]} not supported; skipping' + ) return # retrieve the units from the NWB file @@ -145,7 +151,8 @@ def make(self, key): f'{key["curation_id"]}_clusterless_waveforms' ) waveform_extractor_path = str( - Path(os.environ["SPYGLASS_WAVEFORMS_DIR"]) / Path(waveform_extractor_name) + Path(os.environ["SPYGLASS_WAVEFORMS_DIR"]) + / Path(waveform_extractor_name) ) if os.path.exists(waveform_extractor_path): shutil.rmtree(waveform_extractor_path) @@ -199,7 +206,9 @@ def make(self, key): ) # create a new AnalysisNwbfile and a timeseries for the marks and save - key["analysis_file_name"] = AnalysisNwbfile().create(key["nwb_file_name"]) + key["analysis_file_name"] = AnalysisNwbfile().create( + key["nwb_file_name"] + ) nwb_object = pynwb.TimeSeries( name="marks", data=marks, @@ -236,7 +245,9 @@ def _convert_to_dataframe(nwb_data): ) @staticmethod - def _get_peak_amplitude(waveform, peak_sign="neg", estimate_peak_time=False): + def _get_peak_amplitude( + waveform, peak_sign="neg", estimate_peak_time=False + ): """Returns the amplitudes of all channels at the time of the peak amplitude across channels. 
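The `_get_peak_amplitude` docstring above describes reading out every channel at the single time index where the across-channel peak occurs. One way to read that, as a toy sketch for peak_sign="neg" (the actual method also supports positive peaks and optional peak-time estimation, which are omitted here):

import numpy as np

# toy spike waveform: rows are time samples, columns are channels
waveform = np.array(
    [[ 0.1, -0.2],
     [ 0.4, -1.5],   # the most negative value across channels occurs here
     [ 0.2, -0.3]]
)

# for a negative peak, take the time index of the global minimum,
# then report every channel's value at that one time point
peak_time_ind = np.unravel_index(np.argmin(waveform), waveform.shape)[0]
amplitudes = waveform[peak_time_ind]   # shape: (n_channels,)
print(peak_time_ind, amplitudes)       # 1 [ 0.4 -1.5]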
@@ -291,7 +302,9 @@ def _threshold(timestamps, marks, mark_param_dict): elif mark_param_dict["peak_sign"] == "pos": include = np.max(marks, axis=1) >= mark_param_dict["threshold"] elif mark_param_dict["peak_sign"] == "both": - include = np.max(np.abs(marks), axis=1) >= mark_param_dict["threshold"] + include = ( + np.max(np.abs(marks), axis=1) >= mark_param_dict["threshold"] + ) return timestamps[include], marks[include] @@ -327,7 +340,9 @@ def make(self, key): # TODO: intersection of sort interval and interval list interval_times = (IntervalList & key).fetch1("valid_times") - sampling_rate = (UnitMarksIndicatorSelection & key).fetch("sampling_rate") + sampling_rate = (UnitMarksIndicatorSelection & key).fetch( + "sampling_rate" + ) marks_df = (UnitMarks & key).fetch1_dataframe() @@ -344,7 +359,9 @@ def make(self, key): # Insert into analysis nwb file nwb_analysis_file = AnalysisNwbfile() - key["analysis_file_name"] = nwb_analysis_file.create(key["nwb_file_name"]) + key["analysis_file_name"] = nwb_analysis_file.create( + key["nwb_file_name"] + ) key["marks_indicator_object_id"] = nwb_analysis_file.add_nwb_object( analysis_file_name=key["analysis_file_name"], @@ -395,11 +412,15 @@ def plot_all_marks(marks_indicators: xr.DataArray, plot_size=5, s=10): for ax_ind2, feature2 in enumerate(marks.marks): try: axes[ax_ind1, ax_ind2].scatter( - marks.sel(marks=feature1), marks.sel(marks=feature2), s=s + marks.sel(marks=feature1), + marks.sel(marks=feature2), + s=s, ) except TypeError: axes.scatter( - marks.sel(marks=feature1), marks.sel(marks=feature2), s=s + marks.sel(marks=feature1), + marks.sel(marks=feature2), + s=s, ) def fetch_nwb(self, *attrs, **kwargs): @@ -411,7 +432,10 @@ def fetch1_dataframe(self): return self.fetch_dataframe()[0] def fetch_dataframe(self): - return [data["marks_indicator"].set_index("time") for data in self.fetch_nwb()] + return [ + data["marks_indicator"].set_index("time") + for data in self.fetch_nwb() + ] def fetch_xarray(self): # sort_group_electrodes = ( @@ -422,7 +446,10 @@ def fetch_xarray(self): marks_indicators = ( xr.concat( - [df.to_xarray().to_array("marks") for df in self.fetch_dataframe()], + [ + df.to_xarray().to_array("marks") + for df in self.fetch_dataframe() + ], dim="electrodes", ) .transpose("time", "marks", "electrodes") @@ -435,7 +462,9 @@ def reformat_name(name): mark_type, number = name.split("_") return f"{mark_type}_{int(number):04d}" - new_mark_names = [reformat_name(name) for name in marks_indicators.marks.values] + new_mark_names = [ + reformat_name(name) for name in marks_indicators.marks.values + ] return marks_indicators.assign_coords({"marks": new_mark_names}).sortby( ["electrodes", "marks"] @@ -553,7 +582,9 @@ class MultiunitFiringRate(dj.Computed): def make(self, key): marks = (UnitMarksIndicator & key).fetch_xarray() - multiunit_spikes = (np.any(~np.isnan(marks.values), axis=1)).astype(float) + multiunit_spikes = (np.any(~np.isnan(marks.values), axis=1)).astype( + float + ) multiunit_firing_rate = pd.DataFrame( get_multiunit_population_firing_rate( multiunit_spikes, key["sampling_rate"] @@ -564,9 +595,13 @@ def make(self, key): # Insert into analysis nwb file nwb_analysis_file = AnalysisNwbfile() - key["analysis_file_name"] = nwb_analysis_file.create(key["nwb_file_name"]) + key["analysis_file_name"] = nwb_analysis_file.create( + key["nwb_file_name"] + ) - key["multiunit_firing_rate_object_id"] = nwb_analysis_file.add_nwb_object( + key[ + "multiunit_firing_rate_object_id" + ] = nwb_analysis_file.add_nwb_object( 
analysis_file_name=key["analysis_file_name"], nwb_object=multiunit_firing_rate.reset_index(), ) @@ -588,7 +623,8 @@ def fetch1_dataframe(self): def fetch_dataframe(self): return [ - data["multiunit_firing_rate"].set_index("time") for data in self.fetch_nwb() + data["multiunit_firing_rate"].set_index("time") + for data in self.fetch_nwb() ] @@ -631,7 +667,9 @@ class MultiunitHighSynchronyEvents(dj.Computed): def make(self, key): marks = (UnitMarksIndicator & key).fetch_xarray() - multiunit_spikes = (np.any(~np.isnan(marks.values), axis=1)).astype(float) + multiunit_spikes = (np.any(~np.isnan(marks.values), axis=1)).astype( + float + ) position_info = (IntervalPositionInfo() & key).fetch1_dataframe() params = (MultiunitHighSynchronyEventsParameters & key).fetch1() @@ -646,7 +684,9 @@ def make(self, key): # Insert into analysis nwb file nwb_analysis_file = AnalysisNwbfile() - key["analysis_file_name"] = nwb_analysis_file.create(key["nwb_file_name"]) + key["analysis_file_name"] = nwb_analysis_file.create( + key["nwb_file_name"] + ) key["multiunit_hse_times_object_id"] = nwb_analysis_file.add_nwb_object( analysis_file_name=key["analysis_file_name"], @@ -684,13 +724,17 @@ def get_decoding_data_for_epoch( """ - valid_ephys_position_times_by_epoch = get_valid_ephys_position_times_by_epoch( - nwb_file_name + valid_ephys_position_times_by_epoch = ( + get_valid_ephys_position_times_by_epoch(nwb_file_name) ) - valid_ephys_position_times = valid_ephys_position_times_by_epoch[interval_list_name] - valid_slices = convert_valid_times_to_slice(valid_ephys_position_times) - position_interval_name = convert_epoch_interval_name_to_position_interval_name( + valid_ephys_position_times = valid_ephys_position_times_by_epoch[ interval_list_name + ] + valid_slices = convert_valid_times_to_slice(valid_ephys_position_times) + position_interval_name = ( + convert_epoch_interval_name_to_position_interval_name( + interval_list_name + ) ) position_info = ( @@ -702,7 +746,9 @@ def get_decoding_data_for_epoch( } ).fetch1_dataframe() - position_info = pd.concat([position_info.loc[times] for times in valid_slices]) + position_info = pd.concat( + [position_info.loc[times] for times in valid_slices] + ) marks = ( ( @@ -715,7 +761,9 @@ def get_decoding_data_for_epoch( ) ).fetch_xarray() - marks = xr.concat([marks.sel(time=times) for times in valid_slices], dim="time") + marks = xr.concat( + [marks.sel(time=times) for times in valid_slices], dim="time" + ) return position_info, marks, valid_slices @@ -763,7 +811,8 @@ def get_data_for_multiple_epochs( position_info = pd.concat(position_info, axis=0) marks = xr.concat(marks, dim="time") valid_slices = { - epoch: valid_slice for epoch, valid_slice in zip(epoch_names, valid_slices) + epoch: valid_slice + for epoch, valid_slice in zip(epoch_names, valid_slices) } assert position_info.shape[0] == marks.shape[0] @@ -839,5 +888,7 @@ def populate_mark_indicators( .loc[:, marks_selection.primary_key] .to_dict("records") ) - UnitMarksIndicatorSelection.insert(marks_selection, skip_duplicates=True) + UnitMarksIndicatorSelection.insert( + marks_selection, skip_duplicates=True + ) UnitMarksIndicator.populate(marks_selection) diff --git a/src/spyglass/decoding/core.py b/src/spyglass/decoding/core.py index e19da2cc9..39ceea62f 100644 --- a/src/spyglass/decoding/core.py +++ b/src/spyglass/decoding/core.py @@ -1,7 +1,10 @@ import numpy as np import pandas as pd from spyglass.common.common_behav import RawPosition -from spyglass.common.common_interval import IntervalList, interval_list_intersect 
+from spyglass.common.common_interval import ( + IntervalList, + interval_list_intersect, +) from replay_trajectory_classification.observation_model import ObservationModel from replay_trajectory_classification.continuous_state_transitions import ( RandomWalk, @@ -28,7 +31,10 @@ def get_valid_ephys_position_times_from_interval( """ interval_valid_times = ( IntervalList - & {"nwb_file_name": nwb_file_name, "interval_list_name": interval_list_name} + & { + "nwb_file_name": nwb_file_name, + "interval_list_name": interval_list_name, + } ).fetch1("valid_times") position_interval_names = ( @@ -39,20 +45,29 @@ def get_valid_ephys_position_times_from_interval( ).fetch("interval_list_name") position_interval_names = position_interval_names[ np.argsort( - [int(name.strip("pos valid time")) for name in position_interval_names] + [ + int(name.strip("pos valid time")) + for name in position_interval_names + ] ) ] valid_pos_times = [ ( IntervalList - & {"nwb_file_name": nwb_file_name, "interval_list_name": pos_interval_name} + & { + "nwb_file_name": nwb_file_name, + "interval_list_name": pos_interval_name, + } ).fetch1("valid_times") for pos_interval_name in position_interval_names ] valid_ephys_times = ( IntervalList - & {"nwb_file_name": nwb_file_name, "interval_list_name": "raw data valid times"} + & { + "nwb_file_name": nwb_file_name, + "interval_list_name": "raw data valid times", + } ).fetch1("valid_times") return interval_list_intersect( @@ -73,7 +88,9 @@ def get_epoch_interval_names(nwb_file_name: str) -> list[str]: epoch_names : list[str] List of interval names that are epochs. """ - interval_list = pd.DataFrame(IntervalList() & {"nwb_file_name": nwb_file_name}) + interval_list = pd.DataFrame( + IntervalList() & {"nwb_file_name": nwb_file_name} + ) interval_list = interval_list.loc[ interval_list.interval_list_name.str.contains( @@ -100,7 +117,9 @@ def get_valid_ephys_position_times_by_epoch( """ return { - epoch: get_valid_ephys_position_times_from_interval(epoch, nwb_file_name) + epoch: get_valid_ephys_position_times_from_interval( + epoch, nwb_file_name + ) for epoch in get_epoch_interval_names(nwb_file_name) } diff --git a/src/spyglass/decoding/dj_decoder_conversion.py b/src/spyglass/decoding/dj_decoder_conversion.py index 411e4e25a..804ca0ef7 100644 --- a/src/spyglass/decoding/dj_decoder_conversion.py +++ b/src/spyglass/decoding/dj_decoder_conversion.py @@ -53,7 +53,9 @@ def _convert_env_dict(env_params: dict) -> Environment: environment : Environment """ if env_params["track_graph"] is not None: - env_params["track_graph"] = make_track_graph(**env_params["track_graph"]) + env_params["track_graph"] = make_track_graph( + **env_params["track_graph"] + ) return Environment(**env_params) @@ -66,7 +68,9 @@ def _to_dict(transition: object) -> dict: return parameters -def _convert_transitions_to_dict(transitions: list[list[object]]) -> list[list[dict]]: +def _convert_transitions_to_dict( + transitions: list[list[object]], +) -> list[list[dict]]: """Converts a list of lists of transition classes into a list of lists of dictionaries""" return [ [_to_dict(transition) for transition in transition_rows] @@ -97,19 +101,27 @@ def restore_classes(params: dict) -> dict: } params["classifier_params"]["continuous_transition_types"] = [ - [_convert_dict_to_class(st, continuous_state_transition_types) for st in sts] + [ + _convert_dict_to_class(st, continuous_state_transition_types) + for st in sts + ] for sts in params["classifier_params"]["continuous_transition_types"] ] 
params["classifier_params"]["environments"] = [ _convert_env_dict(env_params) for env_params in params["classifier_params"]["environments"] ] - params["classifier_params"]["discrete_transition_type"] = _convert_dict_to_class( + params["classifier_params"][ + "discrete_transition_type" + ] = _convert_dict_to_class( params["classifier_params"]["discrete_transition_type"], discrete_state_transition_types, ) - params["classifier_params"]["initial_conditions_type"] = _convert_dict_to_class( - params["classifier_params"]["initial_conditions_type"], initial_conditions_types + params["classifier_params"][ + "initial_conditions_type" + ] = _convert_dict_to_class( + params["classifier_params"]["initial_conditions_type"], + initial_conditions_types, ) if params["classifier_params"]["observation_models"] is not None: @@ -137,7 +149,9 @@ def _convert_environment_to_dict(env: Environment) -> dict: if env.track_graph is not None: track_graph = env.track_graph env.track_graph = { - "node_positions": [v["pos"] for v in dict(track_graph.nodes).values()], + "node_positions": [ + v["pos"] for v in dict(track_graph.nodes).values() + ], "edges": list(track_graph.edges), } @@ -153,7 +167,9 @@ def convert_classes_to_dict(key: dict) -> dict: ] except TypeError: key["classifier_params"]["environments"] = [ - _convert_environment_to_dict(key["classifier_params"]["environments"]) + _convert_environment_to_dict( + key["classifier_params"]["environments"] + ) ] key["classifier_params"][ "continuous_transition_types" diff --git a/src/spyglass/decoding/sorted_spikes.py b/src/spyglass/decoding/sorted_spikes.py index bc368fcd0..e418a2330 100644 --- a/src/spyglass/decoding/sorted_spikes.py +++ b/src/spyglass/decoding/sorted_spikes.py @@ -18,8 +18,12 @@ _DEFAULT_ENVIRONMENT, _DEFAULT_SORTED_SPIKES_MODEL_KWARGS, ) -from replay_trajectory_classification.discrete_state_transitions import DiagonalDiscrete -from replay_trajectory_classification.initial_conditions import UniformInitialConditions +from replay_trajectory_classification.discrete_state_transitions import ( + DiagonalDiscrete, +) +from replay_trajectory_classification.initial_conditions import ( + UniformInitialConditions, +) from spyglass.common.common_interval import IntervalList from spyglass.common.common_nwbfile import AnalysisNwbfile from spyglass.common.common_position import IntervalPositionInfo @@ -71,7 +75,9 @@ def make(self, key): # TODO: intersection of sort interval and interval list interval_times = (IntervalList & key).fetch1("valid_times") - sampling_rate = (SortedSpikesIndicatorSelection & key).fetch("sampling_rate") + sampling_rate = (SortedSpikesIndicatorSelection & key).fetch( + "sampling_rate" + ) time = self.get_time_bins_from_interval(interval_times, sampling_rate) @@ -79,7 +85,8 @@ def make(self, key): # restrict to cases with units spikes_nwb = [entry for entry in spikes_nwb if "units" in entry] spike_times_list = [ - np.asarray(n_trode["units"]["spike_times"]) for n_trode in spikes_nwb + np.asarray(n_trode["units"]["spike_times"]) + for n_trode in spikes_nwb ] if len(spike_times_list) > 0: # if units spikes = np.concatenate(spike_times_list) @@ -92,7 +99,8 @@ def make(self, key): ] spike_indicator.append( np.bincount( - np.digitize(spike_times, time[1:-1]), minlength=time.shape[0] + np.digitize(spike_times, time[1:-1]), + minlength=time.shape[0], ) ) @@ -113,7 +121,9 @@ def make(self, key): # Insert into analysis nwb file nwb_analysis_file = AnalysisNwbfile() - key["analysis_file_name"] = nwb_analysis_file.create(key["nwb_file_name"]) + 
key["analysis_file_name"] = nwb_analysis_file.create( + key["nwb_file_name"] + ) key["spike_indicator_object_id"] = nwb_analysis_file.add_nwb_object( analysis_file_name=key["analysis_file_name"], @@ -145,7 +155,10 @@ def fetch1_dataframe(self): def fetch_dataframe(self): return pd.concat( - [data["spike_indicator"].set_index("time") for data in self.fetch_nwb()], + [ + data["spike_indicator"].set_index("time") + for data in self.fetch_nwb() + ], axis=1, ) @@ -271,13 +284,17 @@ def get_spike_indicator( for n_trode in spikes_nwb_table.fetch_nwb(): try: - for unit_id, unit_spike_times in n_trode["units"]["spike_times"].items(): + for unit_id, unit_spike_times in n_trode["units"][ + "spike_times" + ].items(): unit_spike_times = unit_spike_times[ - (unit_spike_times > time[0]) & (unit_spike_times <= time[-1]) + (unit_spike_times > time[0]) + & (unit_spike_times <= time[-1]) ] unit_name = f'{n_trode["sort_group_id"]:04d}_{unit_id:04d}' spike_indicator[unit_name] = np.bincount( - np.digitize(unit_spike_times, time[1:-1]), minlength=time.shape[0] + np.digitize(unit_spike_times, time[1:-1]), + minlength=time.shape[0], ) except KeyError: pass @@ -311,26 +328,34 @@ def get_decoding_data_for_epoch( """ # valid slices - valid_ephys_position_times_by_epoch = get_valid_ephys_position_times_by_epoch( - nwb_file_name + valid_ephys_position_times_by_epoch = ( + get_valid_ephys_position_times_by_epoch(nwb_file_name) ) - valid_ephys_position_times = valid_ephys_position_times_by_epoch[interval_list_name] + valid_ephys_position_times = valid_ephys_position_times_by_epoch[ + interval_list_name + ] valid_slices = convert_valid_times_to_slice(valid_ephys_position_times) # position interval - position_interval_name = convert_epoch_interval_name_to_position_interval_name( - interval_list_name + position_interval_name = ( + convert_epoch_interval_name_to_position_interval_name( + interval_list_name + ) ) # spikes - valid_times = np.asarray([(times.start, times.stop) for times in valid_slices]) + valid_times = np.asarray( + [(times.start, times.stop) for times in valid_slices] + ) curated_spikes_key = { "nwb_file_name": nwb_file_name, **additional_spike_keys, } spikes = get_spike_indicator( - curated_spikes_key, (valid_times.min(), valid_times.max()), sampling_rate=500 + curated_spikes_key, + (valid_times.min(), valid_times.max()), + sampling_rate=500, ) spikes = pd.concat([spikes.loc[times] for times in valid_slices]) @@ -402,11 +427,20 @@ def get_data_for_multiple_epochs( position_info = pd.concat(position_info, axis=0) spikes = pd.concat(spikes, axis=0) valid_slices = { - epoch: valid_slice for epoch, valid_slice in zip(epoch_names, valid_slices) + epoch: valid_slice + for epoch, valid_slice in zip(epoch_names, valid_slices) } assert position_info.shape[0] == spikes.shape[0] - sort_group_ids = np.asarray([int(col.split("_")[0]) for col in spikes.columns]) + sort_group_ids = np.asarray( + [int(col.split("_")[0]) for col in spikes.columns] + ) - return position_info, spikes, valid_slices, environment_labels, sort_group_ids + return ( + position_info, + spikes, + valid_slices, + environment_labels, + sort_group_ids, + ) diff --git a/src/spyglass/decoding/visualization.py b/src/spyglass/decoding/visualization.py index 927c5c77b..2afb065a9 100644 --- a/src/spyglass/decoding/visualization.py +++ b/src/spyglass/decoding/visualization.py @@ -32,7 +32,9 @@ def make_single_environment_movie( else: multiunit_spikes = np.asarray(marks, dtype=float) multiunit_firing_rate = pd.DataFrame( - 
get_multiunit_population_firing_rate(multiunit_spikes, sampling_frequency), + get_multiunit_population_firing_rate( + multiunit_spikes, sampling_frequency + ), index=position_info.index, columns=["firing_rate"], ) @@ -65,7 +67,9 @@ def make_single_environment_movie( window_ind = np.arange(window_size) - window_size // 2 rate = multiunit_firing_rate.iloc[ - slice(time_slice.start + window_ind[0], time_slice.stop + window_ind[-1]) + slice( + time_slice.start + window_ind[0], time_slice.stop + window_ind[-1] + ) ] with plt.style.context("dark_background"): @@ -150,7 +154,10 @@ def make_single_environment_movie( ) axes[1].set_ylim((0.0, np.asarray(rate.max()))) axes[1].set_xlim( - (window_ind[0] / sampling_frequency, window_ind[-1] / sampling_frequency) + ( + window_ind[0] / sampling_frequency, + window_ind[-1] / sampling_frequency, + ) ) axes[1].set_xlabel("Time [s]") axes[1].set_ylabel("Multiunit\n[spikes/s]") @@ -181,16 +188,24 @@ def _update_plot(time_ind): ) map_dot.set_offsets(map_position[time_ind]) - map_line.set_data(map_position[time_slice, 0], map_position[time_slice, 1]) + map_line.set_data( + map_position[time_slice, 0], map_position[time_slice, 1] + ) - mesh.set_array(posterior.isel(time=time_ind).values.ravel(order="F")) + mesh.set_array( + posterior.isel(time=time_ind).values.ravel(order="F") + ) - title.set_text(f"time = {posterior.isel(time=time_ind).time.values:0.2f}") + title.set_text( + f"time = {posterior.isel(time=time_ind).time.values:0.2f}" + ) try: multiunit_firing_line.set_data( window_ind / sampling_frequency, - np.asarray(rate.iloc[time_ind + (window_size // 2) + window_ind]), + np.asarray( + rate.iloc[time_ind + (window_size // 2) + window_ind] + ), ) except IndexError: pass @@ -216,7 +231,9 @@ def _update_plot(time_ind): return fig, movie -def setup_subplots(classifier, window_ind=None, rate=None, sampling_frequency=None): +def setup_subplots( + classifier, window_ind=None, rate=None, sampling_frequency=None +): env_names = [env.environment_name for env in classifier.environments] mosaic = [] @@ -263,7 +280,10 @@ def setup_subplots(classifier, window_ind=None, rate=None, sampling_frequency=No if window_ind is not None and sampling_frequency is not None: ax.set_xlim( - (window_ind[0] / sampling_frequency, window_ind[-1] / sampling_frequency) + ( + window_ind[0] / sampling_frequency, + window_ind[-1] / sampling_frequency, + ) ) sns.despine(ax=ax) @@ -290,7 +310,9 @@ def make_multi_environment_movie( writer = Writer(fps=fps, bitrate=-1) # Set up neural data - probability = results.isel(time=time_slice).acausal_posterior.sum("position") + probability = results.isel(time=time_slice).acausal_posterior.sum( + "position" + ) most_prob_env = probability.idxmax("state") env_names = [env.environment_name for env in classifier.environments] @@ -316,7 +338,9 @@ def make_multi_environment_movie( multiunit_spikes = np.asarray(marks, dtype=float) multiunit_firing_rate = pd.DataFrame( - get_multiunit_population_firing_rate(multiunit_spikes, sampling_frequency), + get_multiunit_population_firing_rate( + multiunit_spikes, sampling_frequency + ), index=position_info.index, columns=["firing_rate"], ) @@ -325,7 +349,9 @@ def make_multi_environment_movie( window_ind = np.arange(window_size) - window_size // 2 rate = multiunit_firing_rate.iloc[ - slice(time_slice.start + window_ind[0], time_slice.stop + window_ind[-1]) + slice( + time_slice.start + window_ind[0], time_slice.stop + window_ind[-1] + ) ] # Set up behavioral data @@ -399,10 +425,14 @@ def _update_plot(time_ind): for 
env_name, mesh in meshes.items(): posterior = ( - env_posteriors[env_name].isel(time=time_ind).values.ravel(order="F") + env_posteriors[env_name] + .isel(time=time_ind) + .values.ravel(order="F") ) mesh.set_array(posterior) - prob = float(probability.isel(time=time_ind).sel(state=env_name)) + prob = float( + probability.isel(time=time_ind).sel(state=env_name) + ) titles[env_name].set_text( f"environment = {env_name}\nprob. = {prob:0.2f}" ) @@ -419,7 +449,9 @@ def _update_plot(time_ind): multiunit_firing_line.set_data( window_ind / sampling_frequency, - np.asarray(rate.iloc[time_ind + (window_size // 2) + window_ind]), + np.asarray( + rate.iloc[time_ind + (window_size // 2) + window_ind] + ), ) progress_bar.update() @@ -504,7 +536,9 @@ def create_interactive_1D_decoding_figurl( ) vertical_panel_content = [ vv.LayoutItem(decode_view, stretch=3, title="Decode"), - vv.LayoutItem(probability_view, stretch=1, title="Probability of State"), + vv.LayoutItem( + probability_view, stretch=1, title="Probability of State" + ), vv.LayoutItem(speed_view, stretch=1, title="Speed"), vv.LayoutItem(multiunit_firing_rate_view, stretch=1, title="Multiunit"), ] @@ -600,7 +634,9 @@ def create_interactive_2D_decoding_figurl( ] vertical_panel2_content = [ - vv.LayoutItem(probability_view, stretch=1, title="Probability of State"), + vv.LayoutItem( + probability_view, stretch=1, title="Probability of State" + ), vv.LayoutItem(speed_view, stretch=1, title="Speed"), vv.LayoutItem(multiunit_firing_rate_view, stretch=1, title="Multiunit"), ] diff --git a/src/spyglass/decoding/visualization_1D_view.py b/src/spyglass/decoding/visualization_1D_view.py index e0e7e7578..43981c9a4 100644 --- a/src/spyglass/decoding/visualization_1D_view.py +++ b/src/spyglass/decoding/visualization_1D_view.py @@ -61,7 +61,9 @@ def create_1D_decode_view( linear_position = np.asarray(linear_position).squeeze() trimmed_posterior = discretize_and_trim(posterior) - observations_per_time = get_observations_per_time(trimmed_posterior, posterior) + observations_per_time = get_observations_per_time( + trimmed_posterior, posterior + ) sampling_freq = get_sampling_freq(posterior.time) start_time_sec = posterior.time.values[0] if ref_time_sec is not None: diff --git a/src/spyglass/decoding/visualization_2D_view.py b/src/spyglass/decoding/visualization_2D_view.py index 8a2783ffc..4c9a6bbec 100644 --- a/src/spyglass/decoding/visualization_2D_view.py +++ b/src/spyglass/decoding/visualization_2D_view.py @@ -3,7 +3,10 @@ import numpy as np import sortingview.views.franklab as vvf import xarray as xr -from replay_trajectory_classification.environments import get_grid, get_track_interior +from replay_trajectory_classification.environments import ( + get_grid, + get_track_interior, +) def create_static_track_animation( @@ -56,10 +59,12 @@ def get_base_track_information(base_probabilities: xr.Dataset): x_min = np.min(base_probabilities.x_position).item() y_min = np.min(base_probabilities.y_position).item() x_width = round( - (np.max(base_probabilities.x_position).item() - x_min) / (x_count - 1), 6 + (np.max(base_probabilities.x_position).item() - x_min) / (x_count - 1), + 6, ) y_width = round( - (np.max(base_probabilities.y_position).item() - y_min) / (y_count - 1), 6 + (np.max(base_probabilities.y_position).item() - y_min) / (y_count - 1), + 6, ) return (x_count, x_min, x_width, y_count, y_min, y_width) @@ -77,7 +82,9 @@ def memo_linearize( (_, y, x) = t my_tuple = (x, y) if my_tuple not in location_lookup: - lin = x_count * round((y - y_min) / y_width) + 
round((x - x_min) / x_width) + lin = x_count * round((y - y_min) / y_width) + round( + (x - x_min) / x_width + ) location_lookup[my_tuple] = lin return location_lookup[my_tuple] @@ -123,7 +130,9 @@ def get_observations_per_frame(i_trim: xr.DataArray, base_slice: xr.DataArray): (times, time_counts_np) = np.unique(i_trim.time.values, return_counts=True) time_counts = xr.DataArray(time_counts_np, coords={"time": times}) raw_times = base_slice.time - (_, good_counts) = xr.align(raw_times, time_counts, join="left", fill_value=0) + (_, good_counts) = xr.align( + raw_times, time_counts, join="left", fill_value=0 + ) observations_per_frame = good_counts.values.astype(np.uint8) return observations_per_frame @@ -142,9 +151,14 @@ def process_decoded_data(posterior: xr.DataArray): frame_step_size = 100_000 location_lookup = {} - (x_count, x_min, x_width, y_count, y_min, y_width) = get_base_track_information( - posterior - ) + ( + x_count, + x_min, + x_width, + y_count, + y_min, + y_width, + ) = get_base_track_information(posterior) location_fn = generate_linearization_function( location_lookup, x_count, x_min, x_width, y_min, y_width ) diff --git a/src/spyglass/figurl_views/SpikeSortingRecordingView.py b/src/spyglass/figurl_views/SpikeSortingRecordingView.py index 8afe7c7d7..9b4b2a778 100644 --- a/src/spyglass/figurl_views/SpikeSortingRecordingView.py +++ b/src/spyglass/figurl_views/SpikeSortingRecordingView.py @@ -102,5 +102,7 @@ def create_mountain_layout( def _upload_data_and_return_sha1(data): data_uri = kc.store_json(data) data_hash = data_uri.split("/")[2] - kc.upload_file(data_uri, channel=os.environ["FIGURL_CHANNEL"], single_chunk=True) + kc.upload_file( + data_uri, channel=os.environ["FIGURL_CHANNEL"], single_chunk=True + ) return data_hash diff --git a/src/spyglass/figurl_views/SpikeSortingView.py b/src/spyglass/figurl_views/SpikeSortingView.py index 316036a90..7f460d767 100644 --- a/src/spyglass/figurl_views/SpikeSortingView.py +++ b/src/spyglass/figurl_views/SpikeSortingView.py @@ -1,7 +1,9 @@ import datajoint as dj import kachery_client as kc import spikeinterface as si -from sortingview.SpikeSortingView import SpikeSortingView as SortingViewSpikeSortingView +from sortingview.SpikeSortingView import ( + SpikeSortingView as SortingViewSpikeSortingView, +) from ..common.common_spikesorting import SpikeSorting, SpikeSortingRecording from .prepare_spikesortingview_data import prepare_spikesortingview_data @@ -22,7 +24,9 @@ def make(self, key): recording_record = ( SpikeSortingRecording & {"recording_id": key["recording_id"]} ).fetch1() - sorting_record = (SpikeSorting & {"sorting_id": key["sorting_id"]}).fetch1() + sorting_record = ( + SpikeSorting & {"sorting_id": key["sorting_id"]} + ).fetch1() recording_path = recording_record["recording_path"] sorting_path = sorting_record["sorting_path"] diff --git a/src/spyglass/figurl_views/prepare_spikesortingview_data.py b/src/spyglass/figurl_views/prepare_spikesortingview_data.py index 7e6e8c419..601c277fd 100644 --- a/src/spyglass/figurl_views/prepare_spikesortingview_data.py +++ b/src/spyglass/figurl_views/prepare_spikesortingview_data.py @@ -21,18 +21,25 @@ def prepare_spikesortingview_data( channel_ids = np.array(recording.get_channel_ids()).astype(np.int32) sampling_frequency = recording.get_sampling_frequency() num_frames = recording.get_num_frames() - num_frames_per_segment = math.ceil(segment_duration_sec * sampling_frequency) + num_frames_per_segment = math.ceil( + segment_duration_sec * sampling_frequency + ) num_segments = 
math.ceil(num_frames / num_frames_per_segment) with h5py.File(output_file_name, "w") as f: f.create_dataset("unit_ids", data=unit_ids) f.create_dataset( - "sampling_frequency", data=np.array([sampling_frequency]).astype(np.float32) + "sampling_frequency", + data=np.array([sampling_frequency]).astype(np.float32), ) f.create_dataset("channel_ids", data=channel_ids) - f.create_dataset("num_frames", data=np.array([num_frames]).astype(np.int32)) + f.create_dataset( + "num_frames", data=np.array([num_frames]).astype(np.int32) + ) channel_locations = recording.get_channel_locations() f.create_dataset("channel_locations", data=np.array(channel_locations)) - f.create_dataset("num_segments", data=np.array([num_segments]).astype(np.int32)) + f.create_dataset( + "num_segments", data=np.array([num_segments]).astype(np.int32) + ) f.create_dataset( "num_frames_per_segment", data=np.array([num_frames_per_segment]).astype(np.int32), @@ -69,12 +76,15 @@ def prepare_spikesortingview_data( start_frame_with_padding = max(start_frame - snippet_len[0], 0) end_frame_with_padding = min(end_frame + snippet_len[1], num_frames) traces_with_padding = recording.get_traces( - start_frame=start_frame_with_padding, end_frame=end_frame_with_padding + start_frame=start_frame_with_padding, + end_frame=end_frame_with_padding, ) for unit_id in unit_ids: if str(unit_id) not in unit_peak_channel_ids: spike_train = sorting.get_unit_spike_train( - unit_id=unit_id, start_frame=start_frame, end_frame=end_frame + unit_id=unit_id, + start_frame=start_frame, + end_frame=end_frame, ) if len(spike_train) > 0: values = traces_with_padding[ @@ -90,16 +100,22 @@ def prepare_spikesortingview_data( channel_neighborhood_size=channel_neighborhood_size, ) if len(spike_train) >= 10: - unit_peak_channel_ids[str(unit_id)] = peak_channel_id + unit_peak_channel_ids[ + str(unit_id) + ] = peak_channel_id else: fallback_unit_peak_channel_ids[ str(unit_id) ] = peak_channel_id - unit_channel_neighborhoods[str(unit_id)] = channel_neighborhood + unit_channel_neighborhoods[ + str(unit_id) + ] = channel_neighborhood for unit_id in unit_ids: peak_channel_id = unit_peak_channel_ids.get(str(unit_id), None) if peak_channel_id is None: - peak_channel_id = fallback_unit_peak_channel_ids.get(str(unit_id), None) + peak_channel_id = fallback_unit_peak_channel_ids.get( + str(unit_id), None + ) if peak_channel_id is None: raise Exception( f"Peak channel not found for unit {unit_id}. This is probably because no spikes were found in any segment for this unit." @@ -121,7 +137,8 @@ def prepare_spikesortingview_data( start_frame_with_padding = max(start_frame - snippet_len[0], 0) end_frame_with_padding = min(end_frame + snippet_len[1], num_frames) traces_with_padding = recording.get_traces( - start_frame=start_frame_with_padding, end_frame=end_frame_with_padding + start_frame=start_frame_with_padding, + end_frame=end_frame_with_padding, ) traces_sample = traces_with_padding[ start_frame @@ -130,7 +147,9 @@ def prepare_spikesortingview_data( + int(sampling_frequency * 1), :, ] - f.create_dataset(f"segment/{iseg}/traces_sample", data=traces_sample) + f.create_dataset( + f"segment/{iseg}/traces_sample", data=traces_sample + ) all_subsampled_spike_trains = [] for unit_id in unit_ids: peak_channel_id = unit_peak_channel_ids.get(str(unit_id), None) @@ -143,10 +162,13 @@ def prepare_spikesortingview_data( f"Peak channel not found for unit {unit_id}. This is probably because no spikes were found in any segment for this unit." 
) spike_train = sorting.get_unit_spike_train( - unit_id=unit_id, start_frame=start_frame, end_frame=end_frame + unit_id=unit_id, + start_frame=start_frame, + end_frame=end_frame, ).astype(np.int32) f.create_dataset( - f"segment/{iseg}/unit/{unit_id}/spike_train", data=spike_train + f"segment/{iseg}/unit/{unit_id}/spike_train", + data=spike_train, ) channel_neighborhood = unit_channel_neighborhoods[str(unit_id)] peak_channel_ind = channel_ids.tolist().index(peak_channel_id) @@ -185,7 +207,8 @@ def prepare_spikesortingview_data( for ii, unit_id in enumerate(unit_ids): channel_neighborhood = unit_channel_neighborhoods[str(unit_id)] channel_neighborhood_indices = [ - channel_ids.tolist().index(ch_id) for ch_id in channel_neighborhood + channel_ids.tolist().index(ch_id) + for ch_id in channel_neighborhood ] num = len(all_subsampled_spike_trains[ii]) spike_snippets = spike_snippets_concat[ diff --git a/src/spyglass/lfp/__init__.py b/src/spyglass/lfp/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/src/spyglass/lfp/v1/lfp.py b/src/spyglass/lfp/v1/lfp.py index bd79f168b..781baedcc 100644 --- a/src/spyglass/lfp/v1/lfp.py +++ b/src/spyglass/lfp/v1/lfp.py @@ -24,7 +24,10 @@ from spyglass.common.common_nwbfile import AnalysisNwbfile from spyglass.common.common_session import Session # noqa: F401 from spyglass.utils.dj_helper_fn import fetch_nwb # dj_replace -from spyglass.utils.nwb_helper_fn import get_electrode_indices, get_valid_intervals +from spyglass.utils.nwb_helper_fn import ( + get_electrode_indices, + get_valid_intervals, +) schema = dj.schema("lfp_v1") @@ -59,7 +62,10 @@ def create_lfp_electrode_group(nwb_file_name, group_name, electrode_list): """ # remove the session and then recreate the session and Electrode list # check to see if the user allowed the deletion - key = {"nwb_file_name": nwb_file_name, "lfp_electrode_group_name": group_name} + key = { + "nwb_file_name": nwb_file_name, + "lfp_electrode_group_name": group_name, + } LFPElectrodeGroup().insert1(key, skip_duplicates=True) # TODO: do this in a better way @@ -122,7 +128,9 @@ def make(self, key): } ).fetch1("valid_times") valid_times = interval_list_intersect( - user_valid_times, raw_valid_times, min_length=min_lfp_interval_length + user_valid_times, + raw_valid_times, + min_length=min_lfp_interval_length, ) print( f"LFP: found {len(valid_times)} intervals > {min_lfp_interval_length} sec long." 
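A minimal sketch, not the spyglass implementation, of the interval logic used above: interval_list_intersect (spyglass.common.common_interval) keeps the pairwise overlaps of two [start, stop] interval lists and drops any overlap not longer than min_length seconds. The helper name and example arrays below are illustrative only.

import numpy as np

def intersect_sketch(a, b, min_length=0.0):
    # a, b: (n, 2) arrays of [start, stop] times in seconds
    out = []
    for a_start, a_stop in a:
        for b_start, b_stop in b:
            start, stop = max(a_start, b_start), min(a_stop, b_stop)
            if stop - start > min_length:  # keep only overlaps longer than min_length
                out.append([start, stop])
    return np.asarray(out)

# e.g. with min_length=1.0:
# intersect_sketch(np.array([[0.0, 10.0]]), np.array([[2.0, 3.0], [4.0, 20.0]]), 1.0)
# -> array([[ 4., 10.]])   # the 1-second overlap [2, 3] is dropped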
@@ -156,7 +164,10 @@ def make(self, key): lfp_file_name = AnalysisNwbfile().create(key["nwb_file_name"]) lfp_file_abspath = AnalysisNwbfile().get_abs_path(lfp_file_name) - lfp_object_id, timestamp_interval = FirFilterParameters().filter_data_nwb( + ( + lfp_object_id, + timestamp_interval, + ) = FirFilterParameters().filter_data_nwb( lfp_file_abspath, rawdata, filter_coeff, @@ -207,7 +218,8 @@ def fetch_nwb(self, *attrs, **kwargs): def fetch1_dataframe(self, *attrs, **kwargs): nwb_lfp = self.fetch_nwb()[0] return pd.DataFrame( - nwb_lfp["lfp"].data, index=pd.Index(nwb_lfp["lfp"].timestamps, name="time") + nwb_lfp["lfp"].data, + index=pd.Index(nwb_lfp["lfp"].timestamps, name="time"), ) @@ -359,7 +371,9 @@ def make(self, key): # also insert into IntervalList tmp_key = {} tmp_key["nwb_file_name"] = key["nwb_file_name"] - tmp_key["interval_list_name"] = key["artifact_removed_interval_list_name"] + tmp_key["interval_list_name"] = key[ + "artifact_removed_interval_list_name" + ] tmp_key["valid_times"] = key["artifact_removed_valid_times"] IntervalList.insert1(tmp_key, replace=True) @@ -425,7 +439,9 @@ def _get_artifact_times( # if both thresholds are None, we skip artifact detection if amplitude_thresh_1st is None: - recording_interval = np.asarray([valid_timestamps[0], valid_timestamps[-1]]) + recording_interval = np.asarray( + [valid_timestamps[0], valid_timestamps[-1]] + ) artifact_times_empty = np.asarray([]) print("Amplitude threshold is None, skipping artifact detection") return recording_interval, artifact_times_empty @@ -445,8 +461,12 @@ def _get_artifact_times( # want to detect frames without parallel processing # compute the number of electrodes that have to be above threshold - nelect_above_1st = np.ceil(proportion_above_thresh_1st * recording.data.shape[1]) - nelect_above_2nd = np.ceil(proportion_above_thresh_2nd * recording.data.shape[1]) + nelect_above_1st = np.ceil( + proportion_above_thresh_1st * recording.data.shape[1] + ) + nelect_above_2nd = np.ceil( + proportion_above_thresh_2nd * recording.data.shape[1] + ) # find the artifact occurrences using one or both thresholds, across channels # replace with LFP artifact code @@ -454,12 +474,15 @@ def _get_artifact_times( if amplitude_thresh_1st is not None: # first find times with large amp change artifact_boolean = np.sum( - (np.abs(np.diff(recording.data, axis=0)) > amplitude_thresh_1st), axis=1 + (np.abs(np.diff(recording.data, axis=0)) > amplitude_thresh_1st), + axis=1, ) above_thresh_1st = np.where(artifact_boolean >= nelect_above_1st)[0] # second, find artifacts with large baseline change - big_artifacts = np.zeros((recording.data.shape[1], above_thresh_1st.shape[0])) + big_artifacts = np.zeros( + (recording.data.shape[1], above_thresh_1st.shape[0]) + ) for art_count in np.arange(above_thresh_1st.shape[0]): if above_thresh_1st[art_count] <= local_window: local_min = local_max = above_thresh_1st[art_count] @@ -499,14 +522,18 @@ def _get_artifact_times( half_removal_window_s = removal_window_ms / 1000 * 0.5 if len(artifact_frames) == 0: - recording_interval = np.asarray([[valid_timestamps[0], valid_timestamps[-1]]]) + recording_interval = np.asarray( + [[valid_timestamps[0], valid_timestamps[-1]]] + ) artifact_times_empty = np.asarray([]) print("No artifacts detected.") return recording_interval, artifact_times_empty artifact_intervals = interval_from_inds(artifact_frames) - artifact_intervals_s = np.zeros((len(artifact_intervals), 2), dtype=np.float64) + artifact_intervals_s = np.zeros( + (len(artifact_intervals), 2), 
dtype=np.float64 + ) for interval_idx, interval in enumerate(artifact_intervals): artifact_intervals_s[interval_idx] = [ valid_timestamps[interval[0]] - half_removal_window_s, @@ -700,9 +727,9 @@ def set_lfp_band_electrodes( "added before this function is called" ) # reference_electrode_list - if len(reference_electrode_list) != 1 and len(reference_electrode_list) != len( - electrode_list - ): + if len(reference_electrode_list) != 1 and len( + reference_electrode_list + ) != len(electrode_list): raise ValueError( "reference_electrode_list must contain either 1 or len(electrode_list) elements" ) @@ -728,7 +755,9 @@ def set_lfp_band_electrodes( # insert an entry into the main LFPBandSelectionTable self.insert1(key, skip_duplicates=True) - key["lfp_electrode_group_name"] = lfp_object.fetch1("lfp_electrode_group_name") + key["lfp_electrode_group_name"] = lfp_object.fetch1( + "lfp_electrode_group_name" + ) # iterate through all of the new elements and add them for e, r in zip(electrode_list, ref_list): elect_key = ( @@ -741,7 +770,10 @@ def set_lfp_band_electrodes( ).fetch1("KEY") for item in elect_key: key[item] = elect_key[item] - query = Electrode & {"nwb_file_name": nwb_file_name, "electrode_id": e} + query = Electrode & { + "nwb_file_name": nwb_file_name, + "electrode_id": e, + } key["reference_elect_id"] = r self.LFPBandElectrode().insert1(key, skip_duplicates=True) @@ -758,9 +790,9 @@ class LFPBand(dj.Computed): def make(self, key): # get the NWB object with the lfp data; FIX: change to fetch with additional infrastructure - lfp_object = (LFP() & {"nwb_file_name": key["nwb_file_name"]}).fetch_nwb()[0][ - "lfp" - ] + lfp_object = ( + LFP() & {"nwb_file_name": key["nwb_file_name"]} + ).fetch_nwb()[0]["lfp"] # get the electrodes to be filtered and their references lfp_band_elect_id, lfp_band_ref_id = ( @@ -779,9 +811,9 @@ def make(self, key): .get_lfp_object({"lfp_id": key["lfp_id"]}) .fetch1("lfp_sampling_rate") ) - interval_list_name, lfp_band_sampling_rate = (LFPBandSelection() & key).fetch1( - "target_interval_list_name", "lfp_band_sampling_rate" - ) + interval_list_name, lfp_band_sampling_rate = ( + LFPBandSelection() & key + ).fetch1("target_interval_list_name", "lfp_band_sampling_rate") valid_times = ( IntervalList() & { @@ -810,14 +842,18 @@ def make(self, key): filter_name, filter_sampling_rate, lfp_band_sampling_rate = ( LFPBandSelection() & key - ).fetch1("filter_name", "filter_sampling_rate", "lfp_band_sampling_rate") + ).fetch1( + "filter_name", "filter_sampling_rate", "lfp_band_sampling_rate" + ) decimation = int(lfp_sampling_rate) // lfp_band_sampling_rate # load in the timestamps timestamps = np.asarray(lfp_object.timestamps) # get the indices of the first timestamp and the last timestamp that are within the valid times - included_indices = interval_list_contains_ind(lfp_band_valid_times, timestamps) + included_indices = interval_list_contains_ind( + lfp_band_valid_times, timestamps + ) # pad the indices by 1 on each side to avoid message in filter_data if included_indices[0] > 0: included_indices[0] -= 1 @@ -833,14 +869,17 @@ def make(self, key): ) # get the indices of the electrodes to be filtered and the references - lfp_band_elect_index = get_electrode_indices(lfp_object, lfp_band_elect_id) + lfp_band_elect_index = get_electrode_indices( + lfp_object, lfp_band_elect_id + ) lfp_band_ref_index = get_electrode_indices(lfp_object, lfp_band_ref_id) # subtract off the references for the selected channels for index, elect_index in enumerate(lfp_band_elect_index): if 
lfp_band_ref_id[index] != -1: lfp_data[:, elect_index] = ( - lfp_data[:, elect_index] - lfp_data[:, lfp_band_ref_index[index]] + lfp_data[:, elect_index] + - lfp_data[:, lfp_band_ref_index[index]] ) # get the LFP filter that matches the raw data @@ -864,7 +903,9 @@ def make(self, key): # create the analysis nwb file to store the results. lfp_band_file_name = AnalysisNwbfile().create(key["nwb_file_name"]) - lfp_band_file_abspath = AnalysisNwbfile().get_abs_path(lfp_band_file_name) + lfp_band_file_abspath = AnalysisNwbfile().get_abs_path( + lfp_band_file_name + ) # filter the data and write to an the nwb file filtered_data, new_timestamps = FirFilterParameters().filter_data( timestamps, @@ -895,7 +936,8 @@ def make(self, key): ) lfp = pynwb.ecephys.LFP(electrical_series=es) ecephys_module = nwbf.create_processing_module( - name="ecephys", description=f"LFP data processed with {filter_name}" + name="ecephys", + description=f"LFP data processed with {filter_name}", ) ecephys_module.add(lfp) io.write(nwbf) @@ -909,7 +951,10 @@ def make(self, key): # finally, we need to censor the valid times to account for the downsampling if this is the first time we've # downsampled these data key["interval_list_name"] = ( - interval_list_name + " lfp band " + str(lfp_band_sampling_rate) + "Hz" + interval_list_name + + " lfp band " + + str(lfp_band_sampling_rate) + + "Hz" ) tmp_valid_times = ( IntervalList @@ -934,7 +979,9 @@ def make(self, key): # check that the valid times are the same assert np.isclose( tmp_valid_times[0], lfp_band_valid_times - ).all(), "previously saved lfp band times do not match current times" + ).all(), ( + "previously saved lfp band times do not match current times" + ) self.insert1(key) @@ -947,7 +994,9 @@ def fetch1_dataframe(self, *attrs, **kwargs): filtered_nwb = self.fetch_nwb()[0] return pd.DataFrame( filtered_nwb["filtered_data"].data, - index=pd.Index(filtered_nwb["filtered_data"].timestamps, name="time"), + index=pd.Index( + filtered_nwb["filtered_data"].timestamps, name="time" + ), ) def compute_analytic_signal(self, electrode_list, **kwargs): @@ -970,7 +1019,9 @@ def compute_analytic_signal(self, electrode_list, **kwargs): """ filtered_band = self.fetch_nwb()[0]["filtered_data"] - electrode_index = np.isin(filtered_band.electrodes.data[:], electrode_list) + electrode_index = np.isin( + filtered_band.electrodes.data[:], electrode_list + ) if len(electrode_list) != np.sum(electrode_index): raise ValueError( "Some of the electrodes specified in electrode_list are missing in the current LFPBand table." 
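A minimal sketch of the relationship the two methods below rely on, assuming the analytic signal comes from a Hilbert transform of the band-filtered LFP (as compute_analytic_signal provides); the random array is a stand-in, not data fetched from the table.

import numpy as np
from scipy.signal import hilbert

band_filtered = np.random.randn(1000, 4)   # stand-in: (n_time, n_electrodes) band-passed LFP
analytic = hilbert(band_filtered, axis=0)   # complex analytic signal per electrode
phase = np.angle(analytic) + np.pi          # shifted by pi to be non-negative, as in compute_signal_phase
power = np.abs(analytic) ** 2               # squared envelope, as in compute_signal_power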
@@ -983,7 +1034,9 @@ def compute_analytic_signal(self, electrode_list, **kwargs): return analytic_signal_df def compute_signal_phase(self, electrode_list=[], **kwargs): - analytic_signal_df = self.compute_analytic_signal(electrode_list, **kwargs) + analytic_signal_df = self.compute_analytic_signal( + electrode_list, **kwargs + ) return pd.DataFrame( np.angle(analytic_signal_df) + math.pi, columns=analytic_signal_df.columns, @@ -991,7 +1044,9 @@ def compute_signal_phase(self, electrode_list=[], **kwargs): ) def compute_signal_power(self, electrode_list=[], **kwargs): - analytic_signal_df = self.compute_analytic_signal(electrode_list, **kwargs) + analytic_signal_df = self.compute_analytic_signal( + electrode_list, **kwargs + ) return pd.DataFrame( np.abs(analytic_signal_df) ** 2, columns=analytic_signal_df.columns, diff --git a/src/spyglass/position/position_merge.py b/src/spyglass/position/position_merge.py index 7a0054292..512927971 100644 --- a/src/spyglass/position/position_merge.py +++ b/src/spyglass/position/position_merge.py @@ -9,7 +9,9 @@ from ..common.common_interval import IntervalList from ..common.common_nwbfile import AnalysisNwbfile -from ..common.common_position import IntervalPositionInfo as CommonIntervalPositionInfo +from ..common.common_position import ( + IntervalPositionInfo as CommonIntervalPositionInfo, +) from ..utils.dj_helper_fn import fetch_nwb from .v1.dlc_utils import check_videofile, get_video_path, make_video from .v1.position_dlc_pose_estimation import DLCPoseEstimationSelection @@ -57,7 +59,10 @@ class DLCPosV1(dj.Part): def fetch_nwb(self, *attrs, **kwargs): return fetch_nwb( - self, (AnalysisNwbfile, "analysis_file_abs_path"), *attrs, **kwargs + self, + (AnalysisNwbfile, "analysis_file_abs_path"), + *attrs, + **kwargs, ) class TrodesPosV1(dj.Part): @@ -77,7 +82,10 @@ class TrodesPosV1(dj.Part): def fetch_nwb(self, *attrs, **kwargs): return fetch_nwb( - self, (AnalysisNwbfile, "analysis_file_abs_path"), *attrs, **kwargs + self, + (AnalysisNwbfile, "analysis_file_abs_path"), + *attrs, + **kwargs, ) class CommonPos(dj.Part): @@ -97,7 +105,10 @@ class CommonPos(dj.Part): def fetch_nwb(self, *attrs, **kwargs): return fetch_nwb( - self, (AnalysisNwbfile, "analysis_file_abs_path"), *attrs, **kwargs + self, + (AnalysisNwbfile, "analysis_file_abs_path"), + *attrs, + **kwargs, ) def insert1(self, key, params: Dict = None, **kwargs): @@ -144,9 +155,13 @@ def insert1(self, key, params: Dict = None, **kwargs): ) else: table_query = ( - dj.FreeTable(dj.conn(), full_table_name=part_table.parents()[1]) & key + dj.FreeTable(dj.conn(), full_table_name=part_table.parents()[1]) + & key ) - if any("head" in col for col in list(table_query.fetch().dtype.fields.keys())): + if any( + "head" in col + for col in list(table_query.fetch().dtype.fields.keys()) + ): ( analysis_file_name, position_object_id, @@ -195,7 +210,10 @@ def fetch1_dataframe(self): np.asarray(nwb_data["position"].get_spatial_series().timestamps), name="time", ) - if "video_frame_ind" in nwb_data["velocity"].fields["time_series"].keys(): + if ( + "video_frame_ind" + in nwb_data["velocity"].fields["time_series"].keys() + ): COLUMNS = [ "video_frame_ind", "position_x", @@ -209,13 +227,17 @@ def fetch1_dataframe(self): np.concatenate( ( np.asarray( - nwb_data["velocity"].get_timeseries("video_frame_ind").data, + nwb_data["velocity"] + .get_timeseries("video_frame_ind") + .data, dtype=int, )[:, np.newaxis], - np.asarray(nwb_data["position"].get_spatial_series().data), - 
np.asarray(nwb_data["orientation"].get_spatial_series().data)[ - :, np.newaxis - ], + np.asarray( + nwb_data["position"].get_spatial_series().data + ), + np.asarray( + nwb_data["orientation"].get_spatial_series().data + )[:, np.newaxis], np.asarray( nwb_data["velocity"].get_timeseries("velocity").data ), @@ -237,10 +259,12 @@ def fetch1_dataframe(self): return pd.DataFrame( np.concatenate( ( - np.asarray(nwb_data["position"].get_spatial_series().data), - np.asarray(nwb_data["orientation"].get_spatial_series().data)[ - :, np.newaxis - ], + np.asarray( + nwb_data["position"].get_spatial_series().data + ), + np.asarray( + nwb_data["orientation"].get_spatial_series().data + )[:, np.newaxis], np.asarray(nwb_data["velocity"].get_timeseries().data), ), axis=1, @@ -377,21 +401,30 @@ def make(self, key): + 1 ) - video_path, video_filename, meters_per_pixel, video_time = get_video_path( + ( + video_path, + video_filename, + meters_per_pixel, + video_time, + ) = get_video_path( {"nwb_file_name": key["nwb_file_name"], "epoch": epoch} ) video_dir = os.path.dirname(video_path) + "/" video_frame_col_name = [ col for col in pos_df.columns if "video_frame_ind" in col ] - video_frame_inds = pos_df[video_frame_col_name[0]].astype(int).to_numpy() + video_frame_inds = ( + pos_df[video_frame_col_name[0]].astype(int).to_numpy() + ) if key["plot"] in ["DLC", "All"]: temp_key = (PositionOutput.DLCPosV1 & key).fetch1("KEY") - video_path = (DLCPoseEstimationSelection & temp_key).fetch1("video_path") + video_path = (DLCPoseEstimationSelection & temp_key).fetch1( + "video_path" + ) else: - video_path = check_videofile(video_dir, key["output_dir"], video_filename)[ - 0 - ] + video_path = check_videofile( + video_dir, key["output_dir"], video_filename + )[0] nwb_base_filename = key["nwb_file_name"].replace(".nwb", "") output_video_filename = Path( @@ -407,16 +440,22 @@ def make(self, key): position_mean_dict[key["plot"]] = np.asarray( pos_df[["position_x", "position_y"]] ) - orientation_mean_dict[key["plot"]] = np.asarray(pos_df[["orientation"]]) + orientation_mean_dict[key["plot"]] = np.asarray( + pos_df[["orientation"]] + ) elif key["plot"] == "All": position_mean_dict["DLC"] = np.asarray( pos_df[["position_x_DLC", "position_y_DLC"]] ) - orientation_mean_dict["DLC"] = np.asarray(pos_df[["orientation_DLC"]]) + orientation_mean_dict["DLC"] = np.asarray( + pos_df[["orientation_DLC"]] + ) position_mean_dict["Trodes"] = np.asarray( pos_df[["position_x_Trodes", "position_y_Trodes"]] ) - orientation_mean_dict["Trodes"] = np.asarray(pos_df[["orientation_Trodes"]]) + orientation_mean_dict["Trodes"] = np.asarray( + pos_df[["orientation_Trodes"]] + ) position_time = np.asarray(pos_df.index) cm_per_pixel = meters_per_pixel * M_TO_CM print("Making video...") diff --git a/src/spyglass/position/v1/__init__.py b/src/spyglass/position/v1/__init__.py index 8eb0b39fb..d551fcc42 100644 --- a/src/spyglass/position/v1/__init__.py +++ b/src/spyglass/position/v1/__init__.py @@ -8,8 +8,15 @@ get_dlc_root_data_dir, get_video_path, ) -from .position_dlc_centroid import DLCCentroid, DLCCentroidParams, DLCCentroidSelection -from .position_dlc_cohort import DLCSmoothInterpCohort, DLCSmoothInterpCohortSelection +from .position_dlc_centroid import ( + DLCCentroid, + DLCCentroidParams, + DLCCentroidSelection, +) +from .position_dlc_cohort import ( + DLCSmoothInterpCohort, + DLCSmoothInterpCohortSelection, +) from .position_dlc_model import ( DLCModel, DLCModelEvaluation, @@ -23,7 +30,10 @@ DLCOrientationParams, DLCOrientationSelection, ) -from 
.position_dlc_pose_estimation import DLCPoseEstimation, DLCPoseEstimationSelection +from .position_dlc_pose_estimation import ( + DLCPoseEstimation, + DLCPoseEstimationSelection, +) from .position_dlc_position import ( DLCSmoothInterp, DLCSmoothInterpParams, diff --git a/src/spyglass/position/v1/dlc_reader.py b/src/spyglass/position/v1/dlc_reader.py index 54c5f2b56..0a62bdfea 100644 --- a/src/spyglass/position/v1/dlc_reader.py +++ b/src/spyglass/position/v1/dlc_reader.py @@ -29,7 +29,9 @@ def __init__( # meta file: pkl - info about this DLC run (input video, configuration, etc.) if pkl_path is None: - pkl_paths = list(self.dlc_dir.rglob(f"{filename_prefix}*meta.pickle")) + pkl_paths = list( + self.dlc_dir.rglob(f"{filename_prefix}*meta.pickle") + ) assert len(pkl_paths) == 1, ( "Unable to find one unique .pickle file in: " + f"{dlc_dir} - Found: {len(pkl_paths)}" @@ -60,7 +62,9 @@ def __init__( yml_paths = list(self.dlc_dir.glob(f"{filename_prefix}*.y*ml")) # If multiple, defer to the one we save. if len(yml_paths) > 1: - yml_paths = [val for val in yml_paths if val.stem == "dj_dlc_config"] + yml_paths = [ + val for val in yml_paths if val.stem == "dj_dlc_config" + ] assert len(yml_paths) == 1, ( "Unable to find one unique .yaml file in: " + f"{dlc_dir} - Found: {len(yml_paths)}" @@ -86,7 +90,9 @@ def __init__( "Task": self.yml["Task"], "date": self.yml["date"], "iteration": self.pkl["iteration (active-learning)"], - "shuffle": int(re.search("shuffle(\d+)", self.pkl["Scorer"]).groups()[0]), + "shuffle": int( + re.search("shuffle(\d+)", self.pkl["Scorer"]).groups()[0] + ), "snapshotindex": self.yml["snapshotindex"], "trainingsetindex": train_idx, "training_iteration": train_iter, @@ -185,7 +191,10 @@ def save_yaml(output_dir, config_dict, filename="dj_dlc_config", mkdir=True): If extension is included, removed and replaced with "yaml". mkdir (bool): Optional, True. Make new directory if output_dir not exist - Returns: path of saved file as string - due to DLC func preference for strings + Returns + ------- + str + path of saved file as string - due to DLC func preference for strings """ from deeplabcut.utils.auxiliaryfunctions import write_config @@ -216,17 +225,18 @@ def do_pose_estimation( robust_nframes=False, allow_growth=False, use_shelve=False, - modelprefix="", # need from paramset ): """Launch DLC's analyze_videos within element-deeplabcut + Other optional parameters may be set other than those described below. See + deeplabcut.analyze_videos parameters for descriptions/defaults. + Parameters ---------- video_filepaths: list of videos to analyze dlc_model: element-deeplabcut dlc.Model dict project_path: path to project config.yml output_dir: where to save output - OTHERS: Optional, set with defaults. 
See deeplabcut.analyze_videos parameters """ from deeplabcut.pose_estimation_tensorflow import analyze_videos diff --git a/src/spyglass/position/v1/dlc_utils.py b/src/spyglass/position/v1/dlc_utils.py index 015a1ace7..bfb0ce105 100644 --- a/src/spyglass/position/v1/dlc_utils.py +++ b/src/spyglass/position/v1/dlc_utils.py @@ -136,7 +136,8 @@ def setup_logger( handler.close() logger.removeHandler(handler) if print_console and not any( - type(handler) == logging.StreamHandler for handler in logger.handlers + type(handler) == logging.StreamHandler + for handler in logger.handlers ): logger.addHandler(self._get_stream_handler()) @@ -234,7 +235,8 @@ def find_full_path(root_directories, relative_path): return _to_Path(root_dir) / relative_path raise FileNotFoundError( - f"No valid full-path found (from {root_directories})" f" for {relative_path}" + f"No valid full-path found (from {root_directories})" + f" for {relative_path}" ) @@ -320,9 +322,12 @@ def get_video_path(key): from ...common.common_behav import VideoFile video_info = ( - VideoFile() & {"nwb_file_name": key["nwb_file_name"], "epoch": key["epoch"]} + VideoFile() + & {"nwb_file_name": key["nwb_file_name"], "epoch": key["epoch"]} ).fetch1() - nwb_path = f"{os.getenv('SPYGLASS_BASE_DIR')}/raw/{video_info['nwb_file_name']}" + nwb_path = ( + f"{os.getenv('SPYGLASS_BASE_DIR')}/raw/{video_info['nwb_file_name']}" + ) with pynwb.NWBHDF5IO(path=nwb_path, mode="r") as in_out: nwb_file = in_out.read() nwb_video = nwb_file.objects[video_info["video_file_object_id"]] @@ -412,7 +417,9 @@ def _convert_mp4( """ orig_filename = filename - video_path = pathlib.PurePath(pathlib.Path(video_path), pathlib.Path(filename)) + video_path = pathlib.PurePath( + pathlib.Path(video_path), pathlib.Path(filename) + ) if videotype not in ["mp4"]: raise NotImplementedError dest_filename = os.path.splitext(filename)[0] @@ -474,7 +481,9 @@ def _convert_mp4( if count_frames: try: check_process = subprocess.Popen( - frames_command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT + frames_command, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, ) except subprocess.CalledProcessError as err: raise RuntimeError( @@ -483,7 +492,9 @@ def _convert_mp4( else: try: check_process = subprocess.Popen( - packets_command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT + packets_command, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT, ) except subprocess.CalledProcessError as err: raise RuntimeError( @@ -525,7 +536,9 @@ def get_gpu_memory(): raise RuntimeError( f"command {err.cmd} return with error (code {err.returncode}): {err.output}" ) from err - memory_use_values = {i: int(x.split()[0]) for i, x in enumerate(memory_use_info)} + memory_use_values = { + i: int(x.split()[0]) for i, x in enumerate(memory_use_info) + } return memory_use_values @@ -583,7 +596,9 @@ def interp_pos(dlc_df, spans_to_interp, **kwargs): ): dlc_df.loc[idx[start_time:stop_time], idx["x"]] = np.nan dlc_df.loc[idx[start_time:stop_time], idx["y"]] = np.nan - change = np.linalg.norm(np.array([x[0], y[0]]) - np.array([x[1], y[1]])) + change = np.linalg.norm( + np.array([x[0], y[0]]) - np.array([x[1], y[1]]) + ) print( f"inds {span_start} to {span_stop + 1} " f"with change in position: {change:.2f} not interpolated" @@ -649,6 +664,7 @@ def convert_to_pixels(data, frame_size, cm_to_pixels=1.0): data : ndarray, shape (n_time, 2) frame_size : array_like, shape (2,) cm_to_pixels : float + Returns ------- converted_data : ndarray, shape (n_time, 2) @@ -699,7 +715,9 @@ def make_video( else: n_frames = 
int(len(video_frame_inds) * percent_frames) frames = np.arange(0, n_frames) - print(f"video save path: {output_video_filename}\n{n_frames} frames in total.") + print( + f"video save path: {output_video_filename}\n{n_frames} frames in total." + ) if crop: crop_offset_x = crop[0] crop_offset_y = crop[2] @@ -725,7 +743,9 @@ def make_video( print( f"frames start: {frames[0]}\nvideo_frames start: {video_frame_inds[0]}\ncv2 frame ind start: {int(video.get(1))}" ) - for time_ind in tqdm(frames, desc="frames", disable=disable_progressbar): + for time_ind in tqdm( + frames, desc="frames", disable=disable_progressbar + ): if time_ind == 0: video.set(1, time_ind + 1) elif int(video.get(1)) != time_ind - 1: @@ -782,16 +802,24 @@ def make_video( # ) # else: # position = convert_to_pixels(position, frame_size, cm_to_pixels) - position = convert_to_pixels(position, frame_size, cm_to_pixels) + position = convert_to_pixels( + position, frame_size, cm_to_pixels + ) orientation = orientation_mean[key][pos_ind] if key == "DLC": color = RGB_BLUE if key == "Trodes": color = RGB_ORANGE - if np.all(~np.isnan(position)) & np.all(~np.isnan(orientation)): + if np.all(~np.isnan(position)) & np.all( + ~np.isnan(orientation) + ): arrow_tip = ( - int(position[0] + arrow_radius * np.cos(orientation)), - int(position[1] + arrow_radius * np.sin(orientation)), + int( + position[0] + arrow_radius * np.cos(orientation) + ), + int( + position[1] + arrow_radius * np.sin(orientation) + ), ) cv2.arrowedLine( img=frame, @@ -915,7 +943,9 @@ def make_video( else: n_frames = int(len(video_frame_inds) * percent_frames) frames = np.arange(0, n_frames) - print(f"video save path: {output_video_filename}\n{n_frames} frames in total.") + print( + f"video save path: {output_video_filename}\n{n_frames} frames in total." 
+ ) fps = int(np.round(frame_rate / video_slowdown)) writer = Writer(fps=fps, bitrate=-1) ret, frame = video.read() @@ -978,7 +1008,9 @@ def make_video( ratio = (crop[3] - crop[2]) / (crop[1] - crop[0]) x_left, x_right = axes[0].get_xlim() y_low, y_high = axes[0].get_ylim() - axes[0].set_aspect(abs((x_right - x_left) / (y_low - y_high)) * ratio) + axes[0].set_aspect( + abs((x_right - x_left) / (y_low - y_high)) * ratio + ) axes[0].spines["top"].set_color("black") axes[0].spines["right"].set_color("black") time_delta = pd.Timedelta( @@ -1039,13 +1071,17 @@ def _update_plot(time_ind): if ret: frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) if crop: - frame = frame[crop[2] : crop[3], crop[0] : crop[1]].copy() + frame = frame[ + crop[2] : crop[3], crop[0] : crop[1] + ].copy() image.set_array(frame) pos_ind = np.where(video_frame_inds == time_ind)[0] if len(pos_ind) == 0: centroid_position_dot.set_offsets((np.NaN, np.NaN)) for bodypart in centroid_plot_objs.keys(): - centroid_plot_objs[bodypart].set_offsets((np.NaN, np.NaN)) + centroid_plot_objs[bodypart].set_offsets( + (np.NaN, np.NaN) + ) orientation_line.set_data((np.NaN, np.NaN)) title.set_text(f"time = {0:3.4f}s\n frame = {time_ind}") else: @@ -1073,7 +1109,9 @@ def _update_plot(time_ind): for bodypart in centroid_plot_objs.keys(): centroid_plot_objs[bodypart].set_offsets( convert_to_pixels( - centroids[bodypart][pos_ind], frame, cm_to_pixels + centroids[bodypart][pos_ind], + frame, + cm_to_pixels, ) ) centroid_position_dot.set_offsets(dlc_centroid_data) @@ -1096,12 +1134,15 @@ def _update_plot(time_ind): pd.to_datetime(position_time[pos_ind] * 1e9, unit="ns") - pd.to_datetime(position_time[0] * 1e9, unit="ns") ).total_seconds() - title.set_text(f"time = {time_delta:3.4f}s\n frame = {time_ind}") + title.set_text( + f"time = {time_delta:3.4f}s\n frame = {time_ind}" + ) likelihood_inds = pos_ind + window_ind neg_inds = np.where(likelihood_inds < 0)[0] over_inds = np.where( likelihood_inds - > (len(likelihoods[list(likelihood_objs.keys())[0]])) - 1 + > (len(likelihoods[list(likelihood_objs.keys())[0]])) + - 1 )[0] if len(neg_inds) > 0: likelihood_inds[neg_inds] = 0 diff --git a/src/spyglass/position/v1/position_dlc_centroid.py b/src/spyglass/position/v1/position_dlc_centroid.py index da904c7d5..48af005c3 100644 --- a/src/spyglass/position/v1/position_dlc_centroid.py +++ b/src/spyglass/position/v1/position_dlc_centroid.py @@ -63,7 +63,9 @@ def insert_default(cls, **kwargs): "max_LED_separation": 12, "speed_smoothing_std_dev": 0.100, } - cls.insert1({"dlc_centroid_params_name": "default", "params": params}, **kwargs) + cls.insert1( + {"dlc_centroid_params_name": "default", "params": params}, **kwargs + ) @classmethod def get_default(cls): @@ -84,14 +86,18 @@ def insert1(self, key, **kwargs): if "centroid_method" in params: if params["centroid_method"] in self._available_centroid_methods: if params["centroid_method"] == "four_led_centroid": - if any(x not in self._four_led_labels for x in params["points"]): + if any( + x not in self._four_led_labels for x in params["points"] + ): raise KeyError( f"Please make sure to specify all necessary labels: " f"{self._four_led_labels} " f"if using the 'four_led_centroid' method" ) elif params["centroid_method"] == "two_pt_centroid": - if any(x not in self._two_pt_labels for x in params["points"]): + if any( + x not in self._two_pt_labels for x in params["points"] + ): raise KeyError( f"Please make sure to specify all necessary labels: " f"{self._two_pt_labels} " @@ -113,7 +119,9 @@ def insert1(self, key, 
**kwargs): f"{self._available_centroid_methods}" ) else: - raise KeyError("'centroid_method' needs to be provided as a parameter") + raise KeyError( + "'centroid_method' needs to be provided as a parameter" + ) if "max_LED_separation" in params: if not isinstance(params["max_LED_separation"], (int, float)): @@ -126,7 +134,9 @@ def insert1(self, key, **kwargs): if params["smooth"]: if "smoothing_params" in params: if "smooth_method" in params["smoothing_params"]: - smooth_method = params["smoothing_params"]["smooth_method"] + smooth_method = params["smoothing_params"][ + "smooth_method" + ] if smooth_method not in _key_to_smooth_func_dict: raise KeyError( f"smooth_method: {smooth_method} not an available method." @@ -283,7 +293,9 @@ def make(self, key): ) dt = np.median(np.diff(pos_df.index.to_numpy())) sampling_rate = 1 / dt - logger.logger.info("Calculating centroid with %s", str(centroid_method)) + logger.logger.info( + "Calculating centroid with %s", str(centroid_method) + ) centroid = centroid_func(pos_df, **params) centroid_df = pd.DataFrame( centroid, @@ -349,7 +361,9 @@ def make(self, key): columns=["velocity_x", "velocity_y", "speed"], index=pos_df.index.to_numpy(), ) - total_nan = np.sum(final_df.loc[:, idx[("x", "y")]].isna().any(axis=1)) + total_nan = np.sum( + final_df.loc[:, idx[("x", "y")]].isna().any(axis=1) + ) pretrack_nan = np.sum( final_df.iloc[:1000].loc[:, idx[("x", "y")]].isna().any(axis=1) ) @@ -359,7 +373,9 @@ def make(self, key): ) position = pynwb.behavior.Position() velocity = pynwb.behavior.BehavioralTimeSeries() - spatial_series = (RawPosition() & key).fetch_nwb()[0]["raw_position"] + spatial_series = (RawPosition() & key).fetch_nwb()[0][ + "raw_position" + ] METERS_PER_CM = 0.01 position.create_spatial_series( name="position", @@ -385,12 +401,16 @@ def make(self, key): name="video_frame_ind", unit="index", timestamps=final_df.index.to_numpy(), - data=pos_df[pos_df.columns.levels[0][0]].video_frame_ind.to_numpy(), + data=pos_df[ + pos_df.columns.levels[0][0] + ].video_frame_ind.to_numpy(), description="video_frame_ind", comments="no comments", ) # Add to Analysis NWB file - key["analysis_file_name"] = AnalysisNwbfile().create(key["nwb_file_name"]) + key["analysis_file_name"] = AnalysisNwbfile().create( + key["nwb_file_name"] + ) nwb_analysis_file = AnalysisNwbfile() key["dlc_position_object_id"] = nwb_analysis_file.add_nwb_object( key["analysis_file_name"], position @@ -414,7 +434,9 @@ def fetch_nwb(self, *attrs, **kwargs): def fetch1_dataframe(self): nwb_data = self.fetch_nwb()[0] index = pd.Index( - np.asarray(nwb_data["dlc_position"].get_spatial_series().timestamps), + np.asarray( + nwb_data["dlc_position"].get_spatial_series().timestamps + ), name="time", ) COLUMNS = [ @@ -429,11 +451,17 @@ def fetch1_dataframe(self): np.concatenate( ( np.asarray( - nwb_data["dlc_velocity"].time_series["video_frame_ind"].data, + nwb_data["dlc_velocity"] + .time_series["video_frame_ind"] + .data, dtype=int, )[:, np.newaxis], - np.asarray(nwb_data["dlc_position"].get_spatial_series().data), - np.asarray(nwb_data["dlc_velocity"].time_series["velocity"].data), + np.asarray( + nwb_data["dlc_position"].get_spatial_series().data + ), + np.asarray( + nwb_data["dlc_velocity"].time_series["velocity"].data + ), ), axis=1, ), @@ -466,7 +494,7 @@ def four_led_centroid(pos_df: pd.DataFrame, **params): pos_df : pd.DataFrame dataframe containing x and y position for each LED of interest, index is timestamps. 
Column names specified by params - **kwargs : dict + **params : dict contains 'greenLED' and 'redLED_C', 'redLED_R', 'redLED_L' keys, whose values specify the column names in `pos_df` @@ -493,10 +521,18 @@ def four_led_centroid(pos_df: pd.DataFrame, **params): pos_df.loc[:, idx[red_led_C, ("x", "y")]].to_numpy(), pos_df.loc[:, idx[green_led, ("x", "y")]].to_numpy(), ) - g_c_is_too_separated = dist_between_green_red >= params["max_LED_separation"] + g_c_is_too_separated = ( + dist_between_green_red >= params["max_LED_separation"] + ) all_good_mask = reduce( np.logical_and, - (~green_nans, ~red_C_nans, ~red_L_nans, ~red_R_nans, ~g_c_is_too_separated), + ( + ~green_nans, + ~red_C_nans, + ~red_L_nans, + ~red_R_nans, + ~g_c_is_too_separated, + ), ) centroid[all_good_mask] = [ *zip( @@ -513,7 +549,9 @@ def four_led_centroid(pos_df: pd.DataFrame, **params): ) ] # If green LED and red center LED are both not NaN - green_red_C = np.logical_and(~green_nans, ~red_C_nans, ~g_c_is_too_separated) + green_red_C = np.logical_and( + ~green_nans, ~red_C_nans, ~g_c_is_too_separated + ) if np.sum(green_red_C) > 0: centroid[green_red_C] = [ *zip( @@ -548,23 +586,35 @@ def four_led_centroid(pos_df: pd.DataFrame, **params): pos_df.loc[:, idx[red_led_L, ("x", "y")]].to_numpy(), pos_df.loc[:, idx[red_led_R, ("x", "y")]].to_numpy(), ) - l_r_is_too_separated = dist_between_left_right >= params["max_LED_separation"] + l_r_is_too_separated = ( + dist_between_left_right >= params["max_LED_separation"] + ) no_green_no_red_C_red_L_red_R = reduce( np.logical_and, - (green_nans, red_C_nans, ~red_L_nans, ~red_R_nans, ~l_r_is_too_separated), + ( + green_nans, + red_C_nans, + ~red_L_nans, + ~red_R_nans, + ~l_r_is_too_separated, + ), ) if np.sum(no_green_no_red_C_red_L_red_R) > 0: centroid[no_green_no_red_C_red_L_red_R] = [ *zip( ( - pos_df.loc[idx[no_green_no_red_C_red_L_red_R], idx[red_led_L, "x"]] + pos_df.loc[ + idx[no_green_no_red_C_red_L_red_R], idx[red_led_L, "x"] + ] + pos_df.loc[ idx[no_green_no_red_C_red_L_red_R], idx[red_led_R, "x"] ] ) / 2, ( - pos_df.loc[idx[no_green_no_red_C_red_L_red_R], idx[red_led_L, "y"]] + pos_df.loc[ + idx[no_green_no_red_C_red_L_red_R], idx[red_led_L, "y"] + ] + pos_df.loc[ idx[no_green_no_red_C_red_L_red_R], idx[red_led_R, "y"] ] @@ -581,8 +631,12 @@ def four_led_centroid(pos_df: pd.DataFrame, **params): pos_df.loc[:, idx[red_led_R, ("x", "y")]].to_numpy(), pos_df.loc[:, idx[green_led, ("x", "y")]].to_numpy(), ) - l_g_is_too_separated = dist_between_left_green >= params["max_LED_separation"] - r_g_is_too_separated = dist_between_right_green >= params["max_LED_separation"] + l_g_is_too_separated = ( + dist_between_left_green >= params["max_LED_separation"] + ) + r_g_is_too_separated = ( + dist_between_right_green >= params["max_LED_separation"] + ) green_red_L_red_R_no_red_C = reduce( np.logical_and, ( @@ -599,12 +653,16 @@ def four_led_centroid(pos_df: pd.DataFrame, **params): midpoint = ( ( pos_df.loc[idx[green_red_L_red_R_no_red_C], idx[red_led_L, "x"]] - + pos_df.loc[idx[green_red_L_red_R_no_red_C], idx[red_led_R, "x"]] + + pos_df.loc[ + idx[green_red_L_red_R_no_red_C], idx[red_led_R, "x"] + ] ) / 2, ( pos_df.loc[idx[green_red_L_red_R_no_red_C], idx[red_led_L, "y"]] - + pos_df.loc[idx[green_red_L_red_R_no_red_C], idx[red_led_R, "y"]] + + pos_df.loc[ + idx[green_red_L_red_R_no_red_C], idx[red_led_R, "y"] + ] ) / 2, ) @@ -612,12 +670,16 @@ def four_led_centroid(pos_df: pd.DataFrame, **params): *zip( ( midpoint[0] - + pos_df.loc[idx[green_red_L_red_R_no_red_C], idx[green_led, "x"]] + + 
pos_df.loc[ + idx[green_red_L_red_R_no_red_C], idx[green_led, "x"] + ] ) / 2, ( midpoint[1] - + pos_df.loc[idx[green_red_L_red_R_no_red_C], idx[green_led, "y"]] + + pos_df.loc[ + idx[green_red_L_red_R_no_red_C], idx[green_led, "y"] + ] ) / 2, ) @@ -625,20 +687,30 @@ def four_led_centroid(pos_df: pd.DataFrame, **params): # If red center and left LED is NaN, but green and red right LED are not green_red_R_no_red_C_no_red_L = reduce( np.logical_and, - (~green_nans, red_C_nans, red_L_nans, ~red_R_nans, ~r_g_is_too_separated), + ( + ~green_nans, + red_C_nans, + red_L_nans, + ~red_R_nans, + ~r_g_is_too_separated, + ), ) if np.sum(green_red_R_no_red_C_no_red_L) > 0: centroid[green_red_R_no_red_C_no_red_L] = [ *zip( ( - pos_df.loc[idx[green_red_R_no_red_C_no_red_L], idx[red_led_R, "x"]] + pos_df.loc[ + idx[green_red_R_no_red_C_no_red_L], idx[red_led_R, "x"] + ] + pos_df.loc[ idx[green_red_R_no_red_C_no_red_L], idx[green_led, "x"] ] ) / 2, ( - pos_df.loc[idx[green_red_R_no_red_C_no_red_L], idx[red_led_R, "y"]] + pos_df.loc[ + idx[green_red_R_no_red_C_no_red_L], idx[red_led_R, "y"] + ] + pos_df.loc[ idx[green_red_R_no_red_C_no_red_L], idx[green_led, "y"] ] @@ -649,20 +721,30 @@ def four_led_centroid(pos_df: pd.DataFrame, **params): # If red center and right LED is NaN, but green and red left LED are not green_red_L_no_red_C_no_red_R = reduce( np.logical_and, - (~green_nans, red_C_nans, ~red_L_nans, red_R_nans, ~l_g_is_too_separated), + ( + ~green_nans, + red_C_nans, + ~red_L_nans, + red_R_nans, + ~l_g_is_too_separated, + ), ) if np.sum(green_red_L_no_red_C_no_red_R) > 0: centroid[green_red_L_no_red_C_no_red_R] = [ *zip( ( - pos_df.loc[idx[green_red_L_no_red_C_no_red_R], idx[red_led_L, "x"]] + pos_df.loc[ + idx[green_red_L_no_red_C_no_red_R], idx[red_led_L, "x"] + ] + pos_df.loc[ idx[green_red_L_no_red_C_no_red_R], idx[green_led, "x"] ] ) / 2, ( - pos_df.loc[idx[green_red_L_no_red_C_no_red_R], idx[red_led_L, "y"]] + pos_df.loc[ + idx[green_red_L_no_red_C_no_red_R], idx[red_led_L, "y"] + ] + pos_df.loc[ idx[green_red_L_no_red_C_no_red_R], idx[green_led, "y"] ] @@ -677,8 +759,12 @@ def four_led_centroid(pos_df: pd.DataFrame, **params): if np.sum(red_L_no_green_no_red_C_no_red_R) > 0: centroid[red_L_no_green_no_red_C_no_red_R] = [ *zip( - pos_df.loc[idx[red_L_no_green_no_red_C_no_red_R], idx[red_led_L, "x"]], - pos_df.loc[idx[red_L_no_green_no_red_C_no_red_R], idx[red_led_L, "y"]], + pos_df.loc[ + idx[red_L_no_green_no_red_C_no_red_R], idx[red_led_L, "x"] + ], + pos_df.loc[ + idx[red_L_no_green_no_red_C_no_red_R], idx[red_led_L, "y"] + ], ) ] # If all LEDS are NaN except red right LED @@ -688,8 +774,12 @@ def four_led_centroid(pos_df: pd.DataFrame, **params): if np.sum(red_R_no_green_no_red_C_no_red_L) > 0: centroid[red_R_no_green_no_red_C_no_red_L] = [ *zip( - pos_df.loc[idx[red_R_no_green_no_red_C_no_red_L], idx[red_led_R, "x"]], - pos_df.loc[idx[red_R_no_green_no_red_C_no_red_L], idx[red_led_R, "y"]], + pos_df.loc[ + idx[red_R_no_green_no_red_C_no_red_L], idx[red_led_R, "x"] + ], + pos_df.loc[ + idx[red_R_no_green_no_red_C_no_red_L], idx[red_led_R, "y"] + ], ) ] # If all red LEDs are NaN, but green LED is not @@ -729,7 +819,7 @@ def two_pt_centroid(pos_df: pd.DataFrame, **params): pos_df : pd.DataFrame dataframe containing x and y position for each point of interest, index is timestamps. 
Column names specified by params - **kwargs : dict + **params : dict contains 'point1' and 'point2' keys, whose values specify the column names in `pos_df` diff --git a/src/spyglass/position/v1/position_dlc_cohort.py b/src/spyglass/position/v1/position_dlc_cohort.py index 854cd86e1..cf802e865 100644 --- a/src/spyglass/position/v1/position_dlc_cohort.py +++ b/src/spyglass/position/v1/position_dlc_cohort.py @@ -47,7 +47,10 @@ class BodyPart(dj.Part): def fetch_nwb(self, *attrs, **kwargs): return fetch_nwb( - self, (AnalysisNwbfile, "analysis_file_abs_path"), *attrs, **kwargs + self, + (AnalysisNwbfile, "analysis_file_abs_path"), + *attrs, + **kwargs, ) def fetch1_dataframe(self): @@ -68,7 +71,9 @@ def make(self, key): self.insert1(key) cohort_selection = (DLCSmoothInterpCohortSelection & key).fetch1() table_entries = [] - bodyparts_params_dict = cohort_selection.pop("bodyparts_params_dict") + bodyparts_params_dict = cohort_selection.pop( + "bodyparts_params_dict" + ) temp_key = cohort_selection.copy() for bodypart, params in bodyparts_params_dict.items(): temp_key["bodypart"] = bodypart @@ -80,8 +85,12 @@ def make(self, key): table_column_names = list(table_entries[0].dtype.fields.keys()) for table_entry in table_entries: entry_key = { - **{k: v for k, v in zip(table_column_names, table_entry[0])}, + **{ + k: v for k, v in zip(table_column_names, table_entry[0]) + }, **key, } - DLCSmoothInterpCohort.BodyPart.insert1(entry_key, skip_duplicates=True) + DLCSmoothInterpCohort.BodyPart.insert1( + entry_key, skip_duplicates=True + ) logger.logger.info("Inserted entry into DLCSmoothInterpCohort") diff --git a/src/spyglass/position/v1/position_dlc_model.py b/src/spyglass/position/v1/position_dlc_model.py index cd9f2296a..8fa2695be 100644 --- a/src/spyglass/position/v1/position_dlc_model.py +++ b/src/spyglass/position/v1/position_dlc_model.py @@ -121,7 +121,9 @@ def insert_default(cls, **kwargs): "trainingsetindex": 0, "model_prefix": "", } - cls.insert1({"dlc_model_params_name": "default", "params": params}, **kwargs) + cls.insert1( + {"dlc_model_params_name": "default", "params": params}, **kwargs + ) @classmethod def get_default(cls): @@ -218,14 +220,18 @@ def make(self, key): "TrainingFraction", ] for attribute in needed_attributes: - assert attribute in dlc_config, f"Couldn't find {attribute} in config" + assert ( + attribute in dlc_config + ), f"Couldn't find {attribute} in config" scorer_legacy = str_to_bool(dlc_config.get("scorer_legacy", "f")) dlc_scorer = GetScorerName( cfg=dlc_config, shuffle=shuffle, - trainFraction=dlc_config["TrainingFraction"][int(trainingsetindex)], + trainFraction=dlc_config["TrainingFraction"][ + int(trainingsetindex) + ], modelprefix=model_prefix, )[scorer_legacy] if dlc_config["snapshotindex"] == -1: @@ -307,7 +313,9 @@ def make(self, key): modelprefix=model_prefix, ) eval_path = project_path / eval_folder - assert eval_path.exists(), f"Couldn't find evaluation folder:\n{eval_path}" + assert ( + eval_path.exists() + ), f"Couldn't find evaluation folder:\n{eval_path}" eval_csvs = list(eval_path.glob("*csv")) max_modified_time = 0 diff --git a/src/spyglass/position/v1/position_dlc_orient.py b/src/spyglass/position/v1/position_dlc_orient.py index 7663acc90..09dd862dc 100644 --- a/src/spyglass/position/v1/position_dlc_orient.py +++ b/src/spyglass/position/v1/position_dlc_orient.py @@ -28,7 +28,8 @@ class DLCOrientationParams(dj.Manual): @classmethod def insert_params(cls, params_name: str, params: dict, **kwargs): cls.insert1( - {"dlc_orientation_params_name": 
params_name, "params": params}, **kwargs + {"dlc_orientation_params_name": params_name, "params": params}, + **kwargs, ) @classmethod @@ -40,7 +41,8 @@ def insert_default(cls, **kwargs): "orientation_smoothing_std_dev": 0.001, } cls.insert1( - {"dlc_orientation_params_name": "default", "params": params}, **kwargs + {"dlc_orientation_params_name": "default", "params": params}, + **kwargs, ) @classmethod @@ -48,7 +50,9 @@ def get_default(cls): query = cls & {"dlc_orientation_params_name": "default"} if not len(query) > 0: cls().insert_default(skip_duplicates=True) - default = (cls & {"dlc_orientation_params_name": "default"}).fetch1() + default = ( + cls & {"dlc_orientation_params_name": "default"} + ).fetch1() else: default = query.fetch1() return default @@ -84,7 +88,8 @@ def make(self, key): pos_df = pd.concat( { bodypart: ( - DLCSmoothInterpCohort.BodyPart & {**key, **{"bodypart": bodypart}} + DLCSmoothInterpCohort.BodyPart + & {**key, **{"bodypart": bodypart}} ).fetch1_dataframe() for bodypart in cohort_entries.fetch("bodypart") }, @@ -124,7 +129,9 @@ def make(self, key): final_df = pd.DataFrame( orientation, columns=["orientation"], index=pos_df.index ) - key["analysis_file_name"] = AnalysisNwbfile().create(key["nwb_file_name"]) + key["analysis_file_name"] = AnalysisNwbfile().create( + key["nwb_file_name"] + ) spatial_series = (RawPosition() & key).fetch_nwb()[0]["raw_position"] orientation = pynwb.behavior.CompassDirection() orientation.create_spatial_series( @@ -156,7 +163,9 @@ def fetch_nwb(self, *attrs, **kwargs): def fetch1_dataframe(self): nwb_data = self.fetch_nwb()[0] index = pd.Index( - np.asarray(nwb_data["dlc_orientation"].get_spatial_series().timestamps), + np.asarray( + nwb_data["dlc_orientation"].get_spatial_series().timestamps + ), name="time", ) COLUMNS = [ @@ -185,7 +194,9 @@ def two_pt_head_orientation(pos_df: pd.DataFrame, **params): def no_orientation(pos_df: pd.DataFrame, **params): fill_value = params.pop("fill_with", np.nan) n_frames = len(pos_df) - orientation = np.full(shape=(n_frames), fill_value=fill_value, dtype=np.float16) + orientation = np.full( + shape=(n_frames), fill_value=fill_value, dtype=np.float16 + ) return orientation @@ -202,9 +213,13 @@ def red_led_bisector_orientation(pos_df: pd.DataFrame, **params): x_vec = row[LED1]["x"] - row[LED2]["x"] y_vec = row[LED1]["y"] - row[LED2]["y"] if y_vec == 0: - if (row[LED3]["y"] > row[LED1]["y"]) & (row[LED3]["y"] > row[LED2]["y"]): + if (row[LED3]["y"] > row[LED1]["y"]) & ( + row[LED3]["y"] > row[LED2]["y"] + ): orientation.append(np.pi / 2) - elif (row[LED3]["y"] < row[LED1]["y"]) & (row[LED3]["y"] < row[LED2]["y"]): + elif (row[LED3]["y"] < row[LED1]["y"]) & ( + row[LED3]["y"] < row[LED2]["y"] + ): orientation.append(-(np.pi / 2)) else: raise Exception("Cannot determine head direction from bisector") @@ -231,11 +246,15 @@ def interp_orientation(orientation, spans_to_interp, **kwargs): # TODO: add parameters to refine interpolation for ind, (span_start, span_stop) in enumerate(spans_to_interp): if (span_stop + 1) >= len(orientation): - orientation.loc[idx[span_start:span_stop], idx["orientation"]] = np.nan + orientation.loc[ + idx[span_start:span_stop], idx["orientation"] + ] = np.nan print(f"ind: {ind} has no endpoint with which to interpolate") continue if span_start < 1: - orientation.loc[idx[span_start:span_stop], idx["orientation"]] = np.nan + orientation.loc[ + idx[span_start:span_stop], idx["orientation"] + ] = np.nan print(f"ind: {ind} has no startpoint with which to interpolate") continue orient = 
[ @@ -249,5 +268,7 @@ def interp_orientation(orientation, spans_to_interp, **kwargs): xp=[start_time, stop_time], fp=[orient[0], orient[-1]], ) - orientation.loc[idx[start_time:stop_time], idx["orientation"]] = orientnew + orientation.loc[ + idx[start_time:stop_time], idx["orientation"] + ] = orientnew return orientation diff --git a/src/spyglass/position/v1/position_dlc_pose_estimation.py b/src/spyglass/position/v1/position_dlc_pose_estimation.py index 5600a674c..dcc23ea46 100644 --- a/src/spyglass/position/v1/position_dlc_pose_estimation.py +++ b/src/spyglass/position/v1/position_dlc_pose_estimation.py @@ -138,7 +138,10 @@ class BodyPart(dj.Part): def fetch_nwb(self, *attrs, **kwargs): return fetch_nwb( - self, (AnalysisNwbfile, "analysis_file_abs_path"), *attrs, **kwargs + self, + (AnalysisNwbfile, "analysis_file_abs_path"), + *attrs, + **kwargs, ) def fetch1_dataframe(self): @@ -180,9 +183,9 @@ def make(self, key): **analyze_video_params, ) dlc_result = dlc_reader.PoseEstimation(output_dir) - creation_time = datetime.fromtimestamp(dlc_result.creation_time).strftime( - "%Y-%m-%d %H:%M:%S" - ) + creation_time = datetime.fromtimestamp( + dlc_result.creation_time + ).strftime("%Y-%m-%d %H:%M:%S") logger.logger.info("getting raw position") interval_list_name = f"pos {key['epoch']-1} valid times" @@ -195,7 +198,9 @@ def make(self, key): ).fetch_nwb()[0] raw_pos_df = pd.DataFrame( data=raw_position["raw_position"].data, - index=pd.Index(raw_position["raw_position"].timestamps, name="time"), + index=pd.Index( + raw_position["raw_position"].timestamps, name="time" + ), columns=raw_position["raw_position"].description.split(", "), ) # TODO: should get timestamps from VideoFile, but need the video_frame_ind from RawPosition, @@ -232,7 +237,9 @@ def make(self, key): key["nwb_file_name"] ) nwb_analysis_file = AnalysisNwbfile() - key["dlc_pose_estimation_object_id"] = nwb_analysis_file.add_nwb_object( + key[ + "dlc_pose_estimation_object_id" + ] = nwb_analysis_file.add_nwb_object( analysis_file_name=key["analysis_file_name"], nwb_object=part_df, ) @@ -283,7 +290,9 @@ def add_timestamps(df: pd.DataFrame, raw_pos_df: pd.DataFrame) -> pd.DataFrame: raw_pos_df = raw_pos_df.drop( columns=[ - column for column in raw_pos_df.columns if column not in ["video_frame_ind"] + column + for column in raw_pos_df.columns + if column not in ["video_frame_ind"] ] ) raw_pos_df["time"] = raw_pos_df.index diff --git a/src/spyglass/position/v1/position_dlc_position.py b/src/spyglass/position/v1/position_dlc_position.py index 566c3b8d7..0f12a46d4 100644 --- a/src/spyglass/position/v1/position_dlc_position.py +++ b/src/spyglass/position/v1/position_dlc_position.py @@ -17,9 +17,9 @@ @schema class DLCSmoothInterpParams(dj.Manual): """ - Parameters for extracting the smoothed head position. + Parameters for extracting the smoothed head position. 
- Parameters + Attributes ---------- interpolate : bool, default True whether to interpolate over NaN spans @@ -66,7 +66,8 @@ def insert_default(cls, **kwargs): "num_inds_to_span": 20, } cls.insert1( - {"dlc_si_params_name": "default", "params": default_params}, **kwargs + {"dlc_si_params_name": "default", "params": default_params}, + **kwargs, ) @classmethod @@ -78,7 +79,9 @@ def insert_nan_params(cls, **kwargs): "max_cm_between_pts": 20, "num_inds_to_span": 20, } - cls.insert1({"dlc_si_params_name": "just_nan", "params": nan_params}, **kwargs) + cls.insert1( + {"dlc_si_params_name": "just_nan", "params": nan_params}, **kwargs + ) @classmethod def get_default(cls): @@ -128,11 +131,15 @@ def insert1(self, key, **kwargs): ) else: assert isinstance( - key["params"]["smoothing_params"]["smoothing_duration"], + key["params"]["smoothing_params"][ + "smoothing_duration" + ], (float, int), ), "smoothing_duration must be a float or int" else: - raise ValueError("smoothing_params not in key['params']") + raise ValueError( + "smoothing_params not in key['params']" + ) if "likelihood_thresh" in key["params"]: assert isinstance( key["params"]["likelihood_thresh"], @@ -235,10 +242,14 @@ def make(self, key): smooth_df = interp_df.copy() final_df = smooth_df.drop(["likelihood"], axis=1) final_df = final_df.rename_axis("time").reset_index() - key["analysis_file_name"] = AnalysisNwbfile().create(key["nwb_file_name"]) + key["analysis_file_name"] = AnalysisNwbfile().create( + key["nwb_file_name"] + ) # Add dataframe to AnalysisNwbfile nwb_analysis_file = AnalysisNwbfile() - key["dlc_smooth_interp_object_id"] = nwb_analysis_file.add_nwb_object( + key[ + "dlc_smooth_interp_object_id" + ] = nwb_analysis_file.add_nwb_object( analysis_file_name=key["analysis_file_name"], nwb_object=final_df, ) @@ -259,12 +270,17 @@ def fetch1_dataframe(self): def nan_inds( - dlc_df: pd.DataFrame, max_dist_between, likelihood_thresh: float, inds_to_span: int + dlc_df: pd.DataFrame, + max_dist_between, + likelihood_thresh: float, + inds_to_span: int, ): idx = pd.IndexSlice # Could either NaN sub-likelihood threshold inds here and then not consider in jumping... 
# OR just keep in back pocket when checking jumps against last good point - subthresh_inds = get_subthresh_inds(dlc_df, likelihood_thresh=likelihood_thresh) + subthresh_inds = get_subthresh_inds( + dlc_df, likelihood_thresh=likelihood_thresh + ) df_subthresh_indices = dlc_df.index[subthresh_inds] dlc_df.loc[idx[df_subthresh_indices], idx[("x", "y")]] = np.nan # To further determine which indices are the original point and which are jump points @@ -273,7 +289,9 @@ def nan_inds( subthresh_inds_mask = np.zeros(len(dlc_df), dtype=bool) subthresh_inds_mask[subthresh_inds] = True jump_inds_mask = np.zeros(len(dlc_df), dtype=bool) - _, good_spans = get_good_spans(subthresh_inds_mask, inds_to_span=inds_to_span) + _, good_spans = get_good_spans( + subthresh_inds_mask, inds_to_span=inds_to_span + ) for span in good_spans[::-1]: if np.sum(np.isnan(dlc_df.iloc[span[0] : span[-1]].x)) > 0: @@ -296,7 +314,9 @@ def nan_inds( last_good_ind = ind + 1 + np.min(previous_good_inds) else: last_good_ind = start_point - good_x, good_y = dlc_df.loc[idx[dlc_df.index[last_good_ind]], ["x", "y"]] + good_x, good_y = dlc_df.loc[ + idx[dlc_df.index[last_good_ind]], ["x", "y"] + ] if ( (dlc_df.y.iloc[ind] < int(good_y - max_dist_between)) | (dlc_df.y.iloc[ind] > int(good_y + max_dist_between)) @@ -319,7 +339,9 @@ def nan_inds( last_good_ind = start_point + np.max(previous_good_inds) else: last_good_ind = start_point - good_x, good_y = dlc_df.loc[idx[dlc_df.index[last_good_ind]], ["x", "y"]] + good_x, good_y = dlc_df.loc[ + idx[dlc_df.index[last_good_ind]], ["x", "y"] + ] if ( (dlc_df.y.iloc[ind] < int(good_y - max_dist_between)) | (dlc_df.y.iloc[ind] > int(good_y + max_dist_between)) @@ -357,14 +379,19 @@ def get_good_spans(bad_inds_mask, inds_to_span: int = 50): modified_spans : list spans that are amended to bridge up to inds_to_span consecutive bad indices """ - good_spans = get_span_start_stop(np.arange(len(bad_inds_mask))[~bad_inds_mask]) + good_spans = get_span_start_stop( + np.arange(len(bad_inds_mask))[~bad_inds_mask] + ) if len(good_spans) > 1: modified_spans = [] - for (start1, stop1), (start2, stop2) in zip(good_spans[:-1], good_spans[1:]): + for (start1, stop1), (start2, stop2) in zip( + good_spans[:-1], good_spans[1:] + ): check_existing = [ entry for entry in modified_spans - if start1 in range(entry[0] - inds_to_span, entry[1] + inds_to_span) + if start1 + in range(entry[0] - inds_to_span, entry[1] + inds_to_span) ] if len(check_existing) > 0: modify_ind = modified_spans.index(check_existing[0]) @@ -390,7 +417,9 @@ def span_length(x): def get_subthresh_inds(dlc_df: pd.DataFrame, likelihood_thresh: float): df_filter = dlc_df["likelihood"] < likelihood_thresh - sub_thresh_inds = np.where(~np.isnan(dlc_df["likelihood"].where(df_filter)))[0] + sub_thresh_inds = np.where( + ~np.isnan(dlc_df["likelihood"].where(df_filter)) + )[0] nand_inds = np.where(np.isnan(dlc_df["x"]))[0] all_nan_inds = list(set(sub_thresh_inds).union(set(nand_inds))) all_nan_inds.sort() diff --git a/src/spyglass/position/v1/position_dlc_project.py b/src/spyglass/position/v1/position_dlc_project.py index 6cb1e940f..1eacb2bc8 100644 --- a/src/spyglass/position/v1/position_dlc_project.py +++ b/src/spyglass/position/v1/position_dlc_project.py @@ -36,7 +36,7 @@ def add_from_config(cls, bodyparts: List, descriptions: List = None): ---------- bodyparts : List list of bodyparts from config - description : List, default None + descriptions : List, default None optional list of descriptions for bodyparts. 
If None, description is set to bodypart name """ @@ -88,7 +88,9 @@ class File(dj.Part): """ def insert1(self, key, **kwargs): - assert isinstance(key["project_name"], str), "project_name must be a string" + assert isinstance( + key["project_name"], str + ), "project_name must be a string" assert isinstance( key["frames_per_video"], int ), "frames_per_video must be of type `int`" @@ -134,7 +136,9 @@ def insert_existing_project( cfg = read_config(config_path) if bodyparts: bodyparts_to_add = [ - bodypart for bodypart in bodyparts if bodypart not in cfg["bodyparts"] + bodypart + for bodypart in bodyparts + if bodypart not in cfg["bodyparts"] ] all_bodyparts = bodyparts_to_add + cfg["bodyparts"] else: @@ -142,14 +146,18 @@ def insert_existing_project( BodyPart.add_from_config(cfg["bodyparts"]) for bodypart in all_bodyparts: if not bool(BodyPart() & {"bodypart": bodypart}): - raise ValueError(f"bodypart: {bodypart} not found in BodyPart table") + raise ValueError( + f"bodypart: {bodypart} not found in BodyPart table" + ) # check bodyparts are in config, if not add if len(bodyparts_to_add) > 0: add_to_config(config_path, bodyparts=bodyparts_to_add) # Get frames per video from config. If passed as arg, check match if frames_per_video: if frames_per_video != cfg["numframes2pick"]: - add_to_config(config_path, **{"numframes2pick": frames_per_video}) + add_to_config( + config_path, **{"numframes2pick": frames_per_video} + ) config_path = Path(config_path) project_path = config_path.parent dlc_project_path = os.environ["DLC_PROJECT_PATH"] @@ -160,7 +168,8 @@ def insert_existing_project( new_proj_dir = dest_folder.as_posix() else: new_proj_dir = shutil.copytree( - src=project_path, dst=f"{dlc_project_path}/{project_dirname}/" + src=project_path, + dst=f"{dlc_project_path}/{project_dirname}/", ) new_config_path = Path(f"{new_proj_dir}/config.yaml") assert ( @@ -178,7 +187,10 @@ def insert_existing_project( } cls.insert1(key, **kwargs) cls.BodyPart.insert( - [{"project_name": project_name, "bodypart": bp} for bp in all_bodyparts], + [ + {"project_name": project_name, "bodypart": bp} + for bp in all_bodyparts + ], **kwargs, ) if add_to_files: @@ -188,7 +200,10 @@ def insert_existing_project( del key["frames_per_video"] # Check for training files to add cls.add_training_files(key, **kwargs) - return {"project_name": project_name, "config_path": config_path.as_posix()} + return { + "project_name": project_name, + "config_path": config_path.as_posix(), + } @classmethod def insert_new_project( @@ -246,7 +261,9 @@ def insert_new_project( # and pass to get_video_path to reference VideoFile table for path if all(isinstance(n, Dict) for n in video_list): - videos_to_convert = [get_video_path(video_key) for video_key in video_list] + videos_to_convert = [ + get_video_path(video_key) for video_key in video_list + ] videos = [ check_videofile( video_path=video[0], @@ -262,9 +279,9 @@ def insert_new_project( raise OSError("at least one file in video_list does not exist") for video in video_list: video_path = Path(video).parent - video_filename = video.rsplit(video_path.as_posix(), maxsplit=1)[ - -1 - ].split("/")[-1] + video_filename = video.rsplit( + video_path.as_posix(), maxsplit=1 + )[-1].split("/")[-1] videos.extend( [ check_videofile( @@ -288,9 +305,13 @@ def insert_new_project( ) for bodypart in bodyparts: if not bool(BodyPart() & {"bodypart": bodypart}): - raise ValueError(f"bodypart: {bodypart} not found in BodyPart table") + raise ValueError( + f"bodypart: {bodypart} not found in BodyPart table" + ) 
kwargs.update({"numframes2pick": frames_per_video, "dotsize": 3}) - add_to_config(config_path, bodyparts, skeleton_node=skeleton_node, **kwargs) + add_to_config( + config_path, bodyparts, skeleton_node=skeleton_node, **kwargs + ) key = { "project_name": project_name, "team_name": lab_team, @@ -301,7 +322,11 @@ def insert_new_project( # TODO: make permissions setting more flexible. if set_permissions: permissions = ( - stat.S_IRUSR | stat.S_IWUSR | stat.S_IRGRP | stat.S_IWGRP | stat.S_IROTH + stat.S_IRUSR + | stat.S_IWUSR + | stat.S_IRGRP + | stat.S_IWGRP + | stat.S_IROTH ) username = getpass.getuser() if not groupname: @@ -314,7 +339,10 @@ def insert_new_project( ) cls.insert1(key, **kwargs) cls.BodyPart.insert( - [{"project_name": project_name, "bodypart": bp} for bp in bodyparts], + [ + {"project_name": project_name, "bodypart": bp} + for bp in bodyparts + ], **kwargs, ) if add_to_files: @@ -352,7 +380,9 @@ def add_training_files(cls, key, **kwargs): ) for video in video_names: key["file_name"] = f'{os.path.splitext(video.split("/")[-1])[0]}' - key["file_ext"] = os.path.splitext(video.split("/")[-1])[-1].split(".")[-1] + key["file_ext"] = os.path.splitext(video.split("/")[-1])[-1].split( + "." + )[-1] key["file_path"] = video cls.File.insert1(key, **kwargs) if len(training_files) > 0: @@ -436,8 +466,12 @@ def import_labeled_frames( else: assert Path( import_project_path - ).exists(), f"import_project_path: {import_project_path} does not exist" - import_labeled_data_path = Path(f"{import_project_path}/labeled-data") + ).exists(), ( + f"import_project_path: {import_project_path} does not exist" + ) + import_labeled_data_path = Path( + f"{import_project_path}/labeled-data" + ) assert ( import_labeled_data_path.exists() ), "import_project has no directory 'labeled-data'" @@ -458,7 +492,9 @@ def import_labeled_frames( cls.add_training_files(key, **kwargs) -def add_to_config(config, bodyparts: List = None, skeleton_node: str = None, **kwargs): +def add_to_config( + config, bodyparts: List = None, skeleton_node: str = None, **kwargs +): """ Add necessary items to the config.yaml for the model Parameters diff --git a/src/spyglass/position/v1/position_dlc_selection.py b/src/spyglass/position/v1/position_dlc_selection.py index 83d522688..fec52db24 100644 --- a/src/spyglass/position/v1/position_dlc_selection.py +++ b/src/spyglass/position/v1/position_dlc_selection.py @@ -12,7 +12,10 @@ from .position_dlc_centroid import DLCCentroid from .position_dlc_cohort import DLCSmoothInterpCohort from .position_dlc_orient import DLCOrientation -from .position_dlc_pose_estimation import DLCPoseEstimation, DLCPoseEstimationSelection +from .position_dlc_pose_estimation import ( + DLCPoseEstimation, + DLCPoseEstimationSelection, +) from .position_dlc_position import DLCSmoothInterpParams schema = dj.schema("position_v1_dlc_selection") @@ -52,14 +55,18 @@ def make(self, key): key["pose_eval_result"] = self.evaluate_pose_estimation(key) position_nwb_data = (DLCCentroid & key).fetch_nwb()[0] orientation_nwb_data = (DLCOrientation & key).fetch_nwb()[0] - position_object = position_nwb_data["dlc_position"].spatial_series["position"] - velocity_object = position_nwb_data["dlc_velocity"].time_series["velocity"] + position_object = position_nwb_data["dlc_position"].spatial_series[ + "position" + ] + velocity_object = position_nwb_data["dlc_velocity"].time_series[ + "velocity" + ] video_frame_object = position_nwb_data["dlc_velocity"].time_series[ "video_frame_ind" ] - orientation_object = 
orientation_nwb_data["dlc_orientation"].spatial_series[ - "orientation" - ] + orientation_object = orientation_nwb_data[ + "dlc_orientation" + ].spatial_series["orientation"] position = pynwb.behavior.Position() orientation = pynwb.behavior.CompassDirection() velocity = pynwb.behavior.BehavioralTimeSeries() @@ -99,7 +106,9 @@ def make(self, key): comments=video_frame_object.comments, ) # Add to Analysis NWB file - key["analysis_file_name"] = AnalysisNwbfile().create(key["nwb_file_name"]) + key["analysis_file_name"] = AnalysisNwbfile().create( + key["nwb_file_name"] + ) nwb_analysis_file = AnalysisNwbfile() key["orientation_object_id"] = nwb_analysis_file.add_nwb_object( key["analysis_file_name"], orientation @@ -125,7 +134,9 @@ def make(self, key): del dlc_key["pose_eval_result"] key["interval_list_name"] = f"pos {key['epoch']-1} valid times" valid_fields = PositionOutput().fetch().dtype.fields.keys() - entries_to_delete = [entry for entry in key.keys() if entry not in valid_fields] + entries_to_delete = [ + entry for entry in key.keys() if entry not in valid_fields + ] for entry in entries_to_delete: del key[entry] @@ -155,14 +166,18 @@ def fetch1_dataframe(self): np.concatenate( ( np.asarray( - nwb_data["velocity"].time_series["video_frame_ind"].data, + nwb_data["velocity"] + .time_series["video_frame_ind"] + .data, dtype=int, )[:, np.newaxis], np.asarray(nwb_data["position"].get_spatial_series().data), - np.asarray(nwb_data["orientation"].get_spatial_series().data)[ - :, np.newaxis - ], - np.asarray(nwb_data["velocity"].time_series["velocity"].data), + np.asarray( + nwb_data["orientation"].get_spatial_series().data + )[:, np.newaxis], + np.asarray( + nwb_data["velocity"].time_series["velocity"].data + ), ), axis=1, ), @@ -173,9 +188,13 @@ def fetch1_dataframe(self): @classmethod def evaluate_pose_estimation(cls, key): likelihood_thresh = [] - valid_fields = DLCSmoothInterpCohort.BodyPart().fetch().dtype.fields.keys() + valid_fields = ( + DLCSmoothInterpCohort.BodyPart().fetch().dtype.fields.keys() + ) centroid_key = {k: val for k, val in key.items() if k in valid_fields} - centroid_key["dlc_si_cohort_selection_name"] = key["dlc_si_cohort_centroid"] + centroid_key["dlc_si_cohort_selection_name"] = key[ + "dlc_si_cohort_centroid" + ] orientation_key = centroid_key.copy() orientation_key["dlc_si_cohort_selection_name"] = key[ "dlc_si_cohort_orientation" @@ -190,9 +209,9 @@ def evaluate_pose_estimation(cls, key): np.concatenate((centroid_si_params, orientation_si_params)) ): likelihood_thresh.append( - (DLCSmoothInterpParams() & {"dlc_si_params_name": param}).fetch1( - "params" - )["likelihood_thresh"] + ( + DLCSmoothInterpParams() & {"dlc_si_params_name": param} + ).fetch1("params")["likelihood_thresh"] ) if len(np.unique(likelihood_thresh)) > 1: @@ -324,7 +343,9 @@ def make(self, key): } pose_estimation_params, video_filename, output_dir = ( DLCPoseEstimationSelection() & pose_estimation_key - ).fetch1("pose_estimation_params", "video_path", "pose_estimation_output_dir") + ).fetch1( + "pose_estimation_params", "video_path", "pose_estimation_output_dir" + ) print(f"video filename: {video_filename}") meters_per_pixel = (DLCPoseEstimation() & pose_estimation_key).fetch1( "meters_per_pixel" @@ -341,7 +362,9 @@ def make(self, key): "dlc_si_cohort_centroid": key["dlc_si_cohort_centroid"], "dlc_centroid_params_name": key["dlc_centroid_params_name"], "dlc_si_cohort_orientation": key["dlc_si_cohort_orientation"], - "dlc_orientation_params_name": key["dlc_orientation_params_name"], + 
"dlc_orientation_params_name": key[ + "dlc_orientation_params_name" + ], } ).fetch1_dataframe() pose_estimation_df = pd.concat( @@ -350,7 +373,9 @@ def make(self, key): DLCPoseEstimation.BodyPart() & {**pose_estimation_key, **{"bodypart": bodypart}} ).fetch1_dataframe() - for bodypart in (DLCSmoothInterpCohort.BodyPart & pose_estimation_key) + for bodypart in ( + DLCSmoothInterpCohort.BodyPart & pose_estimation_key + ) .fetch("bodypart") .tolist() }, @@ -378,9 +403,13 @@ def make(self, key): f'{key["dlc_orientation_params_name"]}.mp4' ) idx = pd.IndexSlice - video_frame_inds = position_info_df["video_frame_ind"].astype(int).to_numpy() + video_frame_inds = ( + position_info_df["video_frame_ind"].astype(int).to_numpy() + ) centroids = { - bodypart: pose_estimation_df.loc[:, idx[bodypart, ("x", "y")]].to_numpy() + bodypart: pose_estimation_df.loc[ + :, idx[bodypart, ("x", "y")] + ].to_numpy() for bodypart in pose_estimation_df.columns.levels[0] } if params.get("incl_likelihood", None): @@ -395,7 +424,9 @@ def make(self, key): position_mean = { "DLC": np.asarray(position_info_df[["position_x", "position_y"]]) } - orientation_mean = {"DLC": np.asarray(position_info_df[["orientation"]])} + orientation_mean = { + "DLC": np.asarray(position_info_df[["orientation"]]) + } position_time = np.asarray(position_info_df.index) cm_per_pixel = meters_per_pixel * M_TO_CM percent_frames = params.get("percent_frames", None) diff --git a/src/spyglass/position/v1/position_dlc_training.py b/src/spyglass/position/v1/position_dlc_training.py index 658fd6c1f..6f50ba5bd 100644 --- a/src/spyglass/position/v1/position_dlc_training.py +++ b/src/spyglass/position/v1/position_dlc_training.py @@ -62,8 +62,12 @@ def insert_new_params(cls, paramset_name: str, params: dict, **kwargs): # If the specified param-set already exists # Not sure we need this part, as much just a check if the name is the same if param_query: - existing_paramset_name = param_query.fetch1("dlc_training_params_name") - if existing_paramset_name == paramset_name: # If existing name same: + existing_paramset_name = param_query.fetch1( + "dlc_training_params_name" + ) + if ( + existing_paramset_name == paramset_name + ): # If existing name same: return print( f"New param set not added\n" f"A param set with name: {paramset_name} already exists" @@ -82,7 +86,9 @@ def get_accepted_params(cls): set( [ *list(inspect.signature(train_network).parameters), - *list(inspect.signature(create_training_dataset).parameters), + *list( + inspect.signature(create_training_dataset).parameters + ), ] ) ) @@ -173,22 +179,32 @@ def make(self, key): inspect.signature(create_training_dataset).parameters ) training_dataset_kwargs = { - k: v for k, v in dlc_config.items() if k in training_dataset_input_args + k: v + for k, v in dlc_config.items() + if k in training_dataset_input_args } logger.logger.info("creating training dataset") create_training_dataset(dlc_cfg_filepath, **training_dataset_kwargs) # ---- Trigger DLC model training job ---- - train_network_input_args = list(inspect.signature(train_network).parameters) + train_network_input_args = list( + inspect.signature(train_network).parameters + ) train_network_kwargs = { - k: v for k, v in dlc_config.items() if k in train_network_input_args + k: v + for k, v in dlc_config.items() + if k in train_network_input_args } for k in ["shuffle", "trainingsetindex", "maxiters"]: if k in train_network_kwargs: train_network_kwargs[k] = int(train_network_kwargs[k]) try: train_network(dlc_cfg_filepath, **train_network_kwargs) - except 
KeyboardInterrupt: # Instructions indicate to train until interrupt - logger.logger.info("DLC training stopped via Keyboard Interrupt") + except ( + KeyboardInterrupt + ): # Instructions indicate to train until interrupt + logger.logger.info( + "DLC training stopped via Keyboard Interrupt" + ) snapshots = list( ( diff --git a/src/spyglass/position/v1/position_trodes_position.py b/src/spyglass/position/v1/position_trodes_position.py index f1fb05d18..a31aa158d 100644 --- a/src/spyglass/position/v1/position_trodes_position.py +++ b/src/spyglass/position/v1/position_trodes_position.py @@ -107,7 +107,9 @@ class TrodesPosV1(dj.Computed): def make(self, key): print(f"Computing position for: {key}") - key["analysis_file_name"] = AnalysisNwbfile().create(key["nwb_file_name"]) + key["analysis_file_name"] = AnalysisNwbfile().create( + key["nwb_file_name"] + ) raw_position = (RawPosition() & key).fetch_nwb()[0] position_info_parameters = (TrodesPosParams() & key).fetch1("params") position = pynwb.behavior.Position() @@ -117,7 +119,9 @@ def make(self, key): METERS_PER_CM = 0.01 raw_pos_df = pd.DataFrame( data=raw_position["raw_position"].data, - index=pd.Index(raw_position["raw_position"].timestamps, name="time"), + index=pd.Index( + raw_position["raw_position"].timestamps, name="time" + ), columns=raw_position["raw_position"].description.split(", "), ) try: @@ -162,7 +166,10 @@ def make(self, key): conversion=METERS_PER_CM, unit="m/s", data=np.concatenate( - (position_info["velocity"], position_info["speed"][:, np.newaxis]), + ( + position_info["velocity"], + position_info["speed"][:, np.newaxis], + ), axis=1, ), comments=spatial_series.comments, @@ -214,10 +221,14 @@ def make(self, key): key["version"] = 1 trodes_key = key.copy() valid_fields = PositionOutput().fetch().dtype.fields.keys() - entries_to_delete = [entry for entry in key.keys() if entry not in valid_fields] + entries_to_delete = [ + entry for entry in key.keys() if entry not in valid_fields + ] for entry in entries_to_delete: del key[entry] - PositionOutput().insert1(key=key, params=trodes_key, skip_duplicates=True) + PositionOutput().insert1( + key=key, params=trodes_key, skip_duplicates=True + ) @staticmethod def calculate_position_info_from_spatial_series( @@ -238,7 +249,8 @@ def calculate_position_info_from_spatial_series( time = np.asarray(spatial_series.timestamps) # seconds position = np.asarray( pd.DataFrame( - spatial_series.data, columns=spatial_series.description.split(", ") + spatial_series.data, + columns=spatial_series.description.split(", "), ).loc[:, ["xloc", "yloc", "xloc2", "yloc2"]] ) # meters @@ -331,7 +343,8 @@ def calculate_position_info_from_spatial_series( upsampling_start_time, upsampling_end_time, n_samples ) new_index = pd.Index( - np.unique(np.concatenate((position_df.index, new_time))), name="time" + np.unique(np.concatenate((position_df.index, new_time))), + name="time", ) position_df = ( position_df.reindex(index=new_index) @@ -340,8 +353,12 @@ def calculate_position_info_from_spatial_series( ) time = np.asarray(position_df.index) - back_LED = np.asarray(position_df.loc[:, ["back_LED_x", "back_LED_y"]]) - front_LED = np.asarray(position_df.loc[:, ["front_LED_x", "front_LED_y"]]) + back_LED = np.asarray( + position_df.loc[:, ["back_LED_x", "back_LED_y"]] + ) + front_LED = np.asarray( + position_df.loc[:, ["front_LED_x", "front_LED_y"]] + ) sampling_rate = upsampling_sampling_rate @@ -403,14 +420,18 @@ def fetch1_dataframe(self): np.concatenate( ( np.asarray( - 
nwb_data["velocity"].time_series["video_frame_ind"].data, + nwb_data["velocity"] + .time_series["video_frame_ind"] + .data, dtype=int, )[:, np.newaxis], np.asarray(nwb_data["position"].get_spatial_series().data), - np.asarray(nwb_data["orientation"].get_spatial_series().data)[ - :, np.newaxis - ], - np.asarray(nwb_data["velocity"].time_series["velocity"].data), + np.asarray( + nwb_data["orientation"].get_spatial_series().data + )[:, np.newaxis], + np.asarray( + nwb_data["velocity"].time_series["velocity"].data + ), ), axis=1, ), @@ -454,7 +475,12 @@ def make(self, key): + 1 ) - video_path, video_filename, meters_per_pixel, video_time = get_video_path( + ( + video_path, + video_filename, + meters_per_pixel, + video_time, + ) = get_video_path( {"nwb_file_name": key["nwb_file_name"], "epoch": epoch} ) video_dir = os.path.dirname(video_path) + "/" @@ -471,7 +497,9 @@ def make(self, key): "red": np.asarray(raw_position_df[["xloc", "yloc"]]), "green": np.asarray(raw_position_df[["xloc2", "yloc2"]]), } - position_mean = np.asarray(position_info_df[["position_x", "position_y"]]) + position_mean = np.asarray( + position_info_df[["position_x", "position_y"]] + ) orientation_mean = np.asarray(position_info_df[["orientation"]]) position_time = np.asarray(position_info_df.index) cm_per_pixel = meters_per_pixel * M_TO_CM @@ -497,6 +525,7 @@ def convert_to_pixels(data, frame_size, cm_to_pixels=1.0): data : ndarray, shape (n_time, 2) frame_size : array_like, shape (2,) cm_to_pixels : float + Returns ------- converted_data : ndarray, shape (n_time, 2) @@ -550,7 +579,9 @@ def make_video( for color, data in centroids.items() } position_mean = self.fill_nan(position_mean, video_time, position_time) - orientation_mean = self.fill_nan(orientation_mean, video_time, position_time) + orientation_mean = self.fill_nan( + orientation_mean, video_time, position_time + ) for time_ind in tqdm( range(n_frames - 1), desc="frames", disable=disable_progressbar @@ -563,7 +594,9 @@ def make_video( green_centroid = centroids["green"][time_ind] position = position_mean[time_ind] - position = self.convert_to_pixels(position, frame_size, cm_to_pixels) + position = self.convert_to_pixels( + position, frame_size, cm_to_pixels + ) orientation = orientation_mean[time_ind] if np.all(~np.isnan(red_centroid)): diff --git a/src/spyglass/sharing/sharing_kachery.py b/src/spyglass/sharing/sharing_kachery.py index 265d7822c..2c0bf75cc 100644 --- a/src/spyglass/sharing/sharing_kachery.py +++ b/src/spyglass/sharing/sharing_kachery.py @@ -101,7 +101,9 @@ def set_resource_url(key: dict): def reset_resource_url(): KacheryZone.reset_zone() if default_kachery_resource_url is not None: - os.environ[kachery_resource_url_envar] = default_kachery_resource_url + os.environ[ + kachery_resource_url_envar + ] = default_kachery_resource_url @schema @@ -138,7 +140,9 @@ def make(self, key): key["analysis_file_uri"] = kcl.link_file( AnalysisNwbfile().get_abs_path(key["analysis_file_name"]) ) - print(os.environ[kachery_zone_envar], os.environ[kachery_cloud_dir_envar]) + print( + os.environ[kachery_zone_envar], os.environ[kachery_cloud_dir_envar] + ) print(AnalysisNwbfile().get_abs_path(key["analysis_file_name"])) print(kcl.load_file(key["analysis_file_uri"])) self.insert1(key) @@ -188,8 +192,12 @@ def download_file(analysis_file_name: str) -> bool: os.environ["SPYGLASS_BASE_DIR"] + file["linked_file_rel_path"] ) if not kachery_download_file( - uri=uri, dest=linked_file_path, kachery_zone_name=kachery_zone_name + uri=uri, + dest=linked_file_path, + 
kachery_zone_name=kachery_zone_name, ): - raise Exception(f"Linked file {linked_file_path} cannot be downloaded") + raise Exception( + f"Linked file {linked_file_path} cannot be downloaded" + ) return True diff --git a/src/spyglass/spikesorting/curation_figurl.py b/src/spyglass/spikesorting/curation_figurl.py index 639fc891d..2280f0c2b 100644 --- a/src/spyglass/spikesorting/curation_figurl.py +++ b/src/spyglass/spikesorting/curation_figurl.py @@ -52,14 +52,18 @@ def make(self, key: dict): """ # get new_curation_uri from selection table - new_curation_uri = (CurationFigurlSelection & key).fetch1("new_curation_uri") + new_curation_uri = (CurationFigurlSelection & key).fetch1( + "new_curation_uri" + ) # fetch recording_path = (SpikeSortingRecording & key).fetch1("recording_path") sorting_path = (SpikeSorting & key).fetch1("sorting_path") recording_label = SpikeSortingRecording._get_recording_name(key) sorting_label = SpikeSorting._get_sorting_name(key) - unit_metrics = _reformat_metrics((Curation & key).fetch1("quality_metrics")) + unit_metrics = _reformat_metrics( + (Curation & key).fetch1("quality_metrics") + ) initial_labels = (Curation & key).fetch1("curation_labels") initial_merge_groups = (Curation & key).fetch1("merge_groups") @@ -144,10 +148,14 @@ def _generate_the_figurl( spike_amplitudes_subsample_max_firing_rate = 50 view = vv.MountainLayout( items=[ - vv.MountainLayoutItem(label="Summary", view=X.sorting_summary_view()), + vv.MountainLayoutItem( + label="Summary", view=X.sorting_summary_view() + ), vv.MountainLayoutItem( label="Units table", - view=X.units_table_view(unit_ids=X.unit_ids, unit_metrics=unit_metrics), + view=X.units_table_view( + unit_ids=X.unit_ids, unit_metrics=unit_metrics + ), ), vv.MountainLayoutItem( label="Raster plot", diff --git a/src/spyglass/spikesorting/merged_sorting_extractor.py b/src/spyglass/spikesorting/merged_sorting_extractor.py index 7cff19dd8..e2140bfdc 100644 --- a/src/spyglass/spikesorting/merged_sorting_extractor.py +++ b/src/spyglass/spikesorting/merged_sorting_extractor.py @@ -44,7 +44,9 @@ def __init__( # sort the concatenated spike train (chronological) spike_train = np.sort(spike_train) # add the unit to the new sorting segment - new_sorting_segment.add_unit(representative_unit_id, spike_train) + new_sorting_segment.add_unit( + representative_unit_id, spike_train + ) # Now we'll take care of all of the unit_ids that are not part of a merge group for unit_id in parent_sorting.get_unit_ids(): if unit_id not in used_unit_ids: diff --git a/src/spyglass/spikesorting/sortingview.py b/src/spyglass/spikesorting/sortingview.py index 0e7166a12..404e246f3 100644 --- a/src/spyglass/spikesorting/sortingview.py +++ b/src/spyglass/spikesorting/sortingview.py @@ -80,7 +80,11 @@ def make(self, key: dict): google_user_ids.append(google_user_id[0]) # do - workspace_uri, recording_id, sorting_id = _create_spikesortingview_workspace( + ( + workspace_uri, + recording_id, + sorting_id, + ) = _create_spikesortingview_workspace( recording_path=recording_path, sorting_path=sorting_path, merge_groups=merge_groups, @@ -119,12 +123,14 @@ def remove_sorting_from_workspace(self, key): def url_trythis(self, key: dict, sortingview_sorting_id: str = None): """Generate a URL for visualizing and curating a sorting on the web. Will print instructions on how to do the curation. 
+ Parameters ---------- key : dict An entry from SortingviewWorkspace table sortingview_sorting_id : str, optional - sortingview sorting ID to visualize; if None then chooses the first one + sortingview sorting ID to visualize. If None then chooses the first one + Returns ------- url : str @@ -148,7 +154,9 @@ def url_trythis(self, key: dict, sortingview_sorting_id: str = None): initial_curation = {"labelsByUnit": initial_labels} # custom metrics - unit_metrics = workspace.get_unit_metrics_for_sorting(sortingview_sorting_id) + unit_metrics = workspace.get_unit_metrics_for_sorting( + sortingview_sorting_id + ) # This will print some instructions on how to do the curation # old: sv.trythis_start_sorting_curation diff --git a/src/spyglass/spikesorting/sortingview_helper_fn.py b/src/spyglass/spikesorting/sortingview_helper_fn.py index dbffcbc78..05ba0e822 100644 --- a/src/spyglass/spikesorting/sortingview_helper_fn.py +++ b/src/spyglass/spikesorting/sortingview_helper_fn.py @@ -31,7 +31,9 @@ def _create_spikesortingview_workspace( recording = si.load_extractor(recording_path) if recording.get_num_segments() > 1: recording = si.concatenate_recordings([recording]) - recording_id = workspace.add_recording(label=recording_label, recording=recording) + recording_id = workspace.add_recording( + label=recording_label, recording=recording + ) sorting = si.load_extractor(sorting_path) if len(merge_groups) != 0: @@ -101,10 +103,14 @@ def _generate_url( # You can replace this with other layouts view = vv.MountainLayout( items=[ - vv.MountainLayoutItem(label="Summary", view=X.sorting_summary_view()), + vv.MountainLayoutItem( + label="Summary", view=X.sorting_summary_view() + ), vv.MountainLayoutItem( label="Units table", - view=X.units_table_view(unit_ids=X.unit_ids, unit_metrics=unit_metrics), + view=X.units_table_view( + unit_ids=X.unit_ids, unit_metrics=unit_metrics + ), ), vv.MountainLayoutItem( label="Raster plot", diff --git a/src/spyglass/spikesorting/spikesorting_artifact.py b/src/spyglass/spikesorting/spikesorting_artifact.py index 28b1ea746..9e7a8ee33 100644 --- a/src/spyglass/spikesorting/spikesorting_artifact.py +++ b/src/spyglass/spikesorting/spikesorting_artifact.py @@ -68,17 +68,25 @@ class ArtifactDetection(dj.Computed): """ def make(self, key): - if not (ArtifactDetectionSelection & key).fetch1("custom_artifact_detection"): + if not (ArtifactDetectionSelection & key).fetch1( + "custom_artifact_detection" + ): # get the dict of artifact params associated with this artifact_params_name artifact_params = (ArtifactDetectionParameters & key).fetch1( "artifact_params" ) - recording_path = (SpikeSortingRecording & key).fetch1("recording_path") + recording_path = (SpikeSortingRecording & key).fetch1( + "recording_path" + ) recording_name = SpikeSortingRecording._get_recording_name(key) recording = si.load_extractor(recording_path) - job_kwargs = {"chunk_duration": "10s", "n_jobs": 4, "progress_bar": "True"} + job_kwargs = { + "chunk_duration": "10s", + "n_jobs": 4, + "progress_bar": "True", + } artifact_removed_valid_times, artifact_times = _get_artifact_times( recording, **artifact_params, **job_kwargs @@ -117,7 +125,9 @@ def make(self, key): # also insert into IntervalList tmp_key = {} tmp_key["nwb_file_name"] = key["nwb_file_name"] - tmp_key["interval_list_name"] = key["artifact_removed_interval_list_name"] + tmp_key["interval_list_name"] = key[ + "artifact_removed_interval_list_name" + ] tmp_key["valid_times"] = key["artifact_removed_valid_times"] IntervalList.insert1(tmp_key, replace=True) @@ 
-186,7 +196,9 @@ def _get_artifact_times(
     # if both thresholds are None, we skip artifact detection
     if (amplitude_thresh is None) and (zscore_thresh is None):
-        recording_interval = np.asarray([valid_timestamps[0], valid_timestamps[-1]])
+        recording_interval = np.asarray(
+            [valid_timestamps[0], valid_timestamps[-1]]
+        )
         artifact_times_empty = np.asarray([])
         print(
             "Amplitude and zscore thresholds are both None, skipping artifact detection"
         )
@@ -240,7 +252,9 @@ def _get_artifact_times(
     half_removal_window_s = removal_window_ms / 1000 * 0.5
 
     if len(artifact_frames) == 0:
-        recording_interval = np.asarray([[valid_timestamps[0], valid_timestamps[-1]]])
+        recording_interval = np.asarray(
+            [[valid_timestamps[0], valid_timestamps[-1]]]
+        )
         artifact_times_empty = np.asarray([])
         print("No artifacts detected.")
         return recording_interval, artifact_times_empty
@@ -249,7 +263,9 @@ def _get_artifact_times(
     artifact_intervals = interval_from_inds(artifact_frames)
 
     # convert to seconds and pad with window
-    artifact_intervals_s = np.zeros((len(artifact_intervals), 2), dtype=np.float64)
+    artifact_intervals_s = np.zeros(
+        (len(artifact_intervals), 2), dtype=np.float64
+    )
     for interval_idx, interval in enumerate(artifact_intervals):
         artifact_intervals_s[interval_idx] = [
             valid_timestamps[interval[0]] - half_removal_window_s,
@@ -281,7 +297,10 @@ def _get_artifact_times(
 
 
 def _init_artifact_worker(
-    recording, zscore_thresh=None, amplitude_thresh=None, proportion_above_thresh=1.0
+    recording,
+    zscore_thresh=None,
+    amplitude_thresh=None,
+    proportion_above_thresh=1.0,
 ):
     # create a local dict per worker
     worker_ctx = {}
@@ -301,23 +320,29 @@ def _compute_artifact_chunk(segment_index, start_frame, end_frame, worker_ctx):
     amplitude_thresh = worker_ctx["amplitude_thresh"]
     proportion_above_thresh = worker_ctx["proportion_above_thresh"]
     # compute the number of electrodes that have to be above threshold
-    nelect_above = np.ceil(proportion_above_thresh * len(recording.get_channel_ids()))
+    nelect_above = np.ceil(
+        proportion_above_thresh * len(recording.get_channel_ids())
+    )
 
     traces = recording.get_traces(
-        segment_index=segment_index, start_frame=start_frame, end_frame=end_frame
+        segment_index=segment_index,
+        start_frame=start_frame,
+        end_frame=end_frame,
     )
 
     # find the artifact occurrences using one or both thresholds, across channels
     if (amplitude_thresh is not None) and (zscore_thresh is None):
         above_a = np.abs(traces) > amplitude_thresh
         above_thresh = (
-            np.ravel(np.argwhere(np.sum(above_a, axis=1) >= nelect_above)) + start_frame
+            np.ravel(np.argwhere(np.sum(above_a, axis=1) >= nelect_above))
+            + start_frame
         )
     elif (amplitude_thresh is None) and (zscore_thresh is not None):
         dataz = np.abs(stats.zscore(traces, axis=1))
         above_z = dataz > zscore_thresh
         above_thresh = (
-            np.ravel(np.argwhere(np.sum(above_z, axis=1) >= nelect_above)) + start_frame
+            np.ravel(np.argwhere(np.sum(above_z, axis=1) >= nelect_above))
+            + start_frame
         )
     else:
         above_a = np.abs(traces) > amplitude_thresh
@@ -326,7 +351,8 @@ def _compute_artifact_chunk(segment_index, start_frame, end_frame, worker_ctx):
         above_thresh = (
             np.ravel(
                 np.argwhere(
-                    np.sum(np.logical_or(above_z, above_a), axis=1) >= nelect_above
+                    np.sum(np.logical_or(above_z, above_a), axis=1)
+                    >= nelect_above
                 )
             )
             + start_frame
@@ -357,10 +383,14 @@ def _check_artifact_thresholds(
         ValueError: if signal thresholds are negative
     """
     # amplitude or zscore thresholds should not be negative, as they are applied to an absolute signal
-    signal_thresholds = [t for t in 
[amplitude_thresh, zscore_thresh] if t is not None] + signal_thresholds = [ + t for t in [amplitude_thresh, zscore_thresh] if t is not None + ] for t in signal_thresholds: if t < 0: - raise ValueError("Amplitude and Z-Score thresholds must be >= 0, or None") + raise ValueError( + "Amplitude and Z-Score thresholds must be >= 0, or None" + ) # proportion_above_threshold should be in [0:1] inclusive if proportion_above_thresh < 0: diff --git a/src/spyglass/spikesorting/spikesorting_curation.py b/src/spyglass/spikesorting/spikesorting_curation.py index 5e5afe7e0..6eafee5fb 100644 --- a/src/spyglass/spikesorting/spikesorting_curation.py +++ b/src/spyglass/spikesorting/spikesorting_curation.py @@ -32,7 +32,9 @@ def apply_merge_groups_to_sorting( # merge_groups is a list of lists of unit_ids. # for example: merge_groups = [[1, 2], [5, 8, 4]]] - return MergedSortingExtractor(parent_sorting=sorting, merge_groups=merge_groups) + return MergedSortingExtractor( + parent_sorting=sorting, merge_groups=merge_groups + ) @schema @@ -220,7 +222,9 @@ def save_sorting_nwb( unit_ids = sorting.get_unit_ids() for unit_id in unit_ids: - spike_times_in_samples = sorting.get_unit_spike_train(unit_id=unit_id) + spike_times_in_samples = sorting.get_unit_spike_train( + unit_id=unit_id + ) units[unit_id] = timestamps[spike_times_in_samples] units_valid_times[unit_id] = sort_interval_valid_times units_sort_interval[unit_id] = [sort_interval] @@ -271,7 +275,9 @@ def insert_default(self): "total_memory": "5G", "whiten": False, } - self.insert1([waveform_params_name, waveform_params], skip_duplicates=True) + self.insert1( + [waveform_params_name, waveform_params], skip_duplicates=True + ) waveform_params_name = "default_whitened" waveform_params = { "ms_before": 0.5, @@ -281,7 +287,9 @@ def insert_default(self): "total_memory": "5G", "whiten": True, } - self.insert1([waveform_params_name, waveform_params], skip_duplicates=True) + self.insert1( + [waveform_params_name, waveform_params], skip_duplicates=True + ) @schema @@ -318,7 +326,8 @@ def make(self, key): waveform_extractor_name = self._get_waveform_extractor_name(key) key["waveform_extractor_path"] = str( - Path(os.environ["SPYGLASS_WAVEFORMS_DIR"]) / Path(waveform_extractor_name) + Path(os.environ["SPYGLASS_WAVEFORMS_DIR"]) + / Path(waveform_extractor_name) ) if os.path.exists(key["waveform_extractor_path"]): shutil.rmtree(key["waveform_extractor_path"]) @@ -329,7 +338,9 @@ def make(self, key): **waveform_params, ) - key["analysis_file_name"] = AnalysisNwbfile().create(key["nwb_file_name"]) + key["analysis_file_name"] = AnalysisNwbfile().create( + key["nwb_file_name"] + ) object_id = AnalysisNwbfile().add_units_waveforms( key["analysis_file_name"], waveform_extractor=waveforms ) @@ -360,7 +371,9 @@ def fetch_nwb(self, key): return NotImplementedError def _get_waveform_extractor_name(self, key): - waveform_params_name = (WaveformParameters & key).fetch1("waveform_params_name") + waveform_params_name = (WaveformParameters & key).fetch1( + "waveform_params_name" + ) return ( f'{key["nwb_file_name"]}_{str(uuid.uuid4())[0:8]}_' @@ -422,7 +435,8 @@ def get_metric_default_params(self, metric: str): def insert_default(self): self.insert1( - ["franklab_default3", self.metric_default_params], skip_duplicates=True + ["franklab_default3", self.metric_default_params], + skip_duplicates=True, ) def get_available_metrics(self): @@ -498,7 +512,9 @@ def make(self, key): print(f"Computed all metrics: {qm}") self._dump_to_json(qm, key["quality_metrics_path"]) - key["analysis_file_name"] 
= AnalysisNwbfile().create(key["nwb_file_name"]) + key["analysis_file_name"] = AnalysisNwbfile().create( + key["nwb_file_name"] + ) key["object_id"] = AnalysisNwbfile().add_units_metrics( key["analysis_file_name"], metrics=qm ) @@ -555,7 +571,9 @@ def _compute_isi_violation_fractions(waveform_extractor, **metric_params): # Extract the total number of spikes that violated the isi_threshold for each unit isi_violation_counts = sq.compute_isi_violations( - waveform_extractor, isi_threshold_ms=isi_threshold_ms, min_isi_ms=min_isi_ms + waveform_extractor, + isi_threshold_ms=isi_threshold_ms, + min_isi_ms=min_isi_ms, ).isi_violations_count # Extract the total number of spikes from each unit. The number of ISIs is one less than this @@ -575,8 +593,12 @@ def _get_peak_offset( """Computes the shift of the waveform peak from center of window.""" if "peak_sign" in metric_params: del metric_params["peak_sign"] - peak_offset_inds = si.postprocessing.get_template_extremum_channel_peak_shift( - waveform_extractor=waveform_extractor, peak_sign=peak_sign, **metric_params + peak_offset_inds = ( + si.postprocessing.get_template_extremum_channel_peak_shift( + waveform_extractor=waveform_extractor, + peak_sign=peak_sign, + **metric_params, + ) ) peak_offset = {key: int(abs(val)) for key, val in peak_offset_inds.items()} return peak_offset @@ -589,13 +611,17 @@ def _get_peak_channel( if "peak_sign" in metric_params: del metric_params["peak_sign"] peak_channel_dict = si.postprocessing.get_template_extremum_channel( - waveform_extractor=waveform_extractor, peak_sign=peak_sign, **metric_params + waveform_extractor=waveform_extractor, + peak_sign=peak_sign, + **metric_params, ) peak_channel = {key: int(val) for key, val in peak_channel_dict.items()} return peak_channel -def _get_num_spikes(waveform_extractor: si.WaveformExtractor, this_unit_id: int): +def _get_num_spikes( + waveform_extractor: si.WaveformExtractor, this_unit_id: int +): """Computes the number of spikes for each unit.""" all_spikes = sq.compute_num_spikes(waveform_extractor) cluster_spikes = all_spikes[this_unit_id] @@ -654,7 +680,9 @@ def insert_default(self): default_params = { "auto_curation_params_name": "default", "merge_params": {}, - "label_params": {"nn_noise_overlap": [">", 0.1, ["noise", "reject"]]}, + "label_params": { + "nn_noise_overlap": [">", 0.1, ["noise", "reject"]] + }, } self.insert1(default_params, skip_duplicates=True) @@ -705,12 +733,16 @@ def make(self, key): parent_curation_id = parent_curation["curation_id"] parent_sorting = Curation.get_curated_sorting(key) - merge_params = (AutomaticCurationParameters & key).fetch1("merge_params") + merge_params = (AutomaticCurationParameters & key).fetch1( + "merge_params" + ) merge_groups, units_merged = self.get_merge_groups( parent_sorting, parent_merge_groups, quality_metrics, merge_params ) - label_params = (AutomaticCurationParameters & key).fetch1("label_params") + label_params = (AutomaticCurationParameters & key).fetch1( + "label_params" + ) labels = self.get_labels( parent_sorting, parent_labels, quality_metrics, label_params ) @@ -734,7 +766,9 @@ def make(self, key): self.insert1(key) @staticmethod - def get_merge_groups(sorting, parent_merge_groups, quality_metrics, merge_params): + def get_merge_groups( + sorting, parent_merge_groups, quality_metrics, merge_params + ): """Identifies units to be merged based on the quality_metrics and merge parameters and returns an updated list of merges for the curation. 
@@ -812,13 +846,19 @@ def get_labels(sorting, parent_labels, quality_metrics, label_params): # note that label_params[metric] is a three element list with a comparison operator as a string, # the threshold value, and a list of labels to be applied if the comparison is true if compare( - quality_metrics[metric][unit_id], label_params[metric][1] + quality_metrics[metric][unit_id], + label_params[metric][1], ): if unit_id not in parent_labels: parent_labels[unit_id] = label_params[metric][2] # check if the label is already there, and if not, add it - elif label_params[metric][2] not in parent_labels[unit_id]: - parent_labels[unit_id].extend(label_params[metric][2]) + elif ( + label_params[metric][2] + not in parent_labels[unit_id] + ): + parent_labels[unit_id].extend( + label_params[metric][2] + ) return parent_labels @@ -870,7 +910,10 @@ def make(self, key): accepted_units = [] for unit_id in unit_ids: if unit_id in unit_labels: - if len(set(unit_labels_to_remove) & set(unit_labels[unit_id])) == 0: + if ( + len(set(unit_labels_to_remove) & set(unit_labels[unit_id])) + == 0 + ): accepted_units.append(unit_id) else: accepted_units.append(unit_id) @@ -898,7 +941,9 @@ def make(self, key): recording = Curation.get_recording(key) # get the sort_interval and sorting interval list - sort_interval_name = (SpikeSortingRecording & key).fetch1("sort_interval_name") + sort_interval_name = (SpikeSortingRecording & key).fetch1( + "sort_interval_name" + ) sort_interval = (SortInterval & key).fetch1("sort_interval") sort_interval_list_name = (SpikeSorting & key).fetch1( "artifact_removed_interval_list_name" @@ -988,7 +1033,9 @@ def insert1(self, key, **kwargs): ) super().insert1(key, **kwargs) - def get_included_units(self, curated_sorting_key, unit_inclusion_param_name): + def get_included_units( + self, curated_sorting_key, unit_inclusion_param_name + ): """given a reference to a set of curated sorting units and the name of a unit inclusion parameter list, returns Parameters @@ -1009,7 +1056,9 @@ def get_included_units(self, curated_sorting_key, unit_inclusion_param_name): & {"unit_inclusion_param_name": unit_inclusion_param_name} ).fetch1("inclusion_param_dict") units = (CuratedSpikeSorting().Unit() & curated_sortings).fetch() - units_key = (CuratedSpikeSorting().Unit() & curated_sortings).fetch("KEY") + units_key = (CuratedSpikeSorting().Unit() & curated_sortings).fetch( + "KEY" + ) # get a list of the metrics in the units table metrics_list = CuratedSpikeSorting().metrics_fields() # get the list of labels to exclude if there is one diff --git a/src/spyglass/spikesorting/spikesorting_recording.py b/src/spyglass/spikesorting/spikesorting_recording.py index 3ce93c003..e403638e3 100644 --- a/src/spyglass/spikesorting/spikesorting_recording.py +++ b/src/spyglass/spikesorting/spikesorting_recording.py @@ -72,7 +72,9 @@ def set_group_by_shank( (SortGroup & {"nwb_file_name": nwb_file_name}).delete() # get the electrodes from this NWB file electrodes = ( - Electrode() & {"nwb_file_name": nwb_file_name} & {"bad_channel": "False"} + Electrode() + & {"nwb_file_name": nwb_file_name} + & {"bad_channel": "False"} ).fetch() e_groups = list(np.unique(electrodes["electrode_group_name"])) e_groups.sort(key=int) # sort electrode groups numerically @@ -83,7 +85,9 @@ def set_group_by_shank( for e_group in e_groups: # for each electrode group, get a list of the unique shank numbers shank_list = np.unique( - electrodes["probe_shank"][electrodes["electrode_group_name"] == e_group] + electrodes["probe_shank"][ + 
electrodes["electrode_group_name"] == e_group + ] ) sge_key["electrode_group_name"] = e_group # get the indices of all electrodes in this group / shank and set their sorting group @@ -91,14 +95,18 @@ def set_group_by_shank( sg_key["sort_group_id"] = sge_key["sort_group_id"] = sort_group # specify reference electrode. Use 'references' if passed, otherwise use reference from config if not references: - shank_elect_ref = electrodes["original_reference_electrode"][ + shank_elect_ref = electrodes[ + "original_reference_electrode" + ][ np.logical_and( electrodes["electrode_group_name"] == e_group, electrodes["probe_shank"] == shank, ) ] if np.max(shank_elect_ref) == np.min(shank_elect_ref): - sg_key["sort_reference_electrode_id"] = shank_elect_ref[0] + sg_key["sort_reference_electrode_id"] = shank_elect_ref[ + 0 + ] else: ValueError( f"Error in electrode group {e_group}: reference electrodes are not all the same" @@ -109,14 +117,19 @@ def set_group_by_shank( f"electrode group {e_group} not a key in references, so cannot set reference" ) else: - sg_key["sort_reference_electrode_id"] = references[e_group] + sg_key["sort_reference_electrode_id"] = references[ + e_group + ] # Insert sort group and sort group electrodes reference_electrode_group = electrodes[ - electrodes["electrode_id"] == sg_key["sort_reference_electrode_id"] + electrodes["electrode_id"] + == sg_key["sort_reference_electrode_id"] ][ "electrode_group_name" ] # reference for this electrode group - if len(reference_electrode_group) == 1: # unpack single reference + if ( + len(reference_electrode_group) == 1 + ): # unpack single reference reference_electrode_group = reference_electrode_group[0] elif (int(sg_key["sort_reference_electrode_id"]) > 0) and ( len(reference_electrode_group) != 1 @@ -164,7 +177,9 @@ def set_group_by_electrode_group(self, nwb_file_name: str): (SortGroup & {"nwb_file_name": nwb_file_name}).delete() # get the electrodes from this NWB file electrodes = ( - Electrode() & {"nwb_file_name": nwb_file_name} & {"bad_channel": "False"} + Electrode() + & {"nwb_file_name": nwb_file_name} + & {"bad_channel": "False"} ).fetch() e_groups = np.unique(electrodes["electrode_group_name"]) sg_key = dict() @@ -229,7 +244,6 @@ def get_geometry(self, sort_group_id, nwb_file_name): ---------- sort_group_id : int nwb_file_name : str - prb_file_name : str Returns ------- @@ -262,7 +276,9 @@ def get_geometry(self, sort_group_id, nwb_file_name): geometry = np.zeros((n_chan, 2), dtype="float") tmp_geom = np.zeros((n_chan, 3), dtype="float") - for i, electrode_id in enumerate(channel_group[sort_group_id]["channels"]): + for i, electrode_id in enumerate( + channel_group[sort_group_id]["channels"] + ): # get the relative x and y locations of this channel from the probe table probe_electrode = int( electrodes["probe_electrode"][ @@ -395,7 +411,9 @@ def _get_recording_timestamps(recording): if recording.get_num_segments() > 1: frames_per_segment = [0] for i in range(recording.get_num_segments()): - frames_per_segment.append(recording.get_num_frames(segment_index=i)) + frames_per_segment.append( + recording.get_num_frames(segment_index=i) + ) cumsum_frames = np.cumsum(frames_per_segment) total_frames = np.sum(frames_per_segment) @@ -441,9 +459,13 @@ def _get_sort_interval_valid_times(self, key): "interval_list_name": interval_list_name, } ).fetch1("valid_times") - valid_sort_times = interval_list_intersect(sort_interval, valid_interval_times) + valid_sort_times = interval_list_intersect( + sort_interval, valid_interval_times + ) # Exclude 
intervals shorter than specified length - params = (SpikeSortingPreprocessingParameters & key).fetch1("preproc_params") + params = (SpikeSortingPreprocessingParameters & key).fetch1( + "preproc_params" + ) if "min_segment_length" in params: valid_sort_times = intervals_by_length( valid_sort_times, min_length=params["min_segment_length"] @@ -468,7 +490,9 @@ def _get_filtered_recording(self, key: dict): """ nwb_file_abs_path = Nwbfile().get_abs_path(key["nwb_file_name"]) - recording = se.read_nwb_recording(nwb_file_abs_path, load_time_vector=True) + recording = se.read_nwb_recording( + nwb_file_abs_path, load_time_vector=True + ) valid_sort_times = self._get_sort_interval_valid_times(key) # shape is (N, 2) @@ -483,14 +507,17 @@ def _get_filtered_recording(self, key: dict): union_adjacent_index, valid_sort_times_indices ) if valid_sort_times_indices.ndim == 1: - valid_sort_times_indices = np.expand_dims(valid_sort_times_indices, 0) + valid_sort_times_indices = np.expand_dims( + valid_sort_times_indices, 0 + ) # create an AppendRecording if there is more than one disjoint sort interval if len(valid_sort_times_indices) > 1: recordings_list = [] for interval_indices in valid_sort_times_indices: recording_single = recording.frame_slice( - start_frame=interval_indices[0], end_frame=interval_indices[1] + start_frame=interval_indices[0], + end_frame=interval_indices[1], ) recordings_list.append(recording_single) recording = si.append_recordings(recordings_list) diff --git a/src/spyglass/spikesorting/spikesorting_sorting.py b/src/spyglass/spikesorting/spikesorting_sorting.py index c406bb658..3f3ddf80e 100644 --- a/src/spyglass/spikesorting/spikesorting_sorting.py +++ b/src/spyglass/spikesorting/spikesorting_sorting.py @@ -38,7 +38,9 @@ def insert_default(self): sorters = sis.available_sorters() for sorter in sorters: sorter_params = sis.get_default_sorter_params(sorter) - self.insert1([sorter, "default", sorter_params], skip_duplicates=True) + self.insert1( + [sorter, "default", sorter_params], skip_duplicates=True + ) # Insert Frank lab defaults # Hippocampus tetrode default @@ -56,7 +58,9 @@ def insert_default(self): "detect_threshold": 3, "detect_interval": 10, } - self.insert1([sorter, sorter_params_name, sorter_params], skip_duplicates=True) + self.insert1( + [sorter, sorter_params_name, sorter_params], skip_duplicates=True + ) # Cortical probe default sorter = "mountainsort4" @@ -73,7 +77,9 @@ def insert_default(self): "detect_threshold": 3, "detect_interval": 10, } - self.insert1([sorter, sorter_params_name, sorter_params], skip_duplicates=True) + self.insert1( + [sorter, sorter_params_name, sorter_params], skip_duplicates=True + ) # clusterless defaults sorter = "clusterless_thresholder" @@ -91,7 +97,9 @@ def insert_default(self): # output needs to be set to sorting for the rest of the pipeline outputs="sorting", ) - self.insert1([sorter, sorter_params_name, sorter_params], skip_duplicates=True) + self.insert1( + [sorter, sorter_params_name, sorter_params], skip_duplicates=True + ) @schema @@ -234,7 +242,9 @@ def delete(self): current_user_name = dj.config["database.user"] entries = self.fetch() permission_bool = np.zeros((len(entries),)) - print(f"Attempting to delete {len(entries)} entries, checking permission...") + print( + f"Attempting to delete {len(entries)} entries, checking permission..." 
+ ) for entry_idx in range(len(entries)): # check the team name for the entry, then look up the members in that team, @@ -250,10 +260,13 @@ def delete(self): for lab_member_name in lab_member_name_list: datajoint_user_names.append( ( - LabMember.LabMemberInfo & {"lab_member_name": lab_member_name} + LabMember.LabMemberInfo + & {"lab_member_name": lab_member_name} ).fetch1("datajoint_user_name") ) - permission_bool[entry_idx] = current_user_name in datajoint_user_names + permission_bool[entry_idx] = ( + current_user_name in datajoint_user_names + ) if np.sum(permission_bool) == len(entries): print("Permission to delete all specified entries granted.") super().delete() @@ -280,12 +293,16 @@ def nightly_cleanup(self): if dir not in analysis_file_names: full_path = str(Path(os.environ["SPYGLASS_SORTING_DIR"]) / dir) print(f"removing {full_path}") - shutil.rmtree(str(Path(os.environ["SPYGLASS_SORTING_DIR"]) / dir)) + shutil.rmtree( + str(Path(os.environ["SPYGLASS_SORTING_DIR"]) / dir) + ) @staticmethod def _get_sorting_name(key): recording_name = SpikeSortingRecording._get_recording_name(key) - sorting_name = recording_name + "_" + str(uuid.uuid4())[0:8] + "_spikesorting" + sorting_name = ( + recording_name + "_" + str(uuid.uuid4())[0:8] + "_spikesorting" + ) return sorting_name # TODO: write a function to import sorting done outside of dj diff --git a/src/spyglass/utils/dj_helper_fn.py b/src/spyglass/utils/dj_helper_fn.py index ab23013e5..548aa66c7 100644 --- a/src/spyglass/utils/dj_helper_fn.py +++ b/src/spyglass/utils/dj_helper_fn.py @@ -20,8 +20,6 @@ def dj_replace(original_table, new_values, key_column, replace_column): Result of a datajoint .fetch() call on a schema query. new_values : list List of tuples, each containing (key_value, replace_value). - index_column : str - The name of the column where the key_values are located. replace_column : str The name of the column where to-be-replaced values are located. @@ -38,7 +36,9 @@ def dj_replace(original_table, new_values, key_column, replace_column): new_values = tmp new_val_array = np.asarray(new_values) - replace_ind = np.where(np.isin(original_table[key_column], new_val_array[:, 0])) + replace_ind = np.where( + np.isin(original_table[key_column], new_val_array[:, 0]) + ) original_table[replace_column][replace_ind] = new_val_array[:, 1] return original_table @@ -48,16 +48,16 @@ def fetch_nwb(query_expression, nwb_master, *attrs, **kwargs): Parameters ---------- - query_expression + query_expression : query A DataJoint query expression (e.g., join, restrict) or a table to call fetch on. nwb_master : tuple Tuple (table, attr) to get the NWB filepath from. i.e. absolute path to NWB file can be obtained by looking up attr column of table table is usually Nwbfile or AnalysisNwbfile; attr is usually 'nwb_file_abs_path' or 'analysis_file_abs_path' - attrs : list + *attrs : list Attributes from normal DataJoint fetch call. - kwargs : dict + **kwargs : dict Keyword arguments from normal DataJoint fetch call. Returns @@ -84,20 +84,22 @@ def fetch_nwb(query_expression, nwb_master, *attrs, **kwargs): else Nwbfile.get_abs_path ) - nwb_files = (query_expression * tbl.proj(nwb2load_filepath=attr_name)).fetch( - file_name_str - ) + nwb_files = ( + query_expression * tbl.proj(nwb2load_filepath=attr_name) + ).fetch(file_name_str) for file_name in nwb_files: file_path = file_path_fn(file_name) if not os.path.exists(file_path): # retrieve the file from kachery. 
This also opens the file and stores the file object get_nwb_file(file_path) - rec_dicts = (query_expression * tbl.proj(nwb2load_filepath=attr_name)).fetch( - *attrs, "nwb2load_filepath", **kwargs - ) + rec_dicts = ( + query_expression * tbl.proj(nwb2load_filepath=attr_name) + ).fetch(*attrs, "nwb2load_filepath", **kwargs) - if not rec_dicts or not np.any(["object_id" in key for key in rec_dicts[0]]): + if not rec_dicts or not np.any( + ["object_id" in key for key in rec_dicts[0]] + ): return rec_dicts ret = [] diff --git a/src/spyglass/utils/nwb_helper_fn.py b/src/spyglass/utils/nwb_helper_fn.py index 71a8e2fec..c4569e0b0 100644 --- a/src/spyglass/utils/nwb_helper_fn.py +++ b/src/spyglass/utils/nwb_helper_fn.py @@ -39,7 +39,9 @@ def get_nwb_file(nwb_file_path): if nwbfile is None: # check to see if the file exists if not os.path.exists(nwb_file_path): - print(f"NWB file {nwb_file_path} does not exist locally; checking kachery") + print( + f"NWB file {nwb_file_path} does not exist locally; checking kachery" + ) # first try the analysis files from ..sharing.sharing_kachery import AnalysisNwbfileKachery @@ -202,7 +204,9 @@ def estimate_sampling_rate(timestamps, multiplier): return np.round(1.0 / np.mean(sample_diff[adjacent])) -def get_valid_intervals(timestamps, sampling_rate, gap_proportion, min_valid_len): +def get_valid_intervals( + timestamps, sampling_rate, gap_proportion, min_valid_len +): """Finds the set of all valid intervals in a list of timestamps. Valid interval: (start time, stop time) during which there are no gaps (i.e. missing samples). @@ -289,7 +293,9 @@ def get_electrode_indices(nwb_object, electrode_ids): # electrodes is a DynamicTable that contains all electrodes selected_elect_ids = list(nwb_object.electrodes.id[:]) else: - raise ValueError("nwb_object must be of type ElectricalSeries or NWBFile") + raise ValueError( + "nwb_object must be of type ElectricalSeries or NWBFile" + ) # for each electrode_id, find its index in selected_elect_ids and return that if it's there and invalid_electrode_index if not. return [ @@ -322,7 +328,9 @@ def get_all_spatial_series(nwbf, verbose=False): # for some reason the spatial_series do not necessarily come out in order, so we need to figure out the right order epoch_start_time = np.zeros(len(position.spatial_series.values())) - for pos_epoch, spatial_series in enumerate(position.spatial_series.values()): + for pos_epoch, spatial_series in enumerate( + position.spatial_series.values() + ): epoch_start_time[pos_epoch] = spatial_series.timestamps[0] sorted_order = np.argsort(epoch_start_time) @@ -338,7 +346,9 @@ def get_all_spatial_series(nwbf, verbose=False): timestamps = np.asarray(spatial_series.timestamps) sampling_rate = estimate_sampling_rate(timestamps, 1.75) if sampling_rate < 0: - raise ValueError(f"Error adding position data for position epoch {index}") + raise ValueError( + f"Error adding position data for position epoch {index}" + ) if verbose: print( "Processing raw position data. 
Estimated sampling rate: {} Hz".format( @@ -352,7 +362,9 @@ def get_all_spatial_series(nwbf, verbose=False): gap_proportion=2.5, min_valid_len=int(sampling_rate), ) - pos_data_dict[index]["raw_position_object_id"] = spatial_series.object_id + pos_data_dict[index][ + "raw_position_object_id" + ] = spatial_series.object_id return pos_data_dict diff --git a/tests/ci_config.py b/tests/ci_config.py index a0acaa71f..e329df7ed 100644 --- a/tests/ci_config.py +++ b/tests/ci_config.py @@ -13,7 +13,11 @@ dj.config["database.user"] = "root" dj.config["database.password"] = "tutorial" dj.config["stores"] = { - "raw": {"protocol": "file", "location": str(raw_dir), "stage": str(raw_dir)}, + "raw": { + "protocol": "file", + "location": str(raw_dir), + "stage": str(raw_dir), + }, "analysis": { "protocol": "file", "location": str(analysis_dir), diff --git a/tests/conftest.py b/tests/conftest.py index ba251f919..07ddd564f 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -8,7 +8,10 @@ import datajoint as dj from .datajoint._config import DATAJOINT_SERVER_PORT -from .datajoint._datajoint_server import kill_datajoint_server, run_datajoint_server +from .datajoint._datajoint_server import ( + kill_datajoint_server, + run_datajoint_server, +) thisdir = os.path.dirname(os.path.realpath(__file__)) sys.path.append(thisdir) @@ -95,7 +98,11 @@ def _set_env(): dj.config["database.password"] = "tutorial" dj.config["stores"] = { - "raw": {"protocol": "file", "location": str(raw_dir), "stage": str(raw_dir)}, + "raw": { + "protocol": "file", + "location": str(raw_dir), + "stage": str(raw_dir), + }, "analysis": { "protocol": "file", "location": str(analysis_dir), diff --git a/tests/data_import/test_insert_sessions.py b/tests/data_import/test_insert_sessions.py index 93aae2aba..f8c3e309e 100644 --- a/tests/data_import/test_insert_sessions.py +++ b/tests/data_import/test_insert_sessions.py @@ -53,7 +53,11 @@ def new_nwbfile_raw_file_name(tmp_path): os.mkdir(raw_dir) dj.config["stores"] = { - "raw": {"protocol": "file", "location": str(raw_dir), "stage": str(raw_dir)}, + "raw": { + "protocol": "file", + "location": str(raw_dir), + "stage": str(raw_dir), + }, } file_name = "raw.nwb" @@ -79,11 +83,15 @@ def test_copy_nwb( new_nwbfile_no_ephys_file_name, moved_nwbfile_no_ephys_file_path, ): - copy_nwb_link_raw_ephys(new_nwbfile_raw_file_name, new_nwbfile_no_ephys_file_name) + copy_nwb_link_raw_ephys( + new_nwbfile_raw_file_name, new_nwbfile_no_ephys_file_name + ) # new file should not have ephys data base_dir = pathlib.Path(os.getenv("SPYGLASS_BASE_DIR", None)) - new_nwbfile_raw_file_name_abspath = base_dir / "raw" / new_nwbfile_raw_file_name + new_nwbfile_raw_file_name_abspath = ( + base_dir / "raw" / new_nwbfile_raw_file_name + ) out_nwb_file_abspath = base_dir / "raw" / new_nwbfile_no_ephys_file_name with pynwb.NWBHDF5IO(path=str(out_nwb_file_abspath), mode="r") as io: nwbfile = io.read() @@ -97,7 +105,9 @@ def test_copy_nwb( # test readability after moving the linking raw file (paths are stored as relative paths in NWB) # so this should break the link (moving the linked-to file should also break the link) shutil.move(out_nwb_file_abspath, moved_nwbfile_no_ephys_file_path) - with pynwb.NWBHDF5IO(path=str(moved_nwbfile_no_ephys_file_path), mode="r") as io: + with pynwb.NWBHDF5IO( + path=str(moved_nwbfile_no_ephys_file_path), mode="r" + ) as io: with pytest.warns(BrokenLinkWarning): nwbfile = io.read() # should raise BrokenLinkWarning assert "test_ts" not in nwbfile.acquisition diff --git a/tests/test_insert_beans.py 
b/tests/test_insert_beans.py index dc06e5ea3..d74ecb856 100644 --- a/tests/test_insert_beans.py +++ b/tests/test_insert_beans.py @@ -15,10 +15,17 @@ def test_insert_sessions(): raw_dir = pathlib.Path(os.environ["SPYGLASS_BASE_DIR"]) / "raw" nwbfile_path = raw_dir / "test.nwb" - from spyglass.common import Session, DataAcquisitionDevice, CameraDevice, Probe + from spyglass.common import ( + Session, + DataAcquisitionDevice, + CameraDevice, + Probe, + ) from spyglass.data_import import insert_sessions - test_path = "ipfs://bafybeie4svt3paz5vr7cw7mkgibutbtbzyab4s24hqn5pzim3sgg56m3n4" + test_path = ( + "ipfs://bafybeie4svt3paz5vr7cw7mkgibutbtbzyab4s24hqn5pzim3sgg56m3n4" + ) try: local_test_path = kcl.load_file(test_path) except Exception as e: @@ -32,7 +39,9 @@ def test_insert_sessions(): os.rename(local_test_path, nwbfile_path) # test that the file can be read. this is not used otherwise - with pynwb.NWBHDF5IO(path=str(nwbfile_path), mode="r", load_namespaces=True) as io: + with pynwb.NWBHDF5IO( + path=str(nwbfile_path), mode="r", load_namespaces=True + ) as io: nwbfile = io.read() assert nwbfile is not None diff --git a/tests/test_nwb_helper_fn.py b/tests/test_nwb_helper_fn.py index 780f16150..ad382b0a4 100644 --- a/tests/test_nwb_helper_fn.py +++ b/tests/test_nwb_helper_fn.py @@ -35,7 +35,9 @@ def setUp(self): ) elecs_region = self.nwbfile.electrodes.create_region( - name="electrodes", region=[2, 3, 4, 5], description="description" # indices + name="electrodes", + region=[2, 3, 4, 5], + description="description", # indices ) eseries = pynwb.ecephys.ElectricalSeries( diff --git a/tests/trim_beans.py b/tests/trim_beans.py index 2dec42721..242e65c49 100644 --- a/tests/trim_beans.py +++ b/tests/trim_beans.py @@ -29,7 +29,9 @@ nwbfile.add_acquisition(new_eseries) # create a new analog TimeSeries with a subset of the data and timestamps - orig_analog = nwbfile.processing["analog"]["analog"].time_series.pop("analog") + orig_analog = nwbfile.processing["analog"]["analog"].time_series.pop( + "analog" + ) data = orig_analog.data[0:n_timestamps_to_keep, :] ts = orig_analog.timestamps[0:n_timestamps_to_keep] new_analog = pynwb.TimeSeries( @@ -47,9 +49,9 @@ for spatial_series_name in list( nwbfile.processing["behavior"]["position"].spatial_series ): - spatial_series = nwbfile.processing["behavior"]["position"].spatial_series.pop( - spatial_series_name - ) + spatial_series = nwbfile.processing["behavior"][ + "position" + ].spatial_series.pop(spatial_series_name) assert isinstance(spatial_series, pynwb.behavior.SpatialSeries) data = spatial_series.data[:, 0:2] ts = spatial_series.timestamps[0:n_timestamps_to_keep] @@ -63,7 +65,9 @@ ) ) for spatial_series in new_spatial_series: - nwbfile.processing["behavior"]["position"].add_spatial_series(spatial_series) + nwbfile.processing["behavior"]["position"].add_spatial_series( + spatial_series + ) with pynwb.NWBHDF5IO(file_out, "w") as export_io: export_io.export(io, nwbfile)
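Note (not part of the patch): a minimal usage sketch of two helpers whose formatting changes appear in the hunks above, `fetch_nwb` in `dj_helper_fn.py` and `get_valid_intervals` in `nwb_helper_fn.py`. The table choice, file name, and fake timestamp vector below are assumptions for illustration only; the call signatures follow the docstrings and call sites shown in this diff.

    # Illustrative sketch only -- not part of this commit.
    # Assumes a populated Spyglass database and an existing NWB file name.
    import numpy as np

    from spyglass.common import Nwbfile, Raw
    from spyglass.utils.dj_helper_fn import fetch_nwb
    from spyglass.utils.nwb_helper_fn import (
        estimate_sampling_rate,
        get_valid_intervals,
    )

    # fetch_nwb: restrict a table, then resolve the linked NWB file via the
    # (table, attr) tuple and load the referenced NWB objects.
    query = Raw & {"nwb_file_name": "example20230601_.nwb"}  # hypothetical file
    nwb_data = fetch_nwb(query, (Nwbfile, "nwb_file_abs_path"))

    # get_valid_intervals: find gap-free (start, stop) spans in a timestamp
    # vector, using the same gap_proportion / min_valid_len values the
    # position-loading code above passes.
    timestamps = np.arange(0, 10, 0.002)  # fake, perfectly regular 500 Hz clock
    sampling_rate = estimate_sampling_rate(timestamps, 1.75)
    valid_times = get_valid_intervals(
        timestamps,
        sampling_rate,
        gap_proportion=2.5,
        min_valid_len=int(sampling_rate),
    )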