From ca2f462733d7fdd8af25f3837e6f4baef2b57928 Mon Sep 17 00:00:00 2001
From: roomrys
Date: Sat, 31 Aug 2024 16:51:44 +0000
Subject: [PATCH] deploy: b9bb300a23b3ad7a09d9f32eed74a9aaf9c9bc5b
---
develop/.buildinfo | 2 +-
develop/CODE_OF_CONDUCT.html | 2 +-
develop/CONTRIBUTING.html | 2 +-
develop/_sources/api.rst | 76 +-
develop/_sources/guides/cli.md | 25 +-
develop/_sources/guides/gui.md | 2 +-
develop/_sources/installation.md | 6 +-
.../_sources/notebooks/Data_structures.ipynb | 2 +-
.../Interactive_and_realtime_inference.ipynb | 2 +-
.../Interactive_and_resumable_training.ipynb | 2 +-
.../_sources/notebooks/Model_evaluation.ipynb | 2 +-
.../notebooks/Post_inference_tracking.ipynb | 2 +-
..._and_inference_on_an_example_dataset.ipynb | 2570 ++++++++---------
...ing_and_inference_using_Google_Drive.ipynb | 726 ++---
develop/_static/documentation_options.js | 2 +-
develop/api.html | 230 +-
develop/api/sleap.info.align.html | 18 +-
.../api/sleap.info.feature_suggestions.html | 50 +-
develop/api/sleap.info.labels.html | 2 +-
develop/api/sleap.info.metrics.html | 22 +-
develop/api/sleap.info.summary.html | 14 +-
develop/api/sleap.info.trackcleaner.html | 4 +-
develop/api/sleap.info.write_tracking_h5.html | 18 +-
develop/api/sleap.instance.html | 154 +-
develop/api/sleap.io.asyncvideo.html | 14 +-
develop/api/sleap.io.convert.html | 4 +-
develop/api/sleap.io.dataset.html | 185 +-
develop/api/sleap.io.format.adaptor.html | 20 +-
develop/api/sleap.io.format.alphatracker.html | 26 +-
develop/api/sleap.io.format.coco.html | 14 +-
develop/api/sleap.io.format.csv.html | 14 +-
develop/api/sleap.io.format.deeplabcut.html | 28 +-
develop/api/sleap.io.format.deepposekit.html | 14 +-
develop/api/sleap.io.format.dispatch.html | 2 +-
develop/api/sleap.io.format.filehandle.html | 8 +-
develop/api/sleap.io.format.genericjson.html | 16 +-
develop/api/sleap.io.format.hdf5.html | 16 +-
develop/api/sleap.io.format.labels_json.html | 18 +-
develop/api/sleap.io.format.leap_matlab.html | 14 +-
develop/api/sleap.io.format.main.html | 6 +-
develop/api/sleap.io.format.ndx_pose.html | 16 +-
develop/api/sleap.io.format.nix.html | 16 +-
.../api/sleap.io.format.sleap_analysis.html | 16 +-
develop/api/sleap.io.format.text.html | 16 +-
develop/api/sleap.io.legacy.html | 6 +-
develop/api/sleap.io.pathutils.html | 8 +-
develop/api/sleap.io.video.html | 90 +-
develop/api/sleap.io.videowriter.html | 10 +-
develop/api/sleap.io.visuals.html | 18 +-
develop/api/sleap.message.html | 18 +-
.../api/sleap.nn.architectures.common.html | 4 +-
...leap.nn.architectures.encoder_decoder.html | 28 +-
.../api/sleap.nn.architectures.hourglass.html | 20 +-
develop/api/sleap.nn.architectures.hrnet.html | 12 +-
develop/api/sleap.nn.architectures.leap.html | 6 +-
....nn.architectures.pretrained_encoders.html | 8 +-
.../api/sleap.nn.architectures.resnet.html | 26 +-
develop/api/sleap.nn.architectures.unet.html | 10 +-
.../sleap.nn.architectures.upsampling.html | 8 +-
develop/api/sleap.nn.callbacks.html | 38 +-
develop/api/sleap.nn.config.data.html | 10 +-
develop/api/sleap.nn.config.model.html | 40 +-
develop/api/sleap.nn.config.optimization.html | 12 +-
develop/api/sleap.nn.config.outputs.html | 18 +-
develop/api/sleap.nn.config.training_job.html | 16 +-
develop/api/sleap.nn.config.utils.html | 4 +-
develop/api/sleap.nn.data.augmentation.html | 56 +-
.../api/sleap.nn.data.confidence_maps.html | 20 +-
develop/api/sleap.nn.data.dataset_ops.html | 30 +-
develop/api/sleap.nn.data.edge_maps.html | 16 +-
develop/api/sleap.nn.data.general.html | 18 +-
develop/api/sleap.nn.data.grouping.html | 6 +-
develop/api/sleap.nn.data.identity.html | 14 +-
develop/api/sleap.nn.data.inference.html | 14 +-
.../api/sleap.nn.data.instance_centroids.html | 12 +-
.../api/sleap.nn.data.instance_cropping.html | 18 +-
develop/api/sleap.nn.data.normalization.html | 28 +-
.../api/sleap.nn.data.offset_regression.html | 6 +-
develop/api/sleap.nn.data.pipelines.html | 68 +-
develop/api/sleap.nn.data.providers.html | 20 +-
develop/api/sleap.nn.data.resizing.html | 22 +-
develop/api/sleap.nn.data.training.html | 12 +-
develop/api/sleap.nn.data.utils.html | 16 +-
develop/api/sleap.nn.evals.html | 30 +-
develop/api/sleap.nn.heads.html | 40 +-
develop/api/sleap.nn.identity.html | 8 +-
develop/api/sleap.nn.inference.html | 160 +-
develop/api/sleap.nn.losses.html | 16 +-
develop/api/sleap.nn.model.html | 8 +-
develop/api/sleap.nn.paf_grouping.html | 46 +-
develop/api/sleap.nn.peak_finding.html | 20 +-
develop/api/sleap.nn.system.html | 32 +-
develop/api/sleap.nn.tracker.components.html | 57 +-
develop/api/sleap.nn.tracker.kalman.html | 8 +-
develop/api/sleap.nn.tracking.html | 50 +-
develop/api/sleap.nn.training.html | 62 +-
develop/api/sleap.nn.utils.html | 10 +-
develop/api/sleap.nn.viz.html | 18 +-
develop/api/sleap.skeleton.html | 84 +-
develop/api/sleap.util.html | 32 +-
develop/datasets.html | 2 +-
develop/genindex.html | 66 +-
develop/guides/choosing-models.html | 2 +-
develop/guides/cli.html | 27 +-
develop/guides/colab.html | 2 +-
develop/guides/custom-training.html | 2 +-
develop/guides/gui.html | 4 +-
develop/guides/index.html | 2 +-
develop/guides/merging.html | 2 +-
develop/guides/proofreading.html | 2 +-
develop/guides/remote.html | 2 +-
develop/guides/skeletons.html | 2 +-
develop/guides/training.html | 2 +-
develop/guides/troubleshooting-workflows.html | 2 +-
develop/help.html | 2 +-
develop/index.html | 8 +-
develop/installation.html | 84 +-
develop/notebooks/Analysis_examples.html | 10 +-
develop/notebooks/Data_structures.html | 18 +-
.../Interactive_and_realtime_inference.html | 16 +-
.../Interactive_and_resumable_training.html | 10 +-
develop/notebooks/Model_evaluation.html | 16 +-
.../notebooks/Post_inference_tracking.html | 10 +-
...g_and_inference_on_an_example_dataset.html | 36 +-
...ning_and_inference_using_Google_Drive.html | 12 +-
.../notebooks/analysis_example/README.html | 2 +-
develop/notebooks/index.html | 2 +-
develop/objects.inv | Bin 20588 -> 20518 bytes
develop/overview.html | 2 +-
develop/py-modindex.html | 2 +-
develop/search.html | 2 +-
develop/searchindex.js | 2 +-
develop/tutorials/analysis.html | 2 +-
develop/tutorials/assisted-labeling.html | 2 +-
develop/tutorials/initial-labeling.html | 2 +-
develop/tutorials/initial-training.html | 2 +-
develop/tutorials/new-project.html | 2 +-
develop/tutorials/proofreading.html | 2 +-
develop/tutorials/tutorial.html | 2 +-
139 files changed, 3036 insertions(+), 3196 deletions(-)
diff --git a/develop/.buildinfo b/develop/.buildinfo
index ff92ce9d0..5fcb3d65d 100644
--- a/develop/.buildinfo
+++ b/develop/.buildinfo
@@ -1,4 +1,4 @@
# Sphinx build info version 1
# This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
-config: a7b7f9800faa1bcc2746e1b3ba7ad6cf
+config: ea14d6c449de46604b07382202b24124
tags: 645f666f9bcd5a90fca523b33c5a78b7
diff --git a/develop/CODE_OF_CONDUCT.html b/develop/CODE_OF_CONDUCT.html
index 1c50c1822..afa95fae3 100644
--- a/develop/CODE_OF_CONDUCT.html
+++ b/develop/CODE_OF_CONDUCT.html
@@ -9,7 +9,7 @@
- Contributor Covenant Code of Conduct — SLEAP (v1.4.1a2)
+ Contributor Covenant Code of Conduct — SLEAP (v1.3.4)
diff --git a/develop/CONTRIBUTING.html b/develop/CONTRIBUTING.html
index f247fa178..d21c88946 100644
--- a/develop/CONTRIBUTING.html
+++ b/develop/CONTRIBUTING.html
@@ -9,7 +9,7 @@
- Contributing to SLEAP — SLEAP (v1.4.1a2)
+ Contributing to SLEAP — SLEAP (v1.3.4)
diff --git a/develop/_sources/api.rst b/develop/_sources/api.rst
index e6af22307..dfb670913 100644
--- a/develop/_sources/api.rst
+++ b/develop/_sources/api.rst
@@ -19,38 +19,6 @@ Developer API
sleap.message
sleap.skeleton
sleap.util
- sleap.info.align
- sleap.info.feature_suggestions
- sleap.info.labels
- sleap.info.metrics
- sleap.info.summary
- sleap.info.trackcleaner
- sleap.info.write_tracking_h5
- sleap.io.asyncvideo
- sleap.io.convert
- sleap.io.dataset
- sleap.io.legacy
- sleap.io.pathutils
- sleap.io.video
- sleap.io.videowriter
- sleap.io.visuals
- sleap.io.format.adaptor
- sleap.io.format.alphatracker
- sleap.io.format.coco
- sleap.io.format.csv
- sleap.io.format.deeplabcut
- sleap.io.format.deepposekit
- sleap.io.format.dispatch
- sleap.io.format.filehandle
- sleap.io.format.genericjson
- sleap.io.format.hdf5
- sleap.io.format.labels_json
- sleap.io.format.leap_matlab
- sleap.io.format.main
- sleap.io.format.ndx_pose
- sleap.io.format.nix
- sleap.io.format.sleap_analysis
- sleap.io.format.text
sleap.nn.callbacks
sleap.nn.evals
sleap.nn.heads
@@ -65,12 +33,6 @@ Developer API
sleap.nn.training
sleap.nn.utils
sleap.nn.viz
- sleap.nn.config.data
- sleap.nn.config.model
- sleap.nn.config.optimization
- sleap.nn.config.outputs
- sleap.nn.config.training_job
- sleap.nn.config.utils
sleap.nn.architectures.common
sleap.nn.architectures.encoder_decoder
sleap.nn.architectures.hourglass
@@ -99,3 +61,41 @@ Developer API
sleap.nn.data.resizing
sleap.nn.data.training
sleap.nn.data.utils
+ sleap.nn.config.data
+ sleap.nn.config.model
+ sleap.nn.config.optimization
+ sleap.nn.config.outputs
+ sleap.nn.config.training_job
+ sleap.nn.config.utils
+ sleap.info.align
+ sleap.info.feature_suggestions
+ sleap.info.labels
+ sleap.info.metrics
+ sleap.info.summary
+ sleap.info.trackcleaner
+ sleap.info.write_tracking_h5
+ sleap.io.asyncvideo
+ sleap.io.convert
+ sleap.io.dataset
+ sleap.io.legacy
+ sleap.io.pathutils
+ sleap.io.video
+ sleap.io.videowriter
+ sleap.io.visuals
+ sleap.io.format.adaptor
+ sleap.io.format.alphatracker
+ sleap.io.format.coco
+ sleap.io.format.csv
+ sleap.io.format.deeplabcut
+ sleap.io.format.deepposekit
+ sleap.io.format.dispatch
+ sleap.io.format.filehandle
+ sleap.io.format.genericjson
+ sleap.io.format.hdf5
+ sleap.io.format.labels_json
+ sleap.io.format.leap_matlab
+ sleap.io.format.main
+ sleap.io.format.ndx_pose
+ sleap.io.format.nix
+ sleap.io.format.sleap_analysis
+ sleap.io.format.text
diff --git a/develop/_sources/guides/cli.md b/develop/_sources/guides/cli.md
index 03b806903..35ea52171 100644
--- a/develop/_sources/guides/cli.md
+++ b/develop/_sources/guides/cli.md
@@ -36,8 +36,8 @@ optional arguments:
```none
usage: sleap-train [-h] [--video-paths VIDEO_PATHS] [--val_labels VAL_LABELS]
- [--test_labels TEST_LABELS] [--tensorboard] [--save_viz]
- [--keep_viz] [--zmq] [--run_name RUN_NAME] [--prefix PREFIX]
+ [--test_labels TEST_LABELS] [--tensorboard] [--save_viz]
+ [--zmq] [--run_name RUN_NAME] [--prefix PREFIX]
[--suffix SUFFIX]
training_job_path [labels_path]
@@ -68,8 +68,6 @@ optional arguments:
--save_viz Enable saving of prediction visualizations to the run
folder if not already specified in the training job
config.
- --keep_viz Keep prediction visualization images in the run
- folder after training if --save_viz is enabled.
--zmq Enable ZMQ logging (for GUI) if not already specified
in the training job config.
--run_name RUN_NAME Run name to use when saving file, overrides other run
@@ -101,9 +99,9 @@ optional arguments:
-e [EXPORT_PATH], --export_path [EXPORT_PATH]
Path to output directory where the frozen model will be exported to.
Defaults to a folder named 'exported_model'.
- -r, --ragged RAGGED
- Keep tensors ragged if present. If omitted, convert
- ragged tensors into regular tensors with NaN padding.
+ -u, --unrag UNRAG
+ Convert ragged tensors into regular tensors with NaN padding.
+ Defaults to True.
-n, --max_instances MAX_INSTANCES
Limit maximum number of instances in multi-instance models.
Not available for ID models. Defaults to None.
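(Not part of the patch above, but for context on the restored `--unrag` behavior: with NaN padding, a frame with fewer detected instances than the model maximum is padded with NaN rows. A minimal sketch of stripping that padding with numpy, assuming a hypothetical `points` array shaped `(instances, nodes, 2)` as produced by an exported model:)

```python
import numpy as np

# Hypothetical output from an exported (unragged) model:
# 3 instance slots, 2 nodes, (x, y) coordinates; the last slot is padding.
points = np.array(
    [
        [[10.0, 12.0], [11.0, 15.0]],
        [[40.0, 42.0], [41.0, 45.0]],
        [[np.nan, np.nan], [np.nan, np.nan]],  # NaN-padded empty slot
    ]
)

# Keep only instance slots where at least one node was detected.
valid = ~np.isnan(points).all(axis=(1, 2))
print(points[valid].shape)  # (2, 2, 2)
```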
@@ -138,10 +136,7 @@ usage: sleap-track [-h] [-m MODELS] [--frames FRAMES] [--only-labeled-frames] [-
[data_path]
positional arguments:
- data_path Path to data to predict on. This can be one of the following: A .slp file containing labeled data; A folder containing multiple
- video files in supported formats; An individual video file in a supported format; A CSV file with a column of video file paths.
- If more than one column is provided in the CSV file, the first will be used for the input data paths and the next column will be
- used as the output paths; A text file with a path to a video file on each line
+ data_path Path to data to predict on. This can be a labels (.slp) file or any supported video format.
optional arguments:
-h, --help show this help message and exit
@@ -156,7 +151,7 @@ optional arguments:
Only run inference on unlabeled suggested frames when running on labels dataset. This is useful for generating predictions for
initialization during labeling.
-o OUTPUT, --output OUTPUT
- The output filename or directory path to use for the predicted data. If not provided, defaults to '[data_path].predictions.slp'.
+ The output filename to use for the predicted data. If not provided, defaults to '[data_path].predictions.slp'.
--no-empty-frames Clear any empty frames that did not have any detected instances before saving to output.
--verbosity {none,rich,json}
Verbosity of inference progress reporting. 'none' does not output anything during inference, 'rich' displays an updating
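(Again for context rather than part of the patch: the `sleap-track` flow above is also available from the Python API used in the notebooks later in this deploy. A sketch, assuming a top-down model pair trained as in the example dataset notebook; paths are illustrative:)

```python
import sleap

# Load a trained top-down model pair (paths are illustrative).
predictor = sleap.load_model(
    ["models/courtship.centroid", "models/courtship.topdown_confmaps"]
)

# Predict on a video and save, mirroring sleap-track's default
# '[data_path].predictions.slp' output naming.
video = sleap.load_video(
    "dataset/drosophila-melanogaster-courtship/20190128_113421.mp4"
)
labels = predictor.predict(video)
labels.save("20190128_113421.mp4.predictions.slp")
```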
@@ -327,8 +322,7 @@ optional arguments:
analysis file for the latter video is given a default name.
--format FORMAT Output format. Default ('slp') is SLEAP dataset;
'analysis' results in analysis.h5 file; 'analysis.nix' results
- in an analysis nix file; 'analysis.csv' results
- in an analysis csv file; 'h5' or 'json' results in SLEAP dataset
+ in an analysis nix file; 'h5' or 'json' results in SLEAP dataset
with specified file format.
--video VIDEO Path to video (if needed for conversion).
```
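(Context note on the `sleap-convert` formats above: the 'analysis' HDF5 output can be read back with plain h5py. A short sketch, assuming an exported file named `predictions.analysis.h5`; the key names and transpose follow SLEAP's analysis examples:)

```python
import h5py

# Read tracks from a SLEAP analysis file (filename is illustrative).
with h5py.File("predictions.analysis.h5", "r") as f:
    node_names = [n.decode() for n in f["node_names"][:]]
    tracks = f["tracks"][:].T  # -> (frames, nodes, 2, tracks) after transpose

print(node_names)
print(tracks.shape)
```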
@@ -395,9 +389,6 @@ optional arguments:
--distinctly_color DISTINCTLY_COLOR
Specify how to color instances. Options include: "instances",
"edges", and "nodes" (default: "instances")
- --background BACKGROUND
- Specify the type of background to be used to save the videos.
- Options: original, black, white and grey. (default: "original")
```
## Debugging
diff --git a/develop/_sources/guides/gui.md b/develop/_sources/guides/gui.md
index 813ed68fa..88cf3f656 100644
--- a/develop/_sources/guides/gui.md
+++ b/develop/_sources/guides/gui.md
@@ -60,7 +60,7 @@ Note that many of the menu commands have keyboard shortcuts which can be configur
"**Edge Style**" controls whether edges are drawn as thin lines or as wedges which indicate the {ref}`orientation` of the instance (as well as the direction of the part affinity field which would be used to predict the connection between nodes when using a "bottom-up" approach).
-"**Trail Length**" allows you to show a trail of where each instance was located in prior frames (the length of the trail is the number of prior frames). This can be useful when proofreading predictions since it can help you detect swaps in the identities of animals across frames. By default, you can only select trail lengths of up to 250 frames. You can use a custom trail length by modifying the default length in the `preferences.yaml` file. However, using trail lengths longer than about 500 frames can result in significant lag.
+"**Trail Length**" allows you to show a trail of where each instance was located in prior frames (the length of the trail is the number of prior frames). This can be useful when proofreading predictions since it can help you detect swaps in the identities of animals across frames.
"**Fit Instances to View**" allows you to toggle whether the view is auto-zoomed to the instances in each frame. This can be useful when proofreading predictions.
diff --git a/develop/_sources/installation.md b/develop/_sources/installation.md
index c0ab66580..ee2e7eec0 100644
--- a/develop/_sources/installation.md
+++ b/develop/_sources/installation.md
@@ -137,13 +137,13 @@ SLEAP can be installed three different ways: via {ref}`conda package
diff --git a/develop/_sources/notebooks/Data_structures.ipynb b/develop/_sources/notebooks/Data_structures.ipynb
--- a/develop/_sources/notebooks/Data_structures.ipynb
+++ b/develop/_sources/notebooks/Data_structures.ipynb
-    "!pip install -qqq \"sleap[pypi]>=1.3.3\"\n",
+ "!pip install -qqq \"sleap[pypi]>=1.3.4\"\n",
"\n",
"# But to do it locally, we'd recommend the conda package (available on Windows + Linux):\n",
"# conda create -n sleap -c sleap -c conda-forge -c nvidia sleap"
diff --git a/develop/_sources/notebooks/Interactive_and_realtime_inference.ipynb b/develop/_sources/notebooks/Interactive_and_realtime_inference.ipynb
index 4a3b612a2..94a20ea3b 100644
--- a/develop/_sources/notebooks/Interactive_and_realtime_inference.ipynb
+++ b/develop/_sources/notebooks/Interactive_and_realtime_inference.ipynb
@@ -60,7 +60,7 @@
"source": [
"# This should take care of all the dependencies on colab:\n",
"!pip uninstall -qqq -y opencv-python opencv-contrib-python\n",
- "!pip install -qqq \"sleap[pypi]>=1.3.3\"\n",
+ "!pip install -qqq \"sleap[pypi]>=1.3.4\"\n",
"\n",
"\n",
"# But to do it locally, we'd recommend the conda package (available on Windows + Linux):\n",
diff --git a/develop/_sources/notebooks/Interactive_and_resumable_training.ipynb b/develop/_sources/notebooks/Interactive_and_resumable_training.ipynb
index f30f036f3..68b4f0715 100644
--- a/develop/_sources/notebooks/Interactive_and_resumable_training.ipynb
+++ b/develop/_sources/notebooks/Interactive_and_resumable_training.ipynb
@@ -62,7 +62,7 @@
"source": [
"# This should take care of all the dependencies on colab:\n",
"!pip uninstall -qqq -y opencv-python opencv-contrib-python\n",
- "!pip install -qqq \"sleap[pypi]>=1.3.3\"\n",
+ "!pip install -qqq \"sleap[pypi]>=1.3.4\"\n",
"\n",
"\n",
"# But to do it locally, we'd recommend the conda package (available on Windows + Linux):\n",
diff --git a/develop/_sources/notebooks/Model_evaluation.ipynb b/develop/_sources/notebooks/Model_evaluation.ipynb
index 41ca6568c..af2b55d51 100644
--- a/develop/_sources/notebooks/Model_evaluation.ipynb
+++ b/develop/_sources/notebooks/Model_evaluation.ipynb
@@ -40,7 +40,7 @@
],
"source": [
"!pip uninstall -qqq -y opencv-python opencv-contrib-python\n",
- "!pip install -qqq \"sleap[pypi]>=1.3.3\"\n",
+ "!pip install -qqq \"sleap[pypi]>=1.3.4\"\n",
"!apt -qq install tree\n",
"!wget -q https://storage.googleapis.com/sleap-data/reference/flies13/td_fast.210505_012601.centered_instance.n%3D1800.zip\n",
"!unzip -qq -o -d \"td_fast.210505_012601.centered_instance.n=1800\" \"td_fast.210505_012601.centered_instance.n=1800.zip\""
diff --git a/develop/_sources/notebooks/Post_inference_tracking.ipynb b/develop/_sources/notebooks/Post_inference_tracking.ipynb
index 239176bdb..e91ed002d 100644
--- a/develop/_sources/notebooks/Post_inference_tracking.ipynb
+++ b/develop/_sources/notebooks/Post_inference_tracking.ipynb
@@ -61,7 +61,7 @@
"source": [
"# This should take care of all the dependencies on colab:\n",
"!pip uninstall -qqq -y opencv-python opencv-contrib-python\n",
- "!pip install -qqq \"sleap[pypi]>=1.3.3\"\n",
+ "!pip install -qqq \"sleap[pypi]>=1.3.4\"\n",
"\n",
"# But to do it locally, we'd recommend the conda package (available on Windows + Linux):\n",
"# conda create -n sleap -c sleap -c conda-forge -c nvidia sleap"
diff --git a/develop/_sources/notebooks/Training_and_inference_on_an_example_dataset.ipynb b/develop/_sources/notebooks/Training_and_inference_on_an_example_dataset.ipynb
index 4e26cb286..df101eb19 100644
--- a/develop/_sources/notebooks/Training_and_inference_on_an_example_dataset.ipynb
+++ b/develop/_sources/notebooks/Training_and_inference_on_an_example_dataset.ipynb
@@ -1,1302 +1,1302 @@
{
- "cells": [
- {
- "cell_type": "markdown",
- "metadata": {
- "colab_type": "text",
- "id": "view-in-github"
- },
- "source": [
- ""
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Training and inference on an example dataset"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "colab_type": "text",
- "id": "LlV70jDuWzea"
- },
- "source": [
- "In this notebook we'll install SLEAP, download a sample dataset, run training and inference on that dataset using the SLEAP command-line interface, and then download the predictions."
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "colab_type": "text",
- "id": "yX9noEb8m8re"
- },
- "source": [
- "## Install SLEAP\n",
- "Note: Before installing SLEAP check [SLEAP releases](https://github.com/talmolab/sleap/releases) page for the latest version."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 36,
- "metadata": {
- "colab": {
- "base_uri": "https://localhost:8080/",
- "height": 1000
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "view-in-github"
+ },
+ "source": [
+ ""
+ ]
},
- "id": "DUfnkxMtLcK3",
- "outputId": "a6340ef1-eaac-42ef-f8d4-bcc499feb57b"
- },
- "outputs": [
{
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "\u001b[31mERROR: Cannot uninstall opencv-python 4.6.0, RECORD file not found. Hint: The package was installed by conda.\u001b[0m\u001b[31m\n",
- "\u001b[0m\u001b[31mERROR: Cannot uninstall shiboken2 5.15.6, RECORD file not found. You might be able to recover from this via: 'pip install --force-reinstall --no-deps shiboken2==5.15.6'.\u001b[0m\u001b[31m\n",
- "\u001b[0m"
- ]
- }
- ],
- "source": [
- "!pip uninstall -qqq -y opencv-python opencv-contrib-python\n",
- "!pip install -qqq \"sleap[pypi]>=1.3.3\""
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "iq7jrgUksLtR"
- },
- "source": [
- "## Download sample training data into Colab\n",
- "Let's download a sample dataset from the SLEAP [sample datasets repository](https://github.com/talmolab/sleap-datasets) into Colab."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 24,
- "metadata": {
- "colab": {
- "base_uri": "https://localhost:8080/"
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Training and inference on an example dataset"
+ ]
},
- "id": "fm3cU1Bc0tWc",
- "outputId": "c0ac5677-e3c5-477c-a2f7-44d619208b22"
- },
- "outputs": [
{
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "E: Could not open lock file /var/lib/dpkg/lock-frontend - open (13: Permission denied)\n",
- "E: Unable to acquire the dpkg frontend lock (/var/lib/dpkg/lock-frontend), are you root?\n",
- "--2023-09-01 13:30:33-- https://github.com/talmolab/sleap-datasets/releases/download/dm-courtship-v1/drosophila-melanogaster-courtship.zip\n",
- "Resolving github.com (github.com)... 192.30.255.113\n",
- "Connecting to github.com (github.com)|192.30.255.113|:443... connected.\n",
- "HTTP request sent, awaiting response... 302 Found\n",
- "Location: https://objects.githubusercontent.com/github-production-release-asset-2e65be/263375180/16df8d00-94f1-11ea-98d1-6c03a2f89e1c?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIWNJYAX4CSVEH53A%2F20230901%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20230901T203033Z&X-Amz-Expires=300&X-Amz-Signature=b9b0638744af3144affdc46668c749128bd6c4f23ca2a1313821c7bbcd54ccdd&X-Amz-SignedHeaders=host&actor_id=0&key_id=0&repo_id=263375180&response-content-disposition=attachment%3B%20filename%3Ddrosophila-melanogaster-courtship.zip&response-content-type=application%2Foctet-stream [following]\n",
- "--2023-09-01 13:30:33-- https://objects.githubusercontent.com/github-production-release-asset-2e65be/263375180/16df8d00-94f1-11ea-98d1-6c03a2f89e1c?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAIWNJYAX4CSVEH53A%2F20230901%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20230901T203033Z&X-Amz-Expires=300&X-Amz-Signature=b9b0638744af3144affdc46668c749128bd6c4f23ca2a1313821c7bbcd54ccdd&X-Amz-SignedHeaders=host&actor_id=0&key_id=0&repo_id=263375180&response-content-disposition=attachment%3B%20filename%3Ddrosophila-melanogaster-courtship.zip&response-content-type=application%2Foctet-stream\n",
- "Resolving objects.githubusercontent.com (objects.githubusercontent.com)... 185.199.108.133, 185.199.109.133, 185.199.110.133, ...\n",
- "Connecting to objects.githubusercontent.com (objects.githubusercontent.com)|185.199.108.133|:443... connected.\n",
- "HTTP request sent, awaiting response... 200 OK\n",
- "Length: 111973079 (107M) [application/octet-stream]\n",
- "Saving to: ‘dataset.zip’\n",
- "\n",
- "dataset.zip 100%[===================>] 106.79M 63.0MB/s in 1.7s \n",
- "\n",
- "2023-09-01 13:30:35 (63.0 MB/s) - ‘dataset.zip’ saved [111973079/111973079]\n",
- "\n",
- "Archive: dataset.zip\n",
- " creating: dataset/drosophila-melanogaster-courtship/\n",
- " inflating: dataset/drosophila-melanogaster-courtship/.DS_Store \n",
- " creating: dataset/__MACOSX/\n",
- " creating: dataset/__MACOSX/drosophila-melanogaster-courtship/\n",
- " inflating: dataset/__MACOSX/drosophila-melanogaster-courtship/._.DS_Store \n",
- " inflating: dataset/drosophila-melanogaster-courtship/20190128_113421.mp4 \n",
- " inflating: dataset/__MACOSX/drosophila-melanogaster-courtship/._20190128_113421.mp4 \n",
- " inflating: dataset/drosophila-melanogaster-courtship/courtship_labels.slp \n",
- " inflating: dataset/__MACOSX/drosophila-melanogaster-courtship/._courtship_labels.slp \n",
- " inflating: dataset/drosophila-melanogaster-courtship/example.jpg \n",
- " inflating: dataset/__MACOSX/drosophila-melanogaster-courtship/._example.jpg \n",
- "\u001b[01;34mdataset\u001b[00m\n",
- "├── \u001b[01;34mdrosophila-melanogaster-courtship\u001b[00m\n",
- "│ ├── \u001b[01;32m20190128_113421.mp4\u001b[00m\n",
- "│ ├── \u001b[01;32mcourtship_labels.slp\u001b[00m\n",
- "│ └── \u001b[01;35mexample.jpg\u001b[00m\n",
- "└── \u001b[01;34m__MACOSX\u001b[00m\n",
- " └── \u001b[01;34mdrosophila-melanogaster-courtship\u001b[00m\n",
- "\n",
- "3 directories, 3 files\n"
- ]
- }
- ],
- "source": [
- "!apt-get install tree\n",
- "!wget -O dataset.zip https://github.com/talmolab/sleap-datasets/releases/download/dm-courtship-v1/drosophila-melanogaster-courtship.zip\n",
- "!mkdir dataset\n",
- "!unzip dataset.zip -d dataset\n",
- "!rm dataset.zip\n",
- "!tree dataset"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "xZ-sr67av5uu"
- },
- "source": [
- "## Train models\n",
- "For the top-down pipeline, we'll need train two models: a centroid model and a centered-instance model.\n",
- "\n",
- "Using the command-line interface, we'll first train a model for centroids using the default **training profile**. The training profile determines the model architecture, the learning rate, and other parameters.\n",
- "\n",
- "When you start training, you'll first see the training parameters and then the training and validation loss for each training epoch. \n",
- "\n",
- "As soon as you're satisfied with the validation loss you see for an epoch during training, you're welcome to stop training by clicking the stop button. The version of the model with the lowest validation loss is saved during training, and that's what will be used for inference.\n",
- "\n",
- "If you don't stop training, it will run for 200 epochs or until validation loss fails to improve for some number of epochs (controlled by the `early_stopping` fields in the training profile)."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 25,
- "metadata": {
- "id": "QKf6qzMqNBUi"
- },
- "outputs": [
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "LlV70jDuWzea"
+ },
+ "source": [
+ "In this notebook we'll install SLEAP, download a sample dataset, run training and inference on that dataset using the SLEAP command-line interface, and then download the predictions."
+ ]
+ },
{
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "INFO:sleap.nn.training:Versions:\n",
- "SLEAP: 1.3.2\n",
- "TensorFlow: 2.7.0\n",
- "Numpy: 1.21.5\n",
- "Python: 3.7.12\n",
- "OS: Linux-5.15.0-78-generic-x86_64-with-debian-bookworm-sid\n",
- "INFO:sleap.nn.training:Training labels file: dataset/drosophila-melanogaster-courtship/courtship_labels.slp\n",
- "INFO:sleap.nn.training:Training profile: /home/talmolab/sleap-estimates-animal-poses/pull-requests/sleap/sleap/training_profiles/baseline.centroid.json\n",
- "INFO:sleap.nn.training:\n",
- "INFO:sleap.nn.training:Arguments:\n",
- "INFO:sleap.nn.training:{\n",
- " \"training_job_path\": \"baseline.centroid.json\",\n",
- " \"labels_path\": \"dataset/drosophila-melanogaster-courtship/courtship_labels.slp\",\n",
- " \"video_paths\": [\n",
- " \"dataset/drosophila-melanogaster-courtship/20190128_113421.mp4\"\n",
- " ],\n",
- " \"val_labels\": null,\n",
- " \"test_labels\": null,\n",
- " \"base_checkpoint\": null,\n",
- " \"tensorboard\": false,\n",
- " \"save_viz\": false,\n",
- " \"zmq\": false,\n",
- " \"run_name\": \"courtship.centroid\",\n",
- " \"prefix\": \"\",\n",
- " \"suffix\": \"\",\n",
- " \"cpu\": false,\n",
- " \"first_gpu\": false,\n",
- " \"last_gpu\": false,\n",
- " \"gpu\": \"auto\"\n",
- "}\n",
- "INFO:sleap.nn.training:\n",
- "INFO:sleap.nn.training:Training job:\n",
- "INFO:sleap.nn.training:{\n",
- " \"data\": {\n",
- " \"labels\": {\n",
- " \"training_labels\": null,\n",
- " \"validation_labels\": null,\n",
- " \"validation_fraction\": 0.1,\n",
- " \"test_labels\": null,\n",
- " \"split_by_inds\": false,\n",
- " \"training_inds\": null,\n",
- " \"validation_inds\": null,\n",
- " \"test_inds\": null,\n",
- " \"search_path_hints\": [],\n",
- " \"skeletons\": []\n",
- " },\n",
- " \"preprocessing\": {\n",
- " \"ensure_rgb\": false,\n",
- " \"ensure_grayscale\": false,\n",
- " \"imagenet_mode\": null,\n",
- " \"input_scaling\": 0.5,\n",
- " \"pad_to_stride\": null,\n",
- " \"resize_and_pad_to_target\": true,\n",
- " \"target_height\": null,\n",
- " \"target_width\": null\n",
- " },\n",
- " \"instance_cropping\": {\n",
- " \"center_on_part\": null,\n",
- " \"crop_size\": null,\n",
- " \"crop_size_detection_padding\": 16\n",
- " }\n",
- " },\n",
- " \"model\": {\n",
- " \"backbone\": {\n",
- " \"leap\": null,\n",
- " \"unet\": {\n",
- " \"stem_stride\": null,\n",
- " \"max_stride\": 16,\n",
- " \"output_stride\": 2,\n",
- " \"filters\": 16,\n",
- " \"filters_rate\": 2.0,\n",
- " \"middle_block\": true,\n",
- " \"up_interpolate\": true,\n",
- " \"stacks\": 1\n",
- " },\n",
- " \"hourglass\": null,\n",
- " \"resnet\": null,\n",
- " \"pretrained_encoder\": null\n",
- " },\n",
- " \"heads\": {\n",
- " \"single_instance\": null,\n",
- " \"centroid\": {\n",
- " \"anchor_part\": null,\n",
- " \"sigma\": 2.5,\n",
- " \"output_stride\": 2,\n",
- " \"loss_weight\": 1.0,\n",
- " \"offset_refinement\": false\n",
- " },\n",
- " \"centered_instance\": null,\n",
- " \"multi_instance\": null,\n",
- " \"multi_class_bottomup\": null,\n",
- " \"multi_class_topdown\": null\n",
- " },\n",
- " \"base_checkpoint\": null\n",
- " },\n",
- " \"optimization\": {\n",
- " \"preload_data\": true,\n",
- " \"augmentation_config\": {\n",
- " \"rotate\": true,\n",
- " \"rotation_min_angle\": -15.0,\n",
- " \"rotation_max_angle\": 15.0,\n",
- " \"translate\": false,\n",
- " \"translate_min\": -5,\n",
- " \"translate_max\": 5,\n",
- " \"scale\": false,\n",
- " \"scale_min\": 0.9,\n",
- " \"scale_max\": 1.1,\n",
- " \"uniform_noise\": false,\n",
- " \"uniform_noise_min_val\": 0.0,\n",
- " \"uniform_noise_max_val\": 10.0,\n",
- " \"gaussian_noise\": false,\n",
- " \"gaussian_noise_mean\": 5.0,\n",
- " \"gaussian_noise_stddev\": 1.0,\n",
- " \"contrast\": false,\n",
- " \"contrast_min_gamma\": 0.5,\n",
- " \"contrast_max_gamma\": 2.0,\n",
- " \"brightness\": false,\n",
- " \"brightness_min_val\": 0.0,\n",
- " \"brightness_max_val\": 10.0,\n",
- " \"random_crop\": false,\n",
- " \"random_crop_height\": 256,\n",
- " \"random_crop_width\": 256,\n",
- " \"random_flip\": false,\n",
- " \"flip_horizontal\": true\n",
- " },\n",
- " \"online_shuffling\": true,\n",
- " \"shuffle_buffer_size\": 128,\n",
- " \"prefetch\": true,\n",
- " \"batch_size\": 4,\n",
- " \"batches_per_epoch\": null,\n",
- " \"min_batches_per_epoch\": 200,\n",
- " \"val_batches_per_epoch\": null,\n",
- " \"min_val_batches_per_epoch\": 10,\n",
- " \"epochs\": 200,\n",
- " \"optimizer\": \"adam\",\n",
- " \"initial_learning_rate\": 0.0001,\n",
- " \"learning_rate_schedule\": {\n",
- " \"reduce_on_plateau\": true,\n",
- " \"reduction_factor\": 0.5,\n",
- " \"plateau_min_delta\": 1e-08,\n",
- " \"plateau_patience\": 5,\n",
- " \"plateau_cooldown\": 3,\n",
- " \"min_learning_rate\": 1e-08\n",
- " },\n",
- " \"hard_keypoint_mining\": {\n",
- " \"online_mining\": false,\n",
- " \"hard_to_easy_ratio\": 2.0,\n",
- " \"min_hard_keypoints\": 2,\n",
- " \"max_hard_keypoints\": null,\n",
- " \"loss_scale\": 5.0\n",
- " },\n",
- " \"early_stopping\": {\n",
- " \"stop_training_on_plateau\": true,\n",
- " \"plateau_min_delta\": 1e-08,\n",
- " \"plateau_patience\": 20\n",
- " }\n",
- " },\n",
- " \"outputs\": {\n",
- " \"save_outputs\": true,\n",
- " \"run_name\": \"courtship.centroid\",\n",
- " \"run_name_prefix\": \"\",\n",
- " \"run_name_suffix\": null,\n",
- " \"runs_folder\": \"models\",\n",
- " \"tags\": [],\n",
- " \"save_visualizations\": true,\n",
- " \"keep_viz_images\": true,\n",
- " \"zip_outputs\": false,\n",
- " \"log_to_csv\": true,\n",
- " \"checkpointing\": {\n",
- " \"initial_model\": false,\n",
- " \"best_model\": true,\n",
- " \"every_epoch\": false,\n",
- " \"latest_model\": false,\n",
- " \"final_model\": false\n",
- " },\n",
- " \"tensorboard\": {\n",
- " \"write_logs\": false,\n",
- " \"loss_frequency\": \"epoch\",\n",
- " \"architecture_graph\": false,\n",
- " \"profile_graph\": false,\n",
- " \"visualizations\": true\n",
- " },\n",
- " \"zmq\": {\n",
- " \"subscribe_to_controller\": false,\n",
- " \"controller_address\": \"tcp://127.0.0.1:9000\",\n",
- " \"controller_polling_timeout\": 10,\n",
- " \"publish_updates\": false,\n",
- " \"publish_address\": \"tcp://127.0.0.1:9001\"\n",
- " }\n",
- " },\n",
- " \"name\": \"\",\n",
- " \"description\": \"\",\n",
- " \"sleap_version\": \"1.3.2\",\n",
- " \"filename\": \"/home/talmolab/sleap-estimates-animal-poses/pull-requests/sleap/sleap/training_profiles/baseline.centroid.json\"\n",
- "}\n",
- "INFO:sleap.nn.training:\n",
- "2023-09-01 13:30:38.827290: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:939] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
- "2023-09-01 13:30:38.831845: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:939] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
- "2023-09-01 13:30:38.832633: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:939] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
- "INFO:sleap.nn.training:Auto-selected GPU 0 with 22980 MiB of free memory.\n",
- "INFO:sleap.nn.training:Using GPU 0 for acceleration.\n",
- "INFO:sleap.nn.training:Disabled GPU memory pre-allocation.\n",
- "INFO:sleap.nn.training:System:\n",
- "GPUs: 1/1 available\n",
- " Device: /physical_device:GPU:0\n",
- " Available: True\n",
- " Initalized: False\n",
- " Memory growth: True\n",
- "INFO:sleap.nn.training:\n",
- "INFO:sleap.nn.training:Initializing trainer...\n",
- "INFO:sleap.nn.training:Loading training labels from: dataset/drosophila-melanogaster-courtship/courtship_labels.slp\n",
- "INFO:sleap.nn.training:Creating training and validation splits from validation fraction: 0.1\n",
- "INFO:sleap.nn.training: Splits: Training = 134 / Validation = 15.\n",
- "INFO:sleap.nn.training:Setting up for training...\n",
- "INFO:sleap.nn.training:Setting up pipeline builders...\n",
- "INFO:sleap.nn.training:Setting up model...\n",
- "INFO:sleap.nn.training:Building test pipeline...\n",
- "2023-09-01 13:30:39.755154: I tensorflow/core/platform/cpu_feature_guard.cc:151] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 AVX512F FMA\n",
- "To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n",
- "2023-09-01 13:30:39.756024: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:939] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
- "2023-09-01 13:30:39.757213: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:939] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
- "2023-09-01 13:30:39.758315: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:939] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
- "2023-09-01 13:30:40.089801: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:939] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
- "2023-09-01 13:30:40.090652: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:939] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
- "2023-09-01 13:30:40.091464: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:939] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
- "2023-09-01 13:30:40.092164: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 21084 MB memory: -> device: 0, name: NVIDIA RTX A5000, pci bus id: 0000:01:00.0, compute capability: 8.6\n",
- "INFO:sleap.nn.training:Loaded test example. [1.326s]\n",
- "INFO:sleap.nn.training: Input shape: (512, 512, 3)\n",
- "INFO:sleap.nn.training:Created Keras model.\n",
- "INFO:sleap.nn.training: Backbone: UNet(stacks=1, filters=16, filters_rate=2.0, kernel_size=3, stem_kernel_size=7, convs_per_block=2, stem_blocks=0, down_blocks=4, middle_block=True, up_blocks=3, up_interpolate=True, block_contraction=False)\n",
- "INFO:sleap.nn.training: Max stride: 16\n",
- "INFO:sleap.nn.training: Parameters: 1,953,393\n",
- "INFO:sleap.nn.training: Heads: \n",
- "INFO:sleap.nn.training: [0] = CentroidConfmapsHead(anchor_part=None, sigma=2.5, output_stride=2, loss_weight=1.0)\n",
- "INFO:sleap.nn.training: Outputs: \n",
- "INFO:sleap.nn.training: [0] = KerasTensor(type_spec=TensorSpec(shape=(None, 256, 256, 1), dtype=tf.float32, name=None), name='CentroidConfmapsHead/BiasAdd:0', description=\"created by layer 'CentroidConfmapsHead'\")\n",
- "INFO:sleap.nn.training:Training from scratch\n",
- "INFO:sleap.nn.training:Setting up data pipelines...\n",
- "INFO:sleap.nn.training:Training set: n = 134\n",
- "INFO:sleap.nn.training:Validation set: n = 15\n",
- "INFO:sleap.nn.training:Setting up optimization...\n",
- "INFO:sleap.nn.training: Learning rate schedule: LearningRateScheduleConfig(reduce_on_plateau=True, reduction_factor=0.5, plateau_min_delta=1e-08, plateau_patience=5, plateau_cooldown=3, min_learning_rate=1e-08)\n",
- "INFO:sleap.nn.training: Early stopping: EarlyStoppingConfig(stop_training_on_plateau=True, plateau_min_delta=1e-08, plateau_patience=20)\n",
- "INFO:sleap.nn.training:Setting up outputs...\n",
- "INFO:sleap.nn.training:Created run path: models/courtship.centroid\n",
- "INFO:sleap.nn.training:Setting up visualization...\n",
- "INFO:sleap.nn.training:Finished trainer set up. [3.5s]\n",
- "INFO:sleap.nn.training:Creating tf.data.Datasets for training data generation...\n",
- "INFO:sleap.nn.training:Finished creating training datasets. [5.4s]\n",
- "INFO:sleap.nn.training:Starting training loop...\n",
- "Epoch 1/200\n",
- "2023-09-01 13:30:49.814560: I tensorflow/stream_executor/cuda/cuda_dnn.cc:366] Loaded cuDNN version 8201\n",
- "2023-09-01 13:31:07.940585: I tensorflow/stream_executor/cuda/cuda_blas.cc:1774] TensorFloat-32 will be used for the matrix multiplication. This will only be logged once.\n",
- "200/200 - 20s - loss: 2.5945e-04 - val_loss: 1.5190e-04 - lr: 1.0000e-04 - 20s/epoch - 99ms/step\n",
- "Epoch 2/200\n",
- "200/200 - 11s - loss: 1.2513e-04 - val_loss: 9.5694e-05 - lr: 1.0000e-04 - 11s/epoch - 57ms/step\n",
- "Epoch 3/200\n",
- "200/200 - 11s - loss: 9.6987e-05 - val_loss: 6.8224e-05 - lr: 1.0000e-04 - 11s/epoch - 57ms/step\n",
- "Epoch 4/200\n",
- "200/200 - 12s - loss: 8.1486e-05 - val_loss: 5.0657e-05 - lr: 1.0000e-04 - 12s/epoch - 58ms/step\n",
- "Epoch 5/200\n",
- "200/200 - 11s - loss: 7.2174e-05 - val_loss: 5.3859e-05 - lr: 1.0000e-04 - 11s/epoch - 55ms/step\n",
- "Epoch 6/200\n",
- "200/200 - 11s - loss: 5.9181e-05 - val_loss: 7.0259e-05 - lr: 1.0000e-04 - 11s/epoch - 55ms/step\n",
- "Epoch 7/200\n",
- "200/200 - 11s - loss: 4.9353e-05 - val_loss: 4.9832e-05 - lr: 1.0000e-04 - 11s/epoch - 57ms/step\n",
- "Epoch 8/200\n",
- "200/200 - 11s - loss: 3.8997e-05 - val_loss: 4.4787e-05 - lr: 1.0000e-04 - 11s/epoch - 55ms/step\n",
- "Epoch 9/200\n",
- "200/200 - 11s - loss: 3.5596e-05 - val_loss: 6.5150e-05 - lr: 1.0000e-04 - 11s/epoch - 55ms/step\n",
- "Epoch 10/200\n",
- "200/200 - 12s - loss: 2.9256e-05 - val_loss: 3.8968e-05 - lr: 1.0000e-04 - 12s/epoch - 58ms/step\n",
- "Epoch 11/200\n",
- "200/200 - 11s - loss: 2.8572e-05 - val_loss: 3.5451e-05 - lr: 1.0000e-04 - 11s/epoch - 55ms/step\n",
- "Epoch 12/200\n",
- "200/200 - 11s - loss: 2.2156e-05 - val_loss: 4.8602e-05 - lr: 1.0000e-04 - 11s/epoch - 53ms/step\n",
- "Epoch 13/200\n",
- "200/200 - 11s - loss: 1.7656e-05 - val_loss: 4.1905e-05 - lr: 1.0000e-04 - 11s/epoch - 55ms/step\n",
- "Epoch 14/200\n",
- "200/200 - 11s - loss: 1.6440e-05 - val_loss: 3.6607e-05 - lr: 1.0000e-04 - 11s/epoch - 55ms/step\n",
- "Epoch 15/200\n",
- "200/200 - 11s - loss: 1.4415e-05 - val_loss: 4.1699e-05 - lr: 1.0000e-04 - 11s/epoch - 55ms/step\n",
- "Epoch 16/200\n",
- "200/200 - 11s - loss: 1.3589e-05 - val_loss: 3.5362e-05 - lr: 1.0000e-04 - 11s/epoch - 56ms/step\n",
- "Epoch 17/200\n",
- "200/200 - 11s - loss: 1.0888e-05 - val_loss: 2.1600e-05 - lr: 1.0000e-04 - 11s/epoch - 56ms/step\n",
- "Epoch 18/200\n",
- "200/200 - 11s - loss: 1.0426e-05 - val_loss: 3.6782e-05 - lr: 1.0000e-04 - 11s/epoch - 55ms/step\n",
- "Epoch 19/200\n",
- "200/200 - 11s - loss: 9.9092e-06 - val_loss: 3.8284e-05 - lr: 1.0000e-04 - 11s/epoch - 56ms/step\n",
- "Epoch 20/200\n",
- "200/200 - 11s - loss: 8.0018e-06 - val_loss: 2.9439e-05 - lr: 1.0000e-04 - 11s/epoch - 57ms/step\n",
- "Epoch 21/200\n",
- "200/200 - 11s - loss: 7.7977e-06 - val_loss: 2.8703e-05 - lr: 1.0000e-04 - 11s/epoch - 56ms/step\n",
- "Epoch 22/200\n",
- "\n",
- "Epoch 00022: ReduceLROnPlateau reducing learning rate to 4.999999873689376e-05.\n",
- "200/200 - 11s - loss: 6.5981e-06 - val_loss: 3.6030e-05 - lr: 1.0000e-04 - 11s/epoch - 55ms/step\n",
- "Epoch 23/200\n",
- "200/200 - 11s - loss: 4.6479e-06 - val_loss: 2.8081e-05 - lr: 5.0000e-05 - 11s/epoch - 55ms/step\n",
- "Epoch 24/200\n",
- "200/200 - 11s - loss: 4.2579e-06 - val_loss: 3.7954e-05 - lr: 5.0000e-05 - 11s/epoch - 55ms/step\n",
- "Epoch 25/200\n",
- "200/200 - 11s - loss: 3.9628e-06 - val_loss: 2.6399e-05 - lr: 5.0000e-05 - 11s/epoch - 56ms/step\n",
- "Epoch 26/200\n",
- "200/200 - 11s - loss: 3.6915e-06 - val_loss: 1.9973e-05 - lr: 5.0000e-05 - 11s/epoch - 56ms/step\n",
- "Epoch 27/200\n",
- "200/200 - 11s - loss: 3.4726e-06 - val_loss: 3.5831e-05 - lr: 5.0000e-05 - 11s/epoch - 55ms/step\n",
- "Epoch 28/200\n",
- "200/200 - 11s - loss: 3.2110e-06 - val_loss: 2.7290e-05 - lr: 5.0000e-05 - 11s/epoch - 56ms/step\n",
- "Epoch 29/200\n",
- "200/200 - 11s - loss: 3.3421e-06 - val_loss: 3.1827e-05 - lr: 5.0000e-05 - 11s/epoch - 56ms/step\n",
- "Epoch 30/200\n",
- "200/200 - 11s - loss: 3.3472e-06 - val_loss: 3.4653e-05 - lr: 5.0000e-05 - 11s/epoch - 56ms/step\n",
- "Epoch 31/200\n",
- "\n",
- "Epoch 00031: ReduceLROnPlateau reducing learning rate to 2.499999936844688e-05.\n",
- "200/200 - 11s - loss: 3.1221e-06 - val_loss: 2.7741e-05 - lr: 5.0000e-05 - 11s/epoch - 56ms/step\n",
- "Epoch 32/200\n",
- "200/200 - 11s - loss: 2.5739e-06 - val_loss: 3.2486e-05 - lr: 2.5000e-05 - 11s/epoch - 55ms/step\n",
- "Epoch 33/200\n",
- "200/200 - 11s - loss: 2.5589e-06 - val_loss: 3.3135e-05 - lr: 2.5000e-05 - 11s/epoch - 56ms/step\n",
- "Epoch 34/200\n",
- "200/200 - 11s - loss: 2.4215e-06 - val_loss: 2.8923e-05 - lr: 2.5000e-05 - 11s/epoch - 56ms/step\n",
- "Epoch 35/200\n",
- "200/200 - 11s - loss: 2.4033e-06 - val_loss: 2.8776e-05 - lr: 2.5000e-05 - 11s/epoch - 56ms/step\n",
- "Epoch 36/200\n",
- "200/200 - 11s - loss: 2.3358e-06 - val_loss: 2.5874e-05 - lr: 2.5000e-05 - 11s/epoch - 56ms/step\n",
- "Epoch 37/200\n",
- "200/200 - 11s - loss: 2.2922e-06 - val_loss: 3.6051e-05 - lr: 2.5000e-05 - 11s/epoch - 55ms/step\n",
- "Epoch 38/200\n",
- "\n",
- "Epoch 00038: ReduceLROnPlateau reducing learning rate to 1.249999968422344e-05.\n",
- "200/200 - 11s - loss: 2.1278e-06 - val_loss: 2.4898e-05 - lr: 2.5000e-05 - 11s/epoch - 55ms/step\n",
- "Epoch 39/200\n",
- "200/200 - 11s - loss: 2.0474e-06 - val_loss: 2.8901e-05 - lr: 1.2500e-05 - 11s/epoch - 56ms/step\n",
- "Epoch 40/200\n",
- "200/200 - 11s - loss: 2.0612e-06 - val_loss: 3.7469e-05 - lr: 1.2500e-05 - 11s/epoch - 56ms/step\n",
- "Epoch 41/200\n",
- "200/200 - 11s - loss: 1.8414e-06 - val_loss: 2.8496e-05 - lr: 1.2500e-05 - 11s/epoch - 56ms/step\n",
- "Epoch 42/200\n",
- "200/200 - 11s - loss: 2.0196e-06 - val_loss: 3.5206e-05 - lr: 1.2500e-05 - 11s/epoch - 56ms/step\n",
- "Epoch 43/200\n",
- "200/200 - 11s - loss: 1.8551e-06 - val_loss: 2.6483e-05 - lr: 1.2500e-05 - 11s/epoch - 56ms/step\n",
- "Epoch 44/200\n",
- "200/200 - 11s - loss: 1.9705e-06 - val_loss: 2.4643e-05 - lr: 1.2500e-05 - 11s/epoch - 55ms/step\n",
- "Epoch 45/200\n",
- "\n",
- "Epoch 00045: ReduceLROnPlateau reducing learning rate to 6.24999984211172e-06.\n",
- "200/200 - 11s - loss: 1.9136e-06 - val_loss: 2.8379e-05 - lr: 1.2500e-05 - 11s/epoch - 56ms/step\n",
- "Epoch 46/200\n",
- "200/200 - 11s - loss: 1.7911e-06 - val_loss: 4.0055e-05 - lr: 6.2500e-06 - 11s/epoch - 56ms/step\n",
- "Epoch 00046: early stopping\n",
- "INFO:sleap.nn.training:Finished training loop. [8.7 min]\n",
- "INFO:sleap.nn.training:Deleting visualization directory: models/courtship.centroid/viz\n",
- "INFO:sleap.nn.training:Saving evaluation metrics to model folder...\n",
- "\u001b[2KPredicting... \u001b[38;2;114;156;31m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[35m100%\u001b[0m ETA: \u001b[36m0:00:00\u001b[0m \u001b[31m33.7 FPS\u001b[0m31m51.9 FPS\u001b[0m31m52.6 FPS\u001b[0mFPS\u001b[0m\n",
- "\u001b[?25hINFO:sleap.nn.evals:Saved predictions: models/courtship.centroid/labels_pr.train.slp\n",
- "INFO:sleap.nn.evals:Saved metrics: models/courtship.centroid/metrics.train.npz\n",
- "INFO:sleap.nn.evals:OKS mAP: 0.725241\n",
- "\u001b[2KPredicting... \u001b[38;2;114;156;31m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[35m100%\u001b[0m ETA: \u001b[36m0:00:00\u001b[0m \u001b[31m7.3 FPS\u001b[0m0:00:01\u001b[0m \u001b[31m184.6 FPS\u001b[0mm\n",
- "\u001b[?25hINFO:sleap.nn.evals:Saved predictions: models/courtship.centroid/labels_pr.val.slp\n",
- "INFO:sleap.nn.evals:Saved metrics: models/courtship.centroid/metrics.val.npz\n",
- "INFO:sleap.nn.evals:OKS mAP: 0.870526\n"
- ]
- }
- ],
- "source": [
- "!sleap-train baseline.centroid.json \"dataset/drosophila-melanogaster-courtship/courtship_labels.slp\" --run_name \"courtship.centroid\" --video-paths \"dataset/drosophila-melanogaster-courtship/20190128_113421.mp4\""
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "Vm3i0ry04IMx"
- },
- "source": [
- "Let's now train a centered-instance model."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 26,
- "metadata": {
- "id": "ufbULTDw4Hbh"
- },
- "outputs": [
+ "cell_type": "markdown",
+ "metadata": {
+ "colab_type": "text",
+ "id": "yX9noEb8m8re"
+ },
+ "source": [
+ "## Install SLEAP\n",
+ "Note: Before installing SLEAP check [SLEAP releases](https://github.com/talmolab/sleap/releases) page for the latest version."
+ ]
+ },
{
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "INFO:sleap.nn.training:Versions:\n",
- "SLEAP: 1.3.2\n",
- "TensorFlow: 2.7.0\n",
- "Numpy: 1.21.5\n",
- "Python: 3.7.12\n",
- "OS: Linux-5.15.0-78-generic-x86_64-with-debian-bookworm-sid\n",
- "INFO:sleap.nn.training:Training labels file: dataset/drosophila-melanogaster-courtship/courtship_labels.slp\n",
- "INFO:sleap.nn.training:Training profile: /home/talmolab/sleap-estimates-animal-poses/pull-requests/sleap/sleap/training_profiles/baseline_medium_rf.topdown.json\n",
- "INFO:sleap.nn.training:\n",
- "INFO:sleap.nn.training:Arguments:\n",
- "INFO:sleap.nn.training:{\n",
- " \"training_job_path\": \"baseline_medium_rf.topdown.json\",\n",
- " \"labels_path\": \"dataset/drosophila-melanogaster-courtship/courtship_labels.slp\",\n",
- " \"video_paths\": [\n",
- " \"dataset/drosophila-melanogaster-courtship/20190128_113421.mp4\"\n",
- " ],\n",
- " \"val_labels\": null,\n",
- " \"test_labels\": null,\n",
- " \"base_checkpoint\": null,\n",
- " \"tensorboard\": false,\n",
- " \"save_viz\": false,\n",
- " \"zmq\": false,\n",
- " \"run_name\": \"courtship.topdown_confmaps\",\n",
- " \"prefix\": \"\",\n",
- " \"suffix\": \"\",\n",
- " \"cpu\": false,\n",
- " \"first_gpu\": false,\n",
- " \"last_gpu\": false,\n",
- " \"gpu\": \"auto\"\n",
- "}\n",
- "INFO:sleap.nn.training:\n",
- "INFO:sleap.nn.training:Training job:\n",
- "INFO:sleap.nn.training:{\n",
- " \"data\": {\n",
- " \"labels\": {\n",
- " \"training_labels\": null,\n",
- " \"validation_labels\": null,\n",
- " \"validation_fraction\": 0.1,\n",
- " \"test_labels\": null,\n",
- " \"split_by_inds\": false,\n",
- " \"training_inds\": null,\n",
- " \"validation_inds\": null,\n",
- " \"test_inds\": null,\n",
- " \"search_path_hints\": [],\n",
- " \"skeletons\": []\n",
- " },\n",
- " \"preprocessing\": {\n",
- " \"ensure_rgb\": false,\n",
- " \"ensure_grayscale\": false,\n",
- " \"imagenet_mode\": null,\n",
- " \"input_scaling\": 1.0,\n",
- " \"pad_to_stride\": null,\n",
- " \"resize_and_pad_to_target\": true,\n",
- " \"target_height\": null,\n",
- " \"target_width\": null\n",
- " },\n",
- " \"instance_cropping\": {\n",
- " \"center_on_part\": null,\n",
- " \"crop_size\": null,\n",
- " \"crop_size_detection_padding\": 16\n",
- " }\n",
- " },\n",
- " \"model\": {\n",
- " \"backbone\": {\n",
- " \"leap\": null,\n",
- " \"unet\": {\n",
- " \"stem_stride\": null,\n",
- " \"max_stride\": 16,\n",
- " \"output_stride\": 4,\n",
- " \"filters\": 24,\n",
- " \"filters_rate\": 2.0,\n",
- " \"middle_block\": true,\n",
- " \"up_interpolate\": true,\n",
- " \"stacks\": 1\n",
- " },\n",
- " \"hourglass\": null,\n",
- " \"resnet\": null,\n",
- " \"pretrained_encoder\": null\n",
- " },\n",
- " \"heads\": {\n",
- " \"single_instance\": null,\n",
- " \"centroid\": null,\n",
- " \"centered_instance\": {\n",
- " \"anchor_part\": null,\n",
- " \"part_names\": null,\n",
- " \"sigma\": 2.5,\n",
- " \"output_stride\": 4,\n",
- " \"loss_weight\": 1.0,\n",
- " \"offset_refinement\": false\n",
- " },\n",
- " \"multi_instance\": null,\n",
- " \"multi_class_bottomup\": null,\n",
- " \"multi_class_topdown\": null\n",
- " },\n",
- " \"base_checkpoint\": null\n",
- " },\n",
- " \"optimization\": {\n",
- " \"preload_data\": true,\n",
- " \"augmentation_config\": {\n",
- " \"rotate\": true,\n",
- " \"rotation_min_angle\": -15.0,\n",
- " \"rotation_max_angle\": 15.0,\n",
- " \"translate\": false,\n",
- " \"translate_min\": -5,\n",
- " \"translate_max\": 5,\n",
- " \"scale\": false,\n",
- " \"scale_min\": 0.9,\n",
- " \"scale_max\": 1.1,\n",
- " \"uniform_noise\": false,\n",
- " \"uniform_noise_min_val\": 0.0,\n",
- " \"uniform_noise_max_val\": 10.0,\n",
- " \"gaussian_noise\": false,\n",
- " \"gaussian_noise_mean\": 5.0,\n",
- " \"gaussian_noise_stddev\": 1.0,\n",
- " \"contrast\": false,\n",
- " \"contrast_min_gamma\": 0.5,\n",
- " \"contrast_max_gamma\": 2.0,\n",
- " \"brightness\": false,\n",
- " \"brightness_min_val\": 0.0,\n",
- " \"brightness_max_val\": 10.0,\n",
- " \"random_crop\": false,\n",
- " \"random_crop_height\": 256,\n",
- " \"random_crop_width\": 256,\n",
- " \"random_flip\": false,\n",
- " \"flip_horizontal\": true\n",
- " },\n",
- " \"online_shuffling\": true,\n",
- " \"shuffle_buffer_size\": 128,\n",
- " \"prefetch\": true,\n",
- " \"batch_size\": 4,\n",
- " \"batches_per_epoch\": null,\n",
- " \"min_batches_per_epoch\": 200,\n",
- " \"val_batches_per_epoch\": null,\n",
- " \"min_val_batches_per_epoch\": 10,\n",
- " \"epochs\": 200,\n",
- " \"optimizer\": \"adam\",\n",
- " \"initial_learning_rate\": 0.0001,\n",
- " \"learning_rate_schedule\": {\n",
- " \"reduce_on_plateau\": true,\n",
- " \"reduction_factor\": 0.5,\n",
- " \"plateau_min_delta\": 1e-08,\n",
- " \"plateau_patience\": 5,\n",
- " \"plateau_cooldown\": 3,\n",
- " \"min_learning_rate\": 1e-08\n",
- " },\n",
- " \"hard_keypoint_mining\": {\n",
- " \"online_mining\": false,\n",
- " \"hard_to_easy_ratio\": 2.0,\n",
- " \"min_hard_keypoints\": 2,\n",
- " \"max_hard_keypoints\": null,\n",
- " \"loss_scale\": 5.0\n",
- " },\n",
- " \"early_stopping\": {\n",
- " \"stop_training_on_plateau\": true,\n",
- " \"plateau_min_delta\": 1e-08,\n",
- " \"plateau_patience\": 10\n",
- " }\n",
- " },\n",
- " \"outputs\": {\n",
- " \"save_outputs\": true,\n",
- " \"run_name\": \"courtship.topdown_confmaps\",\n",
- " \"run_name_prefix\": \"\",\n",
- " \"run_name_suffix\": null,\n",
- " \"runs_folder\": \"models\",\n",
- " \"tags\": [],\n",
- " \"save_visualizations\": true,\n",
- " \"keep_viz_images\": true,\n",
- " \"zip_outputs\": false,\n",
- " \"log_to_csv\": true,\n",
- " \"checkpointing\": {\n",
- " \"initial_model\": false,\n",
- " \"best_model\": true,\n",
- " \"every_epoch\": false,\n",
- " \"latest_model\": false,\n",
- " \"final_model\": false\n",
- " },\n",
- " \"tensorboard\": {\n",
- " \"write_logs\": false,\n",
- " \"loss_frequency\": \"epoch\",\n",
- " \"architecture_graph\": true,\n",
- " \"profile_graph\": false,\n",
- " \"visualizations\": true\n",
- " },\n",
- " \"zmq\": {\n",
- " \"subscribe_to_controller\": false,\n",
- " \"controller_address\": \"tcp://127.0.0.1:9000\",\n",
- " \"controller_polling_timeout\": 10,\n",
- " \"publish_updates\": false,\n",
- " \"publish_address\": \"tcp://127.0.0.1:9001\"\n",
- " }\n",
- " },\n",
- " \"name\": \"\",\n",
- " \"description\": \"\",\n",
- " \"sleap_version\": \"1.3.2\",\n",
- " \"filename\": \"/home/talmolab/sleap-estimates-animal-poses/pull-requests/sleap/sleap/training_profiles/baseline_medium_rf.topdown.json\"\n",
- "}\n",
- "INFO:sleap.nn.training:\n",
- "2023-09-01 13:39:43.324520: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:939] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
- "2023-09-01 13:39:43.329181: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:939] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
- "2023-09-01 13:39:43.329961: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:939] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
- "INFO:sleap.nn.training:Auto-selected GPU 0 with 23056 MiB of free memory.\n",
- "INFO:sleap.nn.training:Using GPU 0 for acceleration.\n",
- "INFO:sleap.nn.training:Disabled GPU memory pre-allocation.\n",
- "INFO:sleap.nn.training:System:\n",
- "GPUs: 1/1 available\n",
- " Device: /physical_device:GPU:0\n",
- " Available: True\n",
- " Initalized: False\n",
- " Memory growth: True\n",
- "INFO:sleap.nn.training:\n",
- "INFO:sleap.nn.training:Initializing trainer...\n",
- "INFO:sleap.nn.training:Loading training labels from: dataset/drosophila-melanogaster-courtship/courtship_labels.slp\n",
- "INFO:sleap.nn.training:Creating training and validation splits from validation fraction: 0.1\n",
- "INFO:sleap.nn.training: Splits: Training = 134 / Validation = 15.\n",
- "INFO:sleap.nn.training:Setting up for training...\n",
- "INFO:sleap.nn.training:Setting up pipeline builders...\n",
- "INFO:sleap.nn.training:Setting up model...\n",
- "INFO:sleap.nn.training:Building test pipeline...\n",
- "2023-09-01 13:39:44.254912: I tensorflow/core/platform/cpu_feature_guard.cc:151] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 AVX512F FMA\n",
- "To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n",
- "2023-09-01 13:39:44.255468: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:939] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
- "2023-09-01 13:39:44.256291: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:939] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
- "2023-09-01 13:39:44.257158: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:939] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
- "2023-09-01 13:39:44.546117: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:939] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
- "2023-09-01 13:39:44.546866: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:939] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
- "2023-09-01 13:39:44.547533: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:939] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
- "2023-09-01 13:39:44.548184: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 21151 MB memory: -> device: 0, name: NVIDIA RTX A5000, pci bus id: 0000:01:00.0, compute capability: 8.6\n",
- "INFO:sleap.nn.training:Loaded test example. [1.684s]\n",
- "INFO:sleap.nn.training: Input shape: (144, 144, 3)\n",
- "INFO:sleap.nn.training:Created Keras model.\n",
- "INFO:sleap.nn.training: Backbone: UNet(stacks=1, filters=24, filters_rate=2.0, kernel_size=3, stem_kernel_size=7, convs_per_block=2, stem_blocks=0, down_blocks=4, middle_block=True, up_blocks=2, up_interpolate=True, block_contraction=False)\n",
- "INFO:sleap.nn.training: Max stride: 16\n",
- "INFO:sleap.nn.training: Parameters: 4,311,877\n",
- "INFO:sleap.nn.training: Heads: \n",
- "INFO:sleap.nn.training: [0] = CenteredInstanceConfmapsHead(part_names=['head', 'thorax', 'abdomen', 'wingL', 'wingR', 'forelegL4', 'forelegR4', 'midlegL4', 'midlegR4', 'hindlegL4', 'hindlegR4', 'eyeL', 'eyeR'], anchor_part=None, sigma=2.5, output_stride=4, loss_weight=1.0)\n",
- "INFO:sleap.nn.training: Outputs: \n",
- "INFO:sleap.nn.training: [0] = KerasTensor(type_spec=TensorSpec(shape=(None, 36, 36, 13), dtype=tf.float32, name=None), name='CenteredInstanceConfmapsHead/BiasAdd:0', description=\"created by layer 'CenteredInstanceConfmapsHead'\")\n",
- "INFO:sleap.nn.training:Training from scratch\n",
- "INFO:sleap.nn.training:Setting up data pipelines...\n",
- "INFO:sleap.nn.training:Training set: n = 134\n",
- "INFO:sleap.nn.training:Validation set: n = 15\n",
- "INFO:sleap.nn.training:Setting up optimization...\n",
- "INFO:sleap.nn.training: Learning rate schedule: LearningRateScheduleConfig(reduce_on_plateau=True, reduction_factor=0.5, plateau_min_delta=1e-08, plateau_patience=5, plateau_cooldown=3, min_learning_rate=1e-08)\n",
- "INFO:sleap.nn.training: Early stopping: EarlyStoppingConfig(stop_training_on_plateau=True, plateau_min_delta=1e-08, plateau_patience=10)\n",
- "INFO:sleap.nn.training:Setting up outputs...\n",
- "INFO:sleap.nn.training:Created run path: models/courtship.topdown_confmaps\n",
- "INFO:sleap.nn.training:Setting up visualization...\n",
- "INFO:sleap.nn.training:Finished trainer set up. [3.2s]\n",
- "INFO:sleap.nn.training:Creating tf.data.Datasets for training data generation...\n",
- "INFO:sleap.nn.training:Finished creating training datasets. [5.9s]\n",
- "INFO:sleap.nn.training:Starting training loop...\n",
- "Epoch 1/200\n",
- "2023-09-01 13:39:54.940083: I tensorflow/stream_executor/cuda/cuda_dnn.cc:366] Loaded cuDNN version 8201\n",
- "2023-09-01 13:40:00.337645: I tensorflow/stream_executor/cuda/cuda_blas.cc:1774] TensorFloat-32 will be used for the matrix multiplication. This will only be logged once.\n",
- "200/200 - 8s - loss: 0.0108 - head: 0.0073 - thorax: 0.0067 - abdomen: 0.0111 - wingL: 0.0125 - wingR: 0.0126 - forelegL4: 0.0111 - forelegR4: 0.0108 - midlegL4: 0.0127 - midlegR4: 0.0128 - hindlegL4: 0.0131 - hindlegR4: 0.0131 - eyeL: 0.0082 - eyeR: 0.0083 - val_loss: 0.0087 - val_head: 0.0033 - val_thorax: 0.0039 - val_abdomen: 0.0089 - val_wingL: 0.0105 - val_wingR: 0.0106 - val_forelegL4: 0.0091 - val_forelegR4: 0.0091 - val_midlegL4: 0.0123 - val_midlegR4: 0.0116 - val_hindlegL4: 0.0128 - val_hindlegR4: 0.0116 - val_eyeL: 0.0045 - val_eyeR: 0.0045 - lr: 1.0000e-04 - 8s/epoch - 38ms/step\n",
- "Epoch 2/200\n",
- "200/200 - 4s - loss: 0.0064 - head: 0.0019 - thorax: 0.0029 - abdomen: 0.0057 - wingL: 0.0061 - wingR: 0.0073 - forelegL4: 0.0075 - forelegR4: 0.0078 - midlegL4: 0.0092 - midlegR4: 0.0092 - hindlegL4: 0.0099 - hindlegR4: 0.0102 - eyeL: 0.0025 - eyeR: 0.0025 - val_loss: 0.0061 - val_head: 0.0015 - val_thorax: 0.0024 - val_abdomen: 0.0049 - val_wingL: 0.0056 - val_wingR: 0.0078 - val_forelegL4: 0.0079 - val_forelegR4: 0.0067 - val_midlegL4: 0.0086 - val_midlegR4: 0.0089 - val_hindlegL4: 0.0093 - val_hindlegR4: 0.0081 - val_eyeL: 0.0037 - val_eyeR: 0.0032 - lr: 1.0000e-04 - 4s/epoch - 19ms/step\n",
- "Epoch 3/200\n",
- "200/200 - 3s - loss: 0.0048 - head: 8.9048e-04 - thorax: 0.0019 - abdomen: 0.0036 - wingL: 0.0041 - wingR: 0.0051 - forelegL4: 0.0063 - forelegR4: 0.0066 - midlegL4: 0.0076 - midlegR4: 0.0076 - hindlegL4: 0.0076 - hindlegR4: 0.0080 - eyeL: 0.0015 - eyeR: 0.0015 - val_loss: 0.0058 - val_head: 0.0014 - val_thorax: 0.0021 - val_abdomen: 0.0044 - val_wingL: 0.0051 - val_wingR: 0.0070 - val_forelegL4: 0.0072 - val_forelegR4: 0.0063 - val_midlegL4: 0.0088 - val_midlegR4: 0.0085 - val_hindlegL4: 0.0097 - val_hindlegR4: 0.0079 - val_eyeL: 0.0038 - val_eyeR: 0.0032 - lr: 1.0000e-04 - 3s/epoch - 16ms/step\n",
- "Epoch 4/200\n",
- "200/200 - 3s - loss: 0.0041 - head: 7.6417e-04 - thorax: 0.0015 - abdomen: 0.0028 - wingL: 0.0035 - wingR: 0.0041 - forelegL4: 0.0058 - forelegR4: 0.0060 - midlegL4: 0.0066 - midlegR4: 0.0064 - hindlegL4: 0.0066 - hindlegR4: 0.0070 - eyeL: 0.0013 - eyeR: 0.0012 - val_loss: 0.0048 - val_head: 7.6555e-04 - val_thorax: 0.0013 - val_abdomen: 0.0034 - val_wingL: 0.0042 - val_wingR: 0.0065 - val_forelegL4: 0.0063 - val_forelegR4: 0.0064 - val_midlegL4: 0.0069 - val_midlegR4: 0.0071 - val_hindlegL4: 0.0080 - val_hindlegR4: 0.0062 - val_eyeL: 0.0028 - val_eyeR: 0.0026 - lr: 1.0000e-04 - 3s/epoch - 15ms/step\n",
- "Epoch 5/200\n",
- "200/200 - 3s - loss: 0.0034 - head: 6.1233e-04 - thorax: 0.0012 - abdomen: 0.0023 - wingL: 0.0028 - wingR: 0.0032 - forelegL4: 0.0052 - forelegR4: 0.0054 - midlegL4: 0.0052 - midlegR4: 0.0051 - hindlegL4: 0.0057 - hindlegR4: 0.0058 - eyeL: 0.0011 - eyeR: 0.0011 - val_loss: 0.0044 - val_head: 9.3809e-04 - val_thorax: 0.0012 - val_abdomen: 0.0027 - val_wingL: 0.0032 - val_wingR: 0.0048 - val_forelegL4: 0.0062 - val_forelegR4: 0.0053 - val_midlegL4: 0.0068 - val_midlegR4: 0.0063 - val_hindlegL4: 0.0067 - val_hindlegR4: 0.0065 - val_eyeL: 0.0035 - val_eyeR: 0.0032 - lr: 1.0000e-04 - 3s/epoch - 15ms/step\n",
- "Epoch 6/200\n",
- "200/200 - 3s - loss: 0.0028 - head: 5.5957e-04 - thorax: 9.3519e-04 - abdomen: 0.0019 - wingL: 0.0023 - wingR: 0.0025 - forelegL4: 0.0045 - forelegR4: 0.0045 - midlegL4: 0.0040 - midlegR4: 0.0040 - hindlegL4: 0.0047 - hindlegR4: 0.0048 - eyeL: 0.0010 - eyeR: 9.7287e-04 - val_loss: 0.0038 - val_head: 7.6837e-04 - val_thorax: 9.9723e-04 - val_abdomen: 0.0027 - val_wingL: 0.0025 - val_wingR: 0.0046 - val_forelegL4: 0.0058 - val_forelegR4: 0.0049 - val_midlegL4: 0.0054 - val_midlegR4: 0.0058 - val_hindlegL4: 0.0057 - val_hindlegR4: 0.0065 - val_eyeL: 0.0023 - val_eyeR: 0.0022 - lr: 1.0000e-04 - 3s/epoch - 15ms/step\n",
- "Epoch 7/200\n",
- "200/200 - 3s - loss: 0.0024 - head: 4.7941e-04 - thorax: 7.5772e-04 - abdomen: 0.0017 - wingL: 0.0020 - wingR: 0.0022 - forelegL4: 0.0039 - forelegR4: 0.0041 - midlegL4: 0.0033 - midlegR4: 0.0033 - hindlegL4: 0.0039 - hindlegR4: 0.0040 - eyeL: 9.3055e-04 - eyeR: 8.9191e-04 - val_loss: 0.0036 - val_head: 6.1078e-04 - val_thorax: 0.0010 - val_abdomen: 0.0023 - val_wingL: 0.0025 - val_wingR: 0.0039 - val_forelegL4: 0.0053 - val_forelegR4: 0.0058 - val_midlegL4: 0.0049 - val_midlegR4: 0.0056 - val_hindlegL4: 0.0054 - val_hindlegR4: 0.0049 - val_eyeL: 0.0026 - val_eyeR: 0.0024 - lr: 1.0000e-04 - 3s/epoch - 15ms/step\n",
- "Epoch 8/200\n",
- "200/200 - 3s - loss: 0.0020 - head: 4.4425e-04 - thorax: 6.8283e-04 - abdomen: 0.0014 - wingL: 0.0015 - wingR: 0.0017 - forelegL4: 0.0035 - forelegR4: 0.0035 - midlegL4: 0.0027 - midlegR4: 0.0026 - hindlegL4: 0.0033 - hindlegR4: 0.0033 - eyeL: 7.7111e-04 - eyeR: 7.2022e-04 - val_loss: 0.0035 - val_head: 7.1555e-04 - val_thorax: 9.1508e-04 - val_abdomen: 0.0022 - val_wingL: 0.0023 - val_wingR: 0.0033 - val_forelegL4: 0.0054 - val_forelegR4: 0.0049 - val_midlegL4: 0.0049 - val_midlegR4: 0.0052 - val_hindlegL4: 0.0052 - val_hindlegR4: 0.0051 - val_eyeL: 0.0025 - val_eyeR: 0.0025 - lr: 1.0000e-04 - 3s/epoch - 15ms/step\n",
- "Epoch 9/200\n",
- "200/200 - 3s - loss: 0.0017 - head: 3.8990e-04 - thorax: 5.4963e-04 - abdomen: 0.0012 - wingL: 0.0012 - wingR: 0.0014 - forelegL4: 0.0030 - forelegR4: 0.0031 - midlegL4: 0.0022 - midlegR4: 0.0022 - hindlegL4: 0.0027 - hindlegR4: 0.0027 - eyeL: 6.9041e-04 - eyeR: 6.7679e-04 - val_loss: 0.0034 - val_head: 5.6666e-04 - val_thorax: 7.9156e-04 - val_abdomen: 0.0023 - val_wingL: 0.0020 - val_wingR: 0.0041 - val_forelegL4: 0.0043 - val_forelegR4: 0.0048 - val_midlegL4: 0.0041 - val_midlegR4: 0.0051 - val_hindlegL4: 0.0053 - val_hindlegR4: 0.0052 - val_eyeL: 0.0024 - val_eyeR: 0.0026 - lr: 1.0000e-04 - 3s/epoch - 15ms/step\n",
- "Epoch 10/200\n",
- "200/200 - 3s - loss: 0.0015 - head: 3.6281e-04 - thorax: 5.2471e-04 - abdomen: 0.0010 - wingL: 0.0011 - wingR: 0.0012 - forelegL4: 0.0027 - forelegR4: 0.0028 - midlegL4: 0.0019 - midlegR4: 0.0019 - hindlegL4: 0.0023 - hindlegR4: 0.0024 - eyeL: 7.0986e-04 - eyeR: 6.9581e-04 - val_loss: 0.0024 - val_head: 4.8376e-04 - val_thorax: 6.2502e-04 - val_abdomen: 0.0016 - val_wingL: 0.0014 - val_wingR: 0.0027 - val_forelegL4: 0.0035 - val_forelegR4: 0.0033 - val_midlegL4: 0.0028 - val_midlegR4: 0.0041 - val_hindlegL4: 0.0036 - val_hindlegR4: 0.0038 - val_eyeL: 0.0015 - val_eyeR: 0.0016 - lr: 1.0000e-04 - 3s/epoch - 16ms/step\n",
- "Epoch 11/200\n",
- "200/200 - 3s - loss: 0.0013 - head: 3.1183e-04 - thorax: 4.7891e-04 - abdomen: 9.4567e-04 - wingL: 9.6811e-04 - wingR: 0.0011 - forelegL4: 0.0023 - forelegR4: 0.0025 - midlegL4: 0.0016 - midlegR4: 0.0016 - hindlegL4: 0.0020 - hindlegR4: 0.0021 - eyeL: 5.7635e-04 - eyeR: 5.3648e-04 - val_loss: 0.0028 - val_head: 5.2940e-04 - val_thorax: 6.6554e-04 - val_abdomen: 0.0020 - val_wingL: 0.0013 - val_wingR: 0.0024 - val_forelegL4: 0.0041 - val_forelegR4: 0.0041 - val_midlegL4: 0.0034 - val_midlegR4: 0.0042 - val_hindlegL4: 0.0047 - val_hindlegR4: 0.0040 - val_eyeL: 0.0025 - val_eyeR: 0.0022 - lr: 1.0000e-04 - 3s/epoch - 15ms/step\n",
- "Epoch 12/200\n",
- "200/200 - 3s - loss: 0.0011 - head: 2.8863e-04 - thorax: 4.2604e-04 - abdomen: 8.0488e-04 - wingL: 8.1238e-04 - wingR: 8.5798e-04 - forelegL4: 0.0021 - forelegR4: 0.0021 - midlegL4: 0.0014 - midlegR4: 0.0014 - hindlegL4: 0.0017 - hindlegR4: 0.0018 - eyeL: 5.1007e-04 - eyeR: 4.5654e-04 - val_loss: 0.0031 - val_head: 8.1802e-04 - val_thorax: 7.9789e-04 - val_abdomen: 0.0018 - val_wingL: 0.0014 - val_wingR: 0.0028 - val_forelegL4: 0.0040 - val_forelegR4: 0.0048 - val_midlegL4: 0.0057 - val_midlegR4: 0.0037 - val_hindlegL4: 0.0053 - val_hindlegR4: 0.0050 - val_eyeL: 0.0020 - val_eyeR: 0.0018 - lr: 1.0000e-04 - 3s/epoch - 14ms/step\n",
- "Epoch 13/200\n",
- "200/200 - 3s - loss: 0.0010 - head: 2.8818e-04 - thorax: 4.1018e-04 - abdomen: 7.8027e-04 - wingL: 7.8017e-04 - wingR: 8.4529e-04 - forelegL4: 0.0019 - forelegR4: 0.0019 - midlegL4: 0.0013 - midlegR4: 0.0013 - hindlegL4: 0.0015 - hindlegR4: 0.0016 - eyeL: 4.6272e-04 - eyeR: 4.3265e-04 - val_loss: 0.0026 - val_head: 3.5806e-04 - val_thorax: 6.6352e-04 - val_abdomen: 0.0017 - val_wingL: 0.0015 - val_wingR: 0.0037 - val_forelegL4: 0.0036 - val_forelegR4: 0.0042 - val_midlegL4: 0.0034 - val_midlegR4: 0.0032 - val_hindlegL4: 0.0041 - val_hindlegR4: 0.0047 - val_eyeL: 0.0013 - val_eyeR: 0.0013 - lr: 1.0000e-04 - 3s/epoch - 15ms/step\n",
- "Epoch 14/200\n",
- "200/200 - 3s - loss: 9.4029e-04 - head: 2.8339e-04 - thorax: 3.6739e-04 - abdomen: 7.0118e-04 - wingL: 7.4831e-04 - wingR: 7.1158e-04 - forelegL4: 0.0017 - forelegR4: 0.0017 - midlegL4: 0.0012 - midlegR4: 0.0011 - hindlegL4: 0.0014 - hindlegR4: 0.0015 - eyeL: 4.2793e-04 - eyeR: 4.1400e-04 - val_loss: 0.0024 - val_head: 3.4292e-04 - val_thorax: 7.1119e-04 - val_abdomen: 0.0014 - val_wingL: 0.0013 - val_wingR: 0.0028 - val_forelegL4: 0.0030 - val_forelegR4: 0.0043 - val_midlegL4: 0.0031 - val_midlegR4: 0.0030 - val_hindlegL4: 0.0039 - val_hindlegR4: 0.0038 - val_eyeL: 0.0017 - val_eyeR: 0.0015 - lr: 1.0000e-04 - 3s/epoch - 15ms/step\n",
- "Epoch 15/200\n",
- "200/200 - 3s - loss: 7.8295e-04 - head: 2.3028e-04 - thorax: 3.3006e-04 - abdomen: 5.9391e-04 - wingL: 5.8825e-04 - wingR: 6.0989e-04 - forelegL4: 0.0015 - forelegR4: 0.0015 - midlegL4: 9.6945e-04 - midlegR4: 9.3611e-04 - hindlegL4: 0.0011 - hindlegR4: 0.0012 - eyeL: 3.4493e-04 - eyeR: 3.1164e-04 - val_loss: 0.0019 - val_head: 4.4152e-04 - val_thorax: 5.4500e-04 - val_abdomen: 0.0013 - val_wingL: 0.0012 - val_wingR: 0.0026 - val_forelegL4: 0.0024 - val_forelegR4: 0.0037 - val_midlegL4: 0.0024 - val_midlegR4: 0.0024 - val_hindlegL4: 0.0030 - val_hindlegR4: 0.0030 - val_eyeL: 0.0011 - val_eyeR: 0.0011 - lr: 1.0000e-04 - 3s/epoch - 15ms/step\n",
- "Epoch 16/200\n",
- "200/200 - 3s - loss: 7.3208e-04 - head: 2.3573e-04 - thorax: 3.0631e-04 - abdomen: 5.5007e-04 - wingL: 5.3431e-04 - wingR: 5.9773e-04 - forelegL4: 0.0013 - forelegR4: 0.0014 - midlegL4: 9.1004e-04 - midlegR4: 8.7803e-04 - hindlegL4: 0.0010 - hindlegR4: 0.0011 - eyeL: 3.3279e-04 - eyeR: 2.9841e-04 - val_loss: 0.0023 - val_head: 3.5381e-04 - val_thorax: 7.0128e-04 - val_abdomen: 0.0015 - val_wingL: 0.0013 - val_wingR: 0.0022 - val_forelegL4: 0.0031 - val_forelegR4: 0.0041 - val_midlegL4: 0.0033 - val_midlegR4: 0.0028 - val_hindlegL4: 0.0036 - val_hindlegR4: 0.0033 - val_eyeL: 0.0017 - val_eyeR: 0.0014 - lr: 1.0000e-04 - 3s/epoch - 14ms/step\n",
- "Epoch 17/200\n",
- "200/200 - 3s - loss: 6.3161e-04 - head: 2.0100e-04 - thorax: 2.8088e-04 - abdomen: 4.9153e-04 - wingL: 4.7586e-04 - wingR: 4.9866e-04 - forelegL4: 0.0011 - forelegR4: 0.0012 - midlegL4: 7.6100e-04 - midlegR4: 8.0266e-04 - hindlegL4: 8.9697e-04 - hindlegR4: 8.9149e-04 - eyeL: 2.8189e-04 - eyeR: 2.7208e-04 - val_loss: 0.0018 - val_head: 2.8070e-04 - val_thorax: 5.1903e-04 - val_abdomen: 0.0011 - val_wingL: 9.8509e-04 - val_wingR: 0.0025 - val_forelegL4: 0.0022 - val_forelegR4: 0.0026 - val_midlegL4: 0.0025 - val_midlegR4: 0.0021 - val_hindlegL4: 0.0031 - val_hindlegR4: 0.0031 - val_eyeL: 0.0011 - val_eyeR: 9.7838e-04 - lr: 1.0000e-04 - 3s/epoch - 15ms/step\n",
- "Epoch 18/200\n",
- "200/200 - 3s - loss: 5.7844e-04 - head: 1.9896e-04 - thorax: 2.9112e-04 - abdomen: 4.7495e-04 - wingL: 4.5591e-04 - wingR: 4.5877e-04 - forelegL4: 0.0011 - forelegR4: 0.0012 - midlegL4: 6.9042e-04 - midlegR4: 6.6195e-04 - hindlegL4: 7.9452e-04 - hindlegR4: 7.6819e-04 - eyeL: 2.5989e-04 - eyeR: 2.4763e-04 - val_loss: 0.0018 - val_head: 3.1925e-04 - val_thorax: 6.0394e-04 - val_abdomen: 0.0012 - val_wingL: 9.0835e-04 - val_wingR: 0.0019 - val_forelegL4: 0.0022 - val_forelegR4: 0.0029 - val_midlegL4: 0.0026 - val_midlegR4: 0.0024 - val_hindlegL4: 0.0033 - val_hindlegR4: 0.0022 - val_eyeL: 0.0015 - val_eyeR: 0.0011 - lr: 1.0000e-04 - 3s/epoch - 15ms/step\n",
- "Epoch 19/200\n",
- "200/200 - 3s - loss: 5.1323e-04 - head: 1.8346e-04 - thorax: 2.5475e-04 - abdomen: 4.2159e-04 - wingL: 4.3027e-04 - wingR: 3.9814e-04 - forelegL4: 9.5814e-04 - forelegR4: 9.9765e-04 - midlegL4: 5.9968e-04 - midlegR4: 5.8423e-04 - hindlegL4: 6.7869e-04 - hindlegR4: 6.9121e-04 - eyeL: 2.4343e-04 - eyeR: 2.3077e-04 - val_loss: 0.0021 - val_head: 3.3346e-04 - val_thorax: 5.9007e-04 - val_abdomen: 0.0014 - val_wingL: 0.0013 - val_wingR: 0.0031 - val_forelegL4: 0.0026 - val_forelegR4: 0.0036 - val_midlegL4: 0.0029 - val_midlegR4: 0.0021 - val_hindlegL4: 0.0037 - val_hindlegR4: 0.0036 - val_eyeL: 0.0011 - val_eyeR: 9.4254e-04 - lr: 1.0000e-04 - 3s/epoch - 14ms/step\n",
- "Epoch 20/200\n",
- "200/200 - 3s - loss: 4.7991e-04 - head: 1.7328e-04 - thorax: 2.2397e-04 - abdomen: 4.2417e-04 - wingL: 3.9313e-04 - wingR: 3.9871e-04 - forelegL4: 8.8547e-04 - forelegR4: 8.9704e-04 - midlegL4: 5.3515e-04 - midlegR4: 5.8294e-04 - hindlegL4: 6.5212e-04 - hindlegR4: 6.2828e-04 - eyeL: 2.2438e-04 - eyeR: 2.2012e-04 - val_loss: 0.0014 - val_head: 2.7034e-04 - val_thorax: 4.7978e-04 - val_abdomen: 9.7903e-04 - val_wingL: 8.6477e-04 - val_wingR: 0.0020 - val_forelegL4: 0.0018 - val_forelegR4: 0.0024 - val_midlegL4: 0.0019 - val_midlegR4: 0.0018 - val_hindlegL4: 0.0024 - val_hindlegR4: 0.0022 - val_eyeL: 9.9423e-04 - val_eyeR: 8.4541e-04 - lr: 1.0000e-04 - 3s/epoch - 15ms/step\n",
- "Epoch 21/200\n",
- "200/200 - 3s - loss: 4.4100e-04 - head: 1.6076e-04 - thorax: 2.4080e-04 - abdomen: 3.8343e-04 - wingL: 3.6759e-04 - wingR: 3.7489e-04 - forelegL4: 8.1060e-04 - forelegR4: 8.1600e-04 - midlegL4: 4.7288e-04 - midlegR4: 5.2695e-04 - hindlegL4: 5.6401e-04 - hindlegR4: 6.3519e-04 - eyeL: 1.9033e-04 - eyeR: 1.8954e-04 - val_loss: 0.0018 - val_head: 2.5764e-04 - val_thorax: 5.8718e-04 - val_abdomen: 0.0011 - val_wingL: 9.6939e-04 - val_wingR: 0.0019 - val_forelegL4: 0.0022 - val_forelegR4: 0.0026 - val_midlegL4: 0.0025 - val_midlegR4: 0.0026 - val_hindlegL4: 0.0032 - val_hindlegR4: 0.0028 - val_eyeL: 0.0014 - val_eyeR: 0.0011 - lr: 1.0000e-04 - 3s/epoch - 15ms/step\n",
- "Epoch 22/200\n",
- "200/200 - 3s - loss: 3.7738e-04 - head: 1.4725e-04 - thorax: 2.0905e-04 - abdomen: 3.2447e-04 - wingL: 3.2224e-04 - wingR: 3.0585e-04 - forelegL4: 6.2169e-04 - forelegR4: 6.7379e-04 - midlegL4: 4.5061e-04 - midlegR4: 4.3931e-04 - hindlegL4: 5.1129e-04 - hindlegR4: 5.2449e-04 - eyeL: 1.9372e-04 - eyeR: 1.8213e-04 - val_loss: 0.0015 - val_head: 2.2947e-04 - val_thorax: 5.4640e-04 - val_abdomen: 9.8293e-04 - val_wingL: 8.6663e-04 - val_wingR: 0.0013 - val_forelegL4: 0.0018 - val_forelegR4: 0.0027 - val_midlegL4: 0.0021 - val_midlegR4: 0.0019 - val_hindlegL4: 0.0027 - val_hindlegR4: 0.0022 - val_eyeL: 0.0013 - val_eyeR: 0.0010 - lr: 1.0000e-04 - 3s/epoch - 15ms/step\n",
- "Epoch 23/200\n",
- "200/200 - 3s - loss: 3.6084e-04 - head: 1.4440e-04 - thorax: 2.0277e-04 - abdomen: 3.0561e-04 - wingL: 3.0192e-04 - wingR: 2.8845e-04 - forelegL4: 6.3221e-04 - forelegR4: 6.7722e-04 - midlegL4: 3.9143e-04 - midlegR4: 4.3545e-04 - hindlegL4: 5.1985e-04 - hindlegR4: 4.5058e-04 - eyeL: 1.7636e-04 - eyeR: 1.6468e-04 - val_loss: 0.0015 - val_head: 2.9639e-04 - val_thorax: 4.6412e-04 - val_abdomen: 0.0011 - val_wingL: 9.0466e-04 - val_wingR: 0.0021 - val_forelegL4: 0.0015 - val_forelegR4: 0.0025 - val_midlegL4: 0.0018 - val_midlegR4: 0.0016 - val_hindlegL4: 0.0029 - val_hindlegR4: 0.0022 - val_eyeL: 8.7357e-04 - val_eyeR: 7.0067e-04 - lr: 1.0000e-04 - 3s/epoch - 15ms/step\n",
- "Epoch 24/200\n",
- "200/200 - 3s - loss: 3.4886e-04 - head: 1.4382e-04 - thorax: 1.9157e-04 - abdomen: 3.2551e-04 - wingL: 3.0634e-04 - wingR: 3.0727e-04 - forelegL4: 6.3863e-04 - forelegR4: 6.0904e-04 - midlegL4: 3.5949e-04 - midlegR4: 4.1201e-04 - hindlegL4: 4.2893e-04 - hindlegR4: 4.8121e-04 - eyeL: 1.6669e-04 - eyeR: 1.6464e-04 - val_loss: 0.0022 - val_head: 3.2159e-04 - val_thorax: 7.2743e-04 - val_abdomen: 0.0014 - val_wingL: 0.0011 - val_wingR: 0.0027 - val_forelegL4: 0.0025 - val_forelegR4: 0.0037 - val_midlegL4: 0.0033 - val_midlegR4: 0.0020 - val_hindlegL4: 0.0043 - val_hindlegR4: 0.0031 - val_eyeL: 0.0017 - val_eyeR: 0.0012 - lr: 1.0000e-04 - 3s/epoch - 14ms/step\n",
- "Epoch 25/200\n",
- "\n",
- "Epoch 00025: ReduceLROnPlateau reducing learning rate to 4.999999873689376e-05.\n",
- "200/200 - 3s - loss: 3.0444e-04 - head: 1.2563e-04 - thorax: 1.7247e-04 - abdomen: 2.6934e-04 - wingL: 2.5754e-04 - wingR: 2.4728e-04 - forelegL4: 5.8390e-04 - forelegR4: 5.3959e-04 - midlegL4: 3.3003e-04 - midlegR4: 3.6432e-04 - hindlegL4: 4.0270e-04 - hindlegR4: 3.5518e-04 - eyeL: 1.5609e-04 - eyeR: 1.5365e-04 - val_loss: 0.0017 - val_head: 2.5420e-04 - val_thorax: 5.5809e-04 - val_abdomen: 0.0011 - val_wingL: 9.6708e-04 - val_wingR: 0.0022 - val_forelegL4: 0.0018 - val_forelegR4: 0.0033 - val_midlegL4: 0.0025 - val_midlegR4: 0.0017 - val_hindlegL4: 0.0031 - val_hindlegR4: 0.0031 - val_eyeL: 9.8718e-04 - val_eyeR: 8.0263e-04 - lr: 1.0000e-04 - 3s/epoch - 15ms/step\n",
- "Epoch 26/200\n",
- "200/200 - 3s - loss: 2.3368e-04 - head: 1.1149e-04 - thorax: 1.5177e-04 - abdomen: 2.1763e-04 - wingL: 2.2159e-04 - wingR: 1.9396e-04 - forelegL4: 3.8234e-04 - forelegR4: 3.8248e-04 - midlegL4: 2.7555e-04 - midlegR4: 2.8653e-04 - hindlegL4: 2.7842e-04 - hindlegR4: 2.8074e-04 - eyeL: 1.3157e-04 - eyeR: 1.2374e-04 - val_loss: 0.0017 - val_head: 2.1815e-04 - val_thorax: 5.0063e-04 - val_abdomen: 0.0011 - val_wingL: 8.2248e-04 - val_wingR: 0.0020 - val_forelegL4: 0.0019 - val_forelegR4: 0.0035 - val_midlegL4: 0.0022 - val_midlegR4: 0.0016 - val_hindlegL4: 0.0031 - val_hindlegR4: 0.0022 - val_eyeL: 0.0013 - val_eyeR: 9.8071e-04 - lr: 5.0000e-05 - 3s/epoch - 14ms/step\n",
- "Epoch 27/200\n",
- "200/200 - 3s - loss: 2.0711e-04 - head: 9.7513e-05 - thorax: 1.4018e-04 - abdomen: 2.0210e-04 - wingL: 1.8693e-04 - wingR: 1.7399e-04 - forelegL4: 3.1753e-04 - forelegR4: 3.7613e-04 - midlegL4: 2.2838e-04 - midlegR4: 2.4643e-04 - hindlegL4: 2.4471e-04 - hindlegR4: 2.4706e-04 - eyeL: 1.1696e-04 - eyeR: 1.1452e-04 - val_loss: 0.0011 - val_head: 1.7855e-04 - val_thorax: 3.7885e-04 - val_abdomen: 7.0074e-04 - val_wingL: 6.4821e-04 - val_wingR: 0.0012 - val_forelegL4: 0.0012 - val_forelegR4: 0.0017 - val_midlegL4: 0.0014 - val_midlegR4: 0.0013 - val_hindlegL4: 0.0019 - val_hindlegR4: 0.0018 - val_eyeL: 8.8941e-04 - val_eyeR: 7.0606e-04 - lr: 5.0000e-05 - 3s/epoch - 15ms/step\n",
- "Epoch 28/200\n",
- "200/200 - 3s - loss: 1.9539e-04 - head: 9.4716e-05 - thorax: 1.3617e-04 - abdomen: 1.8547e-04 - wingL: 1.8173e-04 - wingR: 1.6716e-04 - forelegL4: 3.2783e-04 - forelegR4: 3.1060e-04 - midlegL4: 2.2172e-04 - midlegR4: 2.2648e-04 - hindlegL4: 2.3846e-04 - hindlegR4: 2.2823e-04 - eyeL: 1.1204e-04 - eyeR: 1.0944e-04 - val_loss: 0.0012 - val_head: 1.9505e-04 - val_thorax: 3.8105e-04 - val_abdomen: 7.7888e-04 - val_wingL: 6.8985e-04 - val_wingR: 0.0016 - val_forelegL4: 0.0015 - val_forelegR4: 0.0020 - val_midlegL4: 0.0017 - val_midlegR4: 0.0011 - val_hindlegL4: 0.0022 - val_hindlegR4: 0.0019 - val_eyeL: 9.1223e-04 - val_eyeR: 7.0778e-04 - lr: 5.0000e-05 - 3s/epoch - 15ms/step\n",
- "Epoch 29/200\n",
- "200/200 - 3s - loss: 1.8262e-04 - head: 9.2364e-05 - thorax: 1.3126e-04 - abdomen: 1.7625e-04 - wingL: 1.7494e-04 - wingR: 1.5998e-04 - forelegL4: 3.0159e-04 - forelegR4: 2.9470e-04 - midlegL4: 1.9773e-04 - midlegR4: 2.0446e-04 - hindlegL4: 2.0576e-04 - hindlegR4: 2.1560e-04 - eyeL: 1.1218e-04 - eyeR: 1.0720e-04 - val_loss: 0.0015 - val_head: 2.2535e-04 - val_thorax: 4.8031e-04 - val_abdomen: 9.5428e-04 - val_wingL: 7.7468e-04 - val_wingR: 0.0016 - val_forelegL4: 0.0017 - val_forelegR4: 0.0025 - val_midlegL4: 0.0021 - val_midlegR4: 0.0018 - val_hindlegL4: 0.0029 - val_hindlegR4: 0.0019 - val_eyeL: 0.0013 - val_eyeR: 9.6936e-04 - lr: 5.0000e-05 - 3s/epoch - 15ms/step\n",
- "Epoch 30/200\n",
- "200/200 - 3s - loss: 1.7461e-04 - head: 8.9617e-05 - thorax: 1.2428e-04 - abdomen: 1.7234e-04 - wingL: 1.6780e-04 - wingR: 1.5580e-04 - forelegL4: 2.7324e-04 - forelegR4: 2.8042e-04 - midlegL4: 1.9090e-04 - midlegR4: 2.0420e-04 - hindlegL4: 1.9914e-04 - hindlegR4: 2.0318e-04 - eyeL: 1.0518e-04 - eyeR: 1.0386e-04 - val_loss: 0.0015 - val_head: 1.9058e-04 - val_thorax: 4.9603e-04 - val_abdomen: 0.0011 - val_wingL: 9.7566e-04 - val_wingR: 0.0018 - val_forelegL4: 0.0016 - val_forelegR4: 0.0028 - val_midlegL4: 0.0022 - val_midlegR4: 0.0015 - val_hindlegL4: 0.0028 - val_hindlegR4: 0.0028 - val_eyeL: 9.9699e-04 - val_eyeR: 8.3721e-04 - lr: 5.0000e-05 - 3s/epoch - 15ms/step\n",
- "Epoch 31/200\n",
- "200/200 - 3s - loss: 1.7064e-04 - head: 8.7373e-05 - thorax: 1.2365e-04 - abdomen: 1.6765e-04 - wingL: 1.5656e-04 - wingR: 1.4505e-04 - forelegL4: 2.7352e-04 - forelegR4: 2.6274e-04 - midlegL4: 1.9639e-04 - midlegR4: 1.9628e-04 - hindlegL4: 2.0323e-04 - hindlegR4: 1.9917e-04 - eyeL: 1.0639e-04 - eyeR: 1.0032e-04 - val_loss: 0.0011 - val_head: 1.7938e-04 - val_thorax: 3.6727e-04 - val_abdomen: 7.7820e-04 - val_wingL: 6.4437e-04 - val_wingR: 0.0014 - val_forelegL4: 0.0014 - val_forelegR4: 0.0020 - val_midlegL4: 0.0016 - val_midlegR4: 0.0010 - val_hindlegL4: 0.0021 - val_hindlegR4: 0.0016 - val_eyeL: 8.0607e-04 - val_eyeR: 6.6172e-04 - lr: 5.0000e-05 - 3s/epoch - 16ms/step\n",
- "Epoch 32/200\n",
- "\n",
- "Epoch 00032: ReduceLROnPlateau reducing learning rate to 2.499999936844688e-05.\n",
- "200/200 - 4s - loss: 1.6547e-04 - head: 8.6407e-05 - thorax: 1.1578e-04 - abdomen: 1.6160e-04 - wingL: 1.5752e-04 - wingR: 1.4326e-04 - forelegL4: 2.5855e-04 - forelegR4: 2.8317e-04 - midlegL4: 1.7880e-04 - midlegR4: 1.8021e-04 - hindlegL4: 1.9743e-04 - hindlegR4: 1.8831e-04 - eyeL: 1.0074e-04 - eyeR: 9.9381e-05 - val_loss: 0.0012 - val_head: 1.9257e-04 - val_thorax: 3.7361e-04 - val_abdomen: 7.0451e-04 - val_wingL: 7.8240e-04 - val_wingR: 0.0015 - val_forelegL4: 0.0014 - val_forelegR4: 0.0020 - val_midlegL4: 0.0016 - val_midlegR4: 0.0011 - val_hindlegL4: 0.0020 - val_hindlegR4: 0.0019 - val_eyeL: 8.9328e-04 - val_eyeR: 7.3886e-04 - lr: 5.0000e-05 - 4s/epoch - 18ms/step\n",
- "Epoch 33/200\n",
- "200/200 - 3s - loss: 1.4767e-04 - head: 8.0575e-05 - thorax: 1.1097e-04 - abdomen: 1.4927e-04 - wingL: 1.4112e-04 - wingR: 1.3113e-04 - forelegL4: 2.1913e-04 - forelegR4: 2.1998e-04 - midlegL4: 1.6045e-04 - midlegR4: 1.6535e-04 - hindlegL4: 1.8091e-04 - hindlegR4: 1.7343e-04 - eyeL: 9.5387e-05 - eyeR: 9.2035e-05 - val_loss: 0.0014 - val_head: 1.9046e-04 - val_thorax: 4.6921e-04 - val_abdomen: 9.4087e-04 - val_wingL: 7.5647e-04 - val_wingR: 0.0015 - val_forelegL4: 0.0015 - val_forelegR4: 0.0025 - val_midlegL4: 0.0020 - val_midlegR4: 0.0015 - val_hindlegL4: 0.0026 - val_hindlegR4: 0.0021 - val_eyeL: 0.0013 - val_eyeR: 0.0010 - lr: 2.5000e-05 - 3s/epoch - 16ms/step\n",
- "Epoch 34/200\n",
- "200/200 - 3s - loss: 1.4506e-04 - head: 7.9790e-05 - thorax: 1.0771e-04 - abdomen: 1.5052e-04 - wingL: 1.4143e-04 - wingR: 1.2485e-04 - forelegL4: 2.2486e-04 - forelegR4: 2.1619e-04 - midlegL4: 1.6584e-04 - midlegR4: 1.6250e-04 - hindlegL4: 1.6521e-04 - hindlegR4: 1.6717e-04 - eyeL: 9.1550e-05 - eyeR: 8.8112e-05 - val_loss: 0.0013 - val_head: 1.8689e-04 - val_thorax: 3.7203e-04 - val_abdomen: 9.3770e-04 - val_wingL: 7.0190e-04 - val_wingR: 0.0019 - val_forelegL4: 0.0015 - val_forelegR4: 0.0023 - val_midlegL4: 0.0016 - val_midlegR4: 0.0012 - val_hindlegL4: 0.0025 - val_hindlegR4: 0.0022 - val_eyeL: 8.0213e-04 - val_eyeR: 6.5036e-04 - lr: 2.5000e-05 - 3s/epoch - 15ms/step\n",
- "Epoch 35/200\n",
- "200/200 - 3s - loss: 1.3911e-04 - head: 7.9674e-05 - thorax: 1.0668e-04 - abdomen: 1.4330e-04 - wingL: 1.3906e-04 - wingR: 1.2752e-04 - forelegL4: 1.9657e-04 - forelegR4: 1.9577e-04 - midlegL4: 1.5228e-04 - midlegR4: 1.5642e-04 - hindlegL4: 1.6610e-04 - hindlegR4: 1.6394e-04 - eyeL: 9.1523e-05 - eyeR: 8.9620e-05 - val_loss: 0.0013 - val_head: 1.7511e-04 - val_thorax: 4.2162e-04 - val_abdomen: 9.5009e-04 - val_wingL: 6.7908e-04 - val_wingR: 0.0013 - val_forelegL4: 0.0015 - val_forelegR4: 0.0023 - val_midlegL4: 0.0018 - val_midlegR4: 0.0014 - val_hindlegL4: 0.0027 - val_hindlegR4: 0.0019 - val_eyeL: 0.0012 - val_eyeR: 9.8818e-04 - lr: 2.5000e-05 - 3s/epoch - 16ms/step\n",
- "Epoch 36/200\n",
- "200/200 - 3s - loss: 1.3697e-04 - head: 7.5207e-05 - thorax: 1.0507e-04 - abdomen: 1.3913e-04 - wingL: 1.3497e-04 - wingR: 1.2511e-04 - forelegL4: 1.9152e-04 - forelegR4: 2.0264e-04 - midlegL4: 1.5207e-04 - midlegR4: 1.5519e-04 - hindlegL4: 1.6368e-04 - hindlegR4: 1.5869e-04 - eyeL: 9.0233e-05 - eyeR: 8.7055e-05 - val_loss: 0.0013 - val_head: 1.8066e-04 - val_thorax: 4.6591e-04 - val_abdomen: 9.9582e-04 - val_wingL: 7.2600e-04 - val_wingR: 0.0012 - val_forelegL4: 0.0015 - val_forelegR4: 0.0022 - val_midlegL4: 0.0019 - val_midlegR4: 0.0015 - val_hindlegL4: 0.0028 - val_hindlegR4: 0.0018 - val_eyeL: 0.0012 - val_eyeR: 9.6224e-04 - lr: 2.5000e-05 - 3s/epoch - 15ms/step\n",
- "Epoch 37/200\n",
- "200/200 - 3s - loss: 1.3638e-04 - head: 7.6822e-05 - thorax: 1.0531e-04 - abdomen: 1.4107e-04 - wingL: 1.4047e-04 - wingR: 1.2177e-04 - forelegL4: 1.9564e-04 - forelegR4: 1.7970e-04 - midlegL4: 1.5364e-04 - midlegR4: 1.5089e-04 - hindlegL4: 1.6647e-04 - hindlegR4: 1.6322e-04 - eyeL: 9.0198e-05 - eyeR: 8.7722e-05 - val_loss: 0.0017 - val_head: 2.3218e-04 - val_thorax: 5.3881e-04 - val_abdomen: 0.0011 - val_wingL: 0.0010 - val_wingR: 0.0019 - val_forelegL4: 0.0021 - val_forelegR4: 0.0028 - val_midlegL4: 0.0025 - val_midlegR4: 0.0016 - val_hindlegL4: 0.0033 - val_hindlegR4: 0.0029 - val_eyeL: 0.0015 - val_eyeR: 0.0012 - lr: 2.5000e-05 - 3s/epoch - 16ms/step\n",
- "Epoch 00037: early stopping\n",
- "INFO:sleap.nn.training:Finished training loop. [2.0 min]\n",
- "INFO:sleap.nn.training:Deleting visualization directory: models/courtship.topdown_confmaps/viz\n",
- "INFO:sleap.nn.training:Saving evaluation metrics to model folder...\n",
- "\u001b[2KPredicting... \u001b[38;2;114;156;31m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[35m100%\u001b[0m ETA: \u001b[36m0:00:00\u001b[0m \u001b[31m39.3 FPS\u001b[0m31m48.8 FPS\u001b[0m31m49.5 FPS\u001b[0mFPS\u001b[0m\n",
- "\u001b[?25hINFO:sleap.nn.evals:Saved predictions: models/courtship.topdown_confmaps/labels_pr.train.slp\n",
- "INFO:sleap.nn.evals:Saved metrics: models/courtship.topdown_confmaps/metrics.train.npz\n",
- "INFO:sleap.nn.evals:OKS mAP: 0.899237\n",
- "\u001b[2KPredicting... \u001b[38;2;114;156;31m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[35m100%\u001b[0m ETA: \u001b[36m0:00:00\u001b[0m \u001b[31m14.2 FPS\u001b[0m0:00:01\u001b[0m \u001b[31m270.2 FPS\u001b[0mm\n",
- "\u001b[?25hINFO:sleap.nn.evals:Saved predictions: models/courtship.topdown_confmaps/labels_pr.val.slp\n",
- "INFO:sleap.nn.evals:Saved metrics: models/courtship.topdown_confmaps/metrics.val.npz\n",
- "INFO:sleap.nn.evals:OKS mAP: 0.691378\n"
- ]
- }
- ],
- "source": [
- "!sleap-train baseline_medium_rf.topdown.json \"dataset/drosophila-melanogaster-courtship/courtship_labels.slp\" --run_name \"courtship.topdown_confmaps\" --video-paths \"dataset/drosophila-melanogaster-courtship/20190128_113421.mp4\""
- ]
- },
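The log above ends by saving evaluation metrics next to the model (metrics.train.npz and metrics.val.npz, with the OKS mAP values printed). Those files can be read back from Python as well; a minimal sketch, assuming a SLEAP >= 1.3 install where sleap.load_metrics is available:

    import sleap

    # Load the validation-split metrics saved under the model folder
    # (metrics.val.npz in the log above).
    metrics = sleap.load_metrics("models/courtship.topdown_confmaps", split="val")
    print("OKS mAP:", metrics["oks_voc.mAP"])  # ~0.69 in the run above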
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "whOf8PaFxYbt"
- },
- "source": [
- "The models (along with the profiles and ground truth data used to train and validate the model) are saved in the `models/` directory:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 27,
- "metadata": {
- "colab": {
- "base_uri": "https://localhost:8080/",
- "height": 306
+ "cell_type": "code",
+ "execution_count": 36,
+ "metadata": {
+ "colab": {
+ "base_uri": "https://localhost:8080/",
+ "height": 1000
+ },
+ "id": "DUfnkxMtLcK3",
+ "outputId": "a6340ef1-eaac-42ef-f8d4-bcc499feb57b"
+ },
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "\u001b[31mERROR: Cannot uninstall opencv-python 4.6.0, RECORD file not found. Hint: The package was installed by conda.\u001b[0m\u001b[31m\n",
+ "\u001b[0m\u001b[31mERROR: Cannot uninstall shiboken2 5.15.6, RECORD file not found. You might be able to recover from this via: 'pip install --force-reinstall --no-deps shiboken2==5.15.6'.\u001b[0m\u001b[31m\n",
+ "\u001b[0m"
+ ]
+ }
+ ],
+ "source": [
+ "!pip uninstall -qqq -y opencv-python opencv-contrib-python\n",
+ "!pip install -qqq \"sleap[pypi]>=1.3.4\""
+ ]
},
- "id": "GBUTQ2Cm44En",
- "outputId": "ca298981-af65-43b3-f0f6-573f423acba8"
- },
- "outputs": [
{
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "\u001b[01;34mmodels/\u001b[00m\n",
- "├── \u001b[01;34mcourtship.centroid\u001b[00m\n",
- "│ ├── best_model.h5\n",
- "│ ├── initial_config.json\n",
- "│ ├── labels_gt.train.slp\n",
- "│ ├── labels_gt.val.slp\n",
- "│ ├── labels_pr.train.slp\n",
- "│ ├── labels_pr.val.slp\n",
- "│ ├── metrics.train.npz\n",
- "│ ├── metrics.val.npz\n",
- "│ ├── training_config.json\n",
- "│ └── training_log.csv\n",
- "└── \u001b[01;34mcourtship.topdown_confmaps\u001b[00m\n",
- " ├── best_model.h5\n",
- " ├── initial_config.json\n",
- " ├── labels_gt.train.slp\n",
- " ├── labels_gt.val.slp\n",
- " ├── labels_pr.train.slp\n",
- " ├── labels_pr.val.slp\n",
- " ├── metrics.train.npz\n",
- " ├── metrics.val.npz\n",
- " ├── training_config.json\n",
- " └── training_log.csv\n",
- "\n",
- "2 directories, 20 files\n"
- ]
- }
- ],
- "source": [
- "!tree models/"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "nIsKUX661xFK"
- },
- "source": [
- "## Inference\n",
- "Let's run inference with our trained models for centroids and centered instances."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 28,
- "metadata": {
- "id": "CLtjtq9E1Znr"
- },
- "outputs": [
+ "cell_type": "markdown",
+ "metadata": {
+ "id": "iq7jrgUksLtR"
+ },
+ "source": [
+ "## Download sample training data into Colab\n",
+ "Let's download a sample dataset from the SLEAP [sample datasets repository](https://github.com/talmolab/sleap-datasets) into Colab."
+ ]
+ },
{
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Started inference at: 2023-09-01 13:42:03.066840\n",
- "Args:\n",
- "\u001b[1m{\u001b[0m\n",
- "\u001b[2;32m│ \u001b[0m\u001b[32m'data_path'\u001b[0m: \u001b[32m'dataset/drosophila-melanogaster-courtship/20190128_113421.mp4'\u001b[0m,\n",
- "\u001b[2;32m│ \u001b[0m\u001b[32m'models'\u001b[0m: \u001b[1m[\u001b[0m\n",
- "\u001b[2;32m│ │ \u001b[0m\u001b[32m'models/courtship.centroid'\u001b[0m,\n",
- "\u001b[2;32m│ │ \u001b[0m\u001b[32m'models/courtship.topdown_confmaps'\u001b[0m\n",
- "\u001b[2;32m│ \u001b[0m\u001b[1m]\u001b[0m,\n",
- "\u001b[2;32m│ \u001b[0m\u001b[32m'frames'\u001b[0m: \u001b[32m'0-100'\u001b[0m,\n",
- "\u001b[2;32m│ \u001b[0m\u001b[32m'only_labeled_frames'\u001b[0m: \u001b[3;91mFalse\u001b[0m,\n",
- "\u001b[2;32m│ \u001b[0m\u001b[32m'only_suggested_frames'\u001b[0m: \u001b[3;91mFalse\u001b[0m,\n",
- "\u001b[2;32m│ \u001b[0m\u001b[32m'output'\u001b[0m: \u001b[3;35mNone\u001b[0m,\n",
- "\u001b[2;32m│ \u001b[0m\u001b[32m'no_empty_frames'\u001b[0m: \u001b[3;91mFalse\u001b[0m,\n",
- "\u001b[2;32m│ \u001b[0m\u001b[32m'verbosity'\u001b[0m: \u001b[32m'rich'\u001b[0m,\n",
- "\u001b[2;32m│ \u001b[0m\u001b[32m'video.dataset'\u001b[0m: \u001b[3;35mNone\u001b[0m,\n",
- "\u001b[2;32m│ \u001b[0m\u001b[32m'video.input_format'\u001b[0m: \u001b[32m'channels_last'\u001b[0m,\n",
- "\u001b[2;32m│ \u001b[0m\u001b[32m'video.index'\u001b[0m: \u001b[32m''\u001b[0m,\n",
- "\u001b[2;32m│ \u001b[0m\u001b[32m'cpu'\u001b[0m: \u001b[3;91mFalse\u001b[0m,\n",
- "\u001b[2;32m│ \u001b[0m\u001b[32m'first_gpu'\u001b[0m: \u001b[3;91mFalse\u001b[0m,\n",
- "\u001b[2;32m│ \u001b[0m\u001b[32m'last_gpu'\u001b[0m: \u001b[3;91mFalse\u001b[0m,\n",
- "\u001b[2;32m│ \u001b[0m\u001b[32m'gpu'\u001b[0m: \u001b[32m'auto'\u001b[0m,\n",
- "\u001b[2;32m│ \u001b[0m\u001b[32m'max_edge_length_ratio'\u001b[0m: \u001b[1;36m0.25\u001b[0m,\n",
- "\u001b[2;32m│ \u001b[0m\u001b[32m'dist_penalty_weight'\u001b[0m: \u001b[1;36m1.0\u001b[0m,\n",
- "\u001b[2;32m│ \u001b[0m\u001b[32m'batch_size'\u001b[0m: \u001b[1;36m4\u001b[0m,\n",
- "\u001b[2;32m│ \u001b[0m\u001b[32m'open_in_gui'\u001b[0m: \u001b[3;91mFalse\u001b[0m,\n",
- "\u001b[2;32m│ \u001b[0m\u001b[32m'peak_threshold'\u001b[0m: \u001b[1;36m0.2\u001b[0m,\n",
- "\u001b[2;32m│ \u001b[0m\u001b[32m'max_instances'\u001b[0m: \u001b[3;35mNone\u001b[0m,\n",
- "\u001b[2;32m│ \u001b[0m\u001b[32m'tracking.tracker'\u001b[0m: \u001b[3;35mNone\u001b[0m,\n",
- "\u001b[2;32m│ \u001b[0m\u001b[32m'tracking.target_instance_count'\u001b[0m: \u001b[3;35mNone\u001b[0m,\n",
- "\u001b[2;32m│ \u001b[0m\u001b[32m'tracking.pre_cull_to_target'\u001b[0m: \u001b[3;35mNone\u001b[0m,\n",
- "\u001b[2;32m│ \u001b[0m\u001b[32m'tracking.pre_cull_iou_threshold'\u001b[0m: \u001b[3;35mNone\u001b[0m,\n",
- "\u001b[2;32m│ \u001b[0m\u001b[32m'tracking.post_connect_single_breaks'\u001b[0m: \u001b[3;35mNone\u001b[0m,\n",
- "\u001b[2;32m│ \u001b[0m\u001b[32m'tracking.clean_instance_count'\u001b[0m: \u001b[3;35mNone\u001b[0m,\n",
- "\u001b[2;32m│ \u001b[0m\u001b[32m'tracking.clean_iou_threshold'\u001b[0m: \u001b[3;35mNone\u001b[0m,\n",
- "\u001b[2;32m│ \u001b[0m\u001b[32m'tracking.similarity'\u001b[0m: \u001b[3;35mNone\u001b[0m,\n",
- "\u001b[2;32m│ \u001b[0m\u001b[32m'tracking.match'\u001b[0m: \u001b[3;35mNone\u001b[0m,\n",
- "\u001b[2;32m│ \u001b[0m\u001b[32m'tracking.robust'\u001b[0m: \u001b[3;35mNone\u001b[0m,\n",
- "\u001b[2;32m│ \u001b[0m\u001b[32m'tracking.track_window'\u001b[0m: \u001b[3;35mNone\u001b[0m,\n",
- "\u001b[2;32m│ \u001b[0m\u001b[32m'tracking.min_new_track_points'\u001b[0m: \u001b[3;35mNone\u001b[0m,\n",
- "\u001b[2;32m│ \u001b[0m\u001b[32m'tracking.min_match_points'\u001b[0m: \u001b[3;35mNone\u001b[0m,\n",
- "\u001b[2;32m│ \u001b[0m\u001b[32m'tracking.img_scale'\u001b[0m: \u001b[3;35mNone\u001b[0m,\n",
- "\u001b[2;32m│ \u001b[0m\u001b[32m'tracking.of_window_size'\u001b[0m: \u001b[3;35mNone\u001b[0m,\n",
- "\u001b[2;32m│ \u001b[0m\u001b[32m'tracking.of_max_levels'\u001b[0m: \u001b[3;35mNone\u001b[0m,\n",
- "\u001b[2;32m│ \u001b[0m\u001b[32m'tracking.save_shifted_instances'\u001b[0m: \u001b[3;35mNone\u001b[0m,\n",
- "\u001b[2;32m│ \u001b[0m\u001b[32m'tracking.kf_node_indices'\u001b[0m: \u001b[3;35mNone\u001b[0m,\n",
- "\u001b[2;32m│ \u001b[0m\u001b[32m'tracking.kf_init_frame_count'\u001b[0m: \u001b[3;35mNone\u001b[0m\n",
- "\u001b[1m}\u001b[0m\n",
- "\n",
- "2023-09-01 13:42:03.098811: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:939] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
- "2023-09-01 13:42:03.103255: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:939] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
- "2023-09-01 13:42:03.103982: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:939] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
- "INFO:sleap.nn.inference:Auto-selected GPU 0 with 23050 MiB of free memory.\n",
- "Versions:\n",
- "SLEAP: 1.3.2\n",
- "TensorFlow: 2.7.0\n",
- "Numpy: 1.21.5\n",
- "Python: 3.7.12\n",
- "OS: Linux-5.15.0-78-generic-x86_64-with-debian-bookworm-sid\n",
- "\n",
- "System:\n",
- "GPUs: 1/1 available\n",
- " Device: /physical_device:GPU:0\n",
- " Available: True\n",
- " Initalized: False\n",
- " Memory growth: True\n",
- "\n",
- "Video: dataset/drosophila-melanogaster-courtship/20190128_113421.mp4\n",
- "2023-09-01 13:42:03.157392: I tensorflow/core/platform/cpu_feature_guard.cc:151] This TensorFlow binary is optimized with oneAPI Deep Neural Network Library (oneDNN) to use the following CPU instructions in performance-critical operations: AVX2 AVX512F FMA\n",
- "To enable them in other operations, rebuild TensorFlow with the appropriate compiler flags.\n",
- "2023-09-01 13:42:03.158019: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:939] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
- "2023-09-01 13:42:03.158864: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:939] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
- "2023-09-01 13:42:03.159656: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:939] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
- "2023-09-01 13:42:03.455402: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:939] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
- "2023-09-01 13:42:03.456138: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:939] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
- "2023-09-01 13:42:03.456803: I tensorflow/stream_executor/cuda/cuda_gpu_executor.cc:939] successful NUMA node read from SysFS had negative value (-1), but there must be at least one NUMA node, so returning NUMA node zero\n",
- "2023-09-01 13:42:03.457464: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1525] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 21145 MB memory: -> device: 0, name: NVIDIA RTX A5000, pci bus id: 0000:01:00.0, compute capability: 8.6\n",
- "\u001b[2KPredicting... \u001b[38;5;237m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[35m 0%\u001b[0m ETA: \u001b[36m-:--:--\u001b[0m \u001b[31m?\u001b[0m2023-09-01 13:42:07.038687: I tensorflow/stream_executor/cuda/cuda_dnn.cc:366] Loaded cuDNN version 8201\n",
- "\u001b[2KPredicting... \u001b[38;2;114;156;31m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[35m100%\u001b[0m ETA: \u001b[36m0:00:00\u001b[0m \u001b[31m51.9 FPS\u001b[0m[0m \u001b[31m126.4 FPS\u001b[0m FPS\u001b[0mFPS\u001b[0m\n",
- "\u001b[?25hFinished inference at: 2023-09-01 13:42:10.842469\n",
- "Total runtime: 7.775644779205322 secs\n",
- "Predicted frames: 101/101\n",
- "Provenance:\n",
- "\u001b[1m{\u001b[0m\n",
- "\u001b[2;32m│ \u001b[0m\u001b[32m'model_paths'\u001b[0m: \u001b[1m[\u001b[0m\n",
- "\u001b[2;32m│ │ \u001b[0m\u001b[32m'models/courtship.centroid/training_config.json'\u001b[0m,\n",
- "\u001b[2;32m│ │ \u001b[0m\u001b[32m'models/courtship.topdown_confmaps/training_config.json'\u001b[0m\n",
- "\u001b[2;32m│ \u001b[0m\u001b[1m]\u001b[0m,\n",
- "\u001b[2;32m│ \u001b[0m\u001b[32m'predictor'\u001b[0m: \u001b[32m'TopDownPredictor'\u001b[0m,\n",
- "\u001b[2;32m│ \u001b[0m\u001b[32m'sleap_version'\u001b[0m: \u001b[32m'1.3.2'\u001b[0m,\n",
- "\u001b[2;32m│ \u001b[0m\u001b[32m'platform'\u001b[0m: \u001b[32m'Linux-5.15.0-78-generic-x86_64-with-debian-bookworm-sid'\u001b[0m,\n",
- "\u001b[2;32m│ \u001b[0m\u001b[32m'command'\u001b[0m: \u001b[32m'/home/talmolab/micromamba/envs/s0/bin/sleap-track dataset/drosophila-melanogaster-courtship/20190128_113421.mp4 --frames 0-100 -m models/courtship.centroid -m models/courtship.topdown_confmaps'\u001b[0m,\n",
- "\u001b[2;32m│ \u001b[0m\u001b[32m'data_path'\u001b[0m: \u001b[32m'dataset/drosophila-melanogaster-courtship/20190128_113421.mp4'\u001b[0m,\n",
- "\u001b[2;32m│ \u001b[0m\u001b[32m'output_path'\u001b[0m: \u001b[32m'dataset/drosophila-melanogaster-courtship/20190128_113421.mp4.predictions.slp'\u001b[0m,\n",
- "\u001b[2;32m│ \u001b[0m\u001b[32m'total_elapsed'\u001b[0m: \u001b[1;36m7.775644779205322\u001b[0m,\n",
- "\u001b[2;32m│ \u001b[0m\u001b[32m'start_timestamp'\u001b[0m: \u001b[32m'2023-09-01 13:42:03.066840'\u001b[0m,\n",
- "\u001b[2;32m│ \u001b[0m\u001b[32m'finish_timestamp'\u001b[0m: \u001b[32m'2023-09-01 13:42:10.842469'\u001b[0m\n",
- "\u001b[1m}\u001b[0m\n",
- "\n",
- "Saved output: dataset/drosophila-melanogaster-courtship/20190128_113421.mp4.predictions.slp\n"
- ]
- }
- ],
- "source": [
- "!sleap-track \"dataset/drosophila-melanogaster-courtship/20190128_113421.mp4\" --frames 0-100 -m \"models/courtship.centroid\" -m \"models/courtship.topdown_confmaps\""
- ]
- },
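The same top-down inference can be driven from the Python API instead of the sleap-track CLI; a minimal sketch, assuming the two model folders trained above:

    import sleap

    # Load both stages of the top-down pipeline into one predictor.
    predictor = sleap.load_model([
        "models/courtship.centroid",
        "models/courtship.topdown_confmaps",
    ])

    # Run on the same 0-100 frame range as the --frames option above.
    video = sleap.load_video("dataset/drosophila-melanogaster-courtship/20190128_113421.mp4")
    labels = predictor.predict(video[:101])
    labels.save("predictions.slp")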
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "nzObCUToEqwA"
- },
- "source": [
- "When inference is finished, predictions are saved in a file. Since we didn't specify a path, it will be saved as `
-
-sleap.info.align.align_instance_points(source_points_array, target_points_array)[source]
+sleap.info.align.align_instance_points(source_points_array, target_points_array)[source]
Transforms source for best fit onto target.
-
-sleap.info.align.align_instances(all_points_arrays: numpy.ndarray, node_a: int, node_b: int, rotate_on_node_a: bool = False) → numpy.ndarray[source]
+sleap.info.align.align_instances(all_points_arrays: numpy.ndarray, node_a: int, node_b: int, rotate_on_node_a: bool = False) → numpy.ndarray[source]
Rotates every instance so that the line from node_a to node_b aligns.
-
-sleap.info.align.align_instances_on_most_stable(all_points_arrays: numpy.ndarray, min_stable_dist: float = 4.0) → numpy.ndarray[source]
+sleap.info.align.align_instances_on_most_stable(all_points_arrays: numpy.ndarray, min_stable_dist: float = 4.0) → numpy.ndarray[source]
Finds the most stable pair of nodes and returns instances aligned along these nodes.
-
-sleap.info.align.get_instances_points(instances: List[sleap.instance.Instance]) → numpy.ndarray[source]
+sleap.info.align.get_instances_points(instances: List[sleap.instance.Instance]) → numpy.ndarray[source]
Returns single (instance, node, 2) matrix with points for all instances.
-
-sleap.info.align.get_mean_and_std_for_points(aligned_points_arrays: numpy.ndarray) → Tuple[numpy.ndarray, numpy.ndarray][source]
+sleap.info.align.get_mean_and_std_for_points(aligned_points_arrays: numpy.ndarray) → Tuple[numpy.ndarray, numpy.ndarray][source]
Returns mean and standard deviation for every node given aligned points.
-
-sleap.info.align.get_most_stable_node_pair(all_points_arrays: numpy.ndarray, min_dist: float = 0.0) → Tuple[int, int][source]
+sleap.info.align.get_most_stable_node_pair(all_points_arrays: numpy.ndarray, min_dist: float = 0.0) → Tuple[int, int][source]
Returns pair of nodes which are at stable distance (over min threshold).
-
-sleap.info.align.get_stable_node_pairs(all_points_arrays: numpy.ndarray, node_names, min_dist: float = 0.0)[source]
+sleap.info.align.get_stable_node_pairs(all_points_arrays: numpy.ndarray, node_names, min_dist: float = 0.0)[source]
Returns sorted list of node pairs with mean and standard dev distance.
-
-sleap.info.align.get_template_points_array(instances: List[sleap.instance.Instance]) → numpy.ndarray[source]
+sleap.info.align.get_template_points_array(instances: List[sleap.instance.Instance]) → numpy.ndarray[source]
Returns mean of aligned points for instances.
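Taken together, these helpers form a small alignment workflow. A minimal usage sketch based only on the signatures above, assuming the courtship labels file from the notebook earlier in this patch:

    import sleap
    from sleap.info import align

    labels = sleap.load_file("dataset/drosophila-melanogaster-courtship/courtship_labels.slp")

    # Stack all labeled instances into one (instances, nodes, 2) array.
    pts = align.get_instances_points(list(labels.instances()))

    # Align instances along the most stable node pair, then summarize
    # per-node mean position and spread.
    aligned = align.align_instances_on_most_stable(pts)
    mean_pts, std_pts = align.get_mean_and_std_for_points(aligned)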
diff --git a/develop/api/sleap.info.feature_suggestions.html b/develop/api/sleap.info.feature_suggestions.html
index e2f42130d..b731417c4 100644
--- a/develop/api/sleap.info.feature_suggestions.html
+++ b/develop/api/sleap.info.feature_suggestions.html
@@ -9,7 +9,7 @@
- sleap.info.feature_suggestions — SLEAP (v1.4.1a2)
+ sleap.info.feature_suggestions — SLEAP (v1.3.4)
@@ -322,7 +322,7 @@ sleap.info.feature_suggestions
Module for generating lists of frames using frame features, pca, kmeans, etc.
-
-class sleap.info.feature_suggestions.FrameGroupSet(method: str, item_group: Dict[sleap.info.feature_suggestions.FrameItem, int] = NOTHING, group_data: Dict[int, dict] = NOTHING, groupset_data: Dict = NOTHING)[source]
+class sleap.info.feature_suggestions.FrameGroupSet(method: str, item_group: Dict[sleap.info.feature_suggestions.FrameItem, int] = NOTHING, group_data: Dict[int, dict] = NOTHING, groupset_data: Dict = NOTHING)[source]
Class for a set of groups of FrameItem objects.
Each item can have at most one group; each group is represented as an int.
@@ -378,19 +378,19 @@ sleap.info.feature_suggestions
-
-append_to_group(group: int, item: sleap.info.feature_suggestions.FrameItem)[source]
+append_to_group(group: int, item: sleap.info.feature_suggestions.FrameItem)[source]
Adds item to group.
-
-extend_group_items(group: int, item_list: List[sleap.info.feature_suggestions.FrameItem])[source]
+extend_group_items(group: int, item_list: List[sleap.info.feature_suggestions.FrameItem])[source]
Adds all items in list to group.
-
-get_item_group(item: sleap.info.feature_suggestions.FrameItem)[source]
+get_item_group(item: sleap.info.feature_suggestions.FrameItem)[source]
Returns group that contains item.
@@ -402,7 +402,7 @@ sleap.info.feature_suggestions
-
-sample(per_group: int, unique_samples: bool = True)[source]
+sample(per_group: int, unique_samples: bool = True)[source]
Returns new FrameGroupSet with groups sampled from current groups.
Note that the order of items in the new groups will not match the order of
items in the groups from which samples are drawn.
@@ -424,13 +424,13 @@ sleap.info.feature_suggestions
-
-class sleap.info.feature_suggestions.FrameItem(video: sleap.io.video.Video, frame_idx: int)[source]
+class sleap.info.feature_suggestions.FrameItem(video: sleap.io.video.Video, frame_idx: int)[source]
Just a simple wrapper for (video, frame_idx), plus method to get image.
-
-class sleap.info.feature_suggestions.ItemStack(items: List = NOTHING, data: Optional[numpy.ndarray] = None, ownership: Optional[List[tuple]] = None, meta: List = NOTHING, group_sets: List[sleap.info.feature_suggestions.FrameGroupSet] = NOTHING)[source]
+class sleap.info.feature_suggestions.ItemStack(items: List = NOTHING, data: Optional[numpy.ndarray] = None, ownership: Optional[List[tuple]] = None, meta: List = NOTHING, group_sets: List[sleap.info.feature_suggestions.FrameGroupSet] = NOTHING)[source]
Container for items; each item can “own” one or more rows of data.
-
@@ -490,7 +490,7 @@ sleap.info.feature_suggestions
-
-brisk_bag_of_features(brisk_threshold=40, vocab_size=20)[source]
+brisk_bag_of_features(brisk_threshold=40, vocab_size=20)[source]
Transforms data using bag of features based on brisk features.
@@ -502,67 +502,67 @@ sleap.info.feature_suggestions
-
-extend_ownership(ownership, row_count)[source]
+extend_ownership(ownership, row_count)[source]
Extends an ownership list with number of rows owned by next item.
-
-flatten()[source]
+flatten()[source]
Flattens each row of data to 1-d array.
-
-get_all_items_from_group()[source]
+get_all_items_from_group()[source]
Sets items for Stack to all items from current GroupSet.
-
-get_item_data(item)[source]
+get_item_data(item)[source]
Returns rows of data which belong to item.
-
-get_item_data_idxs(item)[source]
+get_item_data_idxs(item)[source]
Returns indexes of rows in data which belong to item.
-
-get_raw_images(scale=0.5)[source]
+get_raw_images(scale=0.5)[source]
Sets data to raw image for each FrameItem.
-
-hog_bag_of_features(brisk_threshold=40, vocab_size=20)[source]
+hog_bag_of_features(brisk_threshold=40, vocab_size=20)[source]
Transforms data into bag of features vector of hog descriptors.
-
-kmeans(n_clusters: int)[source]
+kmeans(n_clusters: int)[source]
Adds GroupSet using k-means clustering on data.
-
-make_sample_group(videos: List[sleap.io.video.Video], samples_per_video: int, sample_method: str = 'stride')[source]
+make_sample_group(videos: List[sleap.io.video.Video], samples_per_video: int, sample_method: str = 'stride')[source]
Adds GroupSet by sampling frames from each video.
-
-pca(n_components: int)[source]
+pca(n_components: int)[source]
Transforms data by applying PCA.
-
-sample_groups(samples_per_group: int)[source]
+sample_groups(samples_per_group: int)[source]
Adds GroupSet by sampling items from current GroupSet.
@@ -570,7 +570,7 @@ sleap.info.feature_suggestions
-
-class sleap.info.feature_suggestions.ParallelFeaturePipeline(pipeline: sleap.info.feature_suggestions.FeatureSuggestionPipeline, videos_as_dicts: List[Dict])[source]
+class sleap.info.feature_suggestions.ParallelFeaturePipeline(pipeline: sleap.info.feature_suggestions.FeatureSuggestionPipeline, videos_as_dicts: List[Dict])[source]
Enables easy per-video pipeline parallelization for feature suggestions.
Create a FeatureSuggestionPipeline
with the desired parameters, and
then call ParallelFeaturePipeline.run()
with the pipeline and the list
@@ -579,25 +579,25 @@ sleap.info.feature_suggestions
the results back into a single list of SuggestionFrame
objects.
-
-get(video_idx)[source]
+get(video_idx)[source]
Apply pipeline to single video by idx. Can be called in process.
-
-classmethod make(pipeline, videos)[source]
+classmethod make(pipeline, videos)[source]
Make class object from pipeline and list of videos.
-
-classmethod run(pipeline, videos, parallel=True)[source]
+classmethod run(pipeline, videos, parallel=True)[source]
Runs pipeline on all videos in parallel and returns suggestions.
-
-classmethod tuples_to_suggestions(tuples, videos)[source]
+classmethod tuples_to_suggestions(tuples, videos)[source]
Converts serialized data from processes back into SuggestionFrames.
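As described above, the intended entry point is to build a pipeline and hand it to ParallelFeaturePipeline.run(). A minimal sketch; the FeatureSuggestionPipeline parameter values shown are illustrative assumptions, since they are not listed in this hunk, and labels is assumed to be a loaded Labels object:

    from sleap.info.feature_suggestions import (
        FeatureSuggestionPipeline,
        ParallelFeaturePipeline,
    )

    # Parameter values here are assumptions for illustration; check the
    # FeatureSuggestionPipeline docs for the full set and defaults.
    pipeline = FeatureSuggestionPipeline(
        per_video=20,
        sample_method="stride",
        scale=0.5,
        feature_type="raw",
        n_components=5,
        n_clusters=5,
        per_cluster=5,
    )

    # Runs the pipeline per video (in parallel) and merges the results
    # into a single list of SuggestionFrame objects.
    suggestions = ParallelFeaturePipeline.run(pipeline, labels.videos)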
diff --git a/develop/api/sleap.info.labels.html b/develop/api/sleap.info.labels.html
index d6c715684..ec4701338 100644
--- a/develop/api/sleap.info.labels.html
+++ b/develop/api/sleap.info.labels.html
@@ -9,7 +9,7 @@
- sleap.info.labels — SLEAP (v1.4.1a2)
+ sleap.info.labels — SLEAP (v1.3.4)
diff --git a/develop/api/sleap.info.metrics.html b/develop/api/sleap.info.metrics.html
index 579c1f2f1..e87beee04 100644
--- a/develop/api/sleap.info.metrics.html
+++ b/develop/api/sleap.info.metrics.html
@@ -9,7 +9,7 @@
- sleap.info.metrics — SLEAP (v1.4.1a2)
+ sleap.info.metrics — SLEAP (v1.3.4)
@@ -322,40 +322,40 @@ sleap.info.metrics
Module for producing prediction metrics for SLEAP datasets.
-
-sleap.info.metrics.calculate_pairwise_cost(instances_a: List[Union[sleap.instance.Instance, sleap.instance.PredictedInstance]], instances_b: List[Union[sleap.instance.Instance, sleap.instance.PredictedInstance]], cost_function: Callable) → numpy.ndarray[source]
+sleap.info.metrics.calculate_pairwise_cost(instances_a: List[Union[sleap.instance.Instance, sleap.instance.PredictedInstance]], instances_b: List[Union[sleap.instance.Instance, sleap.instance.PredictedInstance]], cost_function: Callable) → numpy.ndarray[source]
Calculate (a * b) matrix of pairwise costs using cost function.
-
-sleap.info.metrics.compare_instance_lists(instances_a: List[Union[sleap.instance.Instance, sleap.instance.PredictedInstance]], instances_b: List[Union[sleap.instance.Instance, sleap.instance.PredictedInstance]]) → numpy.ndarray[source]
+sleap.info.metrics.compare_instance_lists(instances_a: List[Union[sleap.instance.Instance, sleap.instance.PredictedInstance]], instances_b: List[Union[sleap.instance.Instance, sleap.instance.PredictedInstance]]) → numpy.ndarray[source]
Given two lists of corresponding Instances, returns
(instances * nodes) matrix of distances between corresponding nodes.
-
-sleap.info.metrics.list_points_array(instances: List[Union[sleap.instance.Instance, sleap.instance.PredictedInstance]]) → numpy.ndarray[source]
+sleap.info.metrics.list_points_array(instances: List[Union[sleap.instance.Instance, sleap.instance.PredictedInstance]]) → numpy.ndarray[source]
Given list of Instances, returns (instances * nodes * 2) matrix.
-
-sleap.info.metrics.match_instance_lists(instances_a: List[Union[sleap.instance.Instance, sleap.instance.PredictedInstance]], instances_b: List[Union[sleap.instance.Instance, sleap.instance.PredictedInstance]], cost_function: Callable) → Tuple[List[Union[sleap.instance.Instance, sleap.instance.PredictedInstance]], List[Union[sleap.instance.Instance, sleap.instance.PredictedInstance]]][source]
+sleap.info.metrics.match_instance_lists(instances_a: List[Union[sleap.instance.Instance, sleap.instance.PredictedInstance]], instances_b: List[Union[sleap.instance.Instance, sleap.instance.PredictedInstance]], cost_function: Callable) → Tuple[List[Union[sleap.instance.Instance, sleap.instance.PredictedInstance]], List[Union[sleap.instance.Instance, sleap.instance.PredictedInstance]]][source]
Sorts two lists of Instances to find best overall correspondence
for a given cost function (e.g., total distance between points).
-
-sleap.info.metrics.match_instance_lists_nodewise(instances_a: List[Union[sleap.instance.Instance, sleap.instance.PredictedInstance]], instances_b: List[Union[sleap.instance.Instance, sleap.instance.PredictedInstance]], thresh: float = 5) → Tuple[List[Union[sleap.instance.Instance, sleap.instance.PredictedInstance]], List[Union[sleap.instance.Instance, sleap.instance.PredictedInstance]]][source]
+sleap.info.metrics.match_instance_lists_nodewise(instances_a: List[Union[sleap.instance.Instance, sleap.instance.PredictedInstance]], instances_b: List[Union[sleap.instance.Instance, sleap.instance.PredictedInstance]], thresh: float = 5) → Tuple[List[Union[sleap.instance.Instance, sleap.instance.PredictedInstance]], List[Union[sleap.instance.Instance, sleap.instance.PredictedInstance]]][source]
For each node of each instance in the first list, pairs it with the
closest corresponding node from any instance in the second list.
-
-sleap.info.metrics.matched_instance_distances(labels_gt: sleap.io.dataset.Labels, labels_pr: sleap.io.dataset.Labels, match_lists_function: typing.Callable = <function match_instance_lists_nodewise>, frame_range: typing.Optional[range] = None) → Tuple[List[int], numpy.ndarray, numpy.ndarray, numpy.ndarray][source]
+sleap.info.metrics.matched_instance_distances(labels_gt: sleap.io.dataset.Labels, labels_pr: sleap.io.dataset.Labels, match_lists_function: Callable, frame_range: Optional[range] = None) → Tuple[List[int], numpy.ndarray, numpy.ndarray, numpy.ndarray][source]
Distances between ground truth and predicted nodes over a set of frames.
- Parameters
@@ -386,26 +386,26 @@ sleap.info.metrics
-
-sleap.info.metrics.nodeless_point_dist(inst_a: Union[sleap.instance.Instance, sleap.instance.PredictedInstance], inst_b: Union[sleap.instance.Instance, sleap.instance.PredictedInstance]) → numpy.ndarray[source]
+sleap.info.metrics.nodeless_point_dist(inst_a: Union[sleap.instance.Instance, sleap.instance.PredictedInstance], inst_b: Union[sleap.instance.Instance, sleap.instance.PredictedInstance]) → numpy.ndarray[source]
Given two instances, returns array of distances for closest points
ignoring node identities.
-
-sleap.info.metrics.point_dist(inst_a: Union[sleap.instance.Instance, sleap.instance.PredictedInstance], inst_b: Union[sleap.instance.Instance, sleap.instance.PredictedInstance]) → numpy.ndarray[source]
+sleap.info.metrics.point_dist(inst_a: Union[sleap.instance.Instance, sleap.instance.PredictedInstance], inst_b: Union[sleap.instance.Instance, sleap.instance.PredictedInstance]) → numpy.ndarray[source]
Given two instances, returns array of distances for corresponding nodes.
-
-sleap.info.metrics.point_match_count(dist_array: numpy.ndarray, thresh: float = 5) → int[source]
+sleap.info.metrics.point_match_count(dist_array: numpy.ndarray, thresh: float = 5) → int[source]
Given an array of distances, returns the number that are <= the threshold.
-
-sleap.info.metrics.point_nonmatch_count(dist_array: numpy.ndarray, thresh: float = 5) → int[source]
+sleap.info.metrics.point_nonmatch_count(dist_array: numpy.ndarray, thresh: float = 5) → int[source]
Given an array of distances, returns the number that are not <= the threshold.
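As a usage sketch (not part of the generated reference), these helpers compose as follows; the file paths are hypothetical and the 5-pixel threshold is the documented default:

>>> from sleap.io.dataset import Labels
>>> from sleap.info.metrics import (
...     match_instance_lists_nodewise,
...     matched_instance_distances,
...     point_match_count,
... )
>>> labels_gt = Labels.load_file("gt.slp")    # hypothetical ground truth
>>> labels_pr = Labels.load_file("pred.slp")  # hypothetical predictions
>>> frame_idxs, dists, pts_gt, pts_pr = matched_instance_distances(
...     labels_gt, labels_pr, match_instance_lists_nodewise)
>>> n_matched = point_match_count(dists, thresh=5)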
diff --git a/develop/api/sleap.info.summary.html b/develop/api/sleap.info.summary.html
index b51ab9283..b0b946870 100644
--- a/develop/api/sleap.info.summary.html
+++ b/develop/api/sleap.info.summary.html
@@ -9,7 +9,7 @@
- sleap.info.summary — SLEAP (v1.4.1a2)
+ sleap.info.summary — SLEAP (v1.3.4)
@@ -323,7 +323,7 @@ sleap.info.summary
data for each frame of some labeled video.
-
-class sleap.info.summary.StatisticSeries(labels: sleap.io.dataset.Labels)[source]
+class sleap.info.summary.StatisticSeries(labels: sleap.io.dataset.Labels)[source]
Class to calculate various statistical series for labeled frames.
Each method returns a series: a dictionary in which keys are frame
indices and values are some numerical value for the frame.
@@ -334,7 +334,7 @@ sleap.info.summary
-
-get_instance_score_series(video, reduction='sum') → Dict[int, float][source]
+get_instance_score_series(video, reduction='sum') → Dict[int, float][source]
Get series with statistic of instance scores in each frame.
- Parameters
@@ -353,13 +353,13 @@ sleap.info.summary
-
-get_point_count_series(video: sleap.io.video.Video) → Dict[int, float][source]
+get_point_count_series(video: sleap.io.video.Video) → Dict[int, float][source]
Get series with total number of labeled points in each frame.
-
-get_point_displacement_series(video, reduction='sum') → Dict[int, float][source]
+get_point_displacement_series(video, reduction='sum') → Dict[int, float][source]
Get series with statistic of point displacement in each frame.
Point displacement is the distance between the point location in
frame and the location of the corresponding point (same node,
@@ -382,7 +382,7 @@
sleap.info.summary
-
-get_point_score_series(video: sleap.io.video.Video, reduction: str = 'sum') → Dict[int, float][source]
+get_point_score_series(video: sleap.io.video.Video, reduction: str = 'sum') → Dict[int, float][source]
Get series with statistic of point scores in each frame.
- Parameters
@@ -401,7 +401,7 @@ sleap.info.summary
-
-get_primary_point_displacement_series(video, reduction='sum', primary_node=None)[source]
+get_primary_point_displacement_series(video, reduction='sum', primary_node=None)[source]
Get the sum of displacement for a single node of each instance per frame.
- Parameters
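A brief usage sketch, with a hypothetical labels path; reduction="sum" is the documented default:

>>> from sleap.io.dataset import Labels
>>> from sleap.info.summary import StatisticSeries
>>> labels = Labels.load_file("labels.slp")  # hypothetical path
>>> stats = StatisticSeries(labels)
>>> video = labels.videos[0]
>>> scores = stats.get_instance_score_series(video, reduction="sum")
>>> counts = stats.get_point_count_series(video)  # {frame_idx: point count}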
diff --git a/develop/api/sleap.info.trackcleaner.html b/develop/api/sleap.info.trackcleaner.html
index 109ae6a7e..d904c5c98 100644
--- a/develop/api/sleap.info.trackcleaner.html
+++ b/develop/api/sleap.info.trackcleaner.html
@@ -9,7 +9,7 @@
- sleap.info.trackcleaner — SLEAP (v1.4.1a2)
+ sleap.info.trackcleaner — SLEAP (v1.3.4)
@@ -328,7 +328,7 @@ sleap.info.trackcleaner
it will be better to use the sleap-track
CLI.
-
-sleap.info.trackcleaner.fit_tracks(filename: str, instance_count: int)[source]
+sleap.info.trackcleaner.fit_tracks(filename: str, instance_count: int)[source]
Wraps TrackCleaner
for an easier CLI API.
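A one-line usage sketch; the predictions path is hypothetical and instance_count assumes two animals per frame:

>>> from sleap.info.trackcleaner import fit_tracks
>>> fit_tracks(filename="predictions.slp", instance_count=2)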
diff --git a/develop/api/sleap.info.write_tracking_h5.html b/develop/api/sleap.info.write_tracking_h5.html
index 962ac1075..41c1bfcb9 100644
--- a/develop/api/sleap.info.write_tracking_h5.html
+++ b/develop/api/sleap.info.write_tracking_h5.html
@@ -9,7 +9,7 @@
- sleap.info.write_tracking_h5 — SLEAP (v1.4.1a2)
+ sleap.info.write_tracking_h5 — SLEAP (v1.3.4)
@@ -343,19 +343,19 @@ sleap.info.write_tracking_h5
Note: the datasets are stored column-major as expected by MATLAB.
-
-sleap.info.write_tracking_h5.get_edges_as_np_strings(labels: sleap.io.dataset.Labels) → List[Tuple[numpy.bytes_, numpy.bytes_]][source]
+sleap.info.write_tracking_h5.get_edges_as_np_strings(labels: sleap.io.dataset.Labels) → List[Tuple[numpy.bytes_, numpy.bytes_]][source]
Get list of edge names as np.string_.
-
-sleap.info.write_tracking_h5.get_nodes_as_np_strings(labels: sleap.io.dataset.Labels) → List[numpy.bytes_][source]
+sleap.info.write_tracking_h5.get_nodes_as_np_strings(labels: sleap.io.dataset.Labels) → List[numpy.bytes_][source]
Get list of node names as np.string_.
-
-sleap.info.write_tracking_h5.get_occupancy_and_points_matrices(labels: sleap.io.dataset.Labels, all_frames: bool, video: Optional[sleap.io.video.Video] = None) → Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray][source]
+sleap.info.write_tracking_h5.get_occupancy_and_points_matrices(labels: sleap.io.dataset.Labels, all_frames: bool, video: Optional[sleap.io.video.Video] = None) → Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray][source]
Builds numpy matrices with track occupancy and point location data.
Note: This function assumes either all instances have tracks or no instances have
tracks.
@@ -390,13 +390,13 @@ sleap.info.write_tracking_h5
-
-sleap.info.write_tracking_h5.get_tracks_as_np_strings(labels: sleap.io.dataset.Labels) → List[numpy.bytes_][source]
+sleap.info.write_tracking_h5.get_tracks_as_np_strings(labels: sleap.io.dataset.Labels) → List[numpy.bytes_][source]
Get list of track names as np.string_.
-
-sleap.info.write_tracking_h5.main(labels: sleap.io.dataset.Labels, output_path: str, labels_path: Optional[str] = None, all_frames: bool = True, video: Optional[sleap.io.video.Video] = None, csv: bool = False)[source]
+sleap.info.write_tracking_h5.main(labels: sleap.io.dataset.Labels, output_path: str, labels_path: Optional[str] = None, all_frames: bool = True, video: Optional[sleap.io.video.Video] = None, csv: bool = False)[source]
Writes HDF5 file with matrices of track occupancy and coordinates.
- Parameters
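As a sketch of a typical call (hypothetical paths; all_frames=True and csv=False are the documented defaults):

>>> from sleap.io.dataset import Labels
>>> from sleap.info.write_tracking_h5 import main
>>> labels = Labels.load_file("predictions.slp")  # hypothetical path
>>> main(labels, output_path="tracks.analysis.h5", all_frames=True)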
@@ -422,7 +422,7 @@ sleap.info.write_tracking_h5
-
-sleap.info.write_tracking_h5.remove_empty_tracks_from_matrices(track_names: List, occupancy_matrix: numpy.ndarray, locations_matrix: numpy.ndarray, point_scores: numpy.ndarray, instance_scores: numpy.ndarray, tracking_scores: numpy.ndarray) → Tuple[List, numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray][source]
+sleap.info.write_tracking_h5.remove_empty_tracks_from_matrices(track_names: List, occupancy_matrix: numpy.ndarray, locations_matrix: numpy.ndarray, point_scores: numpy.ndarray, instance_scores: numpy.ndarray, tracking_scores: numpy.ndarray) → Tuple[List, numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray, numpy.ndarray][source]
Removes matrix rows/columns for unoccupied tracks.
- Parameters
@@ -444,7 +444,7 @@ sleap.info.write_tracking_h5
-
-sleap.info.write_tracking_h5.write_csv_file(output_path, data_dict)[source]
+sleap.info.write_tracking_h5.write_csv_file(output_path, data_dict)[source]
Write CSV file with data from given dictionary.
- Parameters
@@ -462,7 +462,7 @@ sleap.info.write_tracking_h5
-
-sleap.info.write_tracking_h5.write_occupancy_file(output_path: str, data_dict: Dict[str, Any], transpose: bool = True)[source]
+sleap.info.write_tracking_h5.write_occupancy_file(output_path: str, data_dict: Dict[str, Any], transpose: bool = True)[source]
Write HDF5 file with data from given dictionary.
- Parameters
diff --git a/develop/api/sleap.instance.html b/develop/api/sleap.instance.html
index 81b3fe62b..b5094015f 100644
--- a/develop/api/sleap.instance.html
+++ b/develop/api/sleap.instance.html
@@ -9,7 +9,7 @@
- sleap.instance — SLEAP (v1.4.1a2)
+ sleap.instance — SLEAP (v1.3.4)
@@ -334,7 +334,7 @@ sleap.instance
-
-class sleap.instance.Instance(skeleton: sleap.skeleton.Skeleton, track: sleap.instance.Track = None, from_predicted: Optional[PredictedInstance] = None, points: sleap.instance.PointArray = None, nodes: List = None, frame: Optional[LabeledFrame] = None)[source]
+class sleap.instance.Instance(skeleton: sleap.skeleton.Skeleton, track: sleap.instance.Track = None, from_predicted: Optional[PredictedInstance] = None, points: sleap.instance.PointArray = None, nodes: List = None, frame: Optional[LabeledFrame] = None)[source]
This class represents a labeled instance.
- Parameters
@@ -372,7 +372,7 @@ sleap.instance
-
-fill_missing(max_x: Optional[float] = None, max_y: Optional[float] = None)[source]
+fill_missing(max_x: Optional[float] = None, max_y: Optional[float] = None)[source]
Add points for skeleton nodes that are missing in the instance.
This is useful when modifying the skeleton so the nodes appear in the GUI.
@@ -393,7 +393,7 @@ sleap.instance
-
-classmethod from_numpy(points: numpy.ndarray, skeleton: sleap.skeleton.Skeleton, track: Optional[sleap.instance.Track] = None) → sleap.instance.Instance[source]
+classmethod from_numpy(points: numpy.ndarray, skeleton: sleap.skeleton.Skeleton, track: Optional[sleap.instance.Track] = None) → sleap.instance.Instance[source]
Create an instance from a numpy array.
- Parameters
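A minimal construction sketch; the two-node skeleton and coordinates are illustrative only, and NaN rows mark missing nodes:

>>> import numpy as np
>>> from sleap.skeleton import Skeleton
>>> from sleap.instance import Instance
>>> skeleton = Skeleton()
>>> skeleton.add_node("head")
>>> skeleton.add_node("thorax")
>>> points = np.array([[10.0, 20.0], [np.nan, np.nan]])  # (nodes, 2)
>>> inst = Instance.from_numpy(points, skeleton=skeleton)
>>> inst.numpy().shape
(2, 2)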
@@ -418,7 +418,7 @@ sleap.instance
-
-classmethod from_pointsarray(points: numpy.ndarray, skeleton: sleap.skeleton.Skeleton, track: Optional[sleap.instance.Track] = None) → sleap.instance.Instance[source]
+classmethod from_pointsarray(points: numpy.ndarray, skeleton: sleap.skeleton.Skeleton, track: Optional[sleap.instance.Track] = None) → sleap.instance.Instance[source]
Create an instance from an array of points.
- Parameters
@@ -439,7 +439,7 @@ sleap.instance
-
-get_points_array(copy: bool = True, invisible_as_nan: bool = False, full: bool = False) → Union[numpy.ndarray, numpy.recarray][source]
+get_points_array(copy: bool = True, invisible_as_nan: bool = False, full: bool = False) → Union[numpy.ndarray, numpy.recarray][source]
Return the instance’s points in array form.
- Parameters
@@ -467,7 +467,7 @@ sleap.instance
-
-matches(other: sleap.instance.Instance) → bool[source]
+matches(other: sleap.instance.Instance) → bool[source]
Whether two instances match by value.
Checks the types, points, track, and frame index.
@@ -506,7 +506,7 @@ sleap.instance
-
-numpy() → numpy.ndarray[source]
+numpy() → numpy.ndarray[source]
Return the instance node coordinates as a numpy array.
Alias for points_array.
@@ -539,7 +539,7 @@ sleap.instance
-
-transform_points(transformation_matrix)[source]
+transform_points(transformation_matrix)[source]
Apply affine transformation matrix to points in the instance.
- Parameters
@@ -557,79 +557,9 @@ sleap.instance
-
--
-class sleap.instance.InstancesList(*args, labeled_frame: Optional[sleap.instance.LabeledFrame] = None)[source]
-A list of `Instance`s associated with a `LabeledFrame`.
-This class should only be used for the LabeledFrame.instances attribute.
-
--
-append(instance: Union[sleap.instance.Instance, sleap.instance.PredictedInstance])[source]
-Append an Instance
or PredictedInstance
to the list, setting the frame.
-
-- Parameters
-item – The Instance
or PredictedInstance
to append to the list.
-
-
-
-
-
--
-clear() → None[source]
-Remove all instances from list, setting instance.frame to None.
-
-
-
--
-copy() → list[source]
-Return a shallow copy of the list of instances as a list.
-Note: This will not return an InstancesList
object, but a normal list.
-
-
-
--
-extend(instances: List[Union[sleap.instance.PredictedInstance, sleap.instance.Instance]])[source]
-Extend the list with a list of `Instance`s or `PredictedInstance`s.
-
-- Parameters
-instances – A list of Instance
or PredictedInstance
objects to add to the
-list.
-
-- Returns
-None
-
-
-
-
-
--
-insert(index: int, instance: Union[sleap.instance.Instance, sleap.instance.PredictedInstance]) → None[source]
-Insert object before index.
-
-
-
--
-property labeled_frame: sleap.instance.LabeledFrame
-Return the LabeledFrame
associated with this list of instances.
-
-
-
--
-pop(index: int) → Union[sleap.instance.Instance, sleap.instance.PredictedInstance][source]
-Remove and return instance at index, setting instance.frame to None.
-
-
-
--
-remove(instance: Union[sleap.instance.Instance, sleap.instance.PredictedInstance]) → None[source]
-Remove instance from list, setting instance.frame to None.
-
-
-
-
-
-class sleap.instance.LabeledFrame(video: sleap.io.video.Video, frame_idx, instances: sleap.instance.InstancesList = NOTHING)[source]
+class sleap.instance.LabeledFrame(video: sleap.io.video.Video, frame_idx, instances: Union[List[sleap.instance.Instance], List[sleap.instance.PredictedInstance]] = NOTHING)[source]
Holds labeled data for a single frame of a video.
- Parameters
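A construction sketch with a hypothetical video file; instances defaults to an empty list:

>>> from sleap.instance import LabeledFrame
>>> from sleap.io.video import Video
>>> video = Video.from_filename("video.mp4")  # hypothetical path
>>> lf = LabeledFrame(video=video, frame_idx=0)
>>> len(lf.instances)
0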
@@ -642,7 +572,7 @@ sleap.instance
-
-classmethod complex_frame_merge(base_frame: sleap.instance.LabeledFrame, new_frame: sleap.instance.LabeledFrame) → Tuple[List[sleap.instance.Instance], List[sleap.instance.Instance], List[sleap.instance.Instance]][source]
+classmethod complex_frame_merge(base_frame: sleap.instance.LabeledFrame, new_frame: sleap.instance.LabeledFrame) → Tuple[List[sleap.instance.Instance], List[sleap.instance.Instance], List[sleap.instance.Instance]][source]
Merge two frames, return conflicts if any.
A conflict occurs when
* each frame has Instances which don’t perfectly match those
@@ -676,7 +606,7 @@ sleap.instance
-
-classmethod complex_merge_between(base_labels: Labels, new_frames: List[LabeledFrame]) → Tuple[Dict[sleap.io.video.Video, Dict[int, List[sleap.instance.Instance]]], List[sleap.instance.Instance], List[sleap.instance.Instance]][source]
+classmethod complex_merge_between(base_labels: Labels, new_frames: List[LabeledFrame]) → Tuple[Dict[sleap.io.video.Video, Dict[int, List[sleap.instance.Instance]]], List[sleap.instance.Instance], List[sleap.instance.Instance]][source]
Merge data from new frames into a Labels
object.
Everything that can be merged cleanly is merged, any conflicts
are returned.
@@ -692,7 +622,7 @@ sleap.instance
- Dictionary, keys are
Video
, values are dictionary in which keys are frame index (int)
-and value is list of :class:`Instance`s
+and value is list of :class:`Instance`s
@@ -709,13 +639,13 @@ sleap.instance
-
-find(track: Optional[Union[sleap.instance.Track, int]] = - 1, user: bool = False) → List[sleap.instance.Instance][source]
+find(track: Optional[Union[sleap.instance.Track, int]] = - 1, user: bool = False) → List[sleap.instance.Instance][source]
Retrieve instances (if any) matching specifications.
- Parameters
-
@@ -752,13 +682,13 @@
sleap.instance
-
-index(value: sleap.instance.Instance) → int[source]
+index(value: sleap.instance.Instance) → int[source]
Return index of given Instance.
-
-insert(index: int, value: sleap.instance.Instance)[source]
+insert(index: int, value: sleap.instance.Instance)[source]
Add instance to frame.
- Parameters
@@ -795,7 +725,7 @@ sleap.instance
-
-static merge_frames(labeled_frames: List[sleap.instance.LabeledFrame], video: sleap.io.video.Video, remove_redundant=True) → List[sleap.instance.LabeledFrame][source]
+static merge_frames(labeled_frames: List[sleap.instance.LabeledFrame], video: sleap.io.video.Video, remove_redundant=True) → List[sleap.instance.LabeledFrame][source]
Return merged LabeledFrames for same video and frame index.
- Parameters
@@ -809,7 +739,7 @@ sleap.instance
- Returns
-The merged list of :class:`LabeledFrame`s.
+The merged list of :class:`LabeledFrame`s.
@@ -834,13 +764,13 @@ sleap.instance
-
-numpy() → numpy.ndarray[source]
+numpy() → numpy.ndarray[source]
Return the instances as an array of shape (instances, nodes, 2).
-
-plot(image: bool = True, scale: float = 1.0)[source]
+plot(image: bool = True, scale: float = 1.0)[source]
Plot the frame with all instances.
- Parameters
@@ -860,7 +790,7 @@ sleap.instance
-
-plot_predicted(image: bool = True, scale: float = 1.0)[source]
+plot_predicted(image: bool = True, scale: float = 1.0)[source]
Plot the frame with all predicted instances.
- Parameters
@@ -886,13 +816,13 @@ sleap.instance
-
-remove_empty_instances()[source]
+remove_empty_instances()[source]
Remove instances with no visible nodes from the labeled frame.
-
-remove_untracked()[source]
+remove_untracked()[source]
Removes any instances without a track assignment.
@@ -926,7 +856,7 @@ sleap.instance
-
-class sleap.instance.Point(x: float = nan, y: float = nan, visible: bool = True, complete: bool = False)[source]
+class sleap.instance.Point(x: float = nan, y: float = nan, visible: bool = True, complete: bool = False)[source]
A labelled point and any metadata associated with it.
- Parameters
@@ -945,7 +875,7 @@ sleap.instance
-
-isnan() → bool[source]
+isnan() → bool[source]
Whether either of the coordinates is a NaN value.
- Returns
@@ -956,7 +886,7 @@ sleap.instance
-
-numpy() → numpy.ndarray[source]
+numpy() → numpy.ndarray[source]
Return the point as a numpy array.
@@ -964,12 +894,12 @@ sleap.instance
-
-class sleap.instance.PointArray(shape, buf=None, offset=0, strides=None, formats=None, names=None, titles=None, byteorder=None, aligned=False, order='C')[source]
+class sleap.instance.PointArray(shape, buf=None, offset=0, strides=None, formats=None, names=None, titles=None, byteorder=None, aligned=False, order='C')[source]
PointArray is a sub-class of numpy recarray which stores
Point objects as records.
-
-classmethod from_array(a: sleap.instance.PointArray) → sleap.instance.PointArray[source]
+classmethod from_array(a: sleap.instance.PointArray) → sleap.instance.PointArray[source]
Converts a PointArray
(or child) to a new instance.
This will convert an object to the same type as itself, so converting
a PredictedPointArray will result in another PredictedPointArray.
@@ -987,7 +917,7 @@ sleap.instance
-
-classmethod make_default(size: int) → sleap.instance.PointArray[source]
+classmethod make_default(size: int) → sleap.instance.PointArray[source]
Construct a point array where points are all set to default.
The constructed PointArray
will have the specified size
and each value in the array is assigned the default values for
@@ -1006,7 +936,7 @@
sleap.instance
-
-class sleap.instance.PredictedInstance(skeleton: sleap.skeleton.Skeleton, track: sleap.instance.Track = None, from_predicted: Optional[PredictedInstance] = None, points: sleap.instance.PointArray = None, nodes: List = None, frame: Optional[LabeledFrame] = None, score=0.0, tracking_score=0.0)[source]
+class sleap.instance.PredictedInstance(skeleton: sleap.skeleton.Skeleton, track: sleap.instance.Track = None, from_predicted: Optional[PredictedInstance] = None, points: sleap.instance.PointArray = None, nodes: List = None, frame: Optional[LabeledFrame] = None, score=0.0, tracking_score=0.0)[source]
A predicted instance is an output of the inference procedure.
- Parameters
@@ -1018,7 +948,7 @@ sleap.instance
-
-classmethod from_arrays(points: numpy.ndarray, point_confidences: numpy.ndarray, instance_score: float, skeleton: sleap.skeleton.Skeleton, track: Optional[sleap.instance.Track] = None) → sleap.instance.PredictedInstance[source]
+classmethod from_arrays(points: numpy.ndarray, point_confidences: numpy.ndarray, instance_score: float, skeleton: sleap.skeleton.Skeleton, track: Optional[sleap.instance.Track] = None) → sleap.instance.PredictedInstance[source]
Create a predicted instance from data arrays.
- Parameters
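A sketch of building a prediction from raw arrays; the skeleton, coordinates, and scores are illustrative only:

>>> import numpy as np
>>> from sleap.skeleton import Skeleton
>>> from sleap.instance import PredictedInstance
>>> skeleton = Skeleton()
>>> skeleton.add_node("head")
>>> skeleton.add_node("thorax")
>>> points = np.array([[10.0, 20.0], [12.5, 30.0]])  # (nodes, 2)
>>> confs = np.array([0.90, 0.80])                   # per-node scores
>>> pred = PredictedInstance.from_arrays(
...     points, confs, instance_score=0.85, skeleton=skeleton)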
@@ -1043,7 +973,7 @@ sleap.instance
-
-classmethod from_instance(instance: sleap.instance.Instance, score: float) → sleap.instance.PredictedInstance[source]
+classmethod from_instance(instance: sleap.instance.Instance, score: float) → sleap.instance.PredictedInstance[source]
Create a PredictedInstance from an Instance.
The fields are copied in a shallow manner with the exception of points. For each
point in the instance a PredictedPoint
is created with score set to default
@@ -1063,7 +993,7 @@
sleap.instance
-
-classmethod from_numpy(points: numpy.ndarray, point_confidences: numpy.ndarray, instance_score: float, skeleton: sleap.skeleton.Skeleton, track: Optional[sleap.instance.Track] = None) → sleap.instance.PredictedInstance[source]
+classmethod from_numpy(points: numpy.ndarray, point_confidences: numpy.ndarray, instance_score: float, skeleton: sleap.skeleton.Skeleton, track: Optional[sleap.instance.Track] = None) → sleap.instance.PredictedInstance[source]
Create a predicted instance from data arrays.
- Parameters
@@ -1088,7 +1018,7 @@ sleap.instance
-
-classmethod from_pointsarray(points: numpy.ndarray, point_confidences: numpy.ndarray, instance_score: float, skeleton: sleap.skeleton.Skeleton, track: Optional[sleap.instance.Track] = None) → sleap.instance.PredictedInstance[source]
+classmethod from_pointsarray(points: numpy.ndarray, point_confidences: numpy.ndarray, instance_score: float, skeleton: sleap.skeleton.Skeleton, track: Optional[sleap.instance.Track] = None) → sleap.instance.PredictedInstance[source]
Create a predicted instance from data arrays.
- Parameters
@@ -1130,7 +1060,7 @@ sleap.instance
-
-class sleap.instance.PredictedPoint(x: float = nan, y: float = nan, visible: bool = True, complete: bool = False, score: float = 0.0)[source]
+class sleap.instance.PredictedPoint(x: float = nan, y: float = nan, visible: bool = True, complete: bool = False, score: float = 0.0)[source]
A predicted point is an output of the inference procedure.
It has all the properties of a labeled point, plus a score.
@@ -1151,7 +1081,7 @@ sleap.instance
-
-classmethod from_point(point: sleap.instance.Point, score: float = 0.0) → sleap.instance.PredictedPoint[source]
+classmethod from_point(point: sleap.instance.Point, score: float = 0.0) → sleap.instance.PredictedPoint[source]
Create a PredictedPoint from a Point.
- Parameters
@@ -1170,12 +1100,12 @@ sleap.instance
-
-class sleap.instance.PredictedPointArray(shape, buf=None, offset=0, strides=None, formats=None, names=None, titles=None, byteorder=None, aligned=False, order='C')[source]
+class sleap.instance.PredictedPointArray(shape, buf=None, offset=0, strides=None, formats=None, names=None, titles=None, byteorder=None, aligned=False, order='C')[source]
PredictedPointArray is analogous to PointArray except for predicted
points.
-
-classmethod to_array(a: sleap.instance.PredictedPointArray) → sleap.instance.PointArray[source]
+classmethod to_array(a: sleap.instance.PredictedPointArray) → sleap.instance.PointArray[source]
Convert a PredictedPointArray to a normal PointArray.
- Parameters
@@ -1191,7 +1121,7 @@ sleap.instance
-
-class sleap.instance.Track(spawned_on=0, name='')[source]
+class sleap.instance.Track(spawned_on=0, name='')[source]
A track object is associated with a set of animal/object instances
across multiple frames of video. This allows tracking of unique
entities in the video over time and space.
@@ -1205,7 +1135,7 @@ sleap.instance
-
-matches(other: sleap.instance.Track)[source]
+matches(other: sleap.instance.Track)[source]
Check if two tracks match by value.
- Parameters
@@ -1221,7 +1151,7 @@ sleap.instance
-
-sleap.instance.make_instance_cattr() → cattr.converters.Converter[source]
+sleap.instance.make_instance_cattr() → cattr.converters.Converter[source]
Create a cattr converter for Lists of Instances/PredictedInstances.
This is required because cattrs doesn’t automatically detect the class when the
attributes of one class are a subset of another.
diff --git a/develop/api/sleap.io.asyncvideo.html b/develop/api/sleap.io.asyncvideo.html
index 3c10f1676..bc31efda6 100644
--- a/develop/api/sleap.io.asyncvideo.html
+++ b/develop/api/sleap.io.asyncvideo.html
@@ -9,7 +9,7 @@
- sleap.io.asyncvideo — SLEAP (v1.4.1a2)
+ sleap.io.asyncvideo — SLEAP (v1.3.4)
@@ -322,7 +322,7 @@ sleap.io.asyncvideo
Support for loading video frames (by chunk) in background process.
-
-class sleap.io.asyncvideo.AsyncVideo(base_port: int = 9010)[source]
+class sleap.io.asyncvideo.AsyncVideo(base_port: int = 9010)[source]
Supports fetching chunks from video in background process.
-
@@ -338,19 +338,19 @@
sleap.io.asyncvideo
-
-close()[source]
+close()[source]
Close the async video server and communication ports.
-
-classmethod from_video(video: sleap.io.video.Video, frame_idxs: Optional[Iterable[int]] = None, frames_per_chunk: int = 64) → sleap.io.asyncvideo.AsyncVideo[source]
+classmethod from_video(video: sleap.io.video.Video, frame_idxs: Optional[Iterable[int]] = None, frames_per_chunk: int = 64) → sleap.io.asyncvideo.AsyncVideo[source]
Create object and start loading frames in background process.
-
-load_by_chunk(video: sleap.io.video.Video, frame_idxs: Optional[Iterable[int]] = None, frames_per_chunk: int = 64)[source]
+load_by_chunk(video: sleap.io.video.Video, frame_idxs: Optional[Iterable[int]] = None, frames_per_chunk: int = 64)[source]
Sends request for loading video in background process.
- Parameters
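A lifecycle sketch with a hypothetical video file, using only the calls documented in this class:

>>> from sleap.io.video import Video
>>> from sleap.io.asyncvideo import AsyncVideo
>>> video = Video.from_filename("video.mp4")  # hypothetical path
>>> async_video = AsyncVideo.from_video(
...     video, frame_idxs=range(0, 256), frames_per_chunk=64)
>>> # ... consume chunks as they arrive, then shut down:
>>> async_video.close()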
@@ -371,13 +371,13 @@ sleap.io.asyncvideo
-
-class sleap.io.asyncvideo.AsyncVideoServer(base_port: int)[source]
+class sleap.io.asyncvideo.AsyncVideoServer(base_port: int)[source]
Class which loads video frames in background on request.
All interactions with the video server should go through AsyncVideo,
which runs in the local thread.
-
-run()[source]
+run()[source]
Method to be run in the sub-process; can be overridden in a sub-class.
diff --git a/develop/api/sleap.io.convert.html b/develop/api/sleap.io.convert.html
index dbde3f7fd..92599c3ca 100644
--- a/develop/api/sleap.io.convert.html
+++ b/develop/api/sleap.io.convert.html
@@ -9,7 +9,7 @@
- sleap.io.convert — SLEAP (v1.4.1a2)
+ sleap.io.convert — SLEAP (v1.3.4)
@@ -354,7 +354,7 @@ sleap.io.convert
first transpose the datasets so they match the shapes described above.
-
-sleap.io.convert.main(args: Optional[list] = None)[source]
+sleap.io.convert.main(args: Optional[list] = None)[source]
Entrypoint for sleap-convert
CLI for converting .slp to different formats.
- Parameters
diff --git a/develop/api/sleap.io.dataset.html b/develop/api/sleap.io.dataset.html
index 3a22b6894..4f25b2ea6 100644
--- a/develop/api/sleap.io.dataset.html
+++ b/develop/api/sleap.io.dataset.html
@@ -9,7 +9,7 @@
- sleap.io.dataset — SLEAP (v1.4.1a2)
+ sleap.io.dataset — SLEAP (v1.3.4)
@@ -355,7 +355,7 @@ sleap.io.dataset
default extension to use if none is provided in the filename.
-
-class sleap.io.dataset.Labels(labeled_frames: List[sleap.instance.LabeledFrame] = NOTHING, videos: List[sleap.io.video.Video] = NOTHING, skeletons: List[sleap.skeleton.Skeleton] = NOTHING, nodes: List[sleap.skeleton.Node] = NOTHING, tracks: List[sleap.instance.Track] = NOTHING, suggestions: List[sleap.gui.suggestions.SuggestionFrame] = NOTHING, negative_anchors: Dict[sleap.io.video.Video, list] = NOTHING, provenance: Dict[str, Union[str, int, float, bool]] = NOTHING)[source]
+class sleap.io.dataset.Labels(labeled_frames: List[sleap.instance.LabeledFrame] = NOTHING, videos: List[sleap.io.video.Video] = NOTHING, skeletons: List[sleap.skeleton.Skeleton] = NOTHING, nodes: List[sleap.skeleton.Node] = NOTHING, tracks: List[sleap.instance.Track] = NOTHING, suggestions: List[sleap.gui.suggestions.SuggestionFrame] = NOTHING, negative_anchors: Dict[sleap.io.video.Video, list] = NOTHING, provenance: Dict[str, Union[str, int, float, bool]] = NOTHING)[source]
The Labels
class collects the data for a SLEAP project.
This class is front-end for all interactions with loading, writing,
and modifying these labels. The actual storage backend for the data
@@ -448,13 +448,13 @@
sleap.io.dataset
-
-add_instance(frame: sleap.instance.LabeledFrame, instance: sleap.instance.Instance)[source]
+add_instance(frame: sleap.instance.LabeledFrame, instance: sleap.instance.Instance)[source]
Add instance to frame, updating track occupancy.
-
-add_suggestion(video: sleap.io.video.Video, frame_idx: int)[source]
+add_suggestion(video: sleap.io.video.Video, frame_idx: int)[source]
Add a suggested frame to the labels.
- Parameters
@@ -468,13 +468,13 @@ sleap.io.dataset
-
-add_track(video: sleap.io.video.Video, track: sleap.instance.Track)[source]
+add_track(video: sleap.io.video.Video, track: sleap.instance.Track)[source]
Add track to labels, updating occupancy.
-
-add_video(video: sleap.io.video.Video)[source]
+add_video(video: sleap.io.video.Video)[source]
Add a video to the labels if it is not already in it.
Video instances are added automatically when adding labeled frames,
but this function allows for adding videos to the labels before any
@@ -494,25 +494,25 @@
sleap.io.dataset
-
-append(value: sleap.instance.LabeledFrame)[source]
+append(value: sleap.instance.LabeledFrame)[source]
Add labeled frame to list of labeled frames.
-
-append_suggestions(suggestions: List[sleap.gui.suggestions.SuggestionFrame])[source]
+append_suggestions(suggestions: List[sleap.gui.suggestions.SuggestionFrame])[source]
Append the suggested frames.
-
-clear_suggestions()[source]
+clear_suggestions()[source]
Delete all suggestions.
-
-classmethod complex_merge_between(base_labels: sleap.io.dataset.Labels, new_labels: sleap.io.dataset.Labels, unify: bool = True) → tuple[source]
+classmethod complex_merge_between(base_labels: sleap.io.dataset.Labels, new_labels: sleap.io.dataset.Labels, unify: bool = True) → tuple[source]
Merge frames and other data from one dataset into another.
Anything that can be merged cleanly is merged into base_labels.
Frames conflict just in case each labels object has a matching
@@ -556,7 +556,7 @@
sleap.io.dataset
-
-copy() → sleap.io.dataset.Labels[source]
+copy() → sleap.io.dataset.Labels[source]
Return a full deep copy of the labels.
Notes
@@ -568,19 +568,19 @@ sleap.io.dataset
-
-delete_suggestions(video)[source]
+delete_suggestions(video)[source]
Delete suggestions for specified video.
-
-describe()[source]
+describe()[source]
Print basic statistics about the labels dataset.
-
-export(filename: str)[source]
+export(filename: str)[source]
Export labels to analysis HDF5 format.
This expects the labels to contain data for a single video (e.g., predictions).
@@ -608,24 +608,9 @@ sleap.io.dataset
-
--
-export_csv(filename: str)[source]
-Export labels to CSV format.
-
-- Parameters
-filename – Output path for the CSV format file.
-
-
-
-
Notes
-
This will write the contents of the labels out as a CSV file.
-
-
-
-
-export_nwb(filename: str, overwrite: bool = False, session_description: str = 'Processed SLEAP pose data', identifier: Optional[str] = None, session_start_time: Optional[datetime.datetime] = None)[source]
+export_nwb(filename: str, overwrite: bool = False, session_description: str = 'Processed SLEAP pose data', identifier: Optional[str] = None, session_start_time: Optional[datetime.datetime] = None)[source]
Export all PredictedInstance
objects in a Labels
object to an NWB file.
Use Labels.numpy
to create a pynwb.NWBFile
with a separate
@@ -685,7 +670,7 @@
sleap.io.dataset
-
-extend_from(new_frames: Union[sleap.io.dataset.Labels, List[sleap.instance.LabeledFrame]], unify: bool = False)[source]
+extend_from(new_frames: Union[sleap.io.dataset.Labels, List[sleap.instance.LabeledFrame]], unify: bool = False)[source]
Merge data from another Labels
object or LabeledFrame
list.
- Arg:
new_frames: the object from which to copy data
@@ -704,7 +689,7 @@
sleap.io.dataset
+extract(inds, copy: bool = False) → sleap.io.dataset.Labels[source]
Extract labeled frames from indices and return a new Labels
object.
- Parameters
inds – Any valid indexing keys, e.g., a range, slice, list of label indices,
@@ -737,7 +722,7 @@ sleap.io.dataset
-
-find(video: sleap.io.video.Video, frame_idx: Optional[Union[int, Iterable[int]]] = None, return_new: bool = False) → List[sleap.instance.LabeledFrame][source]
+find(video: sleap.io.video.Video, frame_idx: Optional[Union[int, Iterable[int]]] = None, return_new: bool = False) → List[sleap.instance.LabeledFrame][source]
Search for labeled frames given video and/or frame index.
- Parameters
@@ -762,7 +747,7 @@ sleap.io.dataset
-
-find_first(video: sleap.io.video.Video, frame_idx: Optional[int] = None, use_cache: bool = False) → Optional[sleap.instance.LabeledFrame][source]
+find_first(video: sleap.io.video.Video, frame_idx: Optional[int] = None, use_cache: bool = False) → Optional[sleap.instance.LabeledFrame][source]
Find the first occurrence of a matching labeled frame.
Matches on frames for the given video and/or frame index.
@@ -786,7 +771,7 @@ sleap.io.dataset
-
-find_last(video: sleap.io.video.Video, frame_idx: Optional[int] = None) → Optional[sleap.instance.LabeledFrame][source]
+find_last(video: sleap.io.video.Video, frame_idx: Optional[int] = None) → Optional[sleap.instance.LabeledFrame][source]
Find the last occurrence of a matching labeled frame.
Matches on frames for the given video and/or frame index.
@@ -807,13 +792,13 @@ sleap.io.dataset
-
-find_suggestion(video, frame_idx)[source]
+find_suggestion(video, frame_idx)[source]
Find SuggestionFrame by video and frame index.
-
-find_track_occupancy(video: sleap.io.video.Video, track: Union[sleap.instance.Track, int], frame_range=None) → List[sleap.instance.Instance][source]
+find_track_occupancy(video: sleap.io.video.Video, track: Union[sleap.instance.Track, int], frame_range=None) → List[sleap.instance.Instance][source]
Get instances for a given video, track, and range of frames.
- Parameters
@@ -832,7 +817,7 @@ sleap.io.dataset
-
-static finish_complex_merge(base_labels: sleap.io.dataset.Labels, resolved_frames: List[sleap.instance.LabeledFrame])[source]
+static finish_complex_merge(base_labels: sleap.io.dataset.Labels, resolved_frames: List[sleap.instance.LabeledFrame])[source]
Finish conflicted merge from complex_merge_between.
- Parameters
@@ -846,7 +831,7 @@ sleap.io.dataset
-
-frames(video: sleap.io.video.Video, from_frame_idx: int = - 1, reverse=False)[source]
+frames(video: sleap.io.video.Video, from_frame_idx: int = - 1, reverse=False)[source]
Return an iterator over all labeled frames in a video.
- Parameters
@@ -865,7 +850,7 @@ sleap.io.dataset
-
-get(key: Union[int, slice, numpy.integer, numpy.ndarray, list, range, sleap.io.video.Video, Tuple[sleap.io.video.Video, Union[numpy.integer, numpy.ndarray, int, list, range]]], *secondary_key: Union[int, slice, numpy.integer, numpy.ndarray, list, range], use_cache: bool = False, raise_errors: bool = False) → Union[sleap.instance.LabeledFrame, List[sleap.instance.LabeledFrame]][source]
+get(key: Union[int, slice, numpy.integer, numpy.ndarray, list, range, sleap.io.video.Video, Tuple[sleap.io.video.Video, Union[numpy.integer, numpy.ndarray, int, list, range]]], *secondary_key: Union[int, slice, numpy.integer, numpy.ndarray, list, range], use_cache: bool = False, raise_errors: bool = False) → Union[sleap.instance.LabeledFrame, List[sleap.instance.LabeledFrame]][source]
Return labeled frames matching key or return None
if not found.
This is a safe version of labels[...]
that will not raise an exception if the
item is not found.
@@ -898,31 +883,31 @@ sleap.io.dataset
-
-get_next_suggestion(video, frame_idx, seek_direction=1)[source]
+get_next_suggestion(video, frame_idx, seek_direction=1)[source]
Return a (video, frame_idx) tuple seeking from given frame.
-
-get_suggestions() → List[sleap.gui.suggestions.SuggestionFrame][source]
+get_suggestions() → List[sleap.gui.suggestions.SuggestionFrame][source]
Return all suggestions as a list of SuggestionFrame items.
-
-get_track_count(video: sleap.io.video.Video) → int[source]
+get_track_count(video: sleap.io.video.Video) → int[source]
Return the number of occupied tracks for a given video.
-
-get_track_occupancy(video: sleap.io.video.Video) → List[source]
+get_track_occupancy(video: sleap.io.video.Video) → List[source]
Return track occupancy list for given video.
-
-get_unlabeled_suggestion_inds() → List[int][source]
+get_unlabeled_suggestion_inds() → List[int][source]
Find labeled frames for unlabeled suggestions and return their indices.
This is useful for generating a list of example indices for inference on
unlabeled suggestions.
@@ -940,7 +925,7 @@ sleap.io.dataset
-
-get_video_suggestions(video: sleap.io.video.Video, user_labeled: bool = True) → List[int][source]
+get_video_suggestions(video: sleap.io.video.Video, user_labeled: bool = True) → List[int][source]
Return a list of suggested frame indices.
- Parameters
@@ -959,7 +944,7 @@ sleap.io.dataset
-
-has_frame(lf: Optional[sleap.instance.LabeledFrame] = None, video: Optional[sleap.io.video.Video] = None, frame_idx: Optional[int] = None, use_cache: bool = True) → bool[source]
+has_frame(lf: Optional[sleap.instance.LabeledFrame] = None, video: Optional[sleap.io.video.Video] = None, frame_idx: Optional[int] = None, use_cache: bool = True) → bool[source]
Check if the labels contain a specified frame.
- Parameters
@@ -996,25 +981,25 @@ sleap.io.dataset
-
-index(value) → int[source]
+index(value) → int[source]
Return index of labeled frame in list of labeled frames.
-
-insert(index, value: sleap.instance.LabeledFrame)[source]
+insert(index, value: sleap.instance.LabeledFrame)[source]
Insert labeled frame at given index.
-
-instance_count(video: sleap.io.video.Video, frame_idx: int) → int[source]
+instance_count(video: sleap.io.video.Video, frame_idx: int) → int[source]
Return number of instances matching video/frame index.
-
-instances(video: Optional[sleap.io.video.Video] = None, skeleton: Optional[sleap.skeleton.Skeleton] = None)[source]
+instances(video: Optional[sleap.io.video.Video] = None, skeleton: Optional[sleap.skeleton.Skeleton] = None)[source]
Iterate over instances in the labels, optionally with filters.
- Parameters
@@ -1043,13 +1028,13 @@ sleap.io.dataset
-
-classmethod load_file(filename: str, video_search: Optional[Union[Callable, List[str]]] = None, *args, **kwargs)[source]
+classmethod load_file(filename: str, video_search: Optional[Union[Callable, List[str]]] = None, *args, **kwargs)[source]
Load file, detecting format from filename.
-
-classmethod make_video_callback(search_paths: Optional[List] = None, use_gui: bool = False, context: Optional[Dict[str, bool]] = None) → Callable[source]
+classmethod make_video_callback(search_paths: Optional[List] = None, use_gui: bool = False, context: Optional[Dict[str, bool]] = None) → Callable[source]
Create a callback for finding missing videos.
The callback can be used while loading a saved project and
allows the user to find videos which have been moved (or have
@@ -1072,13 +1057,13 @@
sleap.io.dataset
-
-static merge_container_dicts(dict_a: Dict, dict_b: Dict) → Dict[source]
+static merge_container_dicts(dict_a: Dict, dict_b: Dict) → Dict[source]
Merge data from dict_b into dict_a.
-
-merge_matching_frames(video: Optional[sleap.io.video.Video] = None)[source]
+merge_matching_frames(video: Optional[sleap.io.video.Video] = None)[source]
Merge LabeledFrame
objects that are for the same video frame.
- Parameters
@@ -1089,7 +1074,7 @@ sleap.io.dataset
-
-merge_nodes(base_node: str, merge_node: str)[source]
+merge_nodes(base_node: str, merge_node: str)[source]
Merge two nodes and update data accordingly.
- Parameters
@@ -1113,7 +1098,7 @@ sleap.io.dataset
-
-numpy(video: Optional[Union[sleap.io.video.Video, int]] = None, all_frames: bool = True, untracked: bool = False, return_confidence: bool = False) → numpy.ndarray[source]
+numpy(video: Optional[Union[sleap.io.video.Video, int]] = None, all_frames: bool = True, untracked: bool = False, return_confidence: bool = False) → numpy.ndarray[source]
Construct a numpy array from instance points.
- Parameters
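A sketch of pulling tracked coordinates into an array (hypothetical path; the shape comment is illustrative):

>>> from sleap.io.dataset import Labels
>>> labels = Labels.load_file("predictions.slp")  # hypothetical path
>>> arr = labels.numpy(video=labels.videos[0], return_confidence=False)
>>> arr.shape  # (frames, tracks, nodes, 2)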
@@ -1155,25 +1140,25 @@ sleap.io.dataset
-
-remove(value: sleap.instance.LabeledFrame)[source]
+remove(value: sleap.instance.LabeledFrame)[source]
Remove given labeled frame.
-
-remove_all_tracks()[source]
+remove_all_tracks()[source]
Remove all tracks from labels, updating (but not removing) instances.
-
-remove_empty_frames()[source]
+remove_empty_frames()[source]
Remove frames with no instances.
-
-remove_empty_instances(keep_empty_frames: bool = True)[source]
+remove_empty_instances(keep_empty_frames: bool = True)[source]
Remove instances with no visible points.
- Parameters
@@ -1190,7 +1175,7 @@ sleap.io.dataset
-
-remove_frame(lf: sleap.instance.LabeledFrame, update_cache: bool = True)[source]
+remove_frame(lf: sleap.instance.LabeledFrame, update_cache: bool = True)[source]
Remove a given labeled frame.
- Parameters
@@ -1205,7 +1190,7 @@ sleap.io.dataset
-
-remove_frames(lfs: List[sleap.instance.LabeledFrame])[source]
+remove_frames(lfs: List[sleap.instance.LabeledFrame])[source]
Remove a list of frames from the labels.
- Parameters
@@ -1216,13 +1201,13 @@ sleap.io.dataset
-
-remove_instance(frame: sleap.instance.LabeledFrame, instance: sleap.instance.Instance, in_transaction: bool = False)[source]
+remove_instance(frame: sleap.instance.LabeledFrame, instance: sleap.instance.Instance, in_transaction: bool = False)[source]
Remove instance from frame, updating track occupancy.
-
-remove_predictions(new_labels: Optional[sleap.io.dataset.Labels] = None)[source]
+remove_predictions(new_labels: Optional[sleap.io.dataset.Labels] = None)[source]
Clear predicted instances from the labels.
Useful prior to merging operations to prevent overlapping instances from new
predictions.
@@ -1245,7 +1230,7 @@ sleap.io.dataset
-
-remove_suggestion(video: sleap.io.video.Video, frame_idx: int)[source]
+remove_suggestion(video: sleap.io.video.Video, frame_idx: int)[source]
Remove a suggestion from the list by video and frame index.
- Parameters
@@ -1259,13 +1244,13 @@ sleap.io.dataset
-
-remove_track(track: sleap.instance.Track)[source]
+remove_track(track: sleap.instance.Track)[source]
Remove a track from the labels, updating (but not removing) instances.
-
-remove_untracked_instances(remove_empty_frames: bool = True)[source]
+remove_untracked_instances(remove_empty_frames: bool = True)[source]
Remove instances that do not have a track assignment.
- Parameters
@@ -1277,13 +1262,13 @@ sleap.io.dataset
-
-remove_unused_tracks()[source]
+remove_unused_tracks()[source]
Remove tracks that are not used by any instances.
-
-remove_user_instances(new_labels: Optional[sleap.io.dataset.Labels] = None)[source]
+remove_user_instances(new_labels: Optional[sleap.io.dataset.Labels] = None)[source]
Clear user instances from the labels.
Useful prior to merging operations to prevent overlapping instances from new
labels.
@@ -1306,7 +1291,7 @@ sleap.io.dataset
-
-remove_video(video: sleap.io.video.Video)[source]
+remove_video(video: sleap.io.video.Video)[source]
Remove a video from the labels and all associated labeled frames.
- Parameters
@@ -1317,7 +1302,7 @@ sleap.io.dataset
-
-save(filename: str, with_images: bool = False, embed_all_labeled: bool = False, embed_suggested: bool = False)[source]
+save(filename: str, with_images: bool = False, embed_all_labeled: bool = False, embed_suggested: bool = False)[source]
Save the labels to a file.
- Parameters
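Typical save calls, sketched assuming labels is an in-memory Labels object and the output paths are hypothetical:

>>> labels.save("project.slp")                        # metadata only
>>> labels.save("project.pkg.slp", with_images=True)  # embed labeled frame images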
@@ -1345,7 +1330,7 @@ sleap.io.dataset
-
-classmethod save_file(labels: sleap.io.dataset.Labels, filename: str, default_suffix: str = '', *args, **kwargs)[source]
+classmethod save_file(labels: sleap.io.dataset.Labels, filename: str, default_suffix: str = '', *args, **kwargs)[source]
Save file, detecting format from filename.
- Parameters
@@ -1366,7 +1351,7 @@ sleap.io.dataset
-
-save_frame_data_hdf5(output_path: str, format: str = 'png', user_labeled: bool = True, all_labeled: bool = False, suggested: bool = False, progress_callback: Optional[Callable[[int, int], None]] = None) → List[sleap.io.video.HDF5Video][source]
+save_frame_data_hdf5(output_path: str, format: str = 'png', user_labeled: bool = True, all_labeled: bool = False, suggested: bool = False, progress_callback: Optional[Callable[[int, int], None]] = None) → List[sleap.io.video.HDF5Video][source]
Write images for labeled frames from all videos to hdf5 file.
Note that this will make an HDF5 video, not an HDF5 labels dataset.
@@ -1398,7 +1383,7 @@ sleap.io.dataset
-
-save_frame_data_imgstore(output_dir: str = './', format: str = 'png', all_labeled: bool = False, suggested: bool = False, progress_callback: Optional[Callable[[int, int], None]] = None) → List[sleap.io.video.ImgStoreVideo][source]
+save_frame_data_imgstore(output_dir: str = './', format: str = 'png', all_labeled: bool = False, suggested: bool = False, progress_callback: Optional[Callable[[int, int], None]] = None) → List[sleap.io.video.ImgStoreVideo][source]
Write images for labeled frames from all videos to imgstore datasets.
This only writes frames that have been labeled. Videos without
any labeled frames will be included as empty imgstores.
@@ -1432,7 +1417,7 @@ sleap.io.dataset
-
-set_suggestions(suggestions: List[sleap.gui.suggestions.SuggestionFrame])[source]
+set_suggestions(suggestions: List[sleap.gui.suggestions.SuggestionFrame])[source]
Set the suggested frames.
@@ -1444,7 +1429,7 @@ sleap.io.dataset
-
-split(n: Union[float, int], copy: bool = True) → Tuple[sleap.io.dataset.Labels, sleap.io.dataset.Labels][source]
+split(n: Union[float, int], copy: bool = True) → Tuple[sleap.io.dataset.Labels, sleap.io.dataset.Labels][source]
Split labels randomly.
- Parameters
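A sketch of a random split, assuming labels is an in-memory Labels object; n may be a fraction of the labeled frames or, presumably, an absolute count:

>>> train, val = labels.split(n=0.8)  # ~80% / ~20% of labeled frames
>>> first, rest = labels.split(n=10)  # first split gets 10 frames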
@@ -1475,7 +1460,7 @@ sleap.io.dataset
-
-to_dict(skip_labels: bool = False) → Dict[str, Any][source]
+to_dict(skip_labels: bool = False) → Dict[str, Any][source]
Serialize all labels to dicts.
Serializes the labels in the underlying list of LabeledFrames to a dict
structure. This function returns a nested dict structure composed entirely of
@@ -1506,7 +1491,7 @@
sleap.io.dataset
-
-to_json()[source]
+to_json()[source]
Serialize all labels in the underlying list of LabeledFrame(s) to JSON.
- Returns
@@ -1517,7 +1502,7 @@ sleap.io.dataset
-
-to_pipeline(batch_size: Optional[int] = None, prefetch: bool = True, frame_indices: Optional[List[int]] = None, user_labeled_only: bool = True) → sleap.pipelines.Pipeline[source]
+to_pipeline(batch_size: Optional[int] = None, prefetch: bool = True, frame_indices: Optional[List[int]] = None, user_labeled_only: bool = True) → sleap.pipelines.Pipeline[source]
Create a pipeline for reading the dataset.
- Parameters
@@ -1542,13 +1527,13 @@ sleap.io.dataset
-
-track_set_instance(frame: sleap.instance.LabeledFrame, instance: sleap.instance.Instance, new_track: sleap.instance.Track)[source]
+track_set_instance(frame: sleap.instance.LabeledFrame, instance: sleap.instance.Instance, new_track: sleap.instance.Track)[source]
Set track on given instance, updating occupancy.
-
-track_swap(video: sleap.io.video.Video, new_track: sleap.instance.Track, old_track: Optional[sleap.instance.Track], frame_range: tuple)[source]
+track_swap(video: sleap.io.video.Video, new_track: sleap.instance.Track, old_track: Optional[sleap.instance.Track], frame_range: tuple)[source]
Swap track assignment for instances in two tracks.
If you need to change the track to or from None, you’ll need
to use track_set_instance()
for each specific
@@ -1600,7 +1585,7 @@
sleap.io.dataset
-
-with_user_labels_only(user_instances_only: bool = True, with_track_only: bool = False, copy: bool = True) → sleap.io.dataset.Labels[source]
+with_user_labels_only(user_instances_only: bool = True, with_track_only: bool = False, copy: bool = True) → sleap.io.dataset.Labels[source]
Return a new Labels
containing only user labels.
This is useful as a preprocessing step to train on only user-labeled data.
@@ -1626,89 +1611,89 @@ sleap.io.dataset
-
-class sleap.io.dataset.LabelsDataCache(labels: Labels)[source]
+class sleap.io.dataset.LabelsDataCache(labels: Labels)[source]
Class for maintaining a cache of data in a labels dataset.
-
-add_instance(frame: sleap.instance.LabeledFrame, instance: sleap.instance.Instance)[source]
+add_instance(frame: sleap.instance.LabeledFrame, instance: sleap.instance.Instance)[source]
Add an instance to the labels.
-
-add_track(video: sleap.io.video.Video, track: sleap.instance.Track)[source]
+add_track(video: sleap.io.video.Video, track: sleap.instance.Track)[source]
Add a track to the labels.
-
-find_fancy_frame_idxs(video, from_frame_idx, reverse)[source]
+find_fancy_frame_idxs(video, from_frame_idx, reverse)[source]
Return a list of frame idxs, with optional start position/order.
-
-find_frames(video: sleap.io.video.Video, frame_idx: Optional[Union[int, Iterable[int]]] = None) → Optional[List[sleap.instance.LabeledFrame]][source]
+find_frames(video: sleap.io.video.Video, frame_idx: Optional[Union[int, Iterable[int]]] = None) → Optional[List[sleap.instance.LabeledFrame]][source]
Return list of LabeledFrames matching video/frame_idx, or None.
-
-get_filtered_frame_idxs(video: Optional[sleap.io.video.Video] = None, filter: str = '') → Set[Tuple[int, int]][source]
+get_filtered_frame_idxs(video: Optional[sleap.io.video.Video] = None, filter: str = '') → Set[Tuple[int, int]][source]
Return list of (video_idx, frame_idx) tuples matching video/filter.
-
-get_frame_count(video: Optional[sleap.io.video.Video] = None, filter: str = '') → int[source]
+get_frame_count(video: Optional[sleap.io.video.Video] = None, filter: str = '') → int[source]
Return (possibly cached) count of frames matching video/filter.
-
-get_track_occupancy(video: sleap.io.video.Video, track: sleap.instance.Track) → sleap.rangelist.RangeList[source]
+get_track_occupancy(video: sleap.io.video.Video, track: sleap.instance.Track) → sleap.rangelist.RangeList[source]
Access track occupancy cache that adds video/track as needed.
-
-get_video_track_occupancy(video: sleap.io.video.Video) → Dict[sleap.instance.Track, sleap.rangelist.RangeList][source]
+get_video_track_occupancy(video: sleap.io.video.Video) → Dict[sleap.instance.Track, sleap.rangelist.RangeList][source]
Return track occupancy information for specified video.
-
-remove_frame(frame: sleap.instance.LabeledFrame)[source]
+remove_frame(frame: sleap.instance.LabeledFrame)[source]
Remove frame and update cache as needed.
-
-remove_instance(frame: sleap.instance.LabeledFrame, instance: sleap.instance.Instance)[source]
+remove_instance(frame: sleap.instance.LabeledFrame, instance: sleap.instance.Instance)[source]
Remove an instance and update the cache as needed.
-
-remove_video(video: sleap.io.video.Video)[source]
+remove_video(video: sleap.io.video.Video)[source]
Remove video and update cache as needed.
-
-track_swap(video: sleap.io.video.Video, new_track: sleap.instance.Track, old_track: Optional[sleap.instance.Track], frame_range: tuple)[source]
+track_swap(video: sleap.io.video.Video, new_track: sleap.instance.Track, old_track: Optional[sleap.instance.Track], frame_range: tuple)[source]
Swap tracks and update cache as needed.
-
-update(new_frame: Optional[sleap.instance.LabeledFrame] = None)[source]
+update(new_frame: Optional[sleap.instance.LabeledFrame] = None)[source]
Build (or rebuild) various caches.
-
-update_counts_for_frame(frame: sleap.instance.LabeledFrame)[source]
+update_counts_for_frame(frame: sleap.instance.LabeledFrame)[source]
Update the cached count. Should be called after the frame is modified.
@@ -1716,7 +1701,7 @@ sleap.io.dataset
-
-sleap.io.dataset.find_path_using_paths(missing_path: str, search_paths: List[str]) → str[source]
+sleap.io.dataset.find_path_using_paths(missing_path: str, search_paths: List[str]) → str[source]
Find a path to a missing file given a set of paths to search in.
- Parameters
@@ -1733,7 +1718,7 @@ sleap.io.dataset
-
-sleap.io.dataset.load_file(filename: str, detect_videos: bool = True, search_paths: Optional[Union[List[str], str]] = None, match_to: Optional[sleap.io.dataset.Labels] = None) → sleap.io.dataset.Labels[source]
+sleap.io.dataset.load_file(filename: str, detect_videos: bool = True, search_paths: Optional[Union[List[str], str]] = None, match_to: Optional[sleap.io.dataset.Labels] = None) → sleap.io.dataset.Labels[source]
Load a SLEAP labels file.
SLEAP labels files (.slp) contain all the metadata for a labeling project or the
predicted labels from a video. This includes the skeleton, videos, labeled frames,
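A loading sketch with hypothetical paths; search_paths helps resolve videos that have moved:

>>> from sleap.io.dataset import load_file
>>> labels = load_file("labels.slp", search_paths=["/data/videos"])
>>> labels.describe()  # print basic statistics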
diff --git a/develop/api/sleap.io.format.adaptor.html b/develop/api/sleap.io.format.adaptor.html
index aeb787e8e..8f58998c1 100644
--- a/develop/api/sleap.io.format.adaptor.html
+++ b/develop/api/sleap.io.format.adaptor.html
@@ -9,7 +9,7 @@
-
sleap.io.format.adaptor — SLEAP (v1.4.1a2)
+ sleap.io.format.adaptor — SLEAP (v1.3.4)
@@ -322,7 +322,7 @@ sleap.io.format.adaptor
File format adaptor base class.
-
-class sleap.io.format.adaptor.Adaptor[source]
+class sleap.io.format.adaptor.Adaptor[source]
File format adaptor base class.
An adaptor handles reading and/or writing a specific file format. To add
support for a new file format, you’ll create a new class which inherits from
@@ -335,13 +335,13 @@
sleap.io.format.adaptor
-
-can_read_file(file: sleap.io.format.filehandle.FileHandle) → bool[source]
+can_read_file(file: sleap.io.format.filehandle.FileHandle) → bool[source]
Returns whether this adaptor can read this file.
-
-can_write_filename(filename: str) → bool[source]
+can_write_filename(filename: str) → bool[source]
Returns whether this adaptor can write the format of this filename.
@@ -353,19 +353,19 @@ sleap.io.format.adaptor
-
-does_match_ext(filename: str) → bool[source]
+does_match_ext(filename: str) → bool[source]
Returns whether the filename's extension matches this adaptor.
-
-does_read() → bool[source]
+does_read() → bool[source]
Returns whether this adaptor supports reading.
-
-does_write() → bool[source]
+does_write() → bool[source]
Returns whether this adaptor supports writing.
@@ -391,13 +391,13 @@ sleap.io.format.adaptor
-
-read(file: sleap.io.format.filehandle.FileHandle) → object[source]
+read(file: sleap.io.format.filehandle.FileHandle) → object[source]
Reads the file and returns the appropriate deserialized object.
-
-write(filename: str, source_object: object)[source]
+write(filename: str, source_object: object)[source]
Writes the object to a file.
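To make the contract concrete, here is a minimal read-only adaptor sketch; the class name, extension, and behavior are hypothetical, and it assumes the Adaptor properties (name, default_ext, all_exts, handles) and the FileHandle.filename attribute defined elsewhere in this module:

from sleap.io.format.adaptor import Adaptor, SleapObjectType
from sleap.io.format.filehandle import FileHandle

class MyFormatAdaptor(Adaptor):
    # Hypothetical adaptor for a ".myfmt" labels file.
    @property
    def handles(self):
        return SleapObjectType.labels
    @property
    def default_ext(self):
        return "myfmt"
    @property
    def all_exts(self):
        return ["myfmt"]
    @property
    def name(self):
        return "My format"
    def can_read_file(self, file: FileHandle) -> bool:
        return self.does_match_ext(file.filename)
    def can_write_filename(self, filename: str) -> bool:
        return False
    def does_read(self) -> bool:
        return True
    def does_write(self) -> bool:
        return False
    def read(self, file: FileHandle) -> object:
        raise NotImplementedError("parse file.filename here")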
@@ -405,7 +405,7 @@ sleap.io.format.adaptor
-
-class sleap.io.format.adaptor.SleapObjectType(value)[source]
+class sleap.io.format.adaptor.SleapObjectType(value)[source]
Types of files that an adaptor could read/write.
diff --git a/develop/api/sleap.io.format.alphatracker.html b/develop/api/sleap.io.format.alphatracker.html
index b96b39528..623ef5bdc 100644
--- a/develop/api/sleap.io.format.alphatracker.html
+++ b/develop/api/sleap.io.format.alphatracker.html
@@ -9,7 +9,7 @@
- sleap.io.format.alphatracker — SLEAP (v1.4.1a2)
+ sleap.io.format.alphatracker — SLEAP (v1.3.4)
@@ -329,7 +329,7 @@ sleap.io.format.alphatracker
create a video object which wraps the individual frame images.
-
-class sleap.io.format.alphatracker.AlphaTrackerAdaptor[source]
+class sleap.io.format.alphatracker.AlphaTrackerAdaptor[source]
Reads AlphaTracker JSON file with annotations for both single and multiple animals.
-
@@ -339,7 +339,7 @@
sleap.io.format.alphatracker
-
-can_read_file(file: sleap.io.format.filehandle.FileHandle) → bool[source]
+can_read_file(file: sleap.io.format.filehandle.FileHandle) → bool[source]
Returns whether this adaptor can read this file.
Checks the format of the file at three different levels:
- First, the upper-level format of file.json must be a list of dictionaries.
@@ -364,7 +364,7 @@
sleap.io.format.alphatracker
-
-can_write_filename(filename: str) → bool[source]
+can_write_filename(filename: str) → bool[source]
Returns whether this adaptor can write the format of this filename.
@@ -376,19 +376,19 @@ sleap.io.format.alphatracker
-
-does_match_ext(filename: str) → bool[source]
+does_match_ext(filename: str) → bool[source]
Returns whether the filename's extension matches this adaptor.
-
-does_read() → bool[source]
+does_read() → bool[source]
Returns whether this adaptor supports reading.
-
-does_write() → bool[source]
+does_write() → bool[source]
Returns whether this adaptor supports writing.
@@ -400,7 +400,7 @@ sleap.io.format.alphatracker
-
-get_alpha_tracker_frame_dict(filename: str = '')[source]
+get_alpha_tracker_frame_dict(filename: str = '')[source]
Returns a deep copy of the dictionary used for frames.
- Parameters
@@ -418,7 +418,7 @@ sleap.io.format.alphatracker
-
-