diff --git a/.github/workflows/poetry-test.yml b/.github/workflows/poetry-test.yml
new file mode 100644
index 000000000..95e2eb7fe
--- /dev/null
+++ b/.github/workflows/poetry-test.yml
@@ -0,0 +1,37 @@
+name: 🔧 Poetry Check and Installation Test Workflow
+on:
+  push:
+    paths:
+      - 'poetry.lock'
+      - 'pyproject.toml'
+  pull_request:
+    paths:
+      - 'poetry.lock'
+      - 'pyproject.toml'
+  workflow_dispatch:
+
+jobs:
+  poetry-tests:
+    strategy:
+      fail-fast: false
+      matrix:
+        os: [ubuntu-latest, windows-latest, macos-latest]
+        python-version: ["3.8", "3.9", "3.10", "3.11", "3.12", "3.13"]
+    runs-on: ${{ matrix.os }}
+    steps:
+      - name: 📥 Checkout the repository
+        uses: actions/checkout@v4
+
+      - name: 🐍 Set up Python ${{ matrix.python-version }}
+        uses: actions/setup-python@v5
+        with:
+          python-version: ${{ matrix.python-version }}
+
+      - name: 📦 Install the base dependencies
+        run: python -m pip install --upgrade poetry
+
+      - name: 🔍 Check the correctness of the project config
+        run: poetry check
+
+      - name: 🚀 Test package installation
+        run: poetry install
diff --git a/.github/workflows/publish-test.yml b/.github/workflows/publish-test.yml
index 567a6aba6..2cd05bf13 100644
--- a/.github/workflows/publish-test.yml
+++ b/.github/workflows/publish-test.yml
@@ -2,9 +2,9 @@ name: Publish Supervision Pre-Releases to PyPI and TestPyPI
 on:
   push:
     tags:
-      - '[0-9]+.[0-9]+[0-9]+.[0-9]+a[0-9]'
-      - '[0-9]+.[0-9]+[0-9]+.[0-9]+b[0-9]'
-      - '[0-9]+.[0-9]+[0-9]+.[0-9]+rc[0-9]'
+      - "[0-9]+.[0-9]+[0-9]+.[0-9]+a[0-9]"
+      - "[0-9]+.[0-9]+[0-9]+.[0-9]+b[0-9]"
+      - "[0-9]+.[0-9]+[0-9]+.[0-9]+rc[0-9]"
 
   workflow_dispatch:
 
@@ -12,6 +12,7 @@ jobs:
   build-and-publish-pre-release-pypi:
     name: Build and publish to PyPI
     runs-on: ubuntu-latest
+    environment: test
    permissions:
      id-token: write
    strategy:
diff --git a/.github/workflows/publish.yml b/.github/workflows/publish.yml
index f9a14d225..17c2629a8 100644
--- a/.github/workflows/publish.yml
+++ b/.github/workflows/publish.yml
@@ -2,13 +2,14 @@ name: Publish Supervision Releases to PyPI and TestPyPI
 on:
   push:
     tags:
-      - '[0-9]+.[0-9]+[0-9]+.[0-9]'
+      - "[0-9]+.[0-9]+[0-9]+.[0-9]"
 
 workflow_dispatch:
 
 jobs:
   build-and-publish-pre-release:
     runs-on: ubuntu-latest
+    environment: release
    permissions:
      id-token: write
    strategy:
@@ -24,7 +25,7 @@ jobs:
    with:
      python-version: ${{ matrix.python-version }}
 
-      - name: 🏗️ Build source and wheel distributions 
+      - name: 🏗️ Build source and wheel distributions
      run: |
        python -m pip install --upgrade build twine
        python -m build
diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml
index 1ce4a267f..b33f8e537 100644
--- a/.github/workflows/test.yml
+++ b/.github/workflows/test.yml
@@ -9,7 +9,7 @@ jobs:
     runs-on: ubuntu-latest
     strategy:
       matrix:
-        python-version: ["3.8", "3.9", "3.10", "3.11", "3.12"]
+        python-version: ["3.8", "3.9", "3.10", "3.11", "3.12", "3.13"]
     steps:
       - name: 🛎️ Checkout
         uses: actions/checkout@v4
diff --git a/.github/workflows/welcome.yml b/.github/workflows/welcome.yml
deleted file mode 100644
index 4cb70b93f..000000000
--- a/.github/workflows/welcome.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-name: Welcome WorkFlow
-
-on:
-  issues:
-    types: [opened]
-  pull_request_target:
-    types: [opened]
-
-jobs:
-  build:
-    name: 👋 Welcome
-    runs-on: ubuntu-latest
-    steps:
-      - uses: actions/first-interaction@v1.3.0
-        with:
-          repo-token: ${{ secrets.GITHUB_TOKEN }}
-          issue-message: "Hello there, thank you for opening an Issue ! 🙏🏻 The team was notified and they will get back to you asap."
-          pr-message: "Hello there, thank you for opening an PR ! 
🙏🏻 The team was notified and they will get back to you asap." diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d8e34a975..6767146f4 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -7,7 +7,7 @@ ci: repos: - repo: https://github.com/pre-commit/pre-commit-hooks - rev: v4.6.0 + rev: v5.0.0 hooks: - id: trailing-whitespace exclude: test/.*\.py @@ -32,7 +32,7 @@ repos: additional_dependencies: ["bandit[toml]"] - repo: https://github.com/astral-sh/ruff-pre-commit - rev: v0.6.8 + rev: v0.7.3 hooks: - id: ruff args: [--fix, --exit-non-zero-on-fix] diff --git a/README.md b/README.md index dd340ae53..2aa3cef0f 100644 --- a/README.md +++ b/README.md @@ -21,7 +21,7 @@ [![python-version](https://img.shields.io/pypi/pyversions/supervision)](https://badge.fury.io/py/supervision) [![colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/roboflow/supervision/blob/main/demo.ipynb) [![gradio](https://img.shields.io/badge/%F0%9F%A4%97%20Hugging%20Face-Spaces-blue)](https://huggingface.co/spaces/Roboflow/Annotators) -[![discord](https://img.shields.io/discord/1159501506232451173)](https://discord.gg/GbfgXGJ8Bk) +[![discord](https://img.shields.io/discord/1159501506232451173?logo=discord&label=discord&labelColor=fff&color=5865f2&link=https%3A%2F%2Fdiscord.gg%2FGbfgXGJ8Bk)](https://discord.gg/GbfgXGJ8Bk) [![built-with-material-for-mkdocs](https://img.shields.io/badge/Material_for_MkDocs-526CFE?logo=MaterialForMkDocs&logoColor=white)](https://squidfunk.github.io/mkdocs-material/)
@@ -34,8 +34,6 @@ **We write your reusable computer vision tools.** Whether you need to load your dataset from your hard drive, draw detections on an image or video, or count how many detections are in a zone. You can count on us! 🤝 -[![supervision-hackfest](https://media.roboflow.com/supervision/supervision-hacktoberfest-banner-2024.png)](https://github.com/roboflow/supervision/issues?q=is%3Aissue+is%3Aopen+label%3Ahacktoberfest) - ## 💻 install Pip install the supervision package in a @@ -137,88 +135,88 @@ for path, image, annotation in ds: - load - ```python - dataset = sv.DetectionDataset.from_yolo( - images_directory_path=..., - annotations_directory_path=..., - data_yaml_path=... - ) - - dataset = sv.DetectionDataset.from_pascal_voc( - images_directory_path=..., - annotations_directory_path=... - ) - - dataset = sv.DetectionDataset.from_coco( - images_directory_path=..., - annotations_path=... - ) - ``` + ```python + dataset = sv.DetectionDataset.from_yolo( + images_directory_path=..., + annotations_directory_path=..., + data_yaml_path=... + ) + + dataset = sv.DetectionDataset.from_pascal_voc( + images_directory_path=..., + annotations_directory_path=... + ) + + dataset = sv.DetectionDataset.from_coco( + images_directory_path=..., + annotations_path=... + ) + ``` - split - ```python - train_dataset, test_dataset = dataset.split(split_ratio=0.7) - test_dataset, valid_dataset = test_dataset.split(split_ratio=0.5) + ```python + train_dataset, test_dataset = dataset.split(split_ratio=0.7) + test_dataset, valid_dataset = test_dataset.split(split_ratio=0.5) - len(train_dataset), len(test_dataset), len(valid_dataset) - # (700, 150, 150) - ``` + len(train_dataset), len(test_dataset), len(valid_dataset) + # (700, 150, 150) + ``` - merge - ```python - ds_1 = sv.DetectionDataset(...) - len(ds_1) - # 100 - ds_1.classes - # ['dog', 'person'] - - ds_2 = sv.DetectionDataset(...) - len(ds_2) - # 200 - ds_2.classes - # ['cat'] - - ds_merged = sv.DetectionDataset.merge([ds_1, ds_2]) - len(ds_merged) - # 300 - ds_merged.classes - # ['cat', 'dog', 'person'] - ``` + ```python + ds_1 = sv.DetectionDataset(...) + len(ds_1) + # 100 + ds_1.classes + # ['dog', 'person'] + + ds_2 = sv.DetectionDataset(...) + len(ds_2) + # 200 + ds_2.classes + # ['cat'] + + ds_merged = sv.DetectionDataset.merge([ds_1, ds_2]) + len(ds_merged) + # 300 + ds_merged.classes + # ['cat', 'dog', 'person'] + ``` - save - ```python - dataset.as_yolo( - images_directory_path=..., - annotations_directory_path=..., - data_yaml_path=... - ) - - dataset.as_pascal_voc( - images_directory_path=..., - annotations_directory_path=... - ) - - dataset.as_coco( - images_directory_path=..., - annotations_path=... - ) - ``` + ```python + dataset.as_yolo( + images_directory_path=..., + annotations_directory_path=..., + data_yaml_path=... + ) + + dataset.as_pascal_voc( + images_directory_path=..., + annotations_directory_path=... + ) + + dataset.as_coco( + images_directory_path=..., + annotations_path=... + ) + ``` - convert - ```python - sv.DetectionDataset.from_yolo( - images_directory_path=..., - annotations_directory_path=..., - data_yaml_path=... - ).as_pascal_voc( - images_directory_path=..., - annotations_directory_path=... - ) - ``` + ```python + sv.DetectionDataset.from_yolo( + images_directory_path=..., + annotations_directory_path=..., + data_yaml_path=... + ).as_pascal_voc( + images_directory_path=..., + annotations_directory_path=... 
+  )
+  ```
diff --git a/docs/assets.md b/docs/assets.md
index 84b3dfd62..2e38ad472 100644
--- a/docs/assets.md
+++ b/docs/assets.md
@@ -1,5 +1,6 @@
 ---
 comments: true
+status: new
 ---
 
 # Assets
@@ -13,6 +14,7 @@ To install the Supervision assets utility, you can use `pip`. This utility is av
 as an extra within the Supervision package.
 
 !!! example "pip install"
+
     ```bash
     pip install "supervision[assets]"
     ```
diff --git a/docs/changelog.md b/docs/changelog.md
index 976a78d06..d845e6e9c 100644
--- a/docs/changelog.md
+++ b/docs/changelog.md
@@ -1,5 +1,149 @@
 # CHANGELOG
 
+### 0.25.0 Nov 12, 2024
+
+- No removals or deprecations in this release!
+
+- Essential update to the [`LineZone`](https://supervision.roboflow.com/0.25.0/detection/tools/line_zone/): when computing line crossings, detections that jitter might be counted twice (or more). This can now be solved with the `minimum_crossing_threshold` argument. If you set it to `2` or more, extra frames will be used to confirm the crossing, improving the accuracy significantly; see the usage sketch later in this patch. ([#1540](https://github.com/roboflow/supervision/pull/1540))
+
+- It is now possible to track objects detected as [`KeyPoints`](https://supervision.roboflow.com/0.25.0/keypoint/core/#supervision.keypoint.core.KeyPoints). See the complete step-by-step guide in the [Object Tracking Guide](https://supervision.roboflow.com/latest/how_to/track_objects/#keypoints). ([#1658](https://github.com/roboflow/supervision/pull/1658))
+
+```python
+import numpy as np
+import supervision as sv
+from ultralytics import YOLO
+
+model = YOLO("yolov8m-pose.pt")
+tracker = sv.ByteTrack()
+trace_annotator = sv.TraceAnnotator()
+
+def callback(frame: np.ndarray, _: int) -> np.ndarray:
+    results = model(frame)[0]
+    key_points = sv.KeyPoints.from_ultralytics(results)
+
+    detections = key_points.as_detections()
+    detections = tracker.update_with_detections(detections)
+
+    annotated_image = trace_annotator.annotate(frame.copy(), detections)
+    return annotated_image
+
+sv.process_video(
+    source_path="input_video.mp4",
+    target_path="output_video.mp4",
+    callback=callback
+)
+```
+
+- Added `is_empty` method to [`KeyPoints`](https://supervision.roboflow.com/0.25.0/keypoint/core/#supervision.keypoint.core.KeyPoints) to check if there are any keypoints in the object. ([#1658](https://github.com/roboflow/supervision/pull/1658))
+
+- Added `as_detections` method to [`KeyPoints`](https://supervision.roboflow.com/0.25.0/keypoint/core/#supervision.keypoint.core.KeyPoints) that converts `KeyPoints` to `Detections`. ([#1658](https://github.com/roboflow/supervision/pull/1658))
+
+- Added a new video to `supervision[assets]`. ([#1657](https://github.com/roboflow/supervision/pull/1657))
+
+```python
+from supervision.assets import download_assets, VideoAssets
+
+path_to_video = download_assets(VideoAssets.SKIING)
+```
+
+- Supervision can now be used with [`Python 3.13`](https://docs.python.org/3/whatsnew/3.13.html). The most notable update is the ability to run Python [without Global Interpreter Lock (GIL)](https://docs.python.org/3/whatsnew/3.13.html#whatsnew313-free-threaded-cpython). We expect support for this among our dependencies to be inconsistent, but if you do attempt it, let us know the results! 
([#1595](https://github.com/roboflow/supervision/pull/1595))
+
+- Added [`Mean Average Recall`](https://supervision.roboflow.com/latest/metrics/mean_average_recall/) mAR metric, which returns a recall score, averaged over IoU thresholds, detected object classes, and limits imposed on maximum considered detections. ([#1661](https://github.com/roboflow/supervision/pull/1661))
+
+```python
+import supervision as sv
+from supervision.metrics import MeanAverageRecall
+
+predictions = sv.Detections(...)
+targets = sv.Detections(...)
+
+mar_metric = MeanAverageRecall()
+mar_result = mar_metric.update(predictions, targets).compute()
+
+mar_result.plot()
+```
+
+- Added [`Precision`](https://supervision.roboflow.com/latest/metrics/precision/) and [`Recall`](https://supervision.roboflow.com/latest/metrics/recall/) metrics, providing a baseline for comparing model outputs to ground truth or another model ([#1609](https://github.com/roboflow/supervision/pull/1609))
+
+```python
+import supervision as sv
+from supervision.metrics import Recall
+
+predictions = sv.Detections(...)
+targets = sv.Detections(...)
+
+recall_metric = Recall()
+recall_result = recall_metric.update(predictions, targets).compute()
+
+recall_result.plot()
+```
+
+- All Metrics now support Oriented Bounding Boxes (OBB) ([#1593](https://github.com/roboflow/supervision/pull/1593))
+
+```python
+import supervision as sv
+from supervision.metrics import F1_Score
+
+predictions = sv.Detections(...)
+targets = sv.Detections(...)
+
+f1_metric = F1_Score(metric_target=sv.MetricTarget.ORIENTED_BOUNDING_BOXES)
+f1_result = f1_metric.update(predictions, targets).compute()
+```
+
+- Introducing Smart Labels! When `smart_position` is set for [`LabelAnnotator`](https://supervision.roboflow.com/0.25.0/detection/annotators/#supervision.annotators.core.LabelAnnotator), [`RichLabelAnnotator`](https://supervision.roboflow.com/0.25.0/detection/annotators/#supervision.annotators.core.RichLabelAnnotator) or [`VertexLabelAnnotator`](https://supervision.roboflow.com/0.25.0/keypoint/annotators/#supervision.keypoint.annotators.VertexLabelAnnotator), the labels will move around to avoid overlapping others. ([#1625](https://github.com/roboflow/supervision/pull/1625))
+
+```python
+import cv2
+import supervision as sv
+from ultralytics import YOLO
+
+image = cv2.imread("image.jpg")
+
+label_annotator = sv.LabelAnnotator(smart_position=True)
+
+model = YOLO("yolo11m.pt")
+results = model(image)[0]
+detections = sv.Detections.from_ultralytics(results)
+
+annotated_frame = label_annotator.annotate(image.copy(), detections)
+sv.plot_image(annotated_frame)
+```
+
+- Added the `metadata` variable to [`Detections`](https://supervision.roboflow.com/0.25.0/detection/core/#supervision.detection.core.Detections). It allows you to store custom data per-image, rather than per-detected-object as was possible with the `data` variable. For example, `metadata` could be used to store the source video path, camera model or camera parameters. ([#1589](https://github.com/roboflow/supervision/pull/1589))
+
+```python
+import supervision as sv
+from ultralytics import YOLO
+
+model = YOLO("yolov8m")
+
+result = model("image.png")[0]
+detections = sv.Detections.from_ultralytics(result)
+
+# Items in `data` must match the length of detections
+object_ids = [num for num in range(len(detections))]
+detections.data["object_number"] = object_ids
+
+# Items in `metadata` can be of any length.
+detections.metadata["camera_model"] = "Luxonis OAK-D"
+```
+
+- Added a `py.typed` type hints metafile. 
It should provide a stronger signal to type annotators and IDEs that type support is available. ([#1586](https://github.com/roboflow/supervision/pull/1586))
+
+- `ByteTrack` no longer requires `detections` to have a `class_id` ([#1637](https://github.com/roboflow/supervision/pull/1637))
+- `draw_line`, `draw_rectangle`, `draw_filled_rectangle`, `draw_polygon`, `draw_filled_polygon` and `PolygonZoneAnnotator` now come with a default color ([#1591](https://github.com/roboflow/supervision/pull/1591))
+- Dataset classes are treated as case-sensitive when merging multiple datasets. ([#1643](https://github.com/roboflow/supervision/pull/1643))
+- Expanded [metrics documentation](https://supervision.roboflow.com/0.25.0/metrics/f1_score/) with example plots and printed results ([#1660](https://github.com/roboflow/supervision/pull/1660))
+- Added usage example for polygon zone ([#1608](https://github.com/roboflow/supervision/pull/1608))
+- Small improvements to error handling in polygons. ([#1602](https://github.com/roboflow/supervision/pull/1602))
+
+- Updated [`ByteTrack`](https://supervision.roboflow.com/0.25.0/trackers/#supervision.tracker.byte_tracker.core.ByteTrack), removing shared variables. Previously, multiple instances of `ByteTrack` would share some data, requiring liberal use of `tracker.reset()`; see the sketch after this group of docs diffs. ([#1603](https://github.com/roboflow/supervision/pull/1603)), ([#1528](https://github.com/roboflow/supervision/pull/1528))
+- Fixed a bug where the `class_agnostic` setting in `MeanAveragePrecision` would not work. ([#1577](https://github.com/roboflow/supervision/pull/1577))
+- Removed the welcome workflow from our CI system. ([#1596](https://github.com/roboflow/supervision/pull/1596))
+
+- Large refactor of `ByteTrack`: `STrack` moved to a separate class, removed the superfluous `BaseTrack` class, removed unused variables ([#1603](https://github.com/roboflow/supervision/pull/1603))
+- Large refactor of `RichLabelAnnotator`, matching its contents with `LabelAnnotator`. ([#1625](https://github.com/roboflow/supervision/pull/1625))
+
 ### 0.24.0 Oct 4, 2024
 
 - Added [F1 score](https://supervision.roboflow.com/0.24.0/metrics/f1_score/#supervision.metrics.f1_score.F1Score) as a new metric for detection and segmentation. [#1521](https://github.com/roboflow/supervision/pull/1521)
diff --git a/docs/deprecated.md b/docs/deprecated.md
index d355bf962..e71407eb0 100644
--- a/docs/deprecated.md
+++ b/docs/deprecated.md
@@ -19,6 +19,10 @@ These features are phased out due to better alternatives or potential issues in
 
 # Removed
 
+### 0.25.0
+
+No removals in this version!
+
 ### 0.24.0
 
 - The `frame_resolution_wh ` parameter in [`sv.PolygonZone`](detection/tools/polygon_zone.md/#supervision.detection.tools.polygon_zone.PolygonZone) has been removed.
diff --git a/docs/detection/tools/inference_slicer.md b/docs/detection/tools/inference_slicer.md
index 7a5d3e573..5d5d08bc5 100644
--- a/docs/detection/tools/inference_slicer.md
+++ b/docs/detection/tools/inference_slicer.md
@@ -1,6 +1,5 @@
 ---
 comments: true
-status: new
 ---
 
 # InferenceSlicer
diff --git a/docs/detection/tools/polygon_zone.md b/docs/detection/tools/polygon_zone.md
index 1d445d9fc..cbe76c20f 100644
--- a/docs/detection/tools/polygon_zone.md
+++ b/docs/detection/tools/polygon_zone.md
@@ -1,6 +1,5 @@
 ---
 comments: true
-status: new
 ---
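The `LineZone` changelog entry above describes the new `minimum_crossing_threshold` argument without an accompanying snippet, unlike its sibling entries. A minimal sketch of how it might be wired up, assuming a horizontal counting line, a YOLOv8 checkpoint, and a local `input_video.mp4`; the coordinates, model, and path are placeholders, not part of the PR:

```python
import supervision as sv
from ultralytics import YOLO

model = YOLO("yolov8n.pt")
tracker = sv.ByteTrack()

# Require 2 consecutive frames on the far side of the line before a
# crossing is counted, so jittering detections are not counted twice.
line_zone = sv.LineZone(
    start=sv.Point(x=0, y=360),
    end=sv.Point(x=1280, y=360),
    minimum_crossing_threshold=2,
)

for frame in sv.get_video_frames_generator("input_video.mp4"):
    detections = sv.Detections.from_ultralytics(model(frame)[0])
    # LineZone needs tracker IDs to follow each object across the line.
    detections = tracker.update_with_detections(detections)
    line_zone.trigger(detections)

print(line_zone.in_count, line_zone.out_count)
```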
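Similarly, a minimal sketch of the `ByteTrack` shared-state fix referenced in the changelog above: since the 0.25.0 refactor, two tracker instances keep fully independent state, so per-stream trackers no longer need defensive `reset()` calls between updates. The two-camera setup and synthetic boxes below are illustrative only:

```python
import numpy as np
import supervision as sv

# One tracker per (hypothetical) camera stream.
tracker_a = sv.ByteTrack()
tracker_b = sv.ByteTrack()

# Synthetic single-box detections standing in for per-frame model output.
detections_a = sv.Detections(
    xyxy=np.array([[10.0, 10.0, 50.0, 50.0]]),
    confidence=np.array([0.9]),
    class_id=np.array([0]),
)
detections_b = sv.Detections(
    xyxy=np.array([[200.0, 120.0, 260.0, 180.0]]),
    confidence=np.array([0.8]),
    class_id=np.array([0]),
)

# Each tracker assigns IDs from its own counter; neither leaks state
# into the other.
tracked_a = tracker_a.update_with_detections(detections_a)
tracked_b = tracker_b.update_with_detections(detections_b)
print(tracked_a.tracker_id, tracked_b.tracker_id)
```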
diff --git a/docs/how_to/track_objects.md b/docs/how_to/track_objects.md index 464f6b8d9..1b321e7fe 100644 --- a/docs/how_to/track_objects.md +++ b/docs/how_to/track_objects.md @@ -1,18 +1,21 @@ --- comments: true +status: new --- # Track Objects Leverage Supervision's advanced capabilities for enhancing your video analysis by seamlessly [tracking](/latest/trackers/) objects recognized by -a multitude of object detection and segmentation models. This comprehensive guide will +a multitude of object detection, segmentation and keypoint models. This comprehensive guide will take you through the steps to perform inference using the YOLOv8 model via either the [Inference](https://github.com/roboflow/inference) or [Ultralytics](https://github.com/ultralytics/ultralytics) packages. Following this, you'll discover how to track these objects efficiently and annotate your video content for a deeper analysis. +## Object Detection & Segmentation + To make it easier for you to follow our tutorial download the video we will use as an example. You can do this using [`supervision[assets]`](/latest/assets/) extension. @@ -27,7 +30,7 @@ download_assets(VideoAssets.PEOPLE_WALKING) -## Run Inference +### Run Inference First, you'll need to obtain predictions from your object detection or segmentation model. In this tutorial, we are using the YOLOv8 model as an example. However, @@ -40,7 +43,12 @@ by obtaining model predictions and then annotating the frame based on these pred This `callback` function will be essential in the subsequent steps of the tutorial, as it will be modified to include tracking, labeling, and trace annotations. +!!! tip + + Both object detection and segmentation models are supported. Try it with `yolov8n.pt` or `yolov8n-640-seg`! + === "Ultralytics" + ```{ .py } import numpy as np import supervision as sv @@ -62,6 +70,7 @@ it will be modified to include tracking, labeling, and trace annotations. ``` === "Inference" + ```{ .py } import numpy as np import supervision as sv @@ -86,7 +95,7 @@ it will be modified to include tracking, labeling, and trace annotations. -## Tracking +### Tracking After running inference and obtaining predictions, the next step is to track the detected objects throughout the video. Utilizing Supervision’s @@ -95,6 +104,7 @@ functionality, each detected object is assigned a unique tracker ID, enabling the continuous following of the object's motion path across different frames. === "Ultralytics" + ```{ .py hl_lines="6 12" } import numpy as np import supervision as sv @@ -118,6 +128,7 @@ enabling the continuous following of the object's motion path across different f ``` === "Inference" + ```{ .py hl_lines="6 12" } import numpy as np import supervision as sv @@ -140,7 +151,7 @@ enabling the continuous following of the object's motion path across different f ) ``` -## Annotate Video with Tracking IDs +### Annotate Video with Tracking IDs Annotating the video with tracking IDs helps in distinguishing and following each object distinctly. With the @@ -149,6 +160,7 @@ in Supervision, we can overlay the tracker IDs and class labels on the detected offering a clear visual representation of each object's class and unique identifier. 
=== "Ultralytics" + ```{ .py hl_lines="8 15-19 23-24" } import numpy as np import supervision as sv @@ -183,6 +195,7 @@ offering a clear visual representation of each object's class and unique identif ``` === "Inference" + ```{ .py hl_lines="8 15-19 23-24" } import numpy as np import supervision as sv @@ -220,7 +233,7 @@ offering a clear visual representation of each object's class and unique identif -## Annotate Video with Traces +### Annotate Video with Traces Adding traces to the video involves overlaying the historical paths of the detected objects. This feature, powered by the @@ -229,6 +242,7 @@ allows for visualizing the trajectories of objects, helping in understanding the movement patterns and interactions between objects in the video. === "Ultralytics" + ```{ .py hl_lines="9 26-27" } import numpy as np import supervision as sv @@ -266,6 +280,7 @@ movement patterns and interactions between objects in the video. ``` === "Inference" + ```{ .py hl_lines="9 26-27" } import numpy as np import supervision as sv @@ -306,6 +321,336 @@ movement patterns and interactions between objects in the video. -This structured walkthrough should give a detailed pathway to annotate videos -effectively using Supervision’s various functionalities, including object tracking and -trace annotations. +## Keypoints + +Models aren't limited to object detection and segmentation. Keypoint detection allows for detailed analysis of body joints and connections, especially valuable for applications like human pose estimation. This section introduces keypoint tracking. We'll walk through the steps of annotating keypoints, converting them into bounding box detections compatible with `ByteTrack`, and applying detection smoothing for enhanced stability. + +To make it easier for you to follow our tutorial, let's download the video we will use as an +example. You can do this using [`supervision[assets]`](/latest/assets/) extension. + +```python +from supervision.assets import download_assets, VideoAssets + +download_assets(VideoAssets.SKIING) +``` + + + +### Keypoint Detection + +First, you'll need to obtain predictions from your keypoint detection model. In this tutorial, we are using the YOLOv8 model as an example. However, +Supervision is versatile and compatible with various models. Check this [link](/latest/keypoint/core/) for guidance on how to plug in other models. + +We will define a `callback` function, which will process each frame of the video by obtaining model predictions and then annotating the frame based on these predictions. + +Let's immediately visualize the results with our [`EdgeAnnotator`](/latest/keypoint/annotators/#supervision.keypoint.annotators.EdgeAnnotator) and [`VertexAnnotator`](https://supervision.roboflow.com/latest/keypoint/annotators/#supervision.keypoint.annotators.VertexAnnotator). 
+
+=== "Ultralytics"
+
+    ```{ .py hl_lines="5 10-11" }
+    import numpy as np
+    import supervision as sv
+    from ultralytics import YOLO
+
+    model = YOLO("yolov8m-pose.pt")
+    edge_annotator = sv.EdgeAnnotator()
+    vertex_annotator = sv.VertexAnnotator()
+
+    def callback(frame: np.ndarray, _: int) -> np.ndarray:
+        results = model(frame)[0]
+        key_points = sv.KeyPoints.from_ultralytics(results)
+
+        annotated_frame = edge_annotator.annotate(
+            frame.copy(), key_points=key_points)
+        return vertex_annotator.annotate(
+            annotated_frame, key_points=key_points)
+
+    sv.process_video(
+        source_path="skiing.mp4",
+        target_path="result.mp4",
+        callback=callback
+    )
+    ```
+
+=== "Inference"
+
+    ```{ .py hl_lines="5-6 11-12" }
+    import numpy as np
+    import supervision as sv
+    from inference.models.utils import get_roboflow_model
+
+    model = get_roboflow_model(
+        model_id="yolov8m-pose-640", api_key=<ROBOFLOW_API_KEY>)
+    edge_annotator = sv.EdgeAnnotator()
+    vertex_annotator = sv.VertexAnnotator()
+
+    def callback(frame: np.ndarray, _: int) -> np.ndarray:
+        results = model.infer(frame)[0]
+        key_points = sv.KeyPoints.from_inference(results)
+
+        annotated_frame = edge_annotator.annotate(
+            frame.copy(), key_points=key_points)
+        return vertex_annotator.annotate(
+            annotated_frame, key_points=key_points)
+
+    sv.process_video(
+        source_path="skiing.mp4",
+        target_path="result.mp4",
+        callback=callback
+    )
+    ```
+
+
+
+### Convert to Detections
+
+Keypoint tracking is currently supported via the conversion of `KeyPoints` to `Detections`. This is achieved with the [`KeyPoints.as_detections()`](/latest/keypoint/core/#supervision.keypoint.core.KeyPoints.as_detections) function.
+
+Let's convert to detections and visualize the results with our [`BoxAnnotator`](/latest/detection/annotators/#supervision.annotators.core.BoxAnnotator).
+
+!!! tip
+
+    You may use the `selected_keypoint_indices` argument to specify a subset of keypoints to convert. This is useful when some keypoints may be occluded. For example, a person might swing their arm, momentarily hiding the elbow behind the torso. 
+
+=== "Ultralytics"
+
+    ```{ .py hl_lines="8 13 19-20" }
+    import numpy as np
+    import supervision as sv
+    from ultralytics import YOLO
+
+    model = YOLO("yolov8m-pose.pt")
+    edge_annotator = sv.EdgeAnnotator()
+    vertex_annotator = sv.VertexAnnotator()
+    box_annotator = sv.BoxAnnotator()
+
+    def callback(frame: np.ndarray, _: int) -> np.ndarray:
+        results = model(frame)[0]
+        key_points = sv.KeyPoints.from_ultralytics(results)
+        detections = key_points.as_detections()
+
+        annotated_frame = edge_annotator.annotate(
+            frame.copy(), key_points=key_points)
+        annotated_frame = vertex_annotator.annotate(
+            annotated_frame, key_points=key_points)
+        return box_annotator.annotate(
+            annotated_frame, detections=detections)
+
+    sv.process_video(
+        source_path="skiing.mp4",
+        target_path="result.mp4",
+        callback=callback
+    )
+    ```
+
+=== "Inference"
+
+    ```{ .py hl_lines="9 14 20-21" }
+    import numpy as np
+    import supervision as sv
+    from inference.models.utils import get_roboflow_model
+
+    model = get_roboflow_model(
+        model_id="yolov8m-pose-640", api_key=<ROBOFLOW_API_KEY>)
+    edge_annotator = sv.EdgeAnnotator()
+    vertex_annotator = sv.VertexAnnotator()
+    box_annotator = sv.BoxAnnotator()
+
+    def callback(frame: np.ndarray, _: int) -> np.ndarray:
+        results = model.infer(frame)[0]
+        key_points = sv.KeyPoints.from_inference(results)
+        detections = key_points.as_detections()
+
+        annotated_frame = edge_annotator.annotate(
+            frame.copy(), key_points=key_points)
+        annotated_frame = vertex_annotator.annotate(
+            annotated_frame, key_points=key_points)
+        return box_annotator.annotate(
+            annotated_frame, detections=detections)
+
+    sv.process_video(
+        source_path="skiing.mp4",
+        target_path="result.mp4",
+        callback=callback
+    )
+    ```
+
+
+
+### Keypoint Tracking
+
+Now that we have a `Detections` object, we can track the objects throughout the video. Utilizing Supervision’s [`sv.ByteTrack`](/latest/trackers/#supervision.tracker.byte_tracker.core.ByteTrack) functionality, each detected object is assigned a unique tracker ID, enabling the continuous following of the object's motion path across different frames. We shall visualize the result with `TraceAnnotator`. 
+
+=== "Ultralytics"
+
+    ```{ .py hl_lines="10-11 17 25-26" }
+    import numpy as np
+    import supervision as sv
+    from ultralytics import YOLO
+
+    model = YOLO("yolov8m-pose.pt")
+    edge_annotator = sv.EdgeAnnotator()
+    vertex_annotator = sv.VertexAnnotator()
+    box_annotator = sv.BoxAnnotator()
+
+    tracker = sv.ByteTrack()
+    trace_annotator = sv.TraceAnnotator()
+
+    def callback(frame: np.ndarray, _: int) -> np.ndarray:
+        results = model(frame)[0]
+        key_points = sv.KeyPoints.from_ultralytics(results)
+        detections = key_points.as_detections()
+        detections = tracker.update_with_detections(detections)
+
+        annotated_frame = edge_annotator.annotate(
+            frame.copy(), key_points=key_points)
+        annotated_frame = vertex_annotator.annotate(
+            annotated_frame, key_points=key_points)
+        annotated_frame = box_annotator.annotate(
+            annotated_frame, detections=detections)
+        return trace_annotator.annotate(
+            annotated_frame, detections=detections)
+
+    sv.process_video(
+        source_path="skiing.mp4",
+        target_path="result.mp4",
+        callback=callback
+    )
+    ```
+
+=== "Inference"
+
+    ```{ .py hl_lines="11-12 18 26-27" }
+    import numpy as np
+    import supervision as sv
+    from inference.models.utils import get_roboflow_model
+
+    model = get_roboflow_model(
+        model_id="yolov8m-pose-640", api_key=<ROBOFLOW_API_KEY>)
+    edge_annotator = sv.EdgeAnnotator()
+    vertex_annotator = sv.VertexAnnotator()
+    box_annotator = sv.BoxAnnotator()
+
+    tracker = sv.ByteTrack()
+    trace_annotator = sv.TraceAnnotator()
+
+    def callback(frame: np.ndarray, _: int) -> np.ndarray:
+        results = model.infer(frame)[0]
+        key_points = sv.KeyPoints.from_inference(results)
+        detections = key_points.as_detections()
+        detections = tracker.update_with_detections(detections)
+
+        annotated_frame = edge_annotator.annotate(
+            frame.copy(), key_points=key_points)
+        annotated_frame = vertex_annotator.annotate(
+            annotated_frame, key_points=key_points)
+        annotated_frame = box_annotator.annotate(
+            annotated_frame, detections=detections)
+        return trace_annotator.annotate(
+            annotated_frame, detections=detections)
+
+    sv.process_video(
+        source_path="skiing.mp4",
+        target_path="result.mp4",
+        callback=callback
+    )
+    ```
+
+
+
+### Bonus: Smoothing
+
+We could stop here, having successfully tracked the objects detected by the keypoint model. However, we can further enhance the stability of the boxes by applying [`DetectionsSmoother`](/latest/detection/tools/smoother/). This tool stabilizes the boxes by smoothing the bounding box coordinates across frames. 
It is very simple to use:
+
+=== "Ultralytics"
+
+    ```{ .py hl_lines="11 19" }
+    import numpy as np
+    import supervision as sv
+    from ultralytics import YOLO
+
+    model = YOLO("yolov8m-pose.pt")
+    edge_annotator = sv.EdgeAnnotator()
+    vertex_annotator = sv.VertexAnnotator()
+    box_annotator = sv.BoxAnnotator()
+
+    tracker = sv.ByteTrack()
+    smoother = sv.DetectionsSmoother()
+    trace_annotator = sv.TraceAnnotator()
+
+    def callback(frame: np.ndarray, _: int) -> np.ndarray:
+        results = model(frame)[0]
+        key_points = sv.KeyPoints.from_ultralytics(results)
+        detections = key_points.as_detections()
+        detections = tracker.update_with_detections(detections)
+        detections = smoother.update_with_detections(detections)
+
+        annotated_frame = edge_annotator.annotate(
+            frame.copy(), key_points=key_points)
+        annotated_frame = vertex_annotator.annotate(
+            annotated_frame, key_points=key_points)
+        annotated_frame = box_annotator.annotate(
+            annotated_frame, detections=detections)
+        return trace_annotator.annotate(
+            annotated_frame, detections=detections)
+
+    sv.process_video(
+        source_path="skiing.mp4",
+        target_path="result.mp4",
+        callback=callback
+    )
+    ```
+
+=== "Inference"
+
+    ```{ .py hl_lines="12 20" }
+    import numpy as np
+    import supervision as sv
+    from inference.models.utils import get_roboflow_model
+
+    model = get_roboflow_model(
+        model_id="yolov8m-pose-640", api_key=<ROBOFLOW_API_KEY>)
+    edge_annotator = sv.EdgeAnnotator()
+    vertex_annotator = sv.VertexAnnotator()
+    box_annotator = sv.BoxAnnotator()
+
+    tracker = sv.ByteTrack()
+    smoother = sv.DetectionsSmoother()
+    trace_annotator = sv.TraceAnnotator()
+
+    def callback(frame: np.ndarray, _: int) -> np.ndarray:
+        results = model.infer(frame)[0]
+        key_points = sv.KeyPoints.from_inference(results)
+        detections = key_points.as_detections()
+        detections = tracker.update_with_detections(detections)
+        detections = smoother.update_with_detections(detections)
+
+        annotated_frame = edge_annotator.annotate(
+            frame.copy(), key_points=key_points)
+        annotated_frame = vertex_annotator.annotate(
+            annotated_frame, key_points=key_points)
+        annotated_frame = box_annotator.annotate(
+            annotated_frame, detections=detections)
+        return trace_annotator.annotate(
+            annotated_frame, detections=detections)
+
+    sv.process_video(
+        source_path="skiing.mp4",
+        target_path="result.mp4",
+        callback=callback
+    )
+    ```
+
+
+
+This structured walkthrough provides a detailed pathway for annotating videos effectively using Supervision's various functionalities, including object tracking and trace annotations.
diff --git a/docs/keypoint/annotators.md b/docs/keypoint/annotators.md
index 32f30626b..30a970ecd 100644
--- a/docs/keypoint/annotators.md
+++ b/docs/keypoint/annotators.md
@@ -1,5 +1,6 @@
 ---
 comments: true
+status: new
 ---
 
 # Annotators
diff --git a/docs/keypoint/core.md b/docs/keypoint/core.md
index 6f42c254d..7354babab 100644
--- a/docs/keypoint/core.md
+++ b/docs/keypoint/core.md
@@ -1,5 +1,6 @@
 ---
 comments: true
+status: new
 ---
 
 # Keypoint Detection
diff --git a/docs/metrics/common_values.md b/docs/metrics/common_values.md
new file mode 100644
index 000000000..b7600f3f1
--- /dev/null
+++ b/docs/metrics/common_values.md
@@ -0,0 +1,20 @@
+---
+comments: true
+status: new
+---
+
+# Common Values
+
+This page contains supplementary values, types and enums that metrics use. 
+ + + +:::supervision.metrics.core.MetricTarget + + + +:::supervision.metrics.core.AveragingMethod diff --git a/docs/metrics/mean_average_recall.md b/docs/metrics/mean_average_recall.md new file mode 100644 index 000000000..5cc0bf0a2 --- /dev/null +++ b/docs/metrics/mean_average_recall.md @@ -0,0 +1,18 @@ +--- +comments: true +status: new +--- + +# Mean Average Recall + + + +:::supervision.metrics.mean_average_recall.MeanAverageRecall + + + +:::supervision.metrics.mean_average_recall.MeanAverageRecallResult diff --git a/docs/metrics/precision.md b/docs/metrics/precision.md new file mode 100644 index 000000000..c704452ee --- /dev/null +++ b/docs/metrics/precision.md @@ -0,0 +1,18 @@ +--- +comments: true +status: new +--- + +# Precision + + + +:::supervision.metrics.precision.Precision + + + +:::supervision.metrics.precision.PrecisionResult diff --git a/docs/metrics/recall.md b/docs/metrics/recall.md new file mode 100644 index 000000000..78dde8334 --- /dev/null +++ b/docs/metrics/recall.md @@ -0,0 +1,18 @@ +--- +comments: true +status: new +--- + +# Recall + +
+ +:::supervision.metrics.recall.Recall + + + +:::supervision.metrics.recall.RecallResult diff --git a/docs/notebooks/small-object-detection-with-sahi.ipynb b/docs/notebooks/small-object-detection-with-sahi.ipynb index 1654ff3c0..db69b0852 100644 --- a/docs/notebooks/small-object-detection-with-sahi.ipynb +++ b/docs/notebooks/small-object-detection-with-sahi.ipynb @@ -15,7 +15,7 @@ "\n", "This cookbook shows how to use [Slicing Aided Hyper Inference (SAHI) ](https://arxiv.org/abs/2202.06934) for small object detection with `supervision`.\n", "\n", - "![\"Small Object Detection\"](https://raw.githubusercontent.com/ediardo/notebooks/main/sahi/animation.gif \"Small Object Detection\")\n", + "![\"Small Object Detection\"](https://media.roboflow.com/supervision/cookbooks/sahi/animation.gif \"Small Object Detection\")\n", "\n", "Click the Open in Colab button to run the cookbook on Google Colab.\n", "\n", @@ -70,7 +70,7 @@ "\n", "Detecting people (or their heads) is a common problem that has been addressed by many researchers in the past. In this project, we\u2019ll use an open-source public dataset and a fine-tuned model to perform inference on images.\n", "\n", - "![Roboflow Universe](https://raw.githubusercontent.com/ediardo/notebooks/main/sahi/roboflow_universe.png \"Open source model for counting people's heads\")\n", + "![Roboflow Universe](https://media.roboflow.com/supervision/cookbooks/sahi/roboflow_universe.png \"Open source model for counting people's heads\")\n", "\n", "Some details about the project [\"people_counterv0 Computer Vision Project\"](https://universe.roboflow.com/sit-cx0ng/people_counterv0):\n", "\n", @@ -782,9 +782,9 @@ "\n", "| Example| Observations |\n", "|----|----|\n", - "| ![Overlapping](https://github.com/ediardo/notebooks/blob/main/sahi/overlapping_1.png?raw=true \"Overlapping\") | False Negative, Incomplete bbox |\n", - "| ![Overlapping](https://raw.githubusercontent.com/ediardo/notebooks/main/sahi/overlapping_2.png \"Overlapping\")| Double detection, Incomplete bbox|\n", - "| ![Overlapping](https://raw.githubusercontent.com/ediardo/notebooks/main/sahi/overlapping_3.png \"Overlapping\")| Incomplete bounding box|\n", + "| ![Overlapping](https://media.roboflow.com/supervision/cookbooks/sahi/overlapping_1.png \"Overlapping\") | False Negative, Incomplete bbox |\n", + "| ![Overlapping](https://media.roboflow.com/supervision/cookbooks/sahi/overlapping_2.png \"Overlapping\")| Double detection, Incomplete bbox|\n", + "| ![Overlapping](https://media.roboflow.com/supervision/cookbooks/sahi/overlapping_3.png \"Overlapping\")| Incomplete bounding box|\n", "\n", "## Improving Object Detection Near Boundaries with Overlapping\n", "\n", diff --git a/docs/trackers.md b/docs/trackers.md index cb44441f1..47f700619 100644 --- a/docs/trackers.md +++ b/docs/trackers.md @@ -1,5 +1,6 @@ --- comments: true +status: new --- # ByteTrack diff --git a/mkdocs.yml b/mkdocs.yml index 3cd867590..6d013a730 100644 --- a/mkdocs.yml +++ b/mkdocs.yml @@ -66,7 +66,11 @@ nav: - Utils: datasets/utils.md - Metrics: - mAP: metrics/mean_average_precision.md + - mAR: metrics/mean_average_recall.md + - Precision: metrics/precision.md + - Recall: metrics/recall.md - F1 Score: metrics/f1_score.md + - Common Values: metrics/common_values.md - Legacy Metrics: detection/metrics.md - Utils: - Video: utils/video.md diff --git a/poetry.lock b/poetry.lock index 519b7a2f8..0d69292c6 100644 --- a/poetry.lock +++ b/poetry.lock @@ -2,13 +2,13 @@ [[package]] name = "anyio" -version = "4.5.0" +version = "4.6.2" 
description = "High level compatibility layer for multiple asynchronous event loop implementations" optional = false python-versions = ">=3.8" files = [ - {file = "anyio-4.5.0-py3-none-any.whl", hash = "sha256:fdeb095b7cc5a5563175eedd926ec4ae55413bb4be5770c424af0ba46ccb4a78"}, - {file = "anyio-4.5.0.tar.gz", hash = "sha256:c5a275fe5ca0afd788001f58fca1e69e29ce706d746e317d660e21f70c530ef9"}, + {file = "anyio-4.6.2-py3-none-any.whl", hash = "sha256:6caec6b1391f6f6d7b2ef2258d2902d36753149f67478f7df4be8e54d03a8f54"}, + {file = "anyio-4.6.2.tar.gz", hash = "sha256:f72a7bb3dd0752b3bd8b17a844a019d7fbf6ae218c588f4f9ba1b2f600b12347"}, ] [package.dependencies] @@ -19,7 +19,7 @@ typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""} [package.extras] doc = ["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"] -test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.21.0b1)"] +test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "truststore (>=0.9.1)", "uvloop (>=0.21.0b1)"] trio = ["trio (>=0.26.1)"] [[package]] @@ -259,13 +259,13 @@ css = ["tinycss2 (>=1.1.0,<1.3)"] [[package]] name = "build" -version = "1.2.2" +version = "1.2.2.post1" description = "A simple, correct Python build frontend" optional = false python-versions = ">=3.8" files = [ - {file = "build-1.2.2-py3-none-any.whl", hash = "sha256:277ccc71619d98afdd841a0e96ac9fe1593b823af481d3b0cea748e8894e0613"}, - {file = "build-1.2.2.tar.gz", hash = "sha256:119b2fb462adef986483438377a13b2f42064a2a3a4161f24a0cca698a07ac8c"}, + {file = "build-1.2.2.post1-py3-none-any.whl", hash = "sha256:1d61c0887fa860c01971625baae8bdd338e517b836a2f70dd1f7aa3a6b2fc5b5"}, + {file = "build-1.2.2.post1.tar.gz", hash = "sha256:b36993e92ca9375a219c99e606a122ff365a760a2d4bba0caa09bd5278b608b7"}, ] [package.dependencies] @@ -448,101 +448,116 @@ files = [ [[package]] name = "charset-normalizer" -version = "3.3.2" +version = "3.4.0" description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
optional = false python-versions = ">=3.7.0" files = [ - {file = "charset-normalizer-3.3.2.tar.gz", hash = "sha256:f30c3cb33b24454a82faecaf01b19c18562b1e89558fb6c56de4d9118a032fd5"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:25baf083bf6f6b341f4121c2f3c548875ee6f5339300e08be3f2b2ba1721cdd3"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:06435b539f889b1f6f4ac1758871aae42dc3a8c0e24ac9e60c2384973ad73027"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9063e24fdb1e498ab71cb7419e24622516c4a04476b17a2dab57e8baa30d6e03"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6897af51655e3691ff853668779c7bad41579facacf5fd7253b0133308cf000d"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d3193f4a680c64b4b6a9115943538edb896edc190f0b222e73761716519268e"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd70574b12bb8a4d2aaa0094515df2463cb429d8536cfb6c7ce983246983e5a6"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8465322196c8b4d7ab6d1e049e4c5cb460d0394da4a27d23cc242fbf0034b6b5"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a9a8e9031d613fd2009c182b69c7b2c1ef8239a0efb1df3f7c8da66d5dd3d537"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:beb58fe5cdb101e3a055192ac291b7a21e3b7ef4f67fa1d74e331a7f2124341c"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e06ed3eb3218bc64786f7db41917d4e686cc4856944f53d5bdf83a6884432e12"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:2e81c7b9c8979ce92ed306c249d46894776a909505d8f5a4ba55b14206e3222f"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:572c3763a264ba47b3cf708a44ce965d98555f618ca42c926a9c1616d8f34269"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:fd1abc0d89e30cc4e02e4064dc67fcc51bd941eb395c502aac3ec19fab46b519"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-win32.whl", hash = "sha256:3d47fa203a7bd9c5b6cee4736ee84ca03b8ef23193c0d1ca99b5089f72645c73"}, - {file = "charset_normalizer-3.3.2-cp310-cp310-win_amd64.whl", hash = "sha256:10955842570876604d404661fbccbc9c7e684caf432c09c715ec38fbae45ae09"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:802fe99cca7457642125a8a88a084cef28ff0cf9407060f7b93dca5aa25480db"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:573f6eac48f4769d667c4442081b1794f52919e7edada77495aaed9236d13a96"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:549a3a73da901d5bc3ce8d24e0600d1fa85524c10287f6004fbab87672bf3e1e"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f27273b60488abe721a075bcca6d7f3964f9f6f067c8c4c605743023d7d3944f"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ceae2f17a9c33cb48e3263960dc5fc8005351ee19db217e9b1bb15d28c02574"}, - {file = 
"charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:65f6f63034100ead094b8744b3b97965785388f308a64cf8d7c34f2f2e5be0c4"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:753f10e867343b4511128c6ed8c82f7bec3bd026875576dfd88483c5c73b2fd8"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4a78b2b446bd7c934f5dcedc588903fb2f5eec172f3d29e52a9096a43722adfc"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:e537484df0d8f426ce2afb2d0f8e1c3d0b114b83f8850e5f2fbea0e797bd82ae"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:eb6904c354526e758fda7167b33005998fb68c46fbc10e013ca97f21ca5c8887"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:deb6be0ac38ece9ba87dea880e438f25ca3eddfac8b002a2ec3d9183a454e8ae"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4ab2fe47fae9e0f9dee8c04187ce5d09f48eabe611be8259444906793ab7cbce"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:80402cd6ee291dcb72644d6eac93785fe2c8b9cb30893c1af5b8fdd753b9d40f"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-win32.whl", hash = "sha256:7cd13a2e3ddeed6913a65e66e94b51d80a041145a026c27e6bb76c31a853c6ab"}, - {file = "charset_normalizer-3.3.2-cp311-cp311-win_amd64.whl", hash = "sha256:663946639d296df6a2bb2aa51b60a2454ca1cb29835324c640dafb5ff2131a77"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0b2b64d2bb6d3fb9112bafa732def486049e63de9618b5843bcdd081d8144cd8"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:ddbb2551d7e0102e7252db79ba445cdab71b26640817ab1e3e3648dad515003b"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:55086ee1064215781fff39a1af09518bc9255b50d6333f2e4c74ca09fac6a8f6"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8f4a014bc36d3c57402e2977dada34f9c12300af536839dc38c0beab8878f38a"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a10af20b82360ab00827f916a6058451b723b4e65030c5a18577c8b2de5b3389"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8d756e44e94489e49571086ef83b2bb8ce311e730092d2c34ca8f7d925cb20aa"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90d558489962fd4918143277a773316e56c72da56ec7aa3dc3dbbe20fdfed15b"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6ac7ffc7ad6d040517be39eb591cac5ff87416c2537df6ba3cba3bae290c0fed"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:7ed9e526742851e8d5cc9e6cf41427dfc6068d4f5a3bb03659444b4cabf6bc26"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:8bdb58ff7ba23002a4c5808d608e4e6c687175724f54a5dade5fa8c67b604e4d"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_ppc64le.whl", hash = "sha256:6b3251890fff30ee142c44144871185dbe13b11bab478a88887a639655be1068"}, - {file = 
"charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_s390x.whl", hash = "sha256:b4a23f61ce87adf89be746c8a8974fe1c823c891d8f86eb218bb957c924bb143"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:efcb3f6676480691518c177e3b465bcddf57cea040302f9f4e6e191af91174d4"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-win32.whl", hash = "sha256:d965bba47ddeec8cd560687584e88cf699fd28f192ceb452d1d7ee807c5597b7"}, - {file = "charset_normalizer-3.3.2-cp312-cp312-win_amd64.whl", hash = "sha256:96b02a3dc4381e5494fad39be677abcb5e6634bf7b4fa83a6dd3112607547001"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:95f2a5796329323b8f0512e09dbb7a1860c46a39da62ecb2324f116fa8fdc85c"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c002b4ffc0be611f0d9da932eb0f704fe2602a9a949d1f738e4c34c75b0863d5"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a981a536974bbc7a512cf44ed14938cf01030a99e9b3a06dd59578882f06f985"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3287761bc4ee9e33561a7e058c72ac0938c4f57fe49a09eae428fd88aafe7bb6"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:42cb296636fcc8b0644486d15c12376cb9fa75443e00fb25de0b8602e64c1714"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0a55554a2fa0d408816b3b5cedf0045f4b8e1a6065aec45849de2d6f3f8e9786"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:c083af607d2515612056a31f0a8d9e0fcb5876b7bfc0abad3ecd275bc4ebc2d5"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:87d1351268731db79e0f8e745d92493ee2841c974128ef629dc518b937d9194c"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bd8f7df7d12c2db9fab40bdd87a7c09b1530128315d047a086fa3ae3435cb3a8"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:c180f51afb394e165eafe4ac2936a14bee3eb10debc9d9e4db8958fe36afe711"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:8c622a5fe39a48f78944a87d4fb8a53ee07344641b0562c540d840748571b811"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-win32.whl", hash = "sha256:db364eca23f876da6f9e16c9da0df51aa4f104a972735574842618b8c6d999d4"}, - {file = "charset_normalizer-3.3.2-cp37-cp37m-win_amd64.whl", hash = "sha256:86216b5cee4b06df986d214f664305142d9c76df9b6512be2738aa72a2048f99"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:6463effa3186ea09411d50efc7d85360b38d5f09b870c48e4600f63af490e56a"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6c4caeef8fa63d06bd437cd4bdcf3ffefe6738fb1b25951440d80dc7df8c03ac"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:37e55c8e51c236f95b033f6fb391d7d7970ba5fe7ff453dad675e88cf303377a"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fb69256e180cb6c8a894fee62b3afebae785babc1ee98b81cdf68bbca1987f33"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:ae5f4161f18c61806f411a13b0310bea87f987c7d2ecdbdaad0e94eb2e404238"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b2b0a0c0517616b6869869f8c581d4eb2dd83a4d79e0ebcb7d373ef9956aeb0a"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:45485e01ff4d3630ec0d9617310448a8702f70e9c01906b0d0118bdf9d124cf2"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eb00ed941194665c332bf8e078baf037d6c35d7c4f3102ea2d4f16ca94a26dc8"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2127566c664442652f024c837091890cb1942c30937add288223dc895793f898"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a50aebfa173e157099939b17f18600f72f84eed3049e743b68ad15bd69b6bf99"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4d0d1650369165a14e14e1e47b372cfcb31d6ab44e6e33cb2d4e57265290044d"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:923c0c831b7cfcb071580d3f46c4baf50f174be571576556269530f4bbd79d04"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:06a81e93cd441c56a9b65d8e1d043daeb97a3d0856d177d5c90ba85acb3db087"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-win32.whl", hash = "sha256:6ef1d82a3af9d3eecdba2321dc1b3c238245d890843e040e41e470ffa64c3e25"}, - {file = "charset_normalizer-3.3.2-cp38-cp38-win_amd64.whl", hash = "sha256:eb8821e09e916165e160797a6c17edda0679379a4be5c716c260e836e122f54b"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:c235ebd9baae02f1b77bcea61bce332cb4331dc3617d254df3323aa01ab47bd4"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:5b4c145409bef602a690e7cfad0a15a55c13320ff7a3ad7ca59c13bb8ba4d45d"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:68d1f8a9e9e37c1223b656399be5d6b448dea850bed7d0f87a8311f1ff3dabb0"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22afcb9f253dac0696b5a4be4a1c0f8762f8239e21b99680099abd9b2b1b2269"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e27ad930a842b4c5eb8ac0016b0a54f5aebbe679340c26101df33424142c143c"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f79682fbe303db92bc2b1136016a38a42e835d932bab5b3b1bfcfbf0640e519"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b261ccdec7821281dade748d088bb6e9b69e6d15b30652b74cbbac25e280b796"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:122c7fa62b130ed55f8f285bfd56d5f4b4a5b503609d181f9ad85e55c89f4185"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:d0eccceffcb53201b5bfebb52600a5fb483a20b61da9dbc885f8b103cbe7598c"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9f96df6923e21816da7e0ad3fd47dd8f94b2a5ce594e00677c0013018b813458"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = 
"sha256:7f04c839ed0b6b98b1a7501a002144b76c18fb1c1850c8b98d458ac269e26ed2"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:34d1c8da1e78d2e001f363791c98a272bb734000fcef47a491c1e3b0505657a8"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ff8fa367d09b717b2a17a052544193ad76cd49979c805768879cb63d9ca50561"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-win32.whl", hash = "sha256:aed38f6e4fb3f5d6bf81bfa990a07806be9d83cf7bacef998ab1a9bd660a581f"}, - {file = "charset_normalizer-3.3.2-cp39-cp39-win_amd64.whl", hash = "sha256:b01b88d45a6fcb69667cd6d2f7a9aeb4bf53760d7fc536bf679ec94fe9f3ff3d"}, - {file = "charset_normalizer-3.3.2-py3-none-any.whl", hash = "sha256:3e4d1f6587322d2788836a99c69062fbb091331ec940e02d12d179c1d53e25fc"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:4f9fc98dad6c2eaa32fc3af1417d95b5e3d08aff968df0cd320066def971f9a6"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:0de7b687289d3c1b3e8660d0741874abe7888100efe14bd0f9fd7141bcbda92b"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:5ed2e36c3e9b4f21dd9422f6893dec0abf2cca553af509b10cd630f878d3eb99"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:40d3ff7fc90b98c637bda91c89d51264a3dcf210cade3a2c6f838c7268d7a4ca"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1110e22af8ca26b90bd6364fe4c763329b0ebf1ee213ba32b68c73de5752323d"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:86f4e8cca779080f66ff4f191a685ced73d2f72d50216f7112185dc02b90b9b7"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f683ddc7eedd742e2889d2bfb96d69573fde1d92fcb811979cdb7165bb9c7d3"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:27623ba66c183eca01bf9ff833875b459cad267aeeb044477fedac35e19ba907"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:f606a1881d2663630ea5b8ce2efe2111740df4b687bd78b34a8131baa007f79b"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:0b309d1747110feb25d7ed6b01afdec269c647d382c857ef4663bbe6ad95a912"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:136815f06a3ae311fae551c3df1f998a1ebd01ddd424aa5603a4336997629e95"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:14215b71a762336254351b00ec720a8e85cada43b987da5a042e4ce3e82bd68e"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:79983512b108e4a164b9c8d34de3992f76d48cadc9554c9e60b43f308988aabe"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-win32.whl", hash = "sha256:c94057af19bc953643a33581844649a7fdab902624d2eb739738a30e2b3e60fc"}, + {file = "charset_normalizer-3.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:55f56e2ebd4e3bc50442fbc0888c9d8c94e4e06a933804e2af3e89e2f9c1c749"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0d99dd8ff461990f12d6e42c7347fd9ab2532fb70e9621ba520f9e8637161d7c"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:c57516e58fd17d03ebe67e181a4e4e2ccab1168f8c2976c6a334d4f819fe5944"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:6dba5d19c4dfab08e58d5b36304b3f92f3bd5d42c1a3fa37b5ba5cdf6dfcbcee"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf4475b82be41b07cc5e5ff94810e6a01f276e37c2d55571e3fe175e467a1a1c"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ce031db0408e487fd2775d745ce30a7cd2923667cf3b69d48d219f1d8f5ddeb6"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ff4e7cdfdb1ab5698e675ca622e72d58a6fa2a8aa58195de0c0061288e6e3ea"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3710a9751938947e6327ea9f3ea6332a09bf0ba0c09cae9cb1f250bd1f1549bc"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:82357d85de703176b5587dbe6ade8ff67f9f69a41c0733cf2425378b49954de5"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:47334db71978b23ebcf3c0f9f5ee98b8d65992b65c9c4f2d34c2eaf5bcaf0594"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:8ce7fd6767a1cc5a92a639b391891bf1c268b03ec7e021c7d6d902285259685c"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:f1a2f519ae173b5b6a2c9d5fa3116ce16e48b3462c8b96dfdded11055e3d6365"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:63bc5c4ae26e4bc6be6469943b8253c0fd4e4186c43ad46e713ea61a0ba49129"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:bcb4f8ea87d03bc51ad04add8ceaf9b0f085ac045ab4d74e73bbc2dc033f0236"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-win32.whl", hash = "sha256:9ae4ef0b3f6b41bad6366fb0ea4fc1d7ed051528e113a60fa2a65a9abb5b1d99"}, + {file = "charset_normalizer-3.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:cee4373f4d3ad28f1ab6290684d8e2ebdb9e7a1b74fdc39e4c211995f77bec27"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:0713f3adb9d03d49d365b70b84775d0a0d18e4ab08d12bc46baa6132ba78aaf6"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:de7376c29d95d6719048c194a9cf1a1b0393fbe8488a22008610b0361d834ecf"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:4a51b48f42d9358460b78725283f04bddaf44a9358197b889657deba38f329db"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b295729485b06c1a0683af02a9e42d2caa9db04a373dc38a6a58cdd1e8abddf1"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ee803480535c44e7f5ad00788526da7d85525cfefaf8acf8ab9a310000be4b03"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3d59d125ffbd6d552765510e3f31ed75ebac2c7470c7274195b9161a32350284"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8cda06946eac330cbe6598f77bb54e690b4ca93f593dee1568ad22b04f347c15"}, + {file = 
"charset_normalizer-3.4.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07afec21bbbbf8a5cc3651aa96b980afe2526e7f048fdfb7f1014d84acc8b6d8"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:6b40e8d38afe634559e398cc32b1472f376a4099c75fe6299ae607e404c033b2"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:b8dcd239c743aa2f9c22ce674a145e0a25cb1566c495928440a181ca1ccf6719"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:84450ba661fb96e9fd67629b93d2941c871ca86fc38d835d19d4225ff946a631"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:44aeb140295a2f0659e113b31cfe92c9061622cadbc9e2a2f7b8ef6b1e29ef4b"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:1db4e7fefefd0f548d73e2e2e041f9df5c59e178b4c72fbac4cc6f535cfb1565"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-win32.whl", hash = "sha256:5726cf76c982532c1863fb64d8c6dd0e4c90b6ece9feb06c9f202417a31f7dd7"}, + {file = "charset_normalizer-3.4.0-cp312-cp312-win_amd64.whl", hash = "sha256:b197e7094f232959f8f20541ead1d9862ac5ebea1d58e9849c1bf979255dfac9"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:dd4eda173a9fcccb5f2e2bd2a9f423d180194b1bf17cf59e3269899235b2a114"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:e9e3c4c9e1ed40ea53acf11e2a386383c3304212c965773704e4603d589343ed"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:92a7e36b000bf022ef3dbb9c46bfe2d52c047d5e3f3343f43204263c5addc250"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:54b6a92d009cbe2fb11054ba694bc9e284dad30a26757b1e372a1fdddaf21920"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1ffd9493de4c922f2a38c2bf62b831dcec90ac673ed1ca182fe11b4d8e9f2a64"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:35c404d74c2926d0287fbd63ed5d27eb911eb9e4a3bb2c6d294f3cfd4a9e0c23"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4796efc4faf6b53a18e3d46343535caed491776a22af773f366534056c4e1fbc"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e7fdd52961feb4c96507aa649550ec2a0d527c086d284749b2f582f2d40a2e0d"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:92db3c28b5b2a273346bebb24857fda45601aef6ae1c011c0a997106581e8a88"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:ab973df98fc99ab39080bfb0eb3a925181454d7c3ac8a1e695fddfae696d9e90"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:4b67fdab07fdd3c10bb21edab3cbfe8cf5696f453afce75d815d9d7223fbe88b"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:aa41e526a5d4a9dfcfbab0716c7e8a1b215abd3f3df5a45cf18a12721d31cb5d"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:ffc519621dce0c767e96b9c53f09c5d215578e10b02c285809f76509a3931482"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-win32.whl", hash = 
"sha256:f19c1585933c82098c2a520f8ec1227f20e339e33aca8fa6f956f6691b784e67"}, + {file = "charset_normalizer-3.4.0-cp313-cp313-win_amd64.whl", hash = "sha256:707b82d19e65c9bd28b81dde95249b07bf9f5b90ebe1ef17d9b57473f8a64b7b"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:dbe03226baf438ac4fda9e2d0715022fd579cb641c4cf639fa40d53b2fe6f3e2"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dd9a8bd8900e65504a305bf8ae6fa9fbc66de94178c420791d0293702fce2df7"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b8831399554b92b72af5932cdbbd4ddc55c55f631bb13ff8fe4e6536a06c5c51"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a14969b8691f7998e74663b77b4c36c0337cb1df552da83d5c9004a93afdb574"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dcaf7c1524c0542ee2fc82cc8ec337f7a9f7edee2532421ab200d2b920fc97cf"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:425c5f215d0eecee9a56cdb703203dda90423247421bf0d67125add85d0c4455"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:d5b054862739d276e09928de37c79ddeec42a6e1bfc55863be96a36ba22926f6"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:f3e73a4255342d4eb26ef6df01e3962e73aa29baa3124a8e824c5d3364a65748"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:2f6c34da58ea9c1a9515621f4d9ac379871a8f21168ba1b5e09d74250de5ad62"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:f09cb5a7bbe1ecae6e87901a2eb23e0256bb524a79ccc53eb0b7629fbe7677c4"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:0099d79bdfcf5c1f0c2c72f91516702ebf8b0b8ddd8905f97a8aecf49712c621"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-win32.whl", hash = "sha256:9c98230f5042f4945f957d006edccc2af1e03ed5e37ce7c373f00a5a4daa6149"}, + {file = "charset_normalizer-3.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:62f60aebecfc7f4b82e3f639a7d1433a20ec32824db2199a11ad4f5e146ef5ee"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:af73657b7a68211996527dbfeffbb0864e043d270580c5aef06dc4b659a4b578"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cab5d0b79d987c67f3b9e9c53f54a61360422a5a0bc075f43cab5621d530c3b6"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:9289fd5dddcf57bab41d044f1756550f9e7cf0c8e373b8cdf0ce8773dc4bd417"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6b493a043635eb376e50eedf7818f2f322eabbaa974e948bd8bdd29eb7ef2a51"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9fa2566ca27d67c86569e8c85297aaf413ffab85a8960500f12ea34ff98e4c41"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8e538f46104c815be19c975572d74afb53f29650ea2025bbfaef359d2de2f7f"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:6fd30dc99682dc2c603c2b315bded2799019cea829f8bf57dc6b61efde6611c8"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2006769bd1640bdf4d5641c69a3d63b71b81445473cac5ded39740a226fa88ab"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:dc15e99b2d8a656f8e666854404f1ba54765871104e50c8e9813af8a7db07f12"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:ab2e5bef076f5a235c3774b4f4028a680432cded7cad37bba0fd90d64b187d19"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:4ec9dd88a5b71abfc74e9df5ebe7921c35cbb3b641181a531ca65cdb5e8e4dea"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:43193c5cda5d612f247172016c4bb71251c784d7a4d9314677186a838ad34858"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:aa693779a8b50cd97570e5a0f343538a8dbd3e496fa5dcb87e29406ad0299654"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-win32.whl", hash = "sha256:7706f5850360ac01d80c89bcef1640683cc12ed87f42579dab6c5d3ed6888613"}, + {file = "charset_normalizer-3.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:c3e446d253bd88f6377260d07c895816ebf33ffffd56c1c792b13bff9c3e1ade"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:980b4f289d1d90ca5efcf07958d3eb38ed9c0b7676bf2831a54d4f66f9c27dfa"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f28f891ccd15c514a0981f3b9db9aa23d62fe1a99997512b0491d2ed323d229a"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a8aacce6e2e1edcb6ac625fb0f8c3a9570ccc7bfba1f63419b3769ccf6a00ed0"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bd7af3717683bea4c87acd8c0d3d5b44d56120b26fd3f8a692bdd2d5260c620a"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5ff2ed8194587faf56555927b3aa10e6fb69d931e33953943bc4f837dfee2242"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e91f541a85298cf35433bf66f3fab2a4a2cff05c127eeca4af174f6d497f0d4b"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:309a7de0a0ff3040acaebb35ec45d18db4b28232f21998851cfa709eeff49d62"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:285e96d9d53422efc0d7a17c60e59f37fbf3dfa942073f666db4ac71e8d726d0"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:5d447056e2ca60382d460a604b6302d8db69476fd2015c81e7c35417cfabe4cd"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:20587d20f557fe189b7947d8e7ec5afa110ccf72a3128d61a2a387c3313f46be"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:130272c698667a982a5d0e626851ceff662565379baf0ff2cc58067b81d4f11d"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:ab22fbd9765e6954bc0bcff24c25ff71dcbfdb185fcdaca49e81bac68fe724d3"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:7782afc9b6b42200f7362858f9e73b1f8316afb276d316336c0ec3bd73312742"}, + {file = 
"charset_normalizer-3.4.0-cp39-cp39-win32.whl", hash = "sha256:2de62e8801ddfff069cd5c504ce3bc9672b23266597d4e4f50eda28846c322f2"}, + {file = "charset_normalizer-3.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:95c3c157765b031331dd4db3c775e58deaee050a3042fcad72cbc4189d7c8dca"}, + {file = "charset_normalizer-3.4.0-py3-none-any.whl", hash = "sha256:fe9f97feb71aa9896b81973a7bbada8c49501dc73e58a10fcef6663af95e5079"}, + {file = "charset_normalizer-3.4.0.tar.gz", hash = "sha256:223217c3d4f82c3ac5e29032b3f1c2eb0fb591b72161f86d93f5719079dae93e"}, ] [[package]] @@ -587,6 +602,68 @@ traitlets = ">=4" [package.extras] test = ["pytest"] +[[package]] +name = "contourpy" +version = "1.1.0" +description = "Python library for calculating contours of 2D quadrilateral grids" +optional = false +python-versions = ">=3.8" +files = [ + {file = "contourpy-1.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:89f06eff3ce2f4b3eb24c1055a26981bffe4e7264acd86f15b97e40530b794bc"}, + {file = "contourpy-1.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:dffcc2ddec1782dd2f2ce1ef16f070861af4fb78c69862ce0aab801495dda6a3"}, + {file = "contourpy-1.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:25ae46595e22f93592d39a7eac3d638cda552c3e1160255258b695f7b58e5655"}, + {file = "contourpy-1.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:17cfaf5ec9862bc93af1ec1f302457371c34e688fbd381f4035a06cd47324f48"}, + {file = "contourpy-1.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:18a64814ae7bce73925131381603fff0116e2df25230dfc80d6d690aa6e20b37"}, + {file = "contourpy-1.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:90c81f22b4f572f8a2110b0b741bb64e5a6427e0a198b2cdc1fbaf85f352a3aa"}, + {file = "contourpy-1.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:53cc3a40635abedbec7f1bde60f8c189c49e84ac180c665f2cd7c162cc454baa"}, + {file = "contourpy-1.1.0-cp310-cp310-win32.whl", hash = "sha256:9b2dd2ca3ac561aceef4c7c13ba654aaa404cf885b187427760d7f7d4c57cff8"}, + {file = "contourpy-1.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:1f795597073b09d631782e7245016a4323cf1cf0b4e06eef7ea6627e06a37ff2"}, + {file = "contourpy-1.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0b7b04ed0961647691cfe5d82115dd072af7ce8846d31a5fac6c142dcce8b882"}, + {file = "contourpy-1.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:27bc79200c742f9746d7dd51a734ee326a292d77e7d94c8af6e08d1e6c15d545"}, + {file = "contourpy-1.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:052cc634bf903c604ef1a00a5aa093c54f81a2612faedaa43295809ffdde885e"}, + {file = "contourpy-1.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9382a1c0bc46230fb881c36229bfa23d8c303b889b788b939365578d762b5c18"}, + {file = "contourpy-1.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e5cec36c5090e75a9ac9dbd0ff4a8cf7cecd60f1b6dc23a374c7d980a1cd710e"}, + {file = "contourpy-1.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1f0cbd657e9bde94cd0e33aa7df94fb73c1ab7799378d3b3f902eb8eb2e04a3a"}, + {file = "contourpy-1.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:181cbace49874f4358e2929aaf7ba84006acb76694102e88dd15af861996c16e"}, + {file = "contourpy-1.1.0-cp311-cp311-win32.whl", hash = "sha256:edb989d31065b1acef3828a3688f88b2abb799a7db891c9e282df5ec7e46221b"}, + {file = "contourpy-1.1.0-cp311-cp311-win_amd64.whl", hash = 
"sha256:fb3b7d9e6243bfa1efb93ccfe64ec610d85cfe5aec2c25f97fbbd2e58b531256"}, + {file = "contourpy-1.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:bcb41692aa09aeb19c7c213411854402f29f6613845ad2453d30bf421fe68fed"}, + {file = "contourpy-1.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:5d123a5bc63cd34c27ff9c7ac1cd978909e9c71da12e05be0231c608048bb2ae"}, + {file = "contourpy-1.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:62013a2cf68abc80dadfd2307299bfa8f5aa0dcaec5b2954caeb5fa094171103"}, + {file = "contourpy-1.1.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0b6616375d7de55797d7a66ee7d087efe27f03d336c27cf1f32c02b8c1a5ac70"}, + {file = "contourpy-1.1.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:317267d915490d1e84577924bd61ba71bf8681a30e0d6c545f577363157e5e94"}, + {file = "contourpy-1.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d551f3a442655f3dcc1285723f9acd646ca5858834efeab4598d706206b09c9f"}, + {file = "contourpy-1.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:e7a117ce7df5a938fe035cad481b0189049e8d92433b4b33aa7fc609344aafa1"}, + {file = "contourpy-1.1.0-cp38-cp38-win32.whl", hash = "sha256:108dfb5b3e731046a96c60bdc46a1a0ebee0760418951abecbe0fc07b5b93b27"}, + {file = "contourpy-1.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:d4f26b25b4f86087e7d75e63212756c38546e70f2a92d2be44f80114826e1cd4"}, + {file = "contourpy-1.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bc00bb4225d57bff7ebb634646c0ee2a1298402ec10a5fe7af79df9a51c1bfd9"}, + {file = "contourpy-1.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:189ceb1525eb0655ab8487a9a9c41f42a73ba52d6789754788d1883fb06b2d8a"}, + {file = "contourpy-1.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9f2931ed4741f98f74b410b16e5213f71dcccee67518970c42f64153ea9313b9"}, + {file = "contourpy-1.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:30f511c05fab7f12e0b1b7730ebdc2ec8deedcfb505bc27eb570ff47c51a8f15"}, + {file = "contourpy-1.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:143dde50520a9f90e4a2703f367cf8ec96a73042b72e68fcd184e1279962eb6f"}, + {file = "contourpy-1.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e94bef2580e25b5fdb183bf98a2faa2adc5b638736b2c0a4da98691da641316a"}, + {file = "contourpy-1.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:ed614aea8462735e7d70141374bd7650afd1c3f3cb0c2dbbcbe44e14331bf002"}, + {file = "contourpy-1.1.0-cp39-cp39-win32.whl", hash = "sha256:71551f9520f008b2950bef5f16b0e3587506ef4f23c734b71ffb7b89f8721999"}, + {file = "contourpy-1.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:438ba416d02f82b692e371858143970ed2eb6337d9cdbbede0d8ad9f3d7dd17d"}, + {file = "contourpy-1.1.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:a698c6a7a432789e587168573a864a7ea374c6be8d4f31f9d87c001d5a843493"}, + {file = "contourpy-1.1.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:397b0ac8a12880412da3551a8cb5a187d3298a72802b45a3bd1805e204ad8439"}, + {file = "contourpy-1.1.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:a67259c2b493b00e5a4d0f7bfae51fb4b3371395e47d079a4446e9b0f4d70e76"}, + {file = "contourpy-1.1.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:2b836d22bd2c7bb2700348e4521b25e077255ebb6ab68e351ab5aa91ca27e027"}, + {file = 
"contourpy-1.1.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:084eaa568400cfaf7179b847ac871582199b1b44d5699198e9602ecbbb5f6104"}, + {file = "contourpy-1.1.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:911ff4fd53e26b019f898f32db0d4956c9d227d51338fb3b03ec72ff0084ee5f"}, + {file = "contourpy-1.1.0.tar.gz", hash = "sha256:e53046c3863828d21d531cc3b53786e6580eb1ba02477e8681009b6aa0870b21"}, +] + +[package.dependencies] +numpy = ">=1.16" + +[package.extras] +bokeh = ["bokeh", "selenium"] +docs = ["furo", "sphinx-copybutton"] +mypy = ["contourpy[bokeh,docs]", "docutils-stubs", "mypy (==1.2.0)", "types-Pillow"] +test = ["Pillow", "contourpy[test-no-images]", "matplotlib"] +test-no-images = ["pytest", "pytest-cov", "wurlitzer"] + [[package]] name = "contourpy" version = "1.1.1" @@ -649,10 +726,7 @@ files = [ ] [package.dependencies] -numpy = [ - {version = ">=1.16,<2.0", markers = "python_version <= \"3.11\""}, - {version = ">=1.26.0rc1,<2.0", markers = "python_version >= \"3.12\""}, -] +numpy = {version = ">=1.16,<2.0", markers = "python_version <= \"3.11\""} [package.extras] bokeh = ["bokeh", "selenium"] @@ -661,6 +735,90 @@ mypy = ["contourpy[bokeh,docs]", "docutils-stubs", "mypy (==1.4.1)", "types-Pill test = ["Pillow", "contourpy[test-no-images]", "matplotlib"] test-no-images = ["pytest", "pytest-cov", "wurlitzer"] +[[package]] +name = "contourpy" +version = "1.3.0" +description = "Python library for calculating contours of 2D quadrilateral grids" +optional = false +python-versions = ">=3.9" +files = [ + {file = "contourpy-1.3.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:880ea32e5c774634f9fcd46504bf9f080a41ad855f4fef54f5380f5133d343c7"}, + {file = "contourpy-1.3.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:76c905ef940a4474a6289c71d53122a4f77766eef23c03cd57016ce19d0f7b42"}, + {file = "contourpy-1.3.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:92f8557cbb07415a4d6fa191f20fd9d2d9eb9c0b61d1b2f52a8926e43c6e9af7"}, + {file = "contourpy-1.3.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:36f965570cff02b874773c49bfe85562b47030805d7d8360748f3eca570f4cab"}, + {file = "contourpy-1.3.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cacd81e2d4b6f89c9f8a5b69b86490152ff39afc58a95af002a398273e5ce589"}, + {file = "contourpy-1.3.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:69375194457ad0fad3a839b9e29aa0b0ed53bb54db1bfb6c3ae43d111c31ce41"}, + {file = "contourpy-1.3.0-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:7a52040312b1a858b5e31ef28c2e865376a386c60c0e248370bbea2d3f3b760d"}, + {file = "contourpy-1.3.0-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:3faeb2998e4fcb256542e8a926d08da08977f7f5e62cf733f3c211c2a5586223"}, + {file = "contourpy-1.3.0-cp310-cp310-win32.whl", hash = "sha256:36e0cff201bcb17a0a8ecc7f454fe078437fa6bda730e695a92f2d9932bd507f"}, + {file = "contourpy-1.3.0-cp310-cp310-win_amd64.whl", hash = "sha256:87ddffef1dbe5e669b5c2440b643d3fdd8622a348fe1983fad7a0f0ccb1cd67b"}, + {file = "contourpy-1.3.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:0fa4c02abe6c446ba70d96ece336e621efa4aecae43eaa9b030ae5fb92b309ad"}, + {file = "contourpy-1.3.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:834e0cfe17ba12f79963861e0f908556b2cedd52e1f75e6578801febcc6a9f49"}, + {file = "contourpy-1.3.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = 
"sha256:dbc4c3217eee163fa3984fd1567632b48d6dfd29216da3ded3d7b844a8014a66"}, + {file = "contourpy-1.3.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4865cd1d419e0c7a7bf6de1777b185eebdc51470800a9f42b9e9decf17762081"}, + {file = "contourpy-1.3.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:303c252947ab4b14c08afeb52375b26781ccd6a5ccd81abcdfc1fafd14cf93c1"}, + {file = "contourpy-1.3.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:637f674226be46f6ba372fd29d9523dd977a291f66ab2a74fbeb5530bb3f445d"}, + {file = "contourpy-1.3.0-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:76a896b2f195b57db25d6b44e7e03f221d32fe318d03ede41f8b4d9ba1bff53c"}, + {file = "contourpy-1.3.0-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:e1fd23e9d01591bab45546c089ae89d926917a66dceb3abcf01f6105d927e2cb"}, + {file = "contourpy-1.3.0-cp311-cp311-win32.whl", hash = "sha256:d402880b84df3bec6eab53cd0cf802cae6a2ef9537e70cf75e91618a3801c20c"}, + {file = "contourpy-1.3.0-cp311-cp311-win_amd64.whl", hash = "sha256:6cb6cc968059db9c62cb35fbf70248f40994dfcd7aa10444bbf8b3faeb7c2d67"}, + {file = "contourpy-1.3.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:570ef7cf892f0afbe5b2ee410c507ce12e15a5fa91017a0009f79f7d93a1268f"}, + {file = "contourpy-1.3.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:da84c537cb8b97d153e9fb208c221c45605f73147bd4cadd23bdae915042aad6"}, + {file = "contourpy-1.3.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0be4d8425bfa755e0fd76ee1e019636ccc7c29f77a7c86b4328a9eb6a26d0639"}, + {file = "contourpy-1.3.0-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9c0da700bf58f6e0b65312d0a5e695179a71d0163957fa381bb3c1f72972537c"}, + {file = "contourpy-1.3.0-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:eb8b141bb00fa977d9122636b16aa67d37fd40a3d8b52dd837e536d64b9a4d06"}, + {file = "contourpy-1.3.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3634b5385c6716c258d0419c46d05c8aa7dc8cb70326c9a4fb66b69ad2b52e09"}, + {file = "contourpy-1.3.0-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:0dce35502151b6bd35027ac39ba6e5a44be13a68f55735c3612c568cac3805fd"}, + {file = "contourpy-1.3.0-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:aea348f053c645100612b333adc5983d87be69acdc6d77d3169c090d3b01dc35"}, + {file = "contourpy-1.3.0-cp312-cp312-win32.whl", hash = "sha256:90f73a5116ad1ba7174341ef3ea5c3150ddf20b024b98fb0c3b29034752c8aeb"}, + {file = "contourpy-1.3.0-cp312-cp312-win_amd64.whl", hash = "sha256:b11b39aea6be6764f84360fce6c82211a9db32a7c7de8fa6dd5397cf1d079c3b"}, + {file = "contourpy-1.3.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:3e1c7fa44aaae40a2247e2e8e0627f4bea3dd257014764aa644f319a5f8600e3"}, + {file = "contourpy-1.3.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:364174c2a76057feef647c802652f00953b575723062560498dc7930fc9b1cb7"}, + {file = "contourpy-1.3.0-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:32b238b3b3b649e09ce9aaf51f0c261d38644bdfa35cbaf7b263457850957a84"}, + {file = "contourpy-1.3.0-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:d51fca85f9f7ad0b65b4b9fe800406d0d77017d7270d31ec3fb1cc07358fdea0"}, + {file = "contourpy-1.3.0-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:732896af21716b29ab3e988d4ce14bc5133733b85956316fb0c56355f398099b"}, + {file = 
"contourpy-1.3.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d73f659398a0904e125280836ae6f88ba9b178b2fed6884f3b1f95b989d2c8da"}, + {file = "contourpy-1.3.0-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:c6c7c2408b7048082932cf4e641fa3b8ca848259212f51c8c59c45aa7ac18f14"}, + {file = "contourpy-1.3.0-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:f317576606de89da6b7e0861cf6061f6146ead3528acabff9236458a6ba467f8"}, + {file = "contourpy-1.3.0-cp313-cp313-win32.whl", hash = "sha256:31cd3a85dbdf1fc002280c65caa7e2b5f65e4a973fcdf70dd2fdcb9868069294"}, + {file = "contourpy-1.3.0-cp313-cp313-win_amd64.whl", hash = "sha256:4553c421929ec95fb07b3aaca0fae668b2eb5a5203d1217ca7c34c063c53d087"}, + {file = "contourpy-1.3.0-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:345af746d7766821d05d72cb8f3845dfd08dd137101a2cb9b24de277d716def8"}, + {file = "contourpy-1.3.0-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:3bb3808858a9dc68f6f03d319acd5f1b8a337e6cdda197f02f4b8ff67ad2057b"}, + {file = "contourpy-1.3.0-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:420d39daa61aab1221567b42eecb01112908b2cab7f1b4106a52caaec8d36973"}, + {file = "contourpy-1.3.0-cp313-cp313t-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4d63ee447261e963af02642ffcb864e5a2ee4cbfd78080657a9880b8b1868e18"}, + {file = "contourpy-1.3.0-cp313-cp313t-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:167d6c890815e1dac9536dca00828b445d5d0df4d6a8c6adb4a7ec3166812fa8"}, + {file = "contourpy-1.3.0-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:710a26b3dc80c0e4febf04555de66f5fd17e9cf7170a7b08000601a10570bda6"}, + {file = "contourpy-1.3.0-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:75ee7cb1a14c617f34a51d11fa7524173e56551646828353c4af859c56b766e2"}, + {file = "contourpy-1.3.0-cp313-cp313t-musllinux_1_2_x86_64.whl", hash = "sha256:33c92cdae89ec5135d036e7218e69b0bb2851206077251f04a6c4e0e21f03927"}, + {file = "contourpy-1.3.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a11077e395f67ffc2c44ec2418cfebed032cd6da3022a94fc227b6faf8e2acb8"}, + {file = "contourpy-1.3.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e8134301d7e204c88ed7ab50028ba06c683000040ede1d617298611f9dc6240c"}, + {file = "contourpy-1.3.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e12968fdfd5bb45ffdf6192a590bd8ddd3ba9e58360b29683c6bb71a7b41edca"}, + {file = "contourpy-1.3.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fd2a0fc506eccaaa7595b7e1418951f213cf8255be2600f1ea1b61e46a60c55f"}, + {file = "contourpy-1.3.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4cfb5c62ce023dfc410d6059c936dcf96442ba40814aefbfa575425a3a7f19dc"}, + {file = "contourpy-1.3.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:68a32389b06b82c2fdd68276148d7b9275b5f5cf13e5417e4252f6d1a34f72a2"}, + {file = "contourpy-1.3.0-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:94e848a6b83da10898cbf1311a815f770acc9b6a3f2d646f330d57eb4e87592e"}, + {file = "contourpy-1.3.0-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:d78ab28a03c854a873787a0a42254a0ccb3cb133c672f645c9f9c8f3ae9d0800"}, + {file = "contourpy-1.3.0-cp39-cp39-win32.whl", hash = "sha256:81cb5ed4952aae6014bc9d0421dec7c5835c9c8c31cdf51910b708f548cf58e5"}, + {file = "contourpy-1.3.0-cp39-cp39-win_amd64.whl", hash = "sha256:14e262f67bd7e6eb6880bc564dcda30b15e351a594657e55b7eec94b6ef72843"}, + 
{file = "contourpy-1.3.0-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:fe41b41505a5a33aeaed2a613dccaeaa74e0e3ead6dd6fd3a118fb471644fd6c"}, + {file = "contourpy-1.3.0-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eca7e17a65f72a5133bdbec9ecf22401c62bcf4821361ef7811faee695799779"}, + {file = "contourpy-1.3.0-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:1ec4dc6bf570f5b22ed0d7efba0dfa9c5b9e0431aeea7581aa217542d9e809a4"}, + {file = "contourpy-1.3.0-pp39-pypy39_pp73-macosx_10_15_x86_64.whl", hash = "sha256:00ccd0dbaad6d804ab259820fa7cb0b8036bda0686ef844d24125d8287178ce0"}, + {file = "contourpy-1.3.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8ca947601224119117f7c19c9cdf6b3ab54c5726ef1d906aa4a69dfb6dd58102"}, + {file = "contourpy-1.3.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:c6ec93afeb848a0845a18989da3beca3eec2c0f852322efe21af1931147d12cb"}, + {file = "contourpy-1.3.0.tar.gz", hash = "sha256:7ffa0db17717a8ffb127efd0c95a4362d996b892c2904db72428d5b52e1938a4"}, +] + +[package.dependencies] +numpy = ">=1.23" + +[package.extras] +bokeh = ["bokeh", "selenium"] +docs = ["furo", "sphinx (>=7.2)", "sphinx-copybutton"] +mypy = ["contourpy[bokeh,docs]", "docutils-stubs", "mypy (==1.11.1)", "types-Pillow"] +test = ["Pillow", "contourpy[test-no-images]", "matplotlib"] +test-no-images = ["pytest", "pytest-cov", "pytest-rerunfailures", "pytest-xdist", "wurlitzer"] + [[package]] name = "cryptography" version = "43.0.1" @@ -746,33 +904,37 @@ tests = ["pytest", "pytest-cov", "pytest-xdist"] [[package]] name = "debugpy" -version = "1.8.6" +version = "1.8.7" description = "An implementation of the Debug Adapter Protocol for Python" optional = false python-versions = ">=3.8" files = [ - {file = "debugpy-1.8.6-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:30f467c5345d9dfdcc0afdb10e018e47f092e383447500f125b4e013236bf14b"}, - {file = "debugpy-1.8.6-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d73d8c52614432f4215d0fe79a7e595d0dd162b5c15233762565be2f014803b"}, - {file = "debugpy-1.8.6-cp310-cp310-win32.whl", hash = "sha256:e3e182cd98eac20ee23a00653503315085b29ab44ed66269482349d307b08df9"}, - {file = "debugpy-1.8.6-cp310-cp310-win_amd64.whl", hash = "sha256:e3a82da039cfe717b6fb1886cbbe5c4a3f15d7df4765af857f4307585121c2dd"}, - {file = "debugpy-1.8.6-cp311-cp311-macosx_14_0_universal2.whl", hash = "sha256:67479a94cf5fd2c2d88f9615e087fcb4fec169ec780464a3f2ba4a9a2bb79955"}, - {file = "debugpy-1.8.6-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9fb8653f6cbf1dd0a305ac1aa66ec246002145074ea57933978346ea5afdf70b"}, - {file = "debugpy-1.8.6-cp311-cp311-win32.whl", hash = "sha256:cdaf0b9691879da2d13fa39b61c01887c34558d1ff6e5c30e2eb698f5384cd43"}, - {file = "debugpy-1.8.6-cp311-cp311-win_amd64.whl", hash = "sha256:43996632bee7435583952155c06881074b9a742a86cee74e701d87ca532fe833"}, - {file = "debugpy-1.8.6-cp312-cp312-macosx_14_0_universal2.whl", hash = "sha256:db891b141fc6ee4b5fc6d1cc8035ec329cabc64bdd2ae672b4550c87d4ecb128"}, - {file = "debugpy-1.8.6-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:567419081ff67da766c898ccf21e79f1adad0e321381b0dfc7a9c8f7a9347972"}, - {file = "debugpy-1.8.6-cp312-cp312-win32.whl", hash = "sha256:c9834dfd701a1f6bf0f7f0b8b1573970ae99ebbeee68314116e0ccc5c78eea3c"}, - {file = 
"debugpy-1.8.6-cp312-cp312-win_amd64.whl", hash = "sha256:e4ce0570aa4aca87137890d23b86faeadf184924ad892d20c54237bcaab75d8f"}, - {file = "debugpy-1.8.6-cp38-cp38-macosx_14_0_x86_64.whl", hash = "sha256:df5dc9eb4ca050273b8e374a4cd967c43be1327eeb42bfe2f58b3cdfe7c68dcb"}, - {file = "debugpy-1.8.6-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0a85707c6a84b0c5b3db92a2df685b5230dd8fb8c108298ba4f11dba157a615a"}, - {file = "debugpy-1.8.6-cp38-cp38-win32.whl", hash = "sha256:538c6cdcdcdad310bbefd96d7850be1cd46e703079cc9e67d42a9ca776cdc8a8"}, - {file = "debugpy-1.8.6-cp38-cp38-win_amd64.whl", hash = "sha256:22140bc02c66cda6053b6eb56dfe01bbe22a4447846581ba1dd6df2c9f97982d"}, - {file = "debugpy-1.8.6-cp39-cp39-macosx_14_0_x86_64.whl", hash = "sha256:c1cef65cffbc96e7b392d9178dbfd524ab0750da6c0023c027ddcac968fd1caa"}, - {file = "debugpy-1.8.6-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f1e60bd06bb3cc5c0e957df748d1fab501e01416c43a7bdc756d2a992ea1b881"}, - {file = "debugpy-1.8.6-cp39-cp39-win32.whl", hash = "sha256:f7158252803d0752ed5398d291dee4c553bb12d14547c0e1843ab74ee9c31123"}, - {file = "debugpy-1.8.6-cp39-cp39-win_amd64.whl", hash = "sha256:3358aa619a073b620cd0d51d8a6176590af24abcc3fe2e479929a154bf591b51"}, - {file = "debugpy-1.8.6-py2.py3-none-any.whl", hash = "sha256:b48892df4d810eff21d3ef37274f4c60d32cdcafc462ad5647239036b0f0649f"}, - {file = "debugpy-1.8.6.zip", hash = "sha256:c931a9371a86784cee25dec8d65bc2dc7a21f3f1552e3833d9ef8f919d22280a"}, + {file = "debugpy-1.8.7-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:95fe04a573b8b22896c404365e03f4eda0ce0ba135b7667a1e57bd079793b96b"}, + {file = "debugpy-1.8.7-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:628a11f4b295ffb4141d8242a9bb52b77ad4a63a2ad19217a93be0f77f2c28c9"}, + {file = "debugpy-1.8.7-cp310-cp310-win32.whl", hash = "sha256:85ce9c1d0eebf622f86cc68618ad64bf66c4fc3197d88f74bb695a416837dd55"}, + {file = "debugpy-1.8.7-cp310-cp310-win_amd64.whl", hash = "sha256:29e1571c276d643757ea126d014abda081eb5ea4c851628b33de0c2b6245b037"}, + {file = "debugpy-1.8.7-cp311-cp311-macosx_14_0_universal2.whl", hash = "sha256:caf528ff9e7308b74a1749c183d6808ffbedbb9fb6af78b033c28974d9b8831f"}, + {file = "debugpy-1.8.7-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cba1d078cf2e1e0b8402e6bda528bf8fda7ccd158c3dba6c012b7897747c41a0"}, + {file = "debugpy-1.8.7-cp311-cp311-win32.whl", hash = "sha256:171899588bcd412151e593bd40d9907133a7622cd6ecdbdb75f89d1551df13c2"}, + {file = "debugpy-1.8.7-cp311-cp311-win_amd64.whl", hash = "sha256:6e1c4ffb0c79f66e89dfd97944f335880f0d50ad29525dc792785384923e2211"}, + {file = "debugpy-1.8.7-cp312-cp312-macosx_14_0_universal2.whl", hash = "sha256:4d27d842311353ede0ad572600c62e4bcd74f458ee01ab0dd3a1a4457e7e3706"}, + {file = "debugpy-1.8.7-cp312-cp312-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:703c1fd62ae0356e194f3e7b7a92acd931f71fe81c4b3be2c17a7b8a4b546ec2"}, + {file = "debugpy-1.8.7-cp312-cp312-win32.whl", hash = "sha256:2f729228430ef191c1e4df72a75ac94e9bf77413ce5f3f900018712c9da0aaca"}, + {file = "debugpy-1.8.7-cp312-cp312-win_amd64.whl", hash = "sha256:45c30aaefb3e1975e8a0258f5bbd26cd40cde9bfe71e9e5a7ac82e79bad64e39"}, + {file = "debugpy-1.8.7-cp313-cp313-macosx_14_0_universal2.whl", hash = 
"sha256:d050a1ec7e925f514f0f6594a1e522580317da31fbda1af71d1530d6ea1f2b40"}, + {file = "debugpy-1.8.7-cp313-cp313-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f2f4349a28e3228a42958f8ddaa6333d6f8282d5edaea456070e48609c5983b7"}, + {file = "debugpy-1.8.7-cp313-cp313-win32.whl", hash = "sha256:11ad72eb9ddb436afb8337891a986302e14944f0f755fd94e90d0d71e9100bba"}, + {file = "debugpy-1.8.7-cp313-cp313-win_amd64.whl", hash = "sha256:2efb84d6789352d7950b03d7f866e6d180284bc02c7e12cb37b489b7083d81aa"}, + {file = "debugpy-1.8.7-cp38-cp38-macosx_14_0_x86_64.whl", hash = "sha256:4b908291a1d051ef3331484de8e959ef3e66f12b5e610c203b5b75d2725613a7"}, + {file = "debugpy-1.8.7-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:da8df5b89a41f1fd31503b179d0a84a5fdb752dddd5b5388dbd1ae23cda31ce9"}, + {file = "debugpy-1.8.7-cp38-cp38-win32.whl", hash = "sha256:b12515e04720e9e5c2216cc7086d0edadf25d7ab7e3564ec8b4521cf111b4f8c"}, + {file = "debugpy-1.8.7-cp38-cp38-win_amd64.whl", hash = "sha256:93176e7672551cb5281577cdb62c63aadc87ec036f0c6a486f0ded337c504596"}, + {file = "debugpy-1.8.7-cp39-cp39-macosx_14_0_x86_64.whl", hash = "sha256:90d93e4f2db442f8222dec5ec55ccfc8005821028982f1968ebf551d32b28907"}, + {file = "debugpy-1.8.7-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b6db2a370e2700557a976eaadb16243ec9c91bd46f1b3bb15376d7aaa7632c81"}, + {file = "debugpy-1.8.7-cp39-cp39-win32.whl", hash = "sha256:a6cf2510740e0c0b4a40330640e4b454f928c7b99b0c9dbf48b11efba08a8cda"}, + {file = "debugpy-1.8.7-cp39-cp39-win_amd64.whl", hash = "sha256:6a9d9d6d31846d8e34f52987ee0f1a904c7baa4912bf4843ab39dadf9b8f3e0d"}, + {file = "debugpy-1.8.7-py2.py3-none-any.whl", hash = "sha256:57b00de1c8d2c84a61b90880f7e5b6deaf4c312ecbde3a0e8912f2a56c4ac9ae"}, + {file = "debugpy-1.8.7.zip", hash = "sha256:18b8f731ed3e2e1df8e9cdaa23fb1fc9c24e570cd0081625308ec51c82efe42e"}, ] [[package]] @@ -799,13 +961,13 @@ files = [ [[package]] name = "distlib" -version = "0.3.8" +version = "0.3.9" description = "Distribution utilities" optional = false python-versions = "*" files = [ - {file = "distlib-0.3.8-py2.py3-none-any.whl", hash = "sha256:034db59a0b96f8ca18035f36290806a9a6e6bd9d1ff91e45a7f172eb17e51784"}, - {file = "distlib-0.3.8.tar.gz", hash = "sha256:1530ea13e350031b6312d8580ddb6b27a104275a31106523b8f123787f494f64"}, + {file = "distlib-0.3.9-py2.py3-none-any.whl", hash = "sha256:47f8c22fd27c27e25a65601af709b38e4f0a45ea4fc2e710f65755fa8caaaf87"}, + {file = "distlib-0.3.9.tar.gz", hash = "sha256:a60f20dea646b8a33f3e7772f74dc0b2d0772d2837ee1342a00645c81edf9403"}, ] [[package]] @@ -1021,13 +1183,13 @@ test = ["coverage[toml]", "ddt (>=1.1.1,!=1.4.3)", "mock", "mypy", "pre-commit", [[package]] name = "griffe" -version = "1.3.1" +version = "1.4.0" description = "Signatures for entire Python programs. Extract the structure, the frame, the skeleton of your project, to generate API documentation or find breaking changes in your API." 
optional = false python-versions = ">=3.8" files = [ - {file = "griffe-1.3.1-py3-none-any.whl", hash = "sha256:940aeb630bc3054b4369567f150b6365be6f11eef46b0ed8623aea96e6d17b19"}, - {file = "griffe-1.3.1.tar.gz", hash = "sha256:3f86a716b631a4c0f96a43cb75d05d3c85975003c20540426c0eba3b0581c56a"}, + {file = "griffe-1.4.0-py3-none-any.whl", hash = "sha256:e589de8b8c137e99a46ec45f9598fc0ac5b6868ce824b24db09c02d117b89bc5"}, + {file = "griffe-1.4.0.tar.gz", hash = "sha256:8fccc585896d13f1221035d32c50dec65830c87d23f9adb9b1e6f3d63574f7f5"}, ] [package.dependencies] @@ -1047,13 +1209,13 @@ files = [ [[package]] name = "httpcore" -version = "1.0.5" +version = "1.0.6" description = "A minimal low-level HTTP client." optional = false python-versions = ">=3.8" files = [ - {file = "httpcore-1.0.5-py3-none-any.whl", hash = "sha256:421f18bac248b25d310f3cacd198d55b8e6125c107797b609ff9b7a6ba7991b5"}, - {file = "httpcore-1.0.5.tar.gz", hash = "sha256:34a38e2f9291467ee3b44e89dd52615370e152954ba21721378a87b2960f7a61"}, + {file = "httpcore-1.0.6-py3-none-any.whl", hash = "sha256:27b59625743b85577a8c0e10e55b50b5368a4f2cfe8cc7bcfa9cf00829c2682f"}, + {file = "httpcore-1.0.6.tar.gz", hash = "sha256:73f6dbd6eb8c21bbf7ef8efad555481853f5f6acdeaff1edb0694289269ee17f"}, ] [package.dependencies] @@ -1064,7 +1226,7 @@ h11 = ">=0.13,<0.15" asyncio = ["anyio (>=4.0,<5.0)"] http2 = ["h2 (>=3,<5)"] socks = ["socksio (==1.*)"] -trio = ["trio (>=0.22.0,<0.26.0)"] +trio = ["trio (>=0.22.0,<1.0)"] [[package]] name = "httpx" @@ -1320,21 +1482,25 @@ test = ["portend", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-c [[package]] name = "jaraco-functools" -version = "4.0.2" +version = "4.1.0" description = "Functools like those found in stdlib" optional = false python-versions = ">=3.8" files = [ - {file = "jaraco.functools-4.0.2-py3-none-any.whl", hash = "sha256:c9d16a3ed4ccb5a889ad8e0b7a343401ee5b2a71cee6ed192d3f68bc351e94e3"}, - {file = "jaraco_functools-4.0.2.tar.gz", hash = "sha256:3460c74cd0d32bf82b9576bbb3527c4364d5b27a21f5158a62aed6c4b42e23f5"}, + {file = "jaraco.functools-4.1.0-py3-none-any.whl", hash = "sha256:ad159f13428bc4acbf5541ad6dec511f91573b90fba04df61dafa2a1231cf649"}, + {file = "jaraco_functools-4.1.0.tar.gz", hash = "sha256:70f7e0e2ae076498e212562325e805204fc092d7b4c17e0e86c959e249701a9d"}, ] [package.dependencies] more-itertools = "*" [package.extras] +check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)"] +cover = ["pytest-cov"] doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] -test = ["jaraco.classes", "pytest (>=6,!=8.1.*)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy", "pytest-ruff (>=0.2.1)"] +enabler = ["pytest-enabler (>=2.2)"] +test = ["jaraco.classes", "pytest (>=6,!=8.1.*)"] +type = ["pytest-mypy"] [[package]] name = "jedi" @@ -1972,58 +2138,52 @@ files = [ [[package]] name = "matplotlib" -version = "3.7.5" +version = "3.7.2" description = "Python plotting package" optional = false python-versions = ">=3.8" files = [ - {file = "matplotlib-3.7.5-cp310-cp310-macosx_10_12_universal2.whl", hash = "sha256:4a87b69cb1cb20943010f63feb0b2901c17a3b435f75349fd9865713bfa63925"}, - {file = "matplotlib-3.7.5-cp310-cp310-macosx_10_12_x86_64.whl", hash = "sha256:d3ce45010fefb028359accebb852ca0c21bd77ec0f281952831d235228f15810"}, - {file = "matplotlib-3.7.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fbea1e762b28400393d71be1a02144aa16692a3c4c676ba0178ce83fc2928fdd"}, - {file = 
"matplotlib-3.7.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec0e1adc0ad70ba8227e957551e25a9d2995e319c29f94a97575bb90fa1d4469"}, - {file = "matplotlib-3.7.5-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6738c89a635ced486c8a20e20111d33f6398a9cbebce1ced59c211e12cd61455"}, - {file = "matplotlib-3.7.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1210b7919b4ed94b5573870f316bca26de3e3b07ffdb563e79327dc0e6bba515"}, - {file = "matplotlib-3.7.5-cp310-cp310-win32.whl", hash = "sha256:068ebcc59c072781d9dcdb82f0d3f1458271c2de7ca9c78f5bd672141091e9e1"}, - {file = "matplotlib-3.7.5-cp310-cp310-win_amd64.whl", hash = "sha256:f098ffbaab9df1e3ef04e5a5586a1e6b1791380698e84938d8640961c79b1fc0"}, - {file = "matplotlib-3.7.5-cp311-cp311-macosx_10_12_universal2.whl", hash = "sha256:f65342c147572673f02a4abec2d5a23ad9c3898167df9b47c149f32ce61ca078"}, - {file = "matplotlib-3.7.5-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:4ddf7fc0e0dc553891a117aa083039088d8a07686d4c93fb8a810adca68810af"}, - {file = "matplotlib-3.7.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:0ccb830fc29442360d91be48527809f23a5dcaee8da5f4d9b2d5b867c1b087b8"}, - {file = "matplotlib-3.7.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:efc6bb28178e844d1f408dd4d6341ee8a2e906fc9e0fa3dae497da4e0cab775d"}, - {file = "matplotlib-3.7.5-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3b15c4c2d374f249f324f46e883340d494c01768dd5287f8bc00b65b625ab56c"}, - {file = "matplotlib-3.7.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3d028555421912307845e59e3de328260b26d055c5dac9b182cc9783854e98fb"}, - {file = "matplotlib-3.7.5-cp311-cp311-win32.whl", hash = "sha256:fe184b4625b4052fa88ef350b815559dd90cc6cc8e97b62f966e1ca84074aafa"}, - {file = "matplotlib-3.7.5-cp311-cp311-win_amd64.whl", hash = "sha256:084f1f0f2f1010868c6f1f50b4e1c6f2fb201c58475494f1e5b66fed66093647"}, - {file = "matplotlib-3.7.5-cp312-cp312-macosx_10_12_universal2.whl", hash = "sha256:34bceb9d8ddb142055ff27cd7135f539f2f01be2ce0bafbace4117abe58f8fe4"}, - {file = "matplotlib-3.7.5-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:c5a2134162273eb8cdfd320ae907bf84d171de948e62180fa372a3ca7cf0f433"}, - {file = "matplotlib-3.7.5-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:039ad54683a814002ff37bf7981aa1faa40b91f4ff84149beb53d1eb64617980"}, - {file = "matplotlib-3.7.5-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4d742ccd1b09e863b4ca58291728db645b51dab343eebb08d5d4b31b308296ce"}, - {file = "matplotlib-3.7.5-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:743b1c488ca6a2bc7f56079d282e44d236bf375968bfd1b7ba701fd4d0fa32d6"}, - {file = "matplotlib-3.7.5-cp312-cp312-win_amd64.whl", hash = "sha256:fbf730fca3e1f23713bc1fae0a57db386e39dc81ea57dc305c67f628c1d7a342"}, - {file = "matplotlib-3.7.5-cp38-cp38-macosx_10_12_universal2.whl", hash = "sha256:cfff9b838531698ee40e40ea1a8a9dc2c01edb400b27d38de6ba44c1f9a8e3d2"}, - {file = "matplotlib-3.7.5-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:1dbcca4508bca7847fe2d64a05b237a3dcaec1f959aedb756d5b1c67b770c5ee"}, - {file = "matplotlib-3.7.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4cdf4ef46c2a1609a50411b66940b31778db1e4b73d4ecc2eaa40bd588979b13"}, - {file = "matplotlib-3.7.5-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = 
"sha256:167200ccfefd1674b60e957186dfd9baf58b324562ad1a28e5d0a6b3bea77905"}, - {file = "matplotlib-3.7.5-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:53e64522934df6e1818b25fd48cf3b645b11740d78e6ef765fbb5fa5ce080d02"}, - {file = "matplotlib-3.7.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d3e3bc79b2d7d615067bd010caff9243ead1fc95cf735c16e4b2583173f717eb"}, - {file = "matplotlib-3.7.5-cp38-cp38-win32.whl", hash = "sha256:6b641b48c6819726ed47c55835cdd330e53747d4efff574109fd79b2d8a13748"}, - {file = "matplotlib-3.7.5-cp38-cp38-win_amd64.whl", hash = "sha256:f0b60993ed3488b4532ec6b697059897891927cbfc2b8d458a891b60ec03d9d7"}, - {file = "matplotlib-3.7.5-cp39-cp39-macosx_10_12_universal2.whl", hash = "sha256:090964d0afaff9c90e4d8de7836757e72ecfb252fb02884016d809239f715651"}, - {file = "matplotlib-3.7.5-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:9fc6fcfbc55cd719bc0bfa60bde248eb68cf43876d4c22864603bdd23962ba25"}, - {file = "matplotlib-3.7.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:5e7cc3078b019bb863752b8b60e8b269423000f1603cb2299608231996bd9d54"}, - {file = "matplotlib-3.7.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1e4e9a868e8163abaaa8259842d85f949a919e1ead17644fb77a60427c90473c"}, - {file = "matplotlib-3.7.5-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:fa7ebc995a7d747dacf0a717d0eb3aa0f0c6a0e9ea88b0194d3a3cd241a1500f"}, - {file = "matplotlib-3.7.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3785bfd83b05fc0e0c2ae4c4a90034fe693ef96c679634756c50fe6efcc09856"}, - {file = "matplotlib-3.7.5-cp39-cp39-win32.whl", hash = "sha256:29b058738c104d0ca8806395f1c9089dfe4d4f0f78ea765c6c704469f3fffc81"}, - {file = "matplotlib-3.7.5-cp39-cp39-win_amd64.whl", hash = "sha256:fd4028d570fa4b31b7b165d4a685942ae9cdc669f33741e388c01857d9723eab"}, - {file = "matplotlib-3.7.5-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:2a9a3f4d6a7f88a62a6a18c7e6a84aedcaf4faf0708b4ca46d87b19f1b526f88"}, - {file = "matplotlib-3.7.5-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b9b3fd853d4a7f008a938df909b96db0b454225f935d3917520305b90680579c"}, - {file = "matplotlib-3.7.5-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f0ad550da9f160737d7890217c5eeed4337d07e83ca1b2ca6535078f354e7675"}, - {file = "matplotlib-3.7.5-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:20da7924a08306a861b3f2d1da0d1aa9a6678e480cf8eacffe18b565af2813e7"}, - {file = "matplotlib-3.7.5-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:b45c9798ea6bb920cb77eb7306409756a7fab9db9b463e462618e0559aecb30e"}, - {file = "matplotlib-3.7.5-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a99866267da1e561c7776fe12bf4442174b79aac1a47bd7e627c7e4d077ebd83"}, - {file = "matplotlib-3.7.5-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2b6aa62adb6c268fc87d80f963aca39c64615c31830b02697743c95590ce3fbb"}, - {file = "matplotlib-3.7.5-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:e530ab6a0afd082d2e9c17eb1eb064a63c5b09bb607b2b74fa41adbe3e162286"}, - {file = "matplotlib-3.7.5.tar.gz", hash = "sha256:1e5c971558ebc811aa07f54c7b7c677d78aa518ef4c390e14673a09e0860184a"}, + {file = "matplotlib-3.7.2-cp310-cp310-macosx_10_12_universal2.whl", hash = "sha256:2699f7e73a76d4c110f4f25be9d2496d6ab4f17345307738557d345f099e07de"}, + {file = "matplotlib-3.7.2-cp310-cp310-macosx_10_12_x86_64.whl", hash = 
"sha256:a8035ba590658bae7562786c9cc6ea1a84aa49d3afab157e414c9e2ea74f496d"}, + {file = "matplotlib-3.7.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2f8e4a49493add46ad4a8c92f63e19d548b2b6ebbed75c6b4c7f46f57d36cdd1"}, + {file = "matplotlib-3.7.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:71667eb2ccca4c3537d9414b1bc00554cb7f91527c17ee4ec38027201f8f1603"}, + {file = "matplotlib-3.7.2-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:152ee0b569a37630d8628534c628456b28686e085d51394da6b71ef84c4da201"}, + {file = "matplotlib-3.7.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:070f8dddd1f5939e60aacb8fa08f19551f4b0140fab16a3669d5cd6e9cb28fc8"}, + {file = "matplotlib-3.7.2-cp310-cp310-win32.whl", hash = "sha256:fdbb46fad4fb47443b5b8ac76904b2e7a66556844f33370861b4788db0f8816a"}, + {file = "matplotlib-3.7.2-cp310-cp310-win_amd64.whl", hash = "sha256:23fb1750934e5f0128f9423db27c474aa32534cec21f7b2153262b066a581fd1"}, + {file = "matplotlib-3.7.2-cp311-cp311-macosx_10_12_universal2.whl", hash = "sha256:30e1409b857aa8a747c5d4f85f63a79e479835f8dffc52992ac1f3f25837b544"}, + {file = "matplotlib-3.7.2-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:50e0a55ec74bf2d7a0ebf50ac580a209582c2dd0f7ab51bc270f1b4a0027454e"}, + {file = "matplotlib-3.7.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ac60daa1dc83e8821eed155796b0f7888b6b916cf61d620a4ddd8200ac70cd64"}, + {file = "matplotlib-3.7.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:305e3da477dc8607336ba10bac96986d6308d614706cae2efe7d3ffa60465b24"}, + {file = "matplotlib-3.7.2-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1c308b255efb9b06b23874236ec0f10f026673ad6515f602027cc8ac7805352d"}, + {file = "matplotlib-3.7.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:60c521e21031632aa0d87ca5ba0c1c05f3daacadb34c093585a0be6780f698e4"}, + {file = "matplotlib-3.7.2-cp311-cp311-win32.whl", hash = "sha256:26bede320d77e469fdf1bde212de0ec889169b04f7f1179b8930d66f82b30cbc"}, + {file = "matplotlib-3.7.2-cp311-cp311-win_amd64.whl", hash = "sha256:af4860132c8c05261a5f5f8467f1b269bf1c7c23902d75f2be57c4a7f2394b3e"}, + {file = "matplotlib-3.7.2-cp38-cp38-macosx_10_12_universal2.whl", hash = "sha256:a1733b8e84e7e40a9853e505fe68cc54339f97273bdfe6f3ed980095f769ddc7"}, + {file = "matplotlib-3.7.2-cp38-cp38-macosx_10_12_x86_64.whl", hash = "sha256:d9881356dc48e58910c53af82b57183879129fa30492be69058c5b0d9fddf391"}, + {file = "matplotlib-3.7.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f081c03f413f59390a80b3e351cc2b2ea0205839714dbc364519bcf51f4b56ca"}, + {file = "matplotlib-3.7.2-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:1cd120fca3407a225168238b790bd5c528f0fafde6172b140a2f3ab7a4ea63e9"}, + {file = "matplotlib-3.7.2-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:a2c1590b90aa7bd741b54c62b78de05d4186271e34e2377e0289d943b3522273"}, + {file = "matplotlib-3.7.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6d2ff3c984b8a569bc1383cd468fc06b70d7b59d5c2854ca39f1436ae8394117"}, + {file = "matplotlib-3.7.2-cp38-cp38-win32.whl", hash = "sha256:5dea00b62d28654b71ca92463656d80646675628d0828e08a5f3b57e12869e13"}, + {file = "matplotlib-3.7.2-cp38-cp38-win_amd64.whl", hash = "sha256:0f506a1776ee94f9e131af1ac6efa6e5bc7cb606a3e389b0ccb6e657f60bb676"}, + {file = "matplotlib-3.7.2-cp39-cp39-macosx_10_12_universal2.whl", hash = 
"sha256:6515e878f91894c2e4340d81f0911857998ccaf04dbc1bba781e3d89cbf70608"}, + {file = "matplotlib-3.7.2-cp39-cp39-macosx_10_12_x86_64.whl", hash = "sha256:71f7a8c6b124e904db550f5b9fe483d28b896d4135e45c4ea381ad3b8a0e3256"}, + {file = "matplotlib-3.7.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:12f01b92ecd518e0697da4d97d163b2b3aa55eb3eb4e2c98235b3396d7dad55f"}, + {file = "matplotlib-3.7.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a7e28d6396563955f7af437894a36bf2b279462239a41028323e04b85179058b"}, + {file = "matplotlib-3.7.2-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dbcf59334ff645e6a67cd5f78b4b2cdb76384cdf587fa0d2dc85f634a72e1a3e"}, + {file = "matplotlib-3.7.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:318c89edde72ff95d8df67d82aca03861240512994a597a435a1011ba18dbc7f"}, + {file = "matplotlib-3.7.2-cp39-cp39-win32.whl", hash = "sha256:ce55289d5659b5b12b3db4dc9b7075b70cef5631e56530f14b2945e8836f2d20"}, + {file = "matplotlib-3.7.2-cp39-cp39-win_amd64.whl", hash = "sha256:2ecb5be2b2815431c81dc115667e33da0f5a1bcf6143980d180d09a717c4a12e"}, + {file = "matplotlib-3.7.2-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:fdcd28360dbb6203fb5219b1a5658df226ac9bebc2542a9e8f457de959d713d0"}, + {file = "matplotlib-3.7.2-pp38-pypy38_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0c3cca3e842b11b55b52c6fb8bd6a4088693829acbfcdb3e815fa9b7d5c92c1b"}, + {file = "matplotlib-3.7.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ebf577c7a6744e9e1bd3fee45fc74a02710b214f94e2bde344912d85e0c9af7c"}, + {file = "matplotlib-3.7.2-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:936bba394682049919dda062d33435b3be211dc3dcaa011e09634f060ec878b2"}, + {file = "matplotlib-3.7.2-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:bc221ffbc2150458b1cd71cdd9ddd5bb37962b036e41b8be258280b5b01da1dd"}, + {file = "matplotlib-3.7.2-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:35d74ebdb3f71f112b36c2629cf32323adfbf42679e2751252acd468f5001c07"}, + {file = "matplotlib-3.7.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:717157e61b3a71d3d26ad4e1770dc85156c9af435659a25ee6407dc866cb258d"}, + {file = "matplotlib-3.7.2-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:20f844d6be031948148ba49605c8b96dfe7d3711d1b63592830d650622458c11"}, + {file = "matplotlib-3.7.2.tar.gz", hash = "sha256:a8cdb91dddb04436bd2f098b8fdf4b81352e68cf4d2c6756fcc414791076569b"}, ] [package.dependencies] @@ -2032,10 +2192,10 @@ cycler = ">=0.10" fonttools = ">=4.22.0" importlib-resources = {version = ">=3.2.0", markers = "python_version < \"3.10\""} kiwisolver = ">=1.0.1" -numpy = ">=1.20,<2" +numpy = ">=1.20" packaging = ">=20.0" pillow = ">=6.2.0" -pyparsing = ">=2.3.1" +pyparsing = ">=2.3.1,<3.1" python-dateutil = ">=2.7" [[package]] @@ -2211,13 +2371,13 @@ requests = "*" [[package]] name = "mkdocs-git-revision-date-localized-plugin" -version = "1.2.9" +version = "1.3.0" description = "Mkdocs plugin that enables displaying the localized date of the last git modification of a markdown file." 
optional = false python-versions = ">=3.8" files = [ - {file = "mkdocs_git_revision_date_localized_plugin-1.2.9-py3-none-any.whl", hash = "sha256:dea5c8067c23df30275702a1708885500fadf0abfb595b60e698bffc79c7a423"}, - {file = "mkdocs_git_revision_date_localized_plugin-1.2.9.tar.gz", hash = "sha256:df9a50873fba3a42ce9123885f8c53d589e90ef6c2443fe3280ef1e8d33c8f65"}, + {file = "mkdocs_git_revision_date_localized_plugin-1.3.0-py3-none-any.whl", hash = "sha256:c99377ee119372d57a9e47cff4e68f04cce634a74831c06bc89b33e456e840a1"}, + {file = "mkdocs_git_revision_date_localized_plugin-1.3.0.tar.gz", hash = "sha256:439e2f14582204050a664c258861c325064d97cdc848c541e48bb034a6c4d0cb"}, ] [package.dependencies] @@ -2252,13 +2412,13 @@ pygments = ">2.12.0" [[package]] name = "mkdocs-material" -version = "9.5.39" +version = "9.5.44" description = "Documentation that simply works" optional = false python-versions = ">=3.8" files = [ - {file = "mkdocs_material-9.5.39-py3-none-any.whl", hash = "sha256:0f2f68c8db89523cb4a59705cd01b4acd62b2f71218ccb67e1e004e560410d2b"}, - {file = "mkdocs_material-9.5.39.tar.gz", hash = "sha256:25faa06142afa38549d2b781d475a86fb61de93189f532b88e69bf11e5e5c3be"}, + {file = "mkdocs_material-9.5.44-py3-none-any.whl", hash = "sha256:47015f9c167d58a5ff5e682da37441fc4d66a1c79334bfc08d774763cacf69ca"}, + {file = "mkdocs_material-9.5.44.tar.gz", hash = "sha256:f3a6c968e524166b3f3ed1fb97d3ed3e0091183b0545cedf7156a2a6804c56c0"}, ] [package.dependencies] @@ -2349,38 +2509,43 @@ files = [ [[package]] name = "mypy" -version = "1.11.2" +version = "1.13.0" description = "Optional static typing for Python" optional = false python-versions = ">=3.8" files = [ - {file = "mypy-1.11.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d42a6dd818ffce7be66cce644f1dff482f1d97c53ca70908dff0b9ddc120b77a"}, - {file = "mypy-1.11.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:801780c56d1cdb896eacd5619a83e427ce436d86a3bdf9112527f24a66618fef"}, - {file = "mypy-1.11.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:41ea707d036a5307ac674ea172875f40c9d55c5394f888b168033177fce47383"}, - {file = "mypy-1.11.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:6e658bd2d20565ea86da7d91331b0eed6d2eee22dc031579e6297f3e12c758c8"}, - {file = "mypy-1.11.2-cp310-cp310-win_amd64.whl", hash = "sha256:478db5f5036817fe45adb7332d927daa62417159d49783041338921dcf646fc7"}, - {file = "mypy-1.11.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:75746e06d5fa1e91bfd5432448d00d34593b52e7e91a187d981d08d1f33d4385"}, - {file = "mypy-1.11.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a976775ab2256aadc6add633d44f100a2517d2388906ec4f13231fafbb0eccca"}, - {file = "mypy-1.11.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:cd953f221ac1379050a8a646585a29574488974f79d8082cedef62744f0a0104"}, - {file = "mypy-1.11.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:57555a7715c0a34421013144a33d280e73c08df70f3a18a552938587ce9274f4"}, - {file = "mypy-1.11.2-cp311-cp311-win_amd64.whl", hash = "sha256:36383a4fcbad95f2657642a07ba22ff797de26277158f1cc7bd234821468b1b6"}, - {file = "mypy-1.11.2-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:e8960dbbbf36906c5c0b7f4fbf2f0c7ffb20f4898e6a879fcf56a41a08b0d318"}, - {file = "mypy-1.11.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:06d26c277962f3fb50e13044674aa10553981ae514288cb7d0a738f495550b36"}, - {file = 
"mypy-1.11.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:6e7184632d89d677973a14d00ae4d03214c8bc301ceefcdaf5c474866814c987"}, - {file = "mypy-1.11.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:3a66169b92452f72117e2da3a576087025449018afc2d8e9bfe5ffab865709ca"}, - {file = "mypy-1.11.2-cp312-cp312-win_amd64.whl", hash = "sha256:969ea3ef09617aff826885a22ece0ddef69d95852cdad2f60c8bb06bf1f71f70"}, - {file = "mypy-1.11.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:37c7fa6121c1cdfcaac97ce3d3b5588e847aa79b580c1e922bb5d5d2902df19b"}, - {file = "mypy-1.11.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4a8a53bc3ffbd161b5b2a4fff2f0f1e23a33b0168f1c0778ec70e1a3d66deb86"}, - {file = "mypy-1.11.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:2ff93107f01968ed834f4256bc1fc4475e2fecf6c661260066a985b52741ddce"}, - {file = "mypy-1.11.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:edb91dded4df17eae4537668b23f0ff6baf3707683734b6a818d5b9d0c0c31a1"}, - {file = "mypy-1.11.2-cp38-cp38-win_amd64.whl", hash = "sha256:ee23de8530d99b6db0573c4ef4bd8f39a2a6f9b60655bf7a1357e585a3486f2b"}, - {file = "mypy-1.11.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:801ca29f43d5acce85f8e999b1e431fb479cb02d0e11deb7d2abb56bdaf24fd6"}, - {file = "mypy-1.11.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:af8d155170fcf87a2afb55b35dc1a0ac21df4431e7d96717621962e4b9192e70"}, - {file = "mypy-1.11.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:f7821776e5c4286b6a13138cc935e2e9b6fde05e081bdebf5cdb2bb97c9df81d"}, - {file = "mypy-1.11.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:539c570477a96a4e6fb718b8d5c3e0c0eba1f485df13f86d2970c91f0673148d"}, - {file = "mypy-1.11.2-cp39-cp39-win_amd64.whl", hash = "sha256:3f14cd3d386ac4d05c5a39a51b84387403dadbd936e17cb35882134d4f8f0d24"}, - {file = "mypy-1.11.2-py3-none-any.whl", hash = "sha256:b499bc07dbdcd3de92b0a8b29fdf592c111276f6a12fe29c30f6c417dd546d12"}, - {file = "mypy-1.11.2.tar.gz", hash = "sha256:7f9993ad3e0ffdc95c2a14b66dee63729f021968bff8ad911867579c65d13a79"}, + {file = "mypy-1.13.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6607e0f1dd1fb7f0aca14d936d13fd19eba5e17e1cd2a14f808fa5f8f6d8f60a"}, + {file = "mypy-1.13.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8a21be69bd26fa81b1f80a61ee7ab05b076c674d9b18fb56239d72e21d9f4c80"}, + {file = "mypy-1.13.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7b2353a44d2179846a096e25691d54d59904559f4232519d420d64da6828a3a7"}, + {file = "mypy-1.13.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:0730d1c6a2739d4511dc4253f8274cdd140c55c32dfb0a4cf8b7a43f40abfa6f"}, + {file = "mypy-1.13.0-cp310-cp310-win_amd64.whl", hash = "sha256:c5fc54dbb712ff5e5a0fca797e6e0aa25726c7e72c6a5850cfd2adbc1eb0a372"}, + {file = "mypy-1.13.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:581665e6f3a8a9078f28d5502f4c334c0c8d802ef55ea0e7276a6e409bc0d82d"}, + {file = "mypy-1.13.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3ddb5b9bf82e05cc9a627e84707b528e5c7caaa1c55c69e175abb15a761cec2d"}, + {file = "mypy-1.13.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:20c7ee0bc0d5a9595c46f38beb04201f2620065a93755704e141fcac9f59db2b"}, + {file = "mypy-1.13.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = 
"sha256:3790ded76f0b34bc9c8ba4def8f919dd6a46db0f5a6610fb994fe8efdd447f73"}, + {file = "mypy-1.13.0-cp311-cp311-win_amd64.whl", hash = "sha256:51f869f4b6b538229c1d1bcc1dd7d119817206e2bc54e8e374b3dfa202defcca"}, + {file = "mypy-1.13.0-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:5c7051a3461ae84dfb5dd15eff5094640c61c5f22257c8b766794e6dd85e72d5"}, + {file = "mypy-1.13.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:39bb21c69a5d6342f4ce526e4584bc5c197fd20a60d14a8624d8743fffb9472e"}, + {file = "mypy-1.13.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:164f28cb9d6367439031f4c81e84d3ccaa1e19232d9d05d37cb0bd880d3f93c2"}, + {file = "mypy-1.13.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:a4c1bfcdbce96ff5d96fc9b08e3831acb30dc44ab02671eca5953eadad07d6d0"}, + {file = "mypy-1.13.0-cp312-cp312-win_amd64.whl", hash = "sha256:a0affb3a79a256b4183ba09811e3577c5163ed06685e4d4b46429a271ba174d2"}, + {file = "mypy-1.13.0-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a7b44178c9760ce1a43f544e595d35ed61ac2c3de306599fa59b38a6048e1aa7"}, + {file = "mypy-1.13.0-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:5d5092efb8516d08440e36626f0153b5006d4088c1d663d88bf79625af3d1d62"}, + {file = "mypy-1.13.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:de2904956dac40ced10931ac967ae63c5089bd498542194b436eb097a9f77bc8"}, + {file = "mypy-1.13.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:7bfd8836970d33c2105562650656b6846149374dc8ed77d98424b40b09340ba7"}, + {file = "mypy-1.13.0-cp313-cp313-win_amd64.whl", hash = "sha256:9f73dba9ec77acb86457a8fc04b5239822df0c14a082564737833d2963677dbc"}, + {file = "mypy-1.13.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:100fac22ce82925f676a734af0db922ecfea991e1d7ec0ceb1e115ebe501301a"}, + {file = "mypy-1.13.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7bcb0bb7f42a978bb323a7c88f1081d1b5dee77ca86f4100735a6f541299d8fb"}, + {file = "mypy-1.13.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:bde31fc887c213e223bbfc34328070996061b0833b0a4cfec53745ed61f3519b"}, + {file = "mypy-1.13.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:07de989f89786f62b937851295ed62e51774722e5444a27cecca993fc3f9cd74"}, + {file = "mypy-1.13.0-cp38-cp38-win_amd64.whl", hash = "sha256:4bde84334fbe19bad704b3f5b78c4abd35ff1026f8ba72b29de70dda0916beb6"}, + {file = "mypy-1.13.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:0246bcb1b5de7f08f2826451abd947bf656945209b140d16ed317f65a17dc7dc"}, + {file = "mypy-1.13.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7f5b7deae912cf8b77e990b9280f170381fdfbddf61b4ef80927edd813163732"}, + {file = "mypy-1.13.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:7029881ec6ffb8bc233a4fa364736789582c738217b133f1b55967115288a2bc"}, + {file = "mypy-1.13.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:3e38b980e5681f28f033f3be86b099a247b13c491f14bb8b1e1e134d23bb599d"}, + {file = "mypy-1.13.0-cp39-cp39-win_amd64.whl", hash = "sha256:a6789be98a2017c912ae6ccb77ea553bbaf13d27605d2ca20a76dfbced631b24"}, + {file = "mypy-1.13.0-py3-none-any.whl", hash = "sha256:9c250883f9fd81d212e0952c92dbfcc96fc237f4b7c92f56ac81fd48460b3e5a"}, + {file = "mypy-1.13.0.tar.gz", hash = "sha256:0291a61b6fbf3e6673e3405cfcc0e7650bebc7939659fdca2702958038bd835e"}, ] [package.dependencies] @@ -2390,6 +2555,7 @@ typing-extensions = ">=4.6.0" [package.extras] dmypy = ["psutil 
(>=4.0)"] +faster-cache = ["orjson"] install-types = ["pip"] mypyc = ["setuptools (>=50)"] reports = ["lxml"] @@ -2612,47 +2778,64 @@ files = [ [[package]] name = "numpy" -version = "1.26.4" +version = "2.1.2" description = "Fundamental package for array computing in Python" optional = false -python-versions = ">=3.9" -files = [ - {file = "numpy-1.26.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9ff0f4f29c51e2803569d7a51c2304de5554655a60c5d776e35b4a41413830d0"}, - {file = "numpy-1.26.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2e4ee3380d6de9c9ec04745830fd9e2eccb3e6cf790d39d7b98ffd19b0dd754a"}, - {file = "numpy-1.26.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d209d8969599b27ad20994c8e41936ee0964e6da07478d6c35016bc386b66ad4"}, - {file = "numpy-1.26.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ffa75af20b44f8dba823498024771d5ac50620e6915abac414251bd971b4529f"}, - {file = "numpy-1.26.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:62b8e4b1e28009ef2846b4c7852046736bab361f7aeadeb6a5b89ebec3c7055a"}, - {file = "numpy-1.26.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a4abb4f9001ad2858e7ac189089c42178fcce737e4169dc61321660f1a96c7d2"}, - {file = "numpy-1.26.4-cp310-cp310-win32.whl", hash = "sha256:bfe25acf8b437eb2a8b2d49d443800a5f18508cd811fea3181723922a8a82b07"}, - {file = "numpy-1.26.4-cp310-cp310-win_amd64.whl", hash = "sha256:b97fe8060236edf3662adfc2c633f56a08ae30560c56310562cb4f95500022d5"}, - {file = "numpy-1.26.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:4c66707fabe114439db9068ee468c26bbdf909cac0fb58686a42a24de1760c71"}, - {file = "numpy-1.26.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:edd8b5fe47dab091176d21bb6de568acdd906d1887a4584a15a9a96a1dca06ef"}, - {file = "numpy-1.26.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7ab55401287bfec946ced39700c053796e7cc0e3acbef09993a9ad2adba6ca6e"}, - {file = "numpy-1.26.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:666dbfb6ec68962c033a450943ded891bed2d54e6755e35e5835d63f4f6931d5"}, - {file = "numpy-1.26.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:96ff0b2ad353d8f990b63294c8986f1ec3cb19d749234014f4e7eb0112ceba5a"}, - {file = "numpy-1.26.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:60dedbb91afcbfdc9bc0b1f3f402804070deed7392c23eb7a7f07fa857868e8a"}, - {file = "numpy-1.26.4-cp311-cp311-win32.whl", hash = "sha256:1af303d6b2210eb850fcf03064d364652b7120803a0b872f5211f5234b399f20"}, - {file = "numpy-1.26.4-cp311-cp311-win_amd64.whl", hash = "sha256:cd25bcecc4974d09257ffcd1f098ee778f7834c3ad767fe5db785be9a4aa9cb2"}, - {file = "numpy-1.26.4-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:b3ce300f3644fb06443ee2222c2201dd3a89ea6040541412b8fa189341847218"}, - {file = "numpy-1.26.4-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:03a8c78d01d9781b28a6989f6fa1bb2c4f2d51201cf99d3dd875df6fbd96b23b"}, - {file = "numpy-1.26.4-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9fad7dcb1aac3c7f0584a5a8133e3a43eeb2fe127f47e3632d43d677c66c102b"}, - {file = "numpy-1.26.4-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:675d61ffbfa78604709862923189bad94014bef562cc35cf61d3a07bba02a7ed"}, - {file = "numpy-1.26.4-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:ab47dbe5cc8210f55aa58e4805fe224dac469cde56b9f731a4c098b91917159a"}, - {file = "numpy-1.26.4-cp312-cp312-musllinux_1_1_x86_64.whl", hash = 
"sha256:1dda2e7b4ec9dd512f84935c5f126c8bd8b9f2fc001e9f54af255e8c5f16b0e0"}, - {file = "numpy-1.26.4-cp312-cp312-win32.whl", hash = "sha256:50193e430acfc1346175fcbdaa28ffec49947a06918b7b92130744e81e640110"}, - {file = "numpy-1.26.4-cp312-cp312-win_amd64.whl", hash = "sha256:08beddf13648eb95f8d867350f6a018a4be2e5ad54c8d8caed89ebca558b2818"}, - {file = "numpy-1.26.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:7349ab0fa0c429c82442a27a9673fc802ffdb7c7775fad780226cb234965e53c"}, - {file = "numpy-1.26.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:52b8b60467cd7dd1e9ed082188b4e6bb35aa5cdd01777621a1658910745b90be"}, - {file = "numpy-1.26.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d5241e0a80d808d70546c697135da2c613f30e28251ff8307eb72ba696945764"}, - {file = "numpy-1.26.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f870204a840a60da0b12273ef34f7051e98c3b5961b61b0c2c1be6dfd64fbcd3"}, - {file = "numpy-1.26.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:679b0076f67ecc0138fd2ede3a8fd196dddc2ad3254069bcb9faf9a79b1cebcd"}, - {file = "numpy-1.26.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:47711010ad8555514b434df65f7d7b076bb8261df1ca9bb78f53d3b2db02e95c"}, - {file = "numpy-1.26.4-cp39-cp39-win32.whl", hash = "sha256:a354325ee03388678242a4d7ebcd08b5c727033fcff3b2f536aea978e15ee9e6"}, - {file = "numpy-1.26.4-cp39-cp39-win_amd64.whl", hash = "sha256:3373d5d70a5fe74a2c1bb6d2cfd9609ecf686d47a2d7b1d37a8f3b6bf6003aea"}, - {file = "numpy-1.26.4-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:afedb719a9dcfc7eaf2287b839d8198e06dcd4cb5d276a3df279231138e83d30"}, - {file = "numpy-1.26.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:95a7476c59002f2f6c590b9b7b998306fba6a5aa646b1e22ddfeaf8f78c3a29c"}, - {file = "numpy-1.26.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:7e50d0a0cc3189f9cb0aeb3a6a6af18c16f59f004b866cd2be1c14b36134a4a0"}, - {file = "numpy-1.26.4.tar.gz", hash = "sha256:2a02aba9ed12e4ac4eb3ea9421c420301a0c6460d9830d74a9df87efa4912010"}, +python-versions = ">=3.10" +files = [ + {file = "numpy-2.1.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:30d53720b726ec36a7f88dc873f0eec8447fbc93d93a8f079dfac2629598d6ee"}, + {file = "numpy-2.1.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e8d3ca0a72dd8846eb6f7dfe8f19088060fcb76931ed592d29128e0219652884"}, + {file = "numpy-2.1.2-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:fc44e3c68ff00fd991b59092a54350e6e4911152682b4782f68070985aa9e648"}, + {file = "numpy-2.1.2-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:7c1c60328bd964b53f8b835df69ae8198659e2b9302ff9ebb7de4e5a5994db3d"}, + {file = "numpy-2.1.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6cdb606a7478f9ad91c6283e238544451e3a95f30fb5467fbf715964341a8a86"}, + {file = "numpy-2.1.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d666cb72687559689e9906197e3bec7b736764df6a2e58ee265e360663e9baf7"}, + {file = "numpy-2.1.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c6eef7a2dbd0abfb0d9eaf78b73017dbfd0b54051102ff4e6a7b2980d5ac1a03"}, + {file = "numpy-2.1.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:12edb90831ff481f7ef5f6bc6431a9d74dc0e5ff401559a71e5e4611d4f2d466"}, + {file = "numpy-2.1.2-cp310-cp310-win32.whl", hash = "sha256:a65acfdb9c6ebb8368490dbafe83c03c7e277b37e6857f0caeadbbc56e12f4fb"}, + {file = "numpy-2.1.2-cp310-cp310-win_amd64.whl", hash = 
"sha256:860ec6e63e2c5c2ee5e9121808145c7bf86c96cca9ad396c0bd3e0f2798ccbe2"}, + {file = "numpy-2.1.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b42a1a511c81cc78cbc4539675713bbcf9d9c3913386243ceff0e9429ca892fe"}, + {file = "numpy-2.1.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:faa88bc527d0f097abdc2c663cddf37c05a1c2f113716601555249805cf573f1"}, + {file = "numpy-2.1.2-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:c82af4b2ddd2ee72d1fc0c6695048d457e00b3582ccde72d8a1c991b808bb20f"}, + {file = "numpy-2.1.2-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:13602b3174432a35b16c4cfb5de9a12d229727c3dd47a6ce35111f2ebdf66ff4"}, + {file = "numpy-2.1.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1ebec5fd716c5a5b3d8dfcc439be82a8407b7b24b230d0ad28a81b61c2f4659a"}, + {file = "numpy-2.1.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e2b49c3c0804e8ecb05d59af8386ec2f74877f7ca8fd9c1e00be2672e4d399b1"}, + {file = "numpy-2.1.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:2cbba4b30bf31ddbe97f1c7205ef976909a93a66bb1583e983adbd155ba72ac2"}, + {file = "numpy-2.1.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:8e00ea6fc82e8a804433d3e9cedaa1051a1422cb6e443011590c14d2dea59146"}, + {file = "numpy-2.1.2-cp311-cp311-win32.whl", hash = "sha256:5006b13a06e0b38d561fab5ccc37581f23c9511879be7693bd33c7cd15ca227c"}, + {file = "numpy-2.1.2-cp311-cp311-win_amd64.whl", hash = "sha256:f1eb068ead09f4994dec71c24b2844f1e4e4e013b9629f812f292f04bd1510d9"}, + {file = "numpy-2.1.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:d7bf0a4f9f15b32b5ba53147369e94296f5fffb783db5aacc1be15b4bf72f43b"}, + {file = "numpy-2.1.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:b1d0fcae4f0949f215d4632be684a539859b295e2d0cb14f78ec231915d644db"}, + {file = "numpy-2.1.2-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:f751ed0a2f250541e19dfca9f1eafa31a392c71c832b6bb9e113b10d050cb0f1"}, + {file = "numpy-2.1.2-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:bd33f82e95ba7ad632bc57837ee99dba3d7e006536200c4e9124089e1bf42426"}, + {file = "numpy-2.1.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1b8cde4f11f0a975d1fd59373b32e2f5a562ade7cde4f85b7137f3de8fbb29a0"}, + {file = "numpy-2.1.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d95f286b8244b3649b477ac066c6906fbb2905f8ac19b170e2175d3d799f4df"}, + {file = "numpy-2.1.2-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:ab4754d432e3ac42d33a269c8567413bdb541689b02d93788af4131018cbf366"}, + {file = "numpy-2.1.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:e585c8ae871fd38ac50598f4763d73ec5497b0de9a0ab4ef5b69f01c6a046142"}, + {file = "numpy-2.1.2-cp312-cp312-win32.whl", hash = "sha256:9c6c754df29ce6a89ed23afb25550d1c2d5fdb9901d9c67a16e0b16eaf7e2550"}, + {file = "numpy-2.1.2-cp312-cp312-win_amd64.whl", hash = "sha256:456e3b11cb79ac9946c822a56346ec80275eaf2950314b249b512896c0d2505e"}, + {file = "numpy-2.1.2-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:a84498e0d0a1174f2b3ed769b67b656aa5460c92c9554039e11f20a05650f00d"}, + {file = "numpy-2.1.2-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:4d6ec0d4222e8ffdab1744da2560f07856421b367928026fb540e1945f2eeeaf"}, + {file = "numpy-2.1.2-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:259ec80d54999cc34cd1eb8ded513cb053c3bf4829152a2e00de2371bd406f5e"}, + {file = "numpy-2.1.2-cp313-cp313-macosx_14_0_x86_64.whl", hash = 
"sha256:675c741d4739af2dc20cd6c6a5c4b7355c728167845e3c6b0e824e4e5d36a6c3"}, + {file = "numpy-2.1.2-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:05b2d4e667895cc55e3ff2b56077e4c8a5604361fc21a042845ea3ad67465aa8"}, + {file = "numpy-2.1.2-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:43cca367bf94a14aca50b89e9bc2061683116cfe864e56740e083392f533ce7a"}, + {file = "numpy-2.1.2-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:76322dcdb16fccf2ac56f99048af32259dcc488d9b7e25b51e5eca5147a3fb98"}, + {file = "numpy-2.1.2-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:32e16a03138cabe0cb28e1007ee82264296ac0983714094380b408097a418cfe"}, + {file = "numpy-2.1.2-cp313-cp313-win32.whl", hash = "sha256:242b39d00e4944431a3cd2db2f5377e15b5785920421993770cddb89992c3f3a"}, + {file = "numpy-2.1.2-cp313-cp313-win_amd64.whl", hash = "sha256:f2ded8d9b6f68cc26f8425eda5d3877b47343e68ca23d0d0846f4d312ecaa445"}, + {file = "numpy-2.1.2-cp313-cp313t-macosx_10_13_x86_64.whl", hash = "sha256:2ffef621c14ebb0188a8633348504a35c13680d6da93ab5cb86f4e54b7e922b5"}, + {file = "numpy-2.1.2-cp313-cp313t-macosx_11_0_arm64.whl", hash = "sha256:ad369ed238b1959dfbade9018a740fb9392c5ac4f9b5173f420bd4f37ba1f7a0"}, + {file = "numpy-2.1.2-cp313-cp313t-macosx_14_0_arm64.whl", hash = "sha256:d82075752f40c0ddf57e6e02673a17f6cb0f8eb3f587f63ca1eaab5594da5b17"}, + {file = "numpy-2.1.2-cp313-cp313t-macosx_14_0_x86_64.whl", hash = "sha256:1600068c262af1ca9580a527d43dc9d959b0b1d8e56f8a05d830eea39b7c8af6"}, + {file = "numpy-2.1.2-cp313-cp313t-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a26ae94658d3ba3781d5e103ac07a876b3e9b29db53f68ed7df432fd033358a8"}, + {file = "numpy-2.1.2-cp313-cp313t-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:13311c2db4c5f7609b462bc0f43d3c465424d25c626d95040f073e30f7570e35"}, + {file = "numpy-2.1.2-cp313-cp313t-musllinux_1_1_x86_64.whl", hash = "sha256:2abbf905a0b568706391ec6fa15161fad0fb5d8b68d73c461b3c1bab6064dd62"}, + {file = "numpy-2.1.2-cp313-cp313t-musllinux_1_2_aarch64.whl", hash = "sha256:ef444c57d664d35cac4e18c298c47d7b504c66b17c2ea91312e979fcfbdfb08a"}, + {file = "numpy-2.1.2-pp310-pypy310_pp73-macosx_10_15_x86_64.whl", hash = "sha256:bdd407c40483463898b84490770199d5714dcc9dd9b792f6c6caccc523c00952"}, + {file = "numpy-2.1.2-pp310-pypy310_pp73-macosx_14_0_x86_64.whl", hash = "sha256:da65fb46d4cbb75cb417cddf6ba5e7582eb7bb0b47db4b99c9fe5787ce5d91f5"}, + {file = "numpy-2.1.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1c193d0b0238638e6fc5f10f1b074a6993cb13b0b431f64079a509d63d3aa8b7"}, + {file = "numpy-2.1.2-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:a7d80b2e904faa63068ead63107189164ca443b42dd1930299e0d1cb041cec2e"}, + {file = "numpy-2.1.2.tar.gz", hash = "sha256:13532a088217fa624c99b843eeb54640de23b3414b14aa66d023805eb731066c"}, ] [[package]] @@ -2673,12 +2856,12 @@ files = [ [package.dependencies] numpy = [ + {version = ">=1.26.0", markers = "python_version >= \"3.12\""}, {version = ">=1.21.0", markers = "python_version <= \"3.9\" and platform_system == \"Darwin\" and platform_machine == \"arm64\" and python_version >= \"3.8\""}, {version = ">=1.19.3", markers = "platform_system == \"Linux\" and platform_machine == \"aarch64\" and python_version >= \"3.8\" and python_version < \"3.10\" or python_version > \"3.9\" and python_version < \"3.10\" or python_version >= \"3.9\" and platform_system != \"Darwin\" and python_version < \"3.10\" or python_version >= \"3.9\" and 
platform_machine != \"arm64\" and python_version < \"3.10\""}, {version = ">=1.17.3", markers = "(platform_system != \"Darwin\" and platform_system != \"Linux\") and python_version >= \"3.8\" and python_version < \"3.9\" or platform_system != \"Darwin\" and python_version >= \"3.8\" and python_version < \"3.9\" and platform_machine != \"aarch64\" or platform_machine != \"arm64\" and python_version >= \"3.8\" and python_version < \"3.9\" and platform_system != \"Linux\" or (platform_machine != \"arm64\" and platform_machine != \"aarch64\") and python_version >= \"3.8\" and python_version < \"3.9\""}, {version = ">=1.21.4", markers = "python_version >= \"3.10\" and platform_system == \"Darwin\" and python_version < \"3.11\""}, {version = ">=1.21.2", markers = "platform_system != \"Darwin\" and python_version >= \"3.10\" and python_version < \"3.11\""}, - {version = ">=1.26.0", markers = "python_version >= \"3.12\""}, {version = ">=1.23.5", markers = "python_version >= \"3.11\" and python_version < \"3.12\""}, ] @@ -2755,9 +2938,9 @@ files = [ [package.dependencies] numpy = [ + {version = ">=1.23.2", markers = "python_version >= \"3.11\""}, {version = ">=1.20.3", markers = "python_version < \"3.10\""}, {version = ">=1.21.0", markers = "python_version >= \"3.10\" and python_version < \"3.11\""}, - {version = ">=1.23.2", markers = "python_version >= \"3.11\""}, ] python-dateutil = ">=2.8.2" pytz = ">=2020.1" @@ -3142,13 +3325,13 @@ windows-terminal = ["colorama (>=0.4.6)"] [[package]] name = "pymdown-extensions" -version = "10.10.2" +version = "10.11.2" description = "Extension pack for Python Markdown." optional = false python-versions = ">=3.8" files = [ - {file = "pymdown_extensions-10.10.2-py3-none-any.whl", hash = "sha256:513a9e9432b197cf0539356c8f1fc376e0d10b70ad150cadeb649a5628aacd45"}, - {file = "pymdown_extensions-10.10.2.tar.gz", hash = "sha256:65d82324ef2497931bc858c8320540c6264ab0d9a292707edb61f4fe0cd56633"}, + {file = "pymdown_extensions-10.11.2-py3-none-any.whl", hash = "sha256:41cdde0a77290e480cf53892f5c5e50921a7ee3e5cd60ba91bf19837b33badcf"}, + {file = "pymdown_extensions-10.11.2.tar.gz", hash = "sha256:bc8847ecc9e784a098efd35e20cba772bc5a1b529dfcef9dc1972db9021a1049"}, ] [package.dependencies] @@ -3160,13 +3343,13 @@ extra = ["pygments (>=2.12)"] [[package]] name = "pyparsing" -version = "3.1.4" +version = "3.0.9" description = "pyparsing module - Classes and methods to define and execute parsing grammars" optional = false python-versions = ">=3.6.8" files = [ - {file = "pyparsing-3.1.4-py3-none-any.whl", hash = "sha256:a6a7ee4235a3f944aa1fa2249307708f893fe5717dc603503c6c7969c070fb7c"}, - {file = "pyparsing-3.1.4.tar.gz", hash = "sha256:f86ec8d1a83f11977c9a6ea7598e8c27fc5cddfa5b07ea2241edbbde1d7bc032"}, + {file = "pyparsing-3.0.9-py3-none-any.whl", hash = "sha256:5026bae9a10eeaefb61dab2f09052b9f4307d44aee4eda64b309723d8d206bbc"}, + {file = "pyparsing-3.0.9.tar.gz", hash = "sha256:2b020ecf7d21b687f219b71ecad3631f644a47f01403fa1d1036b0c6416d70fb"}, ] [package.extras] @@ -3193,13 +3376,13 @@ testing = ["covdefaults (>=2.3)", "pytest (>=8.3.3)", "pytest-cov (>=5)", "pytes [[package]] name = "pyproject-hooks" -version = "1.1.0" +version = "1.2.0" description = "Wrappers to call pyproject.toml-based build backend hooks." 
optional = false python-versions = ">=3.7" files = [ - {file = "pyproject_hooks-1.1.0-py3-none-any.whl", hash = "sha256:7ceeefe9aec63a1064c18d939bdc3adf2d8aa1988a510afec15151578b232aa2"}, - {file = "pyproject_hooks-1.1.0.tar.gz", hash = "sha256:4b37730834edbd6bd37f26ece6b44802fb1c1ee2ece0e54ddff8bfc06db86965"}, + {file = "pyproject_hooks-1.2.0-py3-none-any.whl", hash = "sha256:9e5c6bfa8dcc30091c74b0cf803c81fdd29d94f01992a7707bc97babb1141913"}, + {file = "pyproject_hooks-1.2.0.tar.gz", hash = "sha256:1e859bd5c40fae9448642dd871adf459e5e2084186e8d2c2a79a824c970da1f8"}, ] [[package]] @@ -3262,25 +3445,29 @@ files = [ [[package]] name = "pywin32" -version = "306" +version = "308" description = "Python for Window Extensions" optional = false python-versions = "*" files = [ - {file = "pywin32-306-cp310-cp310-win32.whl", hash = "sha256:06d3420a5155ba65f0b72f2699b5bacf3109f36acbe8923765c22938a69dfc8d"}, - {file = "pywin32-306-cp310-cp310-win_amd64.whl", hash = "sha256:84f4471dbca1887ea3803d8848a1616429ac94a4a8d05f4bc9c5dcfd42ca99c8"}, - {file = "pywin32-306-cp311-cp311-win32.whl", hash = "sha256:e65028133d15b64d2ed8f06dd9fbc268352478d4f9289e69c190ecd6818b6407"}, - {file = "pywin32-306-cp311-cp311-win_amd64.whl", hash = "sha256:a7639f51c184c0272e93f244eb24dafca9b1855707d94c192d4a0b4c01e1100e"}, - {file = "pywin32-306-cp311-cp311-win_arm64.whl", hash = "sha256:70dba0c913d19f942a2db25217d9a1b726c278f483a919f1abfed79c9cf64d3a"}, - {file = "pywin32-306-cp312-cp312-win32.whl", hash = "sha256:383229d515657f4e3ed1343da8be101000562bf514591ff383ae940cad65458b"}, - {file = "pywin32-306-cp312-cp312-win_amd64.whl", hash = "sha256:37257794c1ad39ee9be652da0462dc2e394c8159dfd913a8a4e8eb6fd346da0e"}, - {file = "pywin32-306-cp312-cp312-win_arm64.whl", hash = "sha256:5821ec52f6d321aa59e2db7e0a35b997de60c201943557d108af9d4ae1ec7040"}, - {file = "pywin32-306-cp37-cp37m-win32.whl", hash = "sha256:1c73ea9a0d2283d889001998059f5eaaba3b6238f767c9cf2833b13e6a685f65"}, - {file = "pywin32-306-cp37-cp37m-win_amd64.whl", hash = "sha256:72c5f621542d7bdd4fdb716227be0dd3f8565c11b280be6315b06ace35487d36"}, - {file = "pywin32-306-cp38-cp38-win32.whl", hash = "sha256:e4c092e2589b5cf0d365849e73e02c391c1349958c5ac3e9d5ccb9a28e017b3a"}, - {file = "pywin32-306-cp38-cp38-win_amd64.whl", hash = "sha256:e8ac1ae3601bee6ca9f7cb4b5363bf1c0badb935ef243c4733ff9a393b1690c0"}, - {file = "pywin32-306-cp39-cp39-win32.whl", hash = "sha256:e25fd5b485b55ac9c057f67d94bc203f3f6595078d1fb3b458c9c28b7153a802"}, - {file = "pywin32-306-cp39-cp39-win_amd64.whl", hash = "sha256:39b61c15272833b5c329a2989999dcae836b1eed650252ab1b7bfbe1d59f30f4"}, + {file = "pywin32-308-cp310-cp310-win32.whl", hash = "sha256:796ff4426437896550d2981b9c2ac0ffd75238ad9ea2d3bfa67a1abd546d262e"}, + {file = "pywin32-308-cp310-cp310-win_amd64.whl", hash = "sha256:4fc888c59b3c0bef905ce7eb7e2106a07712015ea1c8234b703a088d46110e8e"}, + {file = "pywin32-308-cp310-cp310-win_arm64.whl", hash = "sha256:a5ab5381813b40f264fa3495b98af850098f814a25a63589a8e9eb12560f450c"}, + {file = "pywin32-308-cp311-cp311-win32.whl", hash = "sha256:5d8c8015b24a7d6855b1550d8e660d8daa09983c80e5daf89a273e5c6fb5095a"}, + {file = "pywin32-308-cp311-cp311-win_amd64.whl", hash = "sha256:575621b90f0dc2695fec346b2d6302faebd4f0f45c05ea29404cefe35d89442b"}, + {file = "pywin32-308-cp311-cp311-win_arm64.whl", hash = "sha256:100a5442b7332070983c4cd03f2e906a5648a5104b8a7f50175f7906efd16bb6"}, + {file = "pywin32-308-cp312-cp312-win32.whl", hash = "sha256:587f3e19696f4bf96fde9d8a57cec74a57021ad5f204c9e627e15c33ff568897"}, + {file 
= "pywin32-308-cp312-cp312-win_amd64.whl", hash = "sha256:00b3e11ef09ede56c6a43c71f2d31857cf7c54b0ab6e78ac659497abd2834f47"}, + {file = "pywin32-308-cp312-cp312-win_arm64.whl", hash = "sha256:9b4de86c8d909aed15b7011182c8cab38c8850de36e6afb1f0db22b8959e3091"}, + {file = "pywin32-308-cp313-cp313-win32.whl", hash = "sha256:1c44539a37a5b7b21d02ab34e6a4d314e0788f1690d65b48e9b0b89f31abbbed"}, + {file = "pywin32-308-cp313-cp313-win_amd64.whl", hash = "sha256:fd380990e792eaf6827fcb7e187b2b4b1cede0585e3d0c9e84201ec27b9905e4"}, + {file = "pywin32-308-cp313-cp313-win_arm64.whl", hash = "sha256:ef313c46d4c18dfb82a2431e3051ac8f112ccee1a34f29c263c583c568db63cd"}, + {file = "pywin32-308-cp37-cp37m-win32.whl", hash = "sha256:1f696ab352a2ddd63bd07430080dd598e6369152ea13a25ebcdd2f503a38f1ff"}, + {file = "pywin32-308-cp37-cp37m-win_amd64.whl", hash = "sha256:13dcb914ed4347019fbec6697a01a0aec61019c1046c2b905410d197856326a6"}, + {file = "pywin32-308-cp38-cp38-win32.whl", hash = "sha256:5794e764ebcabf4ff08c555b31bd348c9025929371763b2183172ff4708152f0"}, + {file = "pywin32-308-cp38-cp38-win_amd64.whl", hash = "sha256:3b92622e29d651c6b783e368ba7d6722b1634b8e70bd376fd7610fe1992e19de"}, + {file = "pywin32-308-cp39-cp39-win32.whl", hash = "sha256:7873ca4dc60ab3287919881a7d4f88baee4a6e639aa6962de25a98ba6b193341"}, + {file = "pywin32-308-cp39-cp39-win_amd64.whl", hash = "sha256:71b3322d949b4cc20776436a9c9ba0eeedcbc9c650daa536df63f0ff111bb920"}, ] [[package]] @@ -3719,19 +3906,19 @@ files = [ [[package]] name = "rich" -version = "13.8.1" +version = "13.9.2" description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal" optional = false -python-versions = ">=3.7.0" +python-versions = ">=3.8.0" files = [ - {file = "rich-13.8.1-py3-none-any.whl", hash = "sha256:1760a3c0848469b97b558fc61c85233e3dafb69c7a071b4d60c38099d3cd4c06"}, - {file = "rich-13.8.1.tar.gz", hash = "sha256:8260cda28e3db6bf04d2d1ef4dbc03ba80a824c88b0e7668a0f23126a424844a"}, + {file = "rich-13.9.2-py3-none-any.whl", hash = "sha256:8c82a3d3f8dcfe9e734771313e606b39d8247bb6b826e196f4914b333b743cf1"}, + {file = "rich-13.9.2.tar.gz", hash = "sha256:51a2c62057461aaf7152b4d611168f93a9fc73068f8ded2790f29fe2b5366d0c"}, ] [package.dependencies] markdown-it-py = ">=2.2.0" pygments = ">=2.13.0,<3.0.0" -typing-extensions = {version = ">=4.0.0,<5.0", markers = "python_version < \"3.9\""} +typing-extensions = {version = ">=4.0.0,<5.0", markers = "python_version < \"3.11\""} [package.extras] jupyter = ["ipywidgets (>=7.5.1,<9)"] @@ -3850,29 +4037,29 @@ files = [ [[package]] name = "ruff" -version = "0.6.8" +version = "0.7.3" description = "An extremely fast Python linter and code formatter, written in Rust." 
optional = false python-versions = ">=3.7" files = [ - {file = "ruff-0.6.8-py3-none-linux_armv6l.whl", hash = "sha256:77944bca110ff0a43b768f05a529fecd0706aac7bcce36d7f1eeb4cbfca5f0f2"}, - {file = "ruff-0.6.8-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:27b87e1801e786cd6ede4ada3faa5e254ce774de835e6723fd94551464c56b8c"}, - {file = "ruff-0.6.8-py3-none-macosx_11_0_arm64.whl", hash = "sha256:cd48f945da2a6334f1793d7f701725a76ba93bf3d73c36f6b21fb04d5338dcf5"}, - {file = "ruff-0.6.8-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:677e03c00f37c66cea033274295a983c7c546edea5043d0c798833adf4cf4c6f"}, - {file = "ruff-0.6.8-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:9f1476236b3eacfacfc0f66aa9e6cd39f2a624cb73ea99189556015f27c0bdeb"}, - {file = "ruff-0.6.8-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6f5a2f17c7d32991169195d52a04c95b256378bbf0de8cb98478351eb70d526f"}, - {file = "ruff-0.6.8-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:5fd0d4b7b1457c49e435ee1e437900ced9b35cb8dc5178921dfb7d98d65a08d0"}, - {file = "ruff-0.6.8-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f8034b19b993e9601f2ddf2c517451e17a6ab5cdb1c13fdff50c1442a7171d87"}, - {file = "ruff-0.6.8-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:6cfb227b932ba8ef6e56c9f875d987973cd5e35bc5d05f5abf045af78ad8e098"}, - {file = "ruff-0.6.8-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6ef0411eccfc3909269fed47c61ffebdcb84a04504bafa6b6df9b85c27e813b0"}, - {file = "ruff-0.6.8-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:007dee844738c3d2e6c24ab5bc7d43c99ba3e1943bd2d95d598582e9c1b27750"}, - {file = "ruff-0.6.8-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:ce60058d3cdd8490e5e5471ef086b3f1e90ab872b548814e35930e21d848c9ce"}, - {file = "ruff-0.6.8-py3-none-musllinux_1_2_i686.whl", hash = "sha256:1085c455d1b3fdb8021ad534379c60353b81ba079712bce7a900e834859182fa"}, - {file = "ruff-0.6.8-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:70edf6a93b19481affd287d696d9e311388d808671bc209fb8907b46a8c3af44"}, - {file = "ruff-0.6.8-py3-none-win32.whl", hash = "sha256:792213f7be25316f9b46b854df80a77e0da87ec66691e8f012f887b4a671ab5a"}, - {file = "ruff-0.6.8-py3-none-win_amd64.whl", hash = "sha256:ec0517dc0f37cad14a5319ba7bba6e7e339d03fbf967a6d69b0907d61be7a263"}, - {file = "ruff-0.6.8-py3-none-win_arm64.whl", hash = "sha256:8d3bb2e3fbb9875172119021a13eed38849e762499e3cfde9588e4b4d70968dc"}, - {file = "ruff-0.6.8.tar.gz", hash = "sha256:a5bf44b1aa0adaf6d9d20f86162b34f7c593bfedabc51239953e446aefc8ce18"}, + {file = "ruff-0.7.3-py3-none-linux_armv6l.whl", hash = "sha256:34f2339dc22687ec7e7002792d1f50712bf84a13d5152e75712ac08be565d344"}, + {file = "ruff-0.7.3-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:fb397332a1879b9764a3455a0bb1087bda876c2db8aca3a3cbb67b3dbce8cda0"}, + {file = "ruff-0.7.3-py3-none-macosx_11_0_arm64.whl", hash = "sha256:37d0b619546103274e7f62643d14e1adcbccb242efda4e4bdb9544d7764782e9"}, + {file = "ruff-0.7.3-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5d59f0c3ee4d1a6787614e7135b72e21024875266101142a09a61439cb6e38a5"}, + {file = "ruff-0.7.3-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:44eb93c2499a169d49fafd07bc62ac89b1bc800b197e50ff4633aed212569299"}, + {file = "ruff-0.7.3-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:6d0242ce53f3a576c35ee32d907475a8d569944c0407f91d207c8af5be5dae4e"}, + {file = "ruff-0.7.3-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:6b6224af8b5e09772c2ecb8dc9f3f344c1aa48201c7f07e7315367f6dd90ac29"}, + {file = "ruff-0.7.3-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c50f95a82b94421c964fae4c27c0242890a20fe67d203d127e84fbb8013855f5"}, + {file = "ruff-0.7.3-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7f3eff9961b5d2644bcf1616c606e93baa2d6b349e8aa8b035f654df252c8c67"}, + {file = "ruff-0.7.3-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8963cab06d130c4df2fd52c84e9f10d297826d2e8169ae0c798b6221be1d1d2"}, + {file = "ruff-0.7.3-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:61b46049d6edc0e4317fb14b33bd693245281a3007288b68a3f5b74a22a0746d"}, + {file = "ruff-0.7.3-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:10ebce7696afe4644e8c1a23b3cf8c0f2193a310c18387c06e583ae9ef284de2"}, + {file = "ruff-0.7.3-py3-none-musllinux_1_2_i686.whl", hash = "sha256:3f36d56326b3aef8eeee150b700e519880d1aab92f471eefdef656fd57492aa2"}, + {file = "ruff-0.7.3-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:5d024301109a0007b78d57ab0ba190087b43dce852e552734ebf0b0b85e4fb16"}, + {file = "ruff-0.7.3-py3-none-win32.whl", hash = "sha256:4ba81a5f0c5478aa61674c5a2194de8b02652f17addf8dfc40c8937e6e7d79fc"}, + {file = "ruff-0.7.3-py3-none-win_amd64.whl", hash = "sha256:588a9ff2fecf01025ed065fe28809cd5a53b43505f48b69a1ac7707b1b7e4088"}, + {file = "ruff-0.7.3-py3-none-win_arm64.whl", hash = "sha256:1713e2c5545863cdbfe2cbce21f69ffaf37b813bfd1fb3b90dc9a6f1963f5a8c"}, + {file = "ruff-0.7.3.tar.gz", hash = "sha256:e1d1ba2e40b6e71a61b063354d04be669ab0d39c352461f3d789cac68b54a313"}, ] [[package]] @@ -3955,6 +4142,56 @@ dev = ["cython-lint (>=0.12.2)", "doit (>=0.36.0)", "mypy", "pycodestyle", "pyde doc = ["jupyterlite-pyodide-kernel", "jupyterlite-sphinx (>=0.12.0)", "jupytext", "matplotlib (>=3.5)", "myst-nb", "numpydoc", "pooch", "pydata-sphinx-theme (>=0.15.2)", "sphinx (>=5.0.0)", "sphinx-design (>=0.4.0)"] test = ["array-api-strict", "asv", "gmpy2", "hypothesis (>=6.30)", "mpmath", "pooch", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] +[[package]] +name = "scipy" +version = "1.14.1" +description = "Fundamental algorithms for scientific computing in Python" +optional = false +python-versions = ">=3.10" +files = [ + {file = "scipy-1.14.1-cp310-cp310-macosx_10_13_x86_64.whl", hash = "sha256:b28d2ca4add7ac16ae8bb6632a3c86e4b9e4d52d3e34267f6e1b0c1f8d87e389"}, + {file = "scipy-1.14.1-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:d0d2821003174de06b69e58cef2316a6622b60ee613121199cb2852a873f8cf3"}, + {file = "scipy-1.14.1-cp310-cp310-macosx_14_0_arm64.whl", hash = "sha256:8bddf15838ba768bb5f5083c1ea012d64c9a444e16192762bd858f1e126196d0"}, + {file = "scipy-1.14.1-cp310-cp310-macosx_14_0_x86_64.whl", hash = "sha256:97c5dddd5932bd2a1a31c927ba5e1463a53b87ca96b5c9bdf5dfd6096e27efc3"}, + {file = "scipy-1.14.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2ff0a7e01e422c15739ecd64432743cf7aae2b03f3084288f399affcefe5222d"}, + {file = "scipy-1.14.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8e32dced201274bf96899e6491d9ba3e9a5f6b336708656466ad0522d8528f69"}, + {file = "scipy-1.14.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:8426251ad1e4ad903a4514712d2fa8fdd5382c978010d1c6f5f37ef286a713ad"}, + {file = 
"scipy-1.14.1-cp310-cp310-win_amd64.whl", hash = "sha256:a49f6ed96f83966f576b33a44257d869756df6cf1ef4934f59dd58b25e0327e5"}, + {file = "scipy-1.14.1-cp311-cp311-macosx_10_13_x86_64.whl", hash = "sha256:2da0469a4ef0ecd3693761acbdc20f2fdeafb69e6819cc081308cc978153c675"}, + {file = "scipy-1.14.1-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:c0ee987efa6737242745f347835da2cc5bb9f1b42996a4d97d5c7ff7928cb6f2"}, + {file = "scipy-1.14.1-cp311-cp311-macosx_14_0_arm64.whl", hash = "sha256:3a1b111fac6baec1c1d92f27e76511c9e7218f1695d61b59e05e0fe04dc59617"}, + {file = "scipy-1.14.1-cp311-cp311-macosx_14_0_x86_64.whl", hash = "sha256:8475230e55549ab3f207bff11ebfc91c805dc3463ef62eda3ccf593254524ce8"}, + {file = "scipy-1.14.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:278266012eb69f4a720827bdd2dc54b2271c97d84255b2faaa8f161a158c3b37"}, + {file = "scipy-1.14.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fef8c87f8abfb884dac04e97824b61299880c43f4ce675dd2cbeadd3c9b466d2"}, + {file = "scipy-1.14.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:b05d43735bb2f07d689f56f7b474788a13ed8adc484a85aa65c0fd931cf9ccd2"}, + {file = "scipy-1.14.1-cp311-cp311-win_amd64.whl", hash = "sha256:716e389b694c4bb564b4fc0c51bc84d381735e0d39d3f26ec1af2556ec6aad94"}, + {file = "scipy-1.14.1-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:631f07b3734d34aced009aaf6fedfd0eb3498a97e581c3b1e5f14a04164a456d"}, + {file = "scipy-1.14.1-cp312-cp312-macosx_12_0_arm64.whl", hash = "sha256:af29a935803cc707ab2ed7791c44288a682f9c8107bc00f0eccc4f92c08d6e07"}, + {file = "scipy-1.14.1-cp312-cp312-macosx_14_0_arm64.whl", hash = "sha256:2843f2d527d9eebec9a43e6b406fb7266f3af25a751aa91d62ff416f54170bc5"}, + {file = "scipy-1.14.1-cp312-cp312-macosx_14_0_x86_64.whl", hash = "sha256:eb58ca0abd96911932f688528977858681a59d61a7ce908ffd355957f7025cfc"}, + {file = "scipy-1.14.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:30ac8812c1d2aab7131a79ba62933a2a76f582d5dbbc695192453dae67ad6310"}, + {file = "scipy-1.14.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8f9ea80f2e65bdaa0b7627fb00cbeb2daf163caa015e59b7516395fe3bd1e066"}, + {file = "scipy-1.14.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:edaf02b82cd7639db00dbff629995ef185c8df4c3ffa71a5562a595765a06ce1"}, + {file = "scipy-1.14.1-cp312-cp312-win_amd64.whl", hash = "sha256:2ff38e22128e6c03ff73b6bb0f85f897d2362f8c052e3b8ad00532198fbdae3f"}, + {file = "scipy-1.14.1-cp313-cp313-macosx_10_13_x86_64.whl", hash = "sha256:1729560c906963fc8389f6aac023739ff3983e727b1a4d87696b7bf108316a79"}, + {file = "scipy-1.14.1-cp313-cp313-macosx_12_0_arm64.whl", hash = "sha256:4079b90df244709e675cdc8b93bfd8a395d59af40b72e339c2287c91860deb8e"}, + {file = "scipy-1.14.1-cp313-cp313-macosx_14_0_arm64.whl", hash = "sha256:e0cf28db0f24a38b2a0ca33a85a54852586e43cf6fd876365c86e0657cfe7d73"}, + {file = "scipy-1.14.1-cp313-cp313-macosx_14_0_x86_64.whl", hash = "sha256:0c2f95de3b04e26f5f3ad5bb05e74ba7f68b837133a4492414b3afd79dfe540e"}, + {file = "scipy-1.14.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b99722ea48b7ea25e8e015e8341ae74624f72e5f21fc2abd45f3a93266de4c5d"}, + {file = "scipy-1.14.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5149e3fd2d686e42144a093b206aef01932a0059c2a33ddfa67f5f035bdfe13e"}, + {file = "scipy-1.14.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = 
"sha256:e4f5a7c49323533f9103d4dacf4e4f07078f360743dec7f7596949149efeec06"}, + {file = "scipy-1.14.1-cp313-cp313-win_amd64.whl", hash = "sha256:baff393942b550823bfce952bb62270ee17504d02a1801d7fd0719534dfb9c84"}, + {file = "scipy-1.14.1.tar.gz", hash = "sha256:5a275584e726026a5699459aa72f828a610821006228e841b94275c4a7c08417"}, +] + +[package.dependencies] +numpy = ">=1.23.5,<2.3" + +[package.extras] +dev = ["cython-lint (>=0.12.2)", "doit (>=0.36.0)", "mypy (==1.10.0)", "pycodestyle", "pydevtool", "rich-click", "ruff (>=0.0.292)", "types-psutil", "typing_extensions"] +doc = ["jupyterlite-pyodide-kernel", "jupyterlite-sphinx (>=0.13.1)", "jupytext", "matplotlib (>=3.5)", "myst-nb", "numpydoc", "pooch", "pydata-sphinx-theme (>=0.15.2)", "sphinx (>=5.0.0,<=7.3.7)", "sphinx-design (>=0.4.0)"] +test = ["Cython", "array-api-strict (>=2.0)", "asv", "gmpy2", "hypothesis (>=6.30)", "meson", "mpmath", "ninja", "pooch", "pytest", "pytest-cov", "pytest-timeout", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] + [[package]] name = "secretstorage" version = "3.3.3" @@ -4110,13 +4347,13 @@ test = ["pytest", "ruff"] [[package]] name = "tomli" -version = "2.0.1" +version = "2.0.2" description = "A lil' TOML parser" optional = false -python-versions = ">=3.7" +python-versions = ">=3.8" files = [ - {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, - {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, + {file = "tomli-2.0.2-py3-none-any.whl", hash = "sha256:2ebe24485c53d303f690b0ec092806a085f07af5a5aa1464f3931eec36caaa38"}, + {file = "tomli-2.0.2.tar.gz", hash = "sha256:d46d457a85337051c36524bc5349dd91b1877838e2979ac5ced3e710ed8a60ed"}, ] [[package]] @@ -4141,13 +4378,13 @@ files = [ [[package]] name = "tox" -version = "4.21.2" +version = "4.23.2" description = "tox is a generic virtualenv management and test command line tool" optional = false python-versions = ">=3.8" files = [ - {file = "tox-4.21.2-py3-none-any.whl", hash = "sha256:13d996adcd792e7c82994b0e116d85efd84f0c6d185254d83d156f73f86b2038"}, - {file = "tox-4.21.2.tar.gz", hash = "sha256:49381ff102296753e378fa5ff30e42a35e695f149b4dbf8a2c49d15fdb5797b2"}, + {file = "tox-4.23.2-py3-none-any.whl", hash = "sha256:452bc32bb031f2282881a2118923176445bac783ab97c874b8770ab4c3b76c38"}, + {file = "tox-4.23.2.tar.gz", hash = "sha256:86075e00e555df6e82e74cfc333917f91ecb47ffbc868dcafbd2672e332f4a2c"}, ] [package.dependencies] @@ -4164,18 +4401,17 @@ typing-extensions = {version = ">=4.12.2", markers = "python_version < \"3.11\"" virtualenv = ">=20.26.6" [package.extras] -docs = ["furo (>=2024.8.6)", "sphinx (>=8.0.2)", "sphinx-argparse-cli (>=1.18.2)", "sphinx-autodoc-typehints (>=2.4.4)", "sphinx-copybutton (>=0.5.2)", "sphinx-inline-tabs (>=2023.4.21)", "sphinxcontrib-towncrier (>=0.2.1a0)", "towncrier (>=24.8)"] -testing = ["build[virtualenv] (>=1.2.2)", "covdefaults (>=2.3)", "detect-test-pollution (>=1.2)", "devpi-process (>=1.0.2)", "diff-cover (>=9.2)", "distlib (>=0.3.8)", "flaky (>=3.8.1)", "hatch-vcs (>=0.4)", "hatchling (>=1.25)", "psutil (>=6)", "pytest (>=8.3.3)", "pytest-cov (>=5)", "pytest-mock (>=3.14)", "pytest-xdist (>=3.6.1)", "re-assert (>=1.1)", "setuptools (>=75.1)", "time-machine (>=2.15)", "wheel (>=0.44)"] +test = ["devpi-process (>=1.0.2)", "pytest (>=8.3.3)", "pytest-mock (>=3.14)"] [[package]] name = "tqdm" -version = "4.66.5" +version = "4.67.0" description = "Fast, Extensible Progress Meter" optional = 
true python-versions = ">=3.7" files = [ - {file = "tqdm-4.66.5-py3-none-any.whl", hash = "sha256:90279a3770753eafc9194a0364852159802111925aa30eb3f9d85b0e805ac7cd"}, - {file = "tqdm-4.66.5.tar.gz", hash = "sha256:e1020aef2e5096702d8a025ac7d16b1577279c9d63f8375b63083e9a5f0fcbad"}, + {file = "tqdm-4.67.0-py3-none-any.whl", hash = "sha256:0cd8af9d56911acab92182e88d763100d4788bdf421d251616040cc4d44863be"}, + {file = "tqdm-4.67.0.tar.gz", hash = "sha256:fe5a6f95e6fe0b9755e9469b77b9c3cf850048224ecaa8293d7d2d31f97d869a"}, ] [package.dependencies] @@ -4183,6 +4419,7 @@ colorama = {version = "*", markers = "platform_system == \"Windows\""} [package.extras] dev = ["pytest (>=6)", "pytest-cov", "pytest-timeout", "pytest-xdist"] +discord = ["requests"] notebook = ["ipywidgets (>=6)"] slack = ["slack-sdk"] telegram = ["requests"] @@ -4240,24 +4477,24 @@ types-setuptools = "*" [[package]] name = "types-python-dateutil" -version = "2.9.0.20240906" +version = "2.9.0.20241003" description = "Typing stubs for python-dateutil" optional = false python-versions = ">=3.8" files = [ - {file = "types-python-dateutil-2.9.0.20240906.tar.gz", hash = "sha256:9706c3b68284c25adffc47319ecc7947e5bb86b3773f843c73906fd598bc176e"}, - {file = "types_python_dateutil-2.9.0.20240906-py3-none-any.whl", hash = "sha256:27c8cc2d058ccb14946eebcaaa503088f4f6dbc4fb6093d3d456a49aef2753f6"}, + {file = "types-python-dateutil-2.9.0.20241003.tar.gz", hash = "sha256:58cb85449b2a56d6684e41aeefb4c4280631246a0da1a719bdbe6f3fb0317446"}, + {file = "types_python_dateutil-2.9.0.20241003-py3-none-any.whl", hash = "sha256:250e1d8e80e7bbc3a6c99b907762711d1a1cdd00e978ad39cb5940f6f0a87f3d"}, ] [[package]] name = "types-pytz" -version = "2024.2.0.20240913" +version = "2024.2.0.20241003" description = "Typing stubs for pytz" optional = true python-versions = ">=3.8" files = [ - {file = "types-pytz-2024.2.0.20240913.tar.gz", hash = "sha256:4433b5df4a6fc587bbed41716d86a5ba5d832b4378e506f40d34bc9c81df2c24"}, - {file = "types_pytz-2024.2.0.20240913-py3-none-any.whl", hash = "sha256:a1eebf57ebc6e127a99d2fa2ba0a88d2b173784ef9b3defcc2004ab6855a44df"}, + {file = "types-pytz-2024.2.0.20241003.tar.gz", hash = "sha256:575dc38f385a922a212bac00a7d6d2e16e141132a3c955078f4a4fd13ed6cb44"}, + {file = "types_pytz-2024.2.0.20241003-py3-none-any.whl", hash = "sha256:3e22df1336c0c6ad1d29163c8fda82736909eb977281cb823c57f8bae07118b7"}, ] [[package]] @@ -4273,13 +4510,13 @@ files = [ [[package]] name = "types-requests" -version = "2.32.0.20240914" +version = "2.32.0.20241016" description = "Typing stubs for requests" optional = false python-versions = ">=3.8" files = [ - {file = "types-requests-2.32.0.20240914.tar.gz", hash = "sha256:2850e178db3919d9bf809e434eef65ba49d0e7e33ac92d588f4a5e295fffd405"}, - {file = "types_requests-2.32.0.20240914-py3-none-any.whl", hash = "sha256:59c2f673eb55f32a99b2894faf6020e1a9f4a402ad0f192bfee0b64469054310"}, + {file = "types-requests-2.32.0.20241016.tar.gz", hash = "sha256:0d9cad2f27515d0e3e3da7134a1b6f28fb97129d86b867f24d9c726452634d95"}, + {file = "types_requests-2.32.0.20241016-py3-none-any.whl", hash = "sha256:4195d62d6d3e043a4eaaf08ff8a62184584d2e8684e9d2aa178c7915a7da3747"}, ] [package.dependencies] @@ -4287,13 +4524,13 @@ urllib3 = ">=2" [[package]] name = "types-setuptools" -version = "75.1.0.20240917" +version = "75.1.0.20241014" description = "Typing stubs for setuptools" optional = false python-versions = ">=3.8" files = [ - {file = "types-setuptools-75.1.0.20240917.tar.gz", hash = 
"sha256:12f12a165e7ed383f31def705e5c0fa1c26215dd466b0af34bd042f7d5331f55"}, - {file = "types_setuptools-75.1.0.20240917-py3-none-any.whl", hash = "sha256:06f78307e68d1bbde6938072c57b81cf8a99bc84bd6dc7e4c5014730b097dc0c"}, + {file = "types-setuptools-75.1.0.20241014.tar.gz", hash = "sha256:29b0560a8d4b4a91174be085847002c69abfcb048e20b33fc663005aedf56804"}, + {file = "types_setuptools-75.1.0.20241014-py3-none-any.whl", hash = "sha256:caab58366741fb99673d0138b6e2d760717f154cfb981b74fea5e8de40f0b703"}, ] [[package]] @@ -4496,13 +4733,13 @@ test = ["websockets"] [[package]] name = "wheel" -version = "0.44.0" +version = "0.45.0" description = "A built-package format for Python" optional = false python-versions = ">=3.8" files = [ - {file = "wheel-0.44.0-py3-none-any.whl", hash = "sha256:2376a90c98cc337d18623527a97c31797bd02bad0033d41547043a1cbfbe448f"}, - {file = "wheel-0.44.0.tar.gz", hash = "sha256:a29c3f2817e95ab89aa4660681ad547c0e9547f20e75b0562fe7723c9a2a9d49"}, + {file = "wheel-0.45.0-py3-none-any.whl", hash = "sha256:52f0baa5e6522155090a09c6bd95718cc46956d1b51d537ea5454249edb671c7"}, + {file = "wheel-0.45.0.tar.gz", hash = "sha256:a57353941a3183b3d5365346b567a260a0602a0f8a635926a7dede41b94c674a"}, ] [package.extras] @@ -4545,4 +4782,4 @@ metrics = ["pandas", "pandas-stubs"] [metadata] lock-version = "2.0" python-versions = "^3.8" -content-hash = "ab2e2c455fa1a7d74271da71f8c1b6f096bbebd92a79b4ec646523ef7d8530b0" +content-hash = "85f56a451ee3e0f2c00c8a39b1433d4fb54a239f14f4878e5ded30bc63729734" diff --git a/pyproject.toml b/pyproject.toml index c9aa384e7..d3084d52b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,6 @@ [tool.poetry] name = "supervision" -version = "0.24.0" +version = "0.25.0" description = "A set of easy-to-use utils that will come in handy in any Computer Vision project" authors = ["Piotr Skalski "] maintainers = [ @@ -9,7 +9,7 @@ maintainers = [ ] readme = "README.md" license = "MIT" -packages = [{ include = "supervision" }] +packages = [{ include = "supervision" }, { include = "supervision/py.typed" }] homepage = "https://github.com/roboflow/supervision" repository = "https://github.com/roboflow/supervision" documentation = "https://supervision.roboflow.com/latest/" @@ -46,17 +46,32 @@ python = "^3.8" numpy = [ { version = ">=1.21.2,<1.23.3", python = "<=3.10" }, { version = ">=1.23.3", python = ">3.10" }, + { version = ">=2.1.0", python = ">=3.13" }, ] scipy = [ { version = "1.10.0", python = "<3.9" }, { version = "^1.10.0", python = ">=3.9" }, + { version = ">=1.14.1", python = ">=3.13" }, + +] + +# Matplotlib sub-dependency +# The 'contourpy' package is required by Matplotlib for contour plotting. +# We need to ensure compatibility with both Python 3.8 and Python 3.13. +# +# For Python 3.8 and above, we use version 1.0.7 or higher, as it is the lowest major version that supports Python 3.8. +# For Python 3.13 and above, we use version 1.3.0 or higher, as it is the first version that explicitly supports Python 3.13. 
+contourpy = [ + { version = ">=1.0.7", python = ">=3.8" }, + { version = ">=1.3.0", python = ">=3.13" }, ] + matplotlib = ">=3.6.0" pyyaml = ">=5.3" defusedxml = "^0.7.1" pillow = ">=9.4" requests = { version = ">=2.26.0,<=2.32.3", optional = true } -tqdm = { version = ">=4.62.3,<=4.66.5", optional = true } +tqdm = { version = ">=4.62.3,<=4.67.0", optional = true } # pandas: picked lowest major version that supports Python 3.8 pandas = { version = ">=2.0.0", optional = true } pandas-stubs = { version = ">=2.0.0.230412", optional = true } @@ -69,7 +84,7 @@ metrics = ["pandas", "pandas-stubs"] [tool.poetry.group.dev.dependencies] twine = "^5.1.1" pytest = ">=7.2.2,<9.0.0" -wheel = ">=0.40,<0.45" +wheel = ">=0.40,<0.46" build = ">=0.10,<1.3" ruff = ">=0.1.0" mypy = "^1.4.1" diff --git a/supervision/annotators/core.py b/supervision/annotators/core.py index 1910ac9f4..02ab47d6f 100644 --- a/supervision/annotators/core.py +++ b/supervision/annotators/core.py @@ -16,7 +16,7 @@ ) from supervision.config import CLASS_NAME_DATA_FIELD, ORIENTED_BOX_COORDINATES from supervision.detection.core import Detections -from supervision.detection.utils import clip_boxes, mask_to_polygons +from supervision.detection.utils import clip_boxes, mask_to_polygons, spread_out_boxes from supervision.draw.color import Color, ColorPalette from supervision.draw.utils import draw_polygon from supervision.geometry.core import Position @@ -32,6 +32,8 @@ ) from supervision.utils.internal import deprecated +CV2_FONT = cv2.FONT_HERSHEY_SIMPLEX + class BoxAnnotator(BaseAnnotator): """ @@ -1054,6 +1056,7 @@ def __init__( text_position: Position = Position.TOP_LEFT, color_lookup: ColorLookup = ColorLookup.CLASS, border_radius: int = 0, + smart_position: bool = False, ): """ Args: @@ -1070,6 +1073,7 @@ def __init__( Options are `INDEX`, `CLASS`, `TRACK`. border_radius (int): The radius to apply round edges. If the selected value is higher than the lower dimension, width or height, is clipped. + smart_position (bool): Spread out the labels to avoid overlapping. """ self.border_radius: int = border_radius self.color: Union[Color, ColorPalette] = color @@ -1079,6 +1083,7 @@ def __init__( self.text_padding: int = text_padding self.text_anchor: Position = text_position self.color_lookup: ColorLookup = color_lookup + self.smart_position = smart_position @ensure_cv2_image_for_annotation def annotate( @@ -1128,11 +1133,29 @@ def annotate( ![label-annotator-example](https://media.roboflow.com/ supervision-annotator-examples/label-annotator-example-purple.png) """ + assert isinstance(scene, np.ndarray) - font = cv2.FONT_HERSHEY_SIMPLEX - anchors_coordinates = detections.get_anchors_coordinates( - anchor=self.text_anchor - ).astype(int) + self._validate_labels(labels, detections) + + labels = self._get_labels_text(detections, labels) + label_properties = self._get_label_properties(detections, labels) + + if self.smart_position: + xyxy = label_properties[:, :4] + xyxy = spread_out_boxes(xyxy) + label_properties[:, :4] = xyxy + + self._draw_labels( + scene=scene, + labels=labels, + label_properties=label_properties, + detections=detections, + custom_color_lookup=custom_color_lookup, + ) + + return scene + + def _validate_labels(self, labels: Optional[List[str]], detections: Detections): if labels is not None and len(labels) != len(detections): raise ValueError( f"The number of labels ({len(labels)}) does not match the " @@ -1140,72 +1163,121 @@ def annotate( f"should have exactly 1 label." 
) - for detection_idx, center_coordinates in enumerate(anchors_coordinates): - color = resolve_color( - color=self.color, - detections=detections, - detection_idx=detection_idx, - color_lookup=( - self.color_lookup - if custom_color_lookup is None - else custom_color_lookup - ), - ) - - text_color = resolve_color( - color=self.text_color, - detections=detections, - detection_idx=detection_idx, - color_lookup=( - self.color_lookup - if custom_color_lookup is None - else custom_color_lookup - ), - ) + def _get_label_properties( + self, + detections: Detections, + labels: List[str], + ) -> np.ndarray: + """ + Calculate the numerical properties required to draw the labels on the image. - if labels is not None: - text = labels[detection_idx] - elif CLASS_NAME_DATA_FIELD in detections.data: - text = detections.data[CLASS_NAME_DATA_FIELD][detection_idx] - elif detections.class_id is not None: - text = str(detections.class_id[detection_idx]) - else: - text = str(detection_idx) + Returns: + (np.ndarray): An array of label properties, containing columns: + `min_x`, `min_y`, `max_x`, `max_y`, `padded_text_height`. + """ + label_properties = [] + anchors_coordinates = detections.get_anchors_coordinates( + anchor=self.text_anchor + ).astype(int) - text_w, text_h = cv2.getTextSize( - text=text, - fontFace=font, + for label, center_coords in zip(labels, anchors_coordinates): + (text_w, text_h) = cv2.getTextSize( + text=label, + fontFace=CV2_FONT, fontScale=self.text_scale, thickness=self.text_thickness, )[0] - text_w_padded = text_w + 2 * self.text_padding - text_h_padded = text_h + 2 * self.text_padding + + width_padded = text_w + 2 * self.text_padding + height_padded = text_h + 2 * self.text_padding + text_background_xyxy = resolve_text_background_xyxy( - center_coordinates=tuple(center_coordinates), - text_wh=(text_w_padded, text_h_padded), + center_coordinates=tuple(center_coords), + text_wh=(width_padded, height_padded), position=self.text_anchor, ) - text_x = text_background_xyxy[0] + self.text_padding - text_y = text_background_xyxy[1] + self.text_padding + text_h + label_properties.append( + [ + *text_background_xyxy, + text_h, + ] + ) + + return np.array(label_properties).reshape(-1, 5) + + @staticmethod + def _get_labels_text( + detections: Detections, custom_labels: Optional[List[str]] + ) -> List[str]: + if custom_labels is not None: + return custom_labels + + labels = [] + for idx in range(len(detections)): + if CLASS_NAME_DATA_FIELD in detections.data: + labels.append(detections.data[CLASS_NAME_DATA_FIELD][idx]) + elif detections.class_id is not None: + labels.append(str(detections.class_id[idx])) + else: + labels.append(str(idx)) + return labels + + def _draw_labels( + self, + scene: np.ndarray, + labels: List[str], + label_properties: np.ndarray, + detections: Detections, + custom_color_lookup: Optional[np.ndarray], + ) -> None: + assert len(labels) == len(label_properties) == len(detections), ( + f"Number of label properties ({len(label_properties)}), " + f"labels ({len(labels)}) and detections ({len(detections)}) " + "do not match." 
+ ) + + color_lookup = ( + custom_color_lookup + if custom_color_lookup is not None + else self.color_lookup + ) + + for idx, label_property in enumerate(label_properties): + background_color = resolve_color( + color=self.color, + detections=detections, + detection_idx=idx, + color_lookup=color_lookup, + ) + text_color = resolve_color( + color=self.text_color, + detections=detections, + detection_idx=idx, + color_lookup=color_lookup, + ) + box_xyxy = label_property[:4] + text_height_padded = label_property[4] self.draw_rounded_rectangle( scene=scene, - xyxy=text_background_xyxy, - color=color.as_bgr(), + xyxy=box_xyxy, + color=background_color.as_bgr(), border_radius=self.border_radius, ) + + text_x = box_xyxy[0] + self.text_padding + text_y = box_xyxy[1] + self.text_padding + text_height_padded cv2.putText( img=scene, - text=text, + text=labels[idx], org=(text_x, text_y), - fontFace=font, + fontFace=CV2_FONT, fontScale=self.text_scale, color=text_color.as_bgr(), thickness=self.text_thickness, lineType=cv2.LINE_AA, ) - return scene @staticmethod def draw_rounded_rectangle( @@ -1266,6 +1338,7 @@ def __init__( text_position: Position = Position.TOP_LEFT, color_lookup: ColorLookup = ColorLookup.CLASS, border_radius: int = 0, + smart_position: bool = False, ): """ Args: @@ -1282,6 +1355,7 @@ def __init__( Options are `INDEX`, `CLASS`, `TRACK`. border_radius (int): The radius to apply round edges. If the selected value is higher than the lower dimension, width or height, is clipped. + smart_position (bool): Spread out the labels to avoid overlapping. """ self.color = color self.text_color = text_color @@ -1289,14 +1363,8 @@ def __init__( self.text_anchor = text_position self.color_lookup = color_lookup self.border_radius = border_radius - if font_path is not None: - try: - self.font = ImageFont.truetype(font_path, font_size) - except OSError: - print(f"Font path '{font_path}' not found. Using PIL's default font.") - self.font = self._load_default_font(font_size) - else: - self.font = self._load_default_font(font_size) + self.smart_position = smart_position + self.font = self._load_font(font_size, font_path) @ensure_pil_image_for_annotation def annotate( @@ -1346,88 +1414,157 @@ def annotate( """ assert isinstance(scene, Image.Image) + self._validate_labels(labels, detections) + draw = ImageDraw.Draw(scene) - anchors_coordinates = detections.get_anchors_coordinates( - anchor=self.text_anchor - ).astype(int) + labels = self._get_labels_text(detections, labels) + label_properties = self._get_label_properties(draw, detections, labels) + + if self.smart_position: + xyxy = label_properties[:, :4] + xyxy = spread_out_boxes(xyxy) + label_properties[:, :4] = xyxy + + self._draw_labels( + draw=draw, + labels=labels, + label_properties=label_properties, + detections=detections, + custom_color_lookup=custom_color_lookup, + ) + + return scene + + def _validate_labels(self, labels: Optional[List[str]], detections: Detections): if labels is not None and len(labels) != len(detections): raise ValueError( - f"The number of labels provided ({len(labels)}) does not match the " - f"number of detections ({len(detections)}). Each detection should have " - f"a corresponding label." + f"The number of labels ({len(labels)}) does not match the " + f"number of detections ({len(detections)}). Each detection " + f"should have exactly 1 label." 
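Reviewer note: the new `smart_position` flag is threaded through both `LabelAnnotator` and `RichLabelAnnotator`. A minimal usage sketch, assuming the 0.25.0 API introduced in this diff (the frame and detections below are synthetic placeholders):

```python
import numpy as np
import supervision as sv

# Placeholder inputs: any BGR frame plus two deliberately overlapping boxes.
frame = np.zeros((480, 640, 3), dtype=np.uint8)
detections = sv.Detections(
    xyxy=np.array([[50.0, 50.0, 120.0, 120.0], [60.0, 55.0, 130.0, 125.0]]),
    class_id=np.array([0, 1]),
)

# smart_position=True routes the computed label boxes through
# spread_out_boxes() before drawing, so overlapping labels are pushed apart.
annotator = sv.LabelAnnotator(smart_position=True)
annotated = annotator.annotate(scene=frame.copy(), detections=detections)
```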
) - for detection_idx, center_coordinates in enumerate(anchors_coordinates): - color = resolve_color( - color=self.color, - detections=detections, - detection_idx=detection_idx, - color_lookup=( - self.color_lookup - if custom_color_lookup is None - else custom_color_lookup - ), + + def _get_label_properties( + self, draw, detections: Detections, labels: List[str] + ) -> np.ndarray: + """ + Calculate the numerical properties required to draw the labels on the image. + + Returns: + (np.ndarray): An array of label properties, containing columns: + `min_x`, `min_y`, `max_x`, `max_y`, `text_left_coordinate`, + `text_top_coordinate`. The first 4 values are already padded + with `text_padding`. + """ + label_properties = [] + + anchor_coordinates = detections.get_anchors_coordinates( + anchor=self.text_anchor + ).astype(int) + + for label, center_coords in zip(labels, anchor_coordinates): + text_left, text_top, text_right, text_bottom = draw.textbbox( + (0, 0), label, font=self.font ) + text_width = text_right - text_left + text_height = text_bottom - text_top + width_padded = text_width + 2 * self.text_padding + height_padded = text_height + 2 * self.text_padding - text_color = resolve_color( - color=self.text_color, - detections=detections, - detection_idx=detection_idx, - color_lookup=( - self.color_lookup - if custom_color_lookup is None - else custom_color_lookup - ), + text_background_xyxy = resolve_text_background_xyxy( + center_coordinates=tuple(center_coords), + text_wh=(width_padded, height_padded), + position=self.text_anchor, ) - if labels is not None: - text = labels[detection_idx] - elif CLASS_NAME_DATA_FIELD in detections.data: - text = detections.data[CLASS_NAME_DATA_FIELD][detection_idx] + label_properties.append([*text_background_xyxy, text_left, text_top]) + + return np.array(label_properties).reshape(-1, 6) + + @staticmethod + def _get_labels_text( + detections: Detections, custom_labels: Optional[List[str]] + ) -> List[str]: + if custom_labels is not None: + return custom_labels + + labels = [] + for idx in range(len(detections)): + if CLASS_NAME_DATA_FIELD in detections.data: + labels.append(detections.data[CLASS_NAME_DATA_FIELD][idx]) elif detections.class_id is not None: - text = str(detections.class_id[detection_idx]) + labels.append(str(detections.class_id[idx])) else: - text = str(detection_idx) + labels.append(str(idx)) + return labels - left, top, right, bottom = draw.textbbox((0, 0), text, font=self.font) - text_width = right - left - text_height = bottom - top - text_w_padded = text_width + 2 * self.text_padding - text_h_padded = text_height + 2 * self.text_padding - text_background_xyxy = resolve_text_background_xyxy( - center_coordinates=tuple(center_coordinates), - text_wh=(text_w_padded, text_h_padded), - position=self.text_anchor, + def _draw_labels( + self, + draw, + labels: List[str], + label_properties: np.ndarray, + detections: Detections, + custom_color_lookup: Optional[np.ndarray], + ) -> None: + assert len(labels) == len(label_properties) == len(detections), ( + f"Number of label properties ({len(label_properties)}), " + f"labels ({len(labels)}) and detections ({len(detections)}) " + "do not match." 
+ ) + color_lookup = ( + custom_color_lookup + if custom_color_lookup is not None + else self.color_lookup + ) + + for idx, label_property in enumerate(label_properties): + background_color = resolve_color( + color=self.color, + detections=detections, + detection_idx=idx, + color_lookup=color_lookup, + ) + text_color = resolve_color( + color=self.text_color, + detections=detections, + detection_idx=idx, + color_lookup=color_lookup, ) - text_x = text_background_xyxy[0] + self.text_padding - left - text_y = text_background_xyxy[1] + self.text_padding - top + box_xyxy = label_property[:4] + text_left = label_property[4] + text_top = label_property[5] + label_x_position = box_xyxy[0] + self.text_padding - text_left + label_y_position = box_xyxy[1] + self.text_padding - text_top draw.rounded_rectangle( - text_background_xyxy, + tuple(box_xyxy), radius=self.border_radius, - fill=color.as_rgb(), + fill=background_color.as_rgb(), outline=None, ) draw.text( - xy=(text_x, text_y), - text=text, + xy=(label_x_position, label_y_position), + text=labels[idx], font=self.font, fill=text_color.as_rgb(), ) - return scene @staticmethod - def _load_default_font(size): - """ - PIL either loads a font that accepts a size (e.g. on my machine) - or raises an error saying `load_default` does not accept arguments - (e.g. in Colab). - """ + def _load_font(font_size: int, font_path: Optional[str]): + def load_default_font(size): + try: + return ImageFont.load_default(size) + except TypeError: + return ImageFont.load_default() + + if font_path is None: + return load_default_font(font_size) + try: - font = ImageFont.load_default(size) - except TypeError: - font = ImageFont.load_default() - return font + return ImageFont.truetype(font_path, font_size) + except OSError: + print(f"Font path '{font_path}' not found. 
Using PIL's default font.") + return load_default_font(font_size) class IconAnnotator(BaseAnnotator): diff --git a/supervision/assets/list.py b/supervision/assets/list.py index 8a01b7585..926adeba8 100644 --- a/supervision/assets/list.py +++ b/supervision/assets/list.py @@ -20,6 +20,7 @@ class VideoAssets(Enum): | `PEOPLE_WALKING` | `people-walking.mp4` | [Link](https://media.roboflow.com/supervision/video-examples/people-walking.mp4) | | `BEACH` | `beach-1.mp4` | [Link](https://media.roboflow.com/supervision/video-examples/beach-1.mp4) | | `BASKETBALL` | `basketball-1.mp4` | [Link](https://media.roboflow.com/supervision/video-examples/basketball-1.mp4) | + | `SKIING` | `skiing.mp4` | [Link](https://media.roboflow.com/supervision/video-examples/skiing.mp4) | """ # noqa: E501 // docs VEHICLES = "vehicles.mp4" @@ -31,6 +32,7 @@ class VideoAssets(Enum): PEOPLE_WALKING = "people-walking.mp4" BEACH = "beach-1.mp4" BASKETBALL = "basketball-1.mp4" + SKIING = "skiing.mp4" @classmethod def list(cls): @@ -74,4 +76,8 @@ def list(cls): f"{BASE_VIDEO_URL}{VideoAssets.BASKETBALL.value}", "60d94a3c7c47d16f09d342b088012ecc", ), + VideoAssets.SKIING.value: ( + f"{BASE_VIDEO_URL}{VideoAssets.SKIING.value}", + "d30987cbab1bbc5934199cdd1b293119", + ), } diff --git a/supervision/dataset/utils.py b/supervision/dataset/utils.py index 20b80978f..6c30eeab0 100644 --- a/supervision/dataset/utils.py +++ b/supervision/dataset/utils.py @@ -55,7 +55,7 @@ def merge_class_lists(class_lists: List[List[str]]) -> List[str]: for class_list in class_lists: for class_name in class_list: - unique_classes.add(class_name.lower()) + unique_classes.add(class_name) return sorted(list(unique_classes)) diff --git a/supervision/detection/core.py b/supervision/detection/core.py index 113948fc9..32753a30a 100644 --- a/supervision/detection/core.py +++ b/supervision/detection/core.py @@ -32,8 +32,10 @@ extract_ultralytics_masks, get_data_item, is_data_equal, + is_metadata_equal, mask_to_xyxy, merge_data, + merge_metadata, process_roboflow_result, xywh_to_xyxy, ) @@ -125,6 +127,9 @@ class simplifies data manipulation and filtering, providing a uniform API for data (Dict[str, Union[np.ndarray, List]]): A dictionary containing additional data where each key is a string representing the data type, and the value is either a NumPy array or a list of corresponding data. + metadata (Dict[str, Any]): A dictionary containing collection-level metadata + that applies to the entire set of detections. This may include information such + as the video name, camera parameters, timestamp, or other global metadata. 
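To make the new collection-level field concrete, a short sketch of how `metadata` is intended to flow through indexing and merging (the key name is illustrative; behavior follows the `merge_metadata` rules later in this diff):

```python
import numpy as np
import supervision as sv

detections = sv.Detections(
    xyxy=np.array([[10.0, 10.0, 50.0, 50.0]]),
    class_id=np.array([0]),
)
# Unlike `data`, metadata describes the whole collection, not individual boxes.
detections.metadata["video_name"] = "skiing.mp4"

# __getitem__ carries metadata along unchanged.
subset = detections[np.array([0])]
assert subset.metadata == detections.metadata

# merge() requires identical metadata keys with equal values;
# conflicting values raise ValueError (see merge_metadata below).
merged = sv.Detections.merge([detections, subset])
assert merged.metadata["video_name"] == "skiing.mp4"
```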
""" # noqa: E501 // docs xyxy: np.ndarray @@ -133,6 +138,7 @@ class simplifies data manipulation and filtering, providing a uniform API for class_id: Optional[np.ndarray] = None tracker_id: Optional[np.ndarray] = None data: Dict[str, Union[np.ndarray, List]] = field(default_factory=dict) + metadata: Dict[str, Any] = field(default_factory=dict) def __post_init__(self): validate_detections_fields( @@ -185,6 +191,7 @@ def __eq__(self, other: Detections): np.array_equal(self.confidence, other.confidence), np.array_equal(self.tracker_id, other.tracker_id), is_data_equal(self.data, other.data), + is_metadata_equal(self.metadata, other.metadata), ] ) @@ -985,6 +992,7 @@ def is_empty(self) -> bool: """ empty_detections = Detections.empty() empty_detections.data = self.data + empty_detections.metadata = self.metadata return self == empty_detections @classmethod @@ -1078,6 +1086,9 @@ def stack_or_none(name: str): data = merge_data([d.data for d in detections_list]) + metadata_list = [detections.metadata for detections in detections_list] + metadata = merge_metadata(metadata_list) + return cls( xyxy=xyxy, mask=mask, @@ -1085,6 +1096,7 @@ def stack_or_none(name: str): class_id=class_id, tracker_id=tracker_id, data=data, + metadata=metadata, ) def get_anchors_coordinates(self, anchor: Position) -> np.ndarray: @@ -1198,6 +1210,7 @@ def __getitem__( class_id=self.class_id[index] if self.class_id is not None else None, tracker_id=self.tracker_id[index] if self.tracker_id is not None else None, data=get_data_item(self.data, index), + metadata=self.metadata, ) def __setitem__(self, key: str, value: Union[np.ndarray, List]): @@ -1459,6 +1472,8 @@ def merge_inner_detection_object_pair( else: winning_detection = detections_2 + metadata = merge_metadata([detections_1.metadata, detections_2.metadata]) + return Detections( xyxy=merged_xyxy, mask=merged_mask, @@ -1466,6 +1481,7 @@ def merge_inner_detection_object_pair( class_id=winning_detection.class_id, tracker_id=winning_detection.tracker_id, data=winning_detection.data, + metadata=metadata, ) diff --git a/supervision/detection/line_zone.py b/supervision/detection/line_zone.py index 985589228..b63660bc4 100644 --- a/supervision/detection/line_zone.py +++ b/supervision/detection/line_zone.py @@ -1,11 +1,12 @@ import math import warnings -from collections import Counter +from collections import Counter, defaultdict, deque from functools import lru_cache -from typing import Any, Dict, Iterable, List, Literal, Optional, Tuple +from typing import Any, Deque, Dict, Iterable, List, Literal, Optional, Tuple import cv2 import numpy as np +import numpy.typing as npt from supervision.config import CLASS_NAME_DATA_FIELD from supervision.detection.core import Detections @@ -41,6 +42,10 @@ class LineZone: to inside. out_count (int): The number of objects that have crossed the line from inside to outside. + in_count_per_class (Dict[int, int]): Number of objects of each class that have + crossed the line from outside to inside. + out_count_per_class (Dict[int, int]): Number of objects of each class that have + crossed the line from inside to outside. Example: ```python @@ -74,6 +79,7 @@ def __init__( Position.BOTTOM_LEFT, Position.BOTTOM_RIGHT, ), + minimum_crossing_threshold: int = 1, ): """ Args: @@ -84,10 +90,18 @@ def __init__( to consider when deciding on whether the detection has passed the line counter or not. 
By default, this contains the four corners of the detection's bounding box + minimum_crossing_threshold (int): Detection needs to be seen + on the other side of the line for this many frames to be + considered as having crossed the line. This is useful when + dealing with unstable bounding boxes or when detections + may linger on the line. """ self.vector = Vector(start=start, end=end) - self.limits = self.calculate_region_of_interest_limits(vector=self.vector) - self.tracker_state: Dict[str, bool] = {} + self.limits = self._calculate_region_of_interest_limits(vector=self.vector) + self.crossing_history_length = max(2, minimum_crossing_threshold + 1) + self.crossing_state_history: Dict[int, Deque[bool]] = defaultdict( + lambda: deque(maxlen=self.crossing_history_length) + ) self._in_count_per_class: Counter = Counter() self._out_count_per_class: Counter = Counter() self.triggering_anchors = triggering_anchors @@ -97,38 +111,96 @@ def __init__( @property def in_count(self) -> int: - """ - Number of objects that have crossed the line from - outside to inside. - """ return sum(self._in_count_per_class.values()) @property def out_count(self) -> int: - """ - Number of objects that have crossed the line from - inside to outside. - """ return sum(self._out_count_per_class.values()) @property def in_count_per_class(self) -> Dict[int, int]: - """ - Number of objects of each class that have crossed - the line from outside to inside. - """ return dict(self._in_count_per_class) @property def out_count_per_class(self) -> Dict[int, int]: + return dict(self._out_count_per_class) + + def trigger(self, detections: Detections) -> Tuple[np.ndarray, np.ndarray]: """ - Number of objects of each class that have crossed the line - from inside to outside. + Update the `in_count` and `out_count` based on the objects that cross the line. + + Args: + detections (Detections): A list of detections for which to update the + counts. + + Returns: + A tuple of two boolean NumPy arrays. The first array indicates which + detections have crossed the line from outside to inside. The second + array indicates which detections have crossed the line from inside to + outside. """ - return dict(self._out_count_per_class) + crossed_in = np.full(len(detections), False) + crossed_out = np.full(len(detections), False) + + if len(detections) == 0: + return crossed_in, crossed_out + + if detections.tracker_id is None: + warnings.warn( + "Line zone counting skipped. LineZone requires tracker_id. Refer to " + "https://supervision.roboflow.com/latest/trackers for more " + "information.", + category=SupervisionWarnings, + ) + return crossed_in, crossed_out + + self._update_class_id_to_name(detections) + + in_limits, has_any_left_trigger, has_any_right_trigger = ( + self._compute_anchor_sides(detections) + ) + + class_ids: List[Optional[int]] = ( + list(detections.class_id) + if detections.class_id is not None + else [None] * len(detections) + ) + + for i, (class_id, tracker_id) in enumerate( + zip(class_ids, detections.tracker_id) + ): + if not in_limits[i]: + continue + + if has_any_left_trigger[i] and has_any_right_trigger[i]: + continue + + tracker_state: bool = has_any_left_trigger[i] + crossing_history = self.crossing_state_history[tracker_id] + crossing_history.append(tracker_state) + + if len(crossing_history) < self.crossing_history_length: + continue + + # TODO: Account for incorrect class_id. + # Most likely this would involve indexing self.crossing_state_history + # with (tracker_id, class_id). 
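A hedged usage sketch for the new `minimum_crossing_threshold` parameter (tracker setup and the frame loop are elided):

```python
import supervision as sv

# A horizontal counting line across a 640px-wide frame.
start, end = sv.Point(x=0, y=300), sv.Point(x=640, y=300)

# Require a tracked object to be seen on the far side of the line for
# 2 consecutive frames before it counts as a crossing. Internally this
# becomes a per-tracker deque of length max(2, threshold + 1).
line_zone = sv.LineZone(start=start, end=end, minimum_crossing_threshold=2)

# Inside the frame loop, with tracker_id set on the detections:
# crossed_in, crossed_out = line_zone.trigger(detections)
# print(line_zone.in_count, line_zone.out_count_per_class)
```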
+ + oldest_state = crossing_history[0] + if crossing_history.count(oldest_state) > 1: + continue + + if tracker_state: + self._in_count_per_class[class_id] += 1 + crossed_in[i] = True + else: + self._out_count_per_class[class_id] += 1 + crossed_out[i] = True + + return crossed_in, crossed_out @staticmethod - def calculate_region_of_interest_limits(vector: Vector) -> Tuple[Vector, Vector]: + def _calculate_region_of_interest_limits(vector: Vector) -> Tuple[Vector, Vector]: magnitude = vector.magnitude if magnitude == 0: @@ -159,40 +231,45 @@ def calculate_region_of_interest_limits(vector: Vector) -> Tuple[Vector, Vector] ) return start_region_limit, end_region_limit - @staticmethod - def is_point_in_limits(point: Point, limits: Tuple[Vector, Vector]) -> bool: - cross_product_1 = limits[0].cross_product(point) - cross_product_2 = limits[1].cross_product(point) - return (cross_product_1 > 0) == (cross_product_2 > 0) - - def trigger(self, detections: Detections) -> Tuple[np.ndarray, np.ndarray]: + def _compute_anchor_sides( + self, detections: Detections + ) -> Tuple[npt.NDArray[np.bool_], npt.NDArray[np.bool_], npt.NDArray[np.bool_]]: """ - Update the `in_count` and `out_count` based on the objects that cross the line. + Find if detections' anchors are within the limit of the line + zone and which anchors are on its left and right side. + + Assumes: + * At least 1 detection is provided + * Detections have `tracker_id` + + The limit is defined as the region between the two lines, + perpendicular to the line zone, and passing through its start + and end points, as shown below: + + Limits: + ``` + | IN ↑ + | | + OUT o---LINE---o OUT + | | + ↓ IN | + ``` Args: - detections (Detections): A list of detections for which to update the - counts. + detections (Detections): The detections to check. Returns: - A tuple of two boolean NumPy arrays. The first array indicates which - detections have crossed the line from outside to inside. The second - array indicates which detections have crossed the line from inside to - outside. + result (Tuple[np.ndarray, np.ndarray, np.ndarray]): + All 3 arrays are boolean arrays of shape (N, ) where N is the + number of detections. The first array, `in_limits`, indicates + if the detection's anchor is within the line zone limits. + The second array, `has_any_left_trigger`, indicates if the + detection's anchor is on the left side of the line zone. + The third array, `has_any_right_trigger`, indicates if the + detection's anchor is on the right side of the line zone. """ - crossed_in = np.full(len(detections), False) - crossed_out = np.full(len(detections), False) - - if len(detections) == 0: - return crossed_in, crossed_out - - if detections.tracker_id is None: - warnings.warn( - "Line zone counting skipped. LineZone requires tracker_id. 
Refer to " - "https://supervision.roboflow.com/latest/trackers for more " - "information.", - category=SupervisionWarnings, - ) - return crossed_in, crossed_out + assert len(detections) > 0 + assert detections.tracker_id is not None all_anchors = np.array( [ @@ -203,52 +280,38 @@ def trigger(self, detections: Detections) -> Tuple[np.ndarray, np.ndarray]: cross_products_1 = cross_product(all_anchors, self.limits[0]) cross_products_2 = cross_product(all_anchors, self.limits[1]) + + # Works because limit vectors are pointing in opposite directions in_limits = (cross_products_1 > 0) == (cross_products_2 > 0) in_limits = np.all(in_limits, axis=0) triggers = cross_product(all_anchors, self.vector) < 0 has_any_left_trigger = np.any(triggers, axis=0) has_any_right_trigger = np.any(~triggers, axis=0) - is_uniformly_triggered = ~(has_any_left_trigger & has_any_right_trigger) - - class_ids = ( - list(detections.class_id) - if detections.class_id is not None - else [None] * len(detections) - ) - tracker_ids = list(detections.tracker_id) - - if CLASS_NAME_DATA_FIELD in detections.data: - class_names = detections.data[CLASS_NAME_DATA_FIELD] - for class_id, class_name in zip(class_ids, class_names): - if class_id is None: - class_name = "No class" - self.class_id_to_name[class_id] = class_name - for i, (class_ids, tracker_id) in enumerate(zip(class_ids, tracker_ids)): - if not in_limits[i]: - continue - - if not is_uniformly_triggered[i]: - continue + return in_limits, has_any_left_trigger, has_any_right_trigger - tracker_state = has_any_left_trigger[i] - if tracker_id not in self.tracker_state: - self.tracker_state[tracker_id] = tracker_state - continue + def _update_class_id_to_name(self, detections: Detections) -> None: + """ + Update the attribute keeping track of which class + IDs correspond to which class names. - if self.tracker_state.get(tracker_id) == tracker_state: - continue + Assumes that class_names are only provided when class_ids are. + """ + class_names = detections.data.get(CLASS_NAME_DATA_FIELD) + assert class_names is None or detections.class_id is not None - self.tracker_state[tracker_id] = tracker_state - if tracker_state: - self._in_count_per_class[class_ids] += 1 - crossed_in[i] = True - else: - self._out_count_per_class[class_ids] += 1 - crossed_out[i] = True + if detections.class_id is None: + return - return crossed_in, crossed_out + if class_names is None: + new_names = {class_id: str(class_id) for class_id in detections.class_id} + else: + new_names = { + class_id: class_name + for class_id, class_name in zip(detections.class_id, class_names) + } + self.class_id_to_name.update(new_names) class LineZoneAnnotator: diff --git a/supervision/detection/tools/polygon_zone.py b/supervision/detection/tools/polygon_zone.py index ac7203ff8..5cd976b1e 100644 --- a/supervision/detection/tools/polygon_zone.py +++ b/supervision/detection/tools/polygon_zone.py @@ -17,6 +17,12 @@ class PolygonZone: """ A class for defining a polygon-shaped zone within a frame for detecting objects. + !!! warning + + PolygonZone uses the `tracker_id`. Read + [here](/latest/trackers/) to learn how to plug + tracking into your inference pipeline. + Attributes: polygon (np.ndarray): A polygon represented by a numpy array of shape `(N, 2)`, containing the `x`, `y` coordinates of the points. @@ -26,6 +32,28 @@ class PolygonZone: (default: (sv.Position.BOTTOM_CENTER,)). 
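The left/right classification in `_compute_anchor_sides` boils down to the sign of a 2D cross product. A standalone numeric sketch of that test (not the library API):

```python
import numpy as np

# Line from (0, 0) to (10, 0), with one anchor on each side of it.
line_start = np.array([0.0, 0.0])
line_direction = np.array([10.0, 0.0])
anchors = np.array([[5.0, 2.0], [5.0, -2.0]])

# The sign of the cross product of the line direction with the vector
# from the line start to each anchor encodes which side the anchor is on.
sides = np.cross(line_direction, anchors - line_start)
print(sides < 0)  # [False  True] -> the two anchors sit on opposite sides
```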
        current_count (int): The current count of detected objects within the zone
        mask (np.ndarray): The 2D bool mask for the polygon zone
+
+    Example:
+        ```python
+        import supervision as sv
+        from ultralytics import YOLO
+        import numpy as np
+        import cv2
+
+        image = cv2.imread(<SOURCE_IMAGE_PATH>)
+        model = YOLO("yolo11s")
+        tracker = sv.ByteTrack()
+
+        polygon = np.array([[100, 200], [200, 100], [300, 200], [200, 300]])
+        polygon_zone = sv.PolygonZone(polygon=polygon)
+
+        result = model(image)[0]
+        detections = sv.Detections.from_ultralytics(result)
+        detections = tracker.update_with_detections(detections)
+
+        is_detections_in_zone = polygon_zone.trigger(detections)
+        print(polygon_zone.current_count)
+        ```
     """

     def __init__(
@@ -88,7 +116,7 @@ class PolygonZoneAnnotator:
     Attributes:
         zone (PolygonZone): The polygon zone to be annotated
-        color (Color): The color to draw the polygon lines
+        color (Color): The color to draw the polygon lines, default is white
         thickness (int): The thickness of the polygon lines, default is 2
         text_color (Color): The color of the text on the polygon, default is black
         text_scale (float): The scale of the text on the polygon, default is 0.5
@@ -104,7 +132,7 @@ class PolygonZoneAnnotator:
     def __init__(
         self,
         zone: PolygonZone,
-        color: Color,
+        color: Color = Color.WHITE,
         thickness: int = 2,
         text_color: Color = Color.BLACK,
         text_scale: float = 0.5,
diff --git a/supervision/detection/utils.py b/supervision/detection/utils.py
index 43fcec5a0..f6bcd33bc 100644
--- a/supervision/detection/utils.py
+++ b/supervision/detection/utils.py
@@ -1,5 +1,5 @@
 from itertools import chain
-from typing import Dict, List, Optional, Tuple, Union
+from typing import Any, Dict, List, Optional, Tuple, Union

 import cv2
 import numpy as np
@@ -23,10 +23,9 @@ def polygon_to_mask(polygon: np.ndarray, resolution_wh: Tuple[int, int]) -> np.n
         np.ndarray: The generated 2D mask, where the polygon is marked with
             `1`'s and the rest is filled with `0`'s.
     """
-    width, height = resolution_wh
-    mask = np.zeros((height, width))
-
-    cv2.fillPoly(mask, [polygon], color=1)
+    width, height = map(int, resolution_wh)
+    mask = np.zeros((height, width), dtype=np.uint8)
+    cv2.fillPoly(mask, [polygon.astype(np.int32)], color=1)
     return mask
@@ -163,9 +162,9 @@ def oriented_box_iou_batch(
     boxes_true = boxes_true.reshape(-1, 4, 2)
     boxes_detection = boxes_detection.reshape(-1, 4, 2)

-    max_height = max(boxes_true[:, :, 0].max(), boxes_detection[:, :, 0].max()) + 1
+    max_height = int(max(boxes_true[:, :, 0].max(), boxes_detection[:, :, 0].max()) + 1)
     # adding 1 because we are 0-indexed
-    max_width = max(boxes_true[:, :, 1].max(), boxes_detection[:, :, 1].max()) + 1
+    max_width = int(max(boxes_true[:, :, 1].max(), boxes_detection[:, :, 1].max()) + 1)

     mask_true = np.zeros((boxes_true.shape[0], max_height, max_width))
     for i, box_true in enumerate(boxes_true):
@@ -808,12 +807,36 @@ def is_data_equal(data_a: Dict[str, np.ndarray], data_b: Dict[str, np.ndarray])
     )


+def is_metadata_equal(metadata_a: Dict[str, Any], metadata_b: Dict[str, Any]) -> bool:
+    """
+    Compares the metadata payloads of two Detections instances.
+
+    Args:
+        metadata_a, metadata_b: The metadata payloads of the instances.
+
+    Returns:
+        True if the metadata payloads are equal, False otherwise.
+    """
+    return set(metadata_a.keys()) == set(metadata_b.keys()) and all(
+        np.array_equal(metadata_a[key], metadata_b[key])
+        if (
+            isinstance(metadata_a[key], np.ndarray)
+            and isinstance(metadata_b[key], np.ndarray)
+        )
+        else metadata_a[key] == metadata_b[key]
+        for key in metadata_a
+    )
+
+
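The hardened `polygon_to_mask` above now casts its inputs explicitly (`int` resolution, `int32` polygon, `uint8` mask), so float vertices no longer trip `cv2.fillPoly`. A quick check:

```python
import numpy as np
from supervision.detection.utils import polygon_to_mask

# Float vertices are accepted and cast to int32 internally.
polygon = np.array([[10.0, 10.0], [40.0, 10.0], [40.0, 40.0], [10.0, 40.0]])
mask = polygon_to_mask(polygon, resolution_wh=(64, 64))
print(mask.dtype, mask.sum())  # uint8 mask; sum = pixel count inside the square
```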
+ """ + return set(metadata_a.keys()) == set(metadata_b.keys()) and all( + np.array_equal(metadata_a[key], metadata_b[key]) + if ( + isinstance(metadata_a[key], np.ndarray) + and isinstance(metadata_b[key], np.ndarray) + ) + else metadata_a[key] == metadata_b[key] + for key in metadata_a + ) + + def merge_data( data_list: List[Dict[str, Union[npt.NDArray[np.generic], List]]], ) -> Dict[str, Union[npt.NDArray[np.generic], List]]: """ Merges the data payloads of a list of Detections instances. + Warning: Assumes that empty detections were filtered-out before passing data to + this function. + Args: data_list: The data payloads of the Detections instances. Each data payload is a dictionary with the same keys, and the values are either lists or @@ -866,6 +889,61 @@ def merge_data( return merged_data +def merge_metadata(metadata_list: List[Dict[str, Any]]) -> Dict[str, Any]: + """ + Merge metadata from a list of metadata dictionaries. + + This function combines the metadata dictionaries. If a key appears in more than one + dictionary, the values must be identical for the merge to succeed. + + Warning: Assumes that empty detections were filtered-out before passing metadata to + this function. + + Args: + metadata_list (List[Dict[str, Any]]): A list of metadata dictionaries to merge. + + Returns: + Dict[str, Any]: A single merged metadata dictionary. + + Raises: + ValueError: If there are conflicting values for the same key or if + dictionaries have different keys. + """ + if not metadata_list: + return {} + + all_keys_sets = [set(metadata.keys()) for metadata in metadata_list] + if not all(keys_set == all_keys_sets[0] for keys_set in all_keys_sets): + raise ValueError("All metadata dictionaries must have the same keys to merge.") + + merged_metadata: Dict[str, Any] = {} + for metadata in metadata_list: + for key, value in metadata.items(): + if key not in merged_metadata: + merged_metadata[key] = value + continue + + other_value = merged_metadata[key] + if isinstance(value, np.ndarray) and isinstance(other_value, np.ndarray): + if not np.array_equal(merged_metadata[key], value): + raise ValueError( + f"Conflicting metadata for key: '{key}': " + "{type(value)}, {type(other_value)}." + ) + elif isinstance(value, np.ndarray) or isinstance(other_value, np.ndarray): + # Since [] == np.array([]). + raise ValueError( + f"Conflicting metadata for key: '{key}': " + "{type(value)}, {type(other_value)}." + ) + else: + print("hm") + if merged_metadata[key] != value: + raise ValueError(f"Conflicting metadata for key: '{key}'.") + + return merged_metadata + + def get_data_item( data: Dict[str, Union[np.ndarray, List]], index: Union[int, slice, List[int], np.ndarray], @@ -1039,3 +1117,59 @@ def cross_product(anchors: np.ndarray, vector: Vector) -> np.ndarray: ) vector_start = np.array([vector.start.x, vector.start.y]) return np.cross(vector_at_zero, anchors - vector_start) + + +def spread_out_boxes( + xyxy: np.ndarray, + max_iterations: int = 100, +) -> np.ndarray: + """ + Spread out boxes that overlap with each other. + + Args: + xyxy: Numpy array of shape (N, 4) where N is the number of boxes. + max_iterations: Maximum number of iterations to run the algorithm for. 
+ """ + if len(xyxy) == 0: + return xyxy + + xyxy_padded = pad_boxes(xyxy, px=1) + for _ in range(max_iterations): + # NxN + iou = box_iou_batch(xyxy_padded, xyxy_padded) + np.fill_diagonal(iou, 0) + if np.all(iou == 0): + break + + overlap_mask = iou > 0 + + # Nx2 + centers = (xyxy_padded[:, :2] + xyxy_padded[:, 2:]) / 2 + + # NxNx2 + delta_centers = centers[:, np.newaxis, :] - centers[np.newaxis, :, :] + delta_centers *= overlap_mask[:, :, np.newaxis] + + # Nx2 + delta_sum = np.sum(delta_centers, axis=1) + delta_magnitude = np.linalg.norm(delta_sum, axis=1, keepdims=True) + direction_vectors = np.divide( + delta_sum, + delta_magnitude, + out=np.zeros_like(delta_sum), + where=delta_magnitude != 0, + ) + + force_vectors = np.sum(iou, axis=1) + force_vectors = force_vectors[:, np.newaxis] * direction_vectors + + force_vectors *= 10 + force_vectors[(force_vectors > 0) & (force_vectors < 2)] = 2 + force_vectors[(force_vectors < 0) & (force_vectors > -2)] = -2 + + force_vectors = force_vectors.astype(int) + + xyxy_padded[:, [0, 1]] += force_vectors + xyxy_padded[:, [2, 3]] += force_vectors + + return pad_boxes(xyxy_padded, px=-1) diff --git a/supervision/draw/utils.py b/supervision/draw/utils.py index 19ce4a258..0c3767ff7 100644 --- a/supervision/draw/utils.py +++ b/supervision/draw/utils.py @@ -9,7 +9,11 @@ def draw_line( - scene: np.ndarray, start: Point, end: Point, color: Color, thickness: int = 2 + scene: np.ndarray, + start: Point, + end: Point, + color: Color = Color.ROBOFLOW, + thickness: int = 2, ) -> np.ndarray: """ Draws a line on a given scene. @@ -18,7 +22,7 @@ def draw_line( scene (np.ndarray): The scene on which the line will be drawn start (Point): The starting point of the line end (Point): The end point of the line - color (Color): The color of the line + color (Color): The color of the line, defaults to Color.ROBOFLOW thickness (int): The thickness of the line Returns: @@ -35,7 +39,7 @@ def draw_line( def draw_rectangle( - scene: np.ndarray, rect: Rect, color: Color, thickness: int = 2 + scene: np.ndarray, rect: Rect, color: Color = Color.ROBOFLOW, thickness: int = 2 ) -> np.ndarray: """ Draws a rectangle on an image. @@ -60,7 +64,7 @@ def draw_rectangle( def draw_filled_rectangle( - scene: np.ndarray, rect: Rect, color: Color, opacity: float = 1 + scene: np.ndarray, rect: Rect, color: Color = Color.ROBOFLOW, opacity: float = 1 ) -> np.ndarray: """ Draws a filled rectangle on an image. @@ -151,14 +155,17 @@ def draw_rounded_rectangle( def draw_polygon( - scene: np.ndarray, polygon: np.ndarray, color: Color, thickness: int = 2 + scene: np.ndarray, + polygon: np.ndarray, + color: Color = Color.ROBOFLOW, + thickness: int = 2, ) -> np.ndarray: """Draw a polygon on a scene. Parameters: scene (np.ndarray): The scene to draw the polygon on. polygon (np.ndarray): The polygon to be drawn, given as a list of vertices. - color (Color): The color of the polygon. + color (Color): The color of the polygon. Defaults to Color.ROBOFLOW. thickness (int): The thickness of the polygon lines, by default 2. Returns: @@ -171,14 +178,17 @@ def draw_polygon( def draw_filled_polygon( - scene: np.ndarray, polygon: np.ndarray, color: Color, opacity: float = 1 + scene: np.ndarray, + polygon: np.ndarray, + color: Color = Color.ROBOFLOW, + opacity: float = 1, ) -> np.ndarray: """Draw a filled polygon on a scene. Parameters: scene (np.ndarray): The scene to draw the polygon on. polygon (np.ndarray): The polygon to be drawn, given as a list of vertices. - color (Color): The color of the polygon. 
+ color (Color): The color of the polygon. Defaults to Color.ROBOFLOW. opacity (float): The opacity of polygon when drawn on the scene. Returns: diff --git a/supervision/geometry/utils.py b/supervision/geometry/utils.py index 8a0ca35c5..2247adc50 100644 --- a/supervision/geometry/utils.py +++ b/supervision/geometry/utils.py @@ -16,6 +16,9 @@ def get_polygon_center(polygon: np.ndarray) -> Point: Point: The center of the polygon, represented as a Point object with x and y attributes. + Raises: + ValueError: If the polygon has no vertices. + Examples: ```python import numpy as np @@ -30,6 +33,9 @@ def get_polygon_center(polygon: np.ndarray) -> Point: # This is one of the 3 candidate algorithms considered for centroid calculation. # For a more detailed discussion, see PR #1084 and commit eb33176 + if len(polygon) == 0: + raise ValueError("Polygon must have at least one vertex.") + shift_polygon = np.roll(polygon, -1, axis=0) signed_areas = np.cross(polygon, shift_polygon) / 2 if signed_areas.sum() == 0: diff --git a/supervision/keypoint/annotators.py b/supervision/keypoint/annotators.py index 559bfa921..7537b264a 100644 --- a/supervision/keypoint/annotators.py +++ b/supervision/keypoint/annotators.py @@ -5,10 +5,11 @@ import cv2 import numpy as np -from supervision import Rect, pad_boxes from supervision.annotators.base import ImageType +from supervision.detection.utils import pad_boxes, spread_out_boxes from supervision.draw.color import Color from supervision.draw.utils import draw_rounded_rectangle +from supervision.geometry.core import Rect from supervision.keypoint.core import KeyPoints from supervision.keypoint.skeletons import SKELETONS_BY_VERTEX_COUNT from supervision.utils.conversion import ensure_cv2_image_for_annotation @@ -201,6 +202,7 @@ def __init__( text_thickness: int = 1, text_padding: int = 10, border_radius: int = 0, + smart_position: bool = False, ): """ Args: @@ -215,6 +217,7 @@ def __init__( text_padding (int): The padding around the text. border_radius (int): The radius of the rounded corners of the boxes. Set to a high value to produce circles. + smart_position (bool): Spread out the labels to avoid overlap. 
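`get_polygon_center` now guards against empty input; a minimal check of both paths (the printed center assumes the documented centroid behavior):

```python
import numpy as np
from supervision.geometry.utils import get_polygon_center

square = np.array([[0, 0], [10, 0], [10, 10], [0, 10]])
print(get_polygon_center(square))  # e.g. Point(x=5, y=5)

try:
    get_polygon_center(np.empty((0, 2)))
except ValueError as error:
    print(error)  # Polygon must have at least one vertex.
```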
""" self.border_radius: int = border_radius self.color: Union[Color, List[Color]] = color @@ -222,6 +225,7 @@ def __init__( self.text_scale: float = text_scale self.text_thickness: int = text_thickness self.text_padding: int = text_padding + self.smart_position = smart_position def annotate( self, @@ -356,9 +360,12 @@ def annotate( for anchor, label in zip(anchors, labels) ] ) - xyxy_padded = pad_boxes(xyxy=xyxy, px=self.text_padding) + if self.smart_position: + xyxy_padded = spread_out_boxes(xyxy_padded) + xyxy = pad_boxes(xyxy=xyxy_padded, px=-self.text_padding) + for text, color, text_color, box, box_padded in zip( labels, colors, text_colors, xyxy, xyxy_padded ): diff --git a/supervision/keypoint/core.py b/supervision/keypoint/core.py index 36d6a5968..4b8e9d55b 100644 --- a/supervision/keypoint/core.py +++ b/supervision/keypoint/core.py @@ -2,12 +2,13 @@ from contextlib import suppress from dataclasses import dataclass, field -from typing import Any, Dict, Iterator, List, Optional, Tuple, Union +from typing import Any, Dict, Iterable, Iterator, List, Optional, Tuple, Union import numpy as np import numpy.typing as npt from supervision.config import CLASS_NAME_DATA_FIELD +from supervision.detection.core import Detections from supervision.detection.utils import get_data_item, is_data_equal from supervision.validators import validate_keypoints_fields @@ -612,3 +613,75 @@ def empty(cls) -> KeyPoints: ``` """ return cls(xy=np.empty((0, 0, 2), dtype=np.float32)) + + def is_empty(self) -> bool: + """ + Returns `True` if the `KeyPoints` object is considered empty. + """ + empty_keypoints = KeyPoints.empty() + empty_keypoints.data = self.data + return self == empty_keypoints + + def as_detections( + self, selected_keypoint_indices: Optional[Iterable[int]] = None + ) -> Detections: + """ + Convert a KeyPoints object to a Detections object. This + approximates the bounding box of the detected object by + taking the bounding box that fits all keypoints. + + Arguments: + selected_keypoint_indices (Optional[Iterable[int]]): The + indices of the keypoints to include in the bounding box + calculation. This helps focus on a subset of keypoints, + e.g. when some are occluded. Captures all keypoints by default. + + Returns: + detections (Detections): The converted detections object. + + Example: + ```python + keypoints = sv.KeyPoints.from_inference(...) 
+ detections = keypoints.as_detections() + ``` + """ + if self.is_empty(): + return Detections.empty() + + detections_list = [] + for i, xy in enumerate(self.xy): + if selected_keypoint_indices: + xy = xy[selected_keypoint_indices] + + # [0, 0] used by some frameworks to indicate missing keypoints + xy = xy[~np.all(xy == 0, axis=1)] + if len(xy) == 0: + xyxy = np.array([[0, 0, 0, 0]], dtype=np.float32) + else: + x_min = xy[:, 0].min() + x_max = xy[:, 0].max() + y_min = xy[:, 1].min() + y_max = xy[:, 1].max() + xyxy = np.array([[x_min, y_min, x_max, y_max]], dtype=np.float32) + + if self.confidence is None: + confidence = None + else: + confidence = self.confidence[i] + if selected_keypoint_indices: + confidence = confidence[selected_keypoint_indices] + confidence = np.array([confidence.mean()], dtype=np.float32) + + detections_list.append( + Detections( + xyxy=xyxy, + confidence=confidence, + ) + ) + + detections = Detections.merge(detections_list) + detections.class_id = self.class_id + detections.data = self.data + detections = detections[detections.area > 0] + + return detections diff --git a/supervision/metrics/__init__.py b/supervision/metrics/__init__.py index 17a6cd485..12b243ce6 100644 --- a/supervision/metrics/__init__.py +++ b/supervision/metrics/__init__.py @@ -1,5 +1,4 @@ from supervision.metrics.core import ( - CLASS_ID_NONE, AveragingMethod, Metric, MetricTarget, @@ -9,6 +8,12 @@ MeanAveragePrecision, MeanAveragePrecisionResult, ) +from supervision.metrics.mean_average_recall import ( + MeanAverageRecall, + MeanAverageRecallResult, +) +from supervision.metrics.precision import Precision, PrecisionResult +from supervision.metrics.recall import Recall, RecallResult from supervision.metrics.utils.object_size import ( ObjectSizeCategory, get_detection_size_category, diff --git a/supervision/metrics/core.py b/supervision/metrics/core.py index 1440fd435..def5999a0 100644 --- a/supervision/metrics/core.py +++ b/supervision/metrics/core.py @@ -4,9 +4,6 @@ from enum import Enum from typing import Any -CLASS_ID_NONE = -1 -"""Used by metrics module as class ID, when none is present""" - class Metric(ABC): """ @@ -40,9 +37,10 @@ class MetricTarget(Enum): """ Specifies what type of detection is used to compute the metric. - * BOXES: xyxy bounding boxes - * MASKS: Binary masks - * ORIENTED_BOUNDING_BOXES: Oriented bounding boxes (OBB) + Attributes: + BOXES: xyxy bounding boxes + MASKS: Binary masks + ORIENTED_BOUNDING_BOXES: Oriented bounding boxes (OBB) """ BOXES = "boxes" @@ -57,15 +55,16 @@ class AveragingMethod(Enum): Suppose, before returning the final result, a metric is computed for each class. How do you combine those to get the final number? - * MACRO: Calculate the metric for each class and average the results. The simplest - averaging method, but it does not take class imbalance into account. - * MICRO: Calculate the metric globally by counting the total true positives, false - positives, and false negatives. Micro averaging is useful when you want to give - more importance to classes with more samples. It's also more appropriate if you - have an imbalance in the number of instances per class. - * WEIGHTED: Calculate the metric for each class and average the results, weighted by - the number of true instances of each class. Use weighted averaging if you want - to take class imbalance into account. + Attributes: + MACRO: Calculate the metric for each class and average the results. The simplest + averaging method, but it does not take class imbalance into account. 
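A usage sketch for the new `KeyPoints.as_detections` conversion (`result` is a placeholder for any ultralytics pose output; other `KeyPoints` sources should behave the same way):

```python
import supervision as sv

# `result` stands in for any ultralytics pose-model output.
key_points = sv.KeyPoints.from_ultralytics(result)

# Fit a bounding box around all keypoints of each skeleton...
detections = key_points.as_detections()

# ...or only around a subset, e.g. the first five keypoints, which helps
# when the remaining keypoints are frequently occluded or reported as [0, 0].
detections = key_points.as_detections(selected_keypoint_indices=[0, 1, 2, 3, 4])
```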
+ MICRO: Calculate the metric globally by counting the total true positives, false + positives, and false negatives. Micro averaging is useful when you want to + give more importance to classes with more samples. It's also more + appropriate if you have an imbalance in the number of instances per class. + WEIGHTED: Calculate the metric for each class and average the results, weighted + by the number of true instances of each class. Use weighted averaging if + you want to take class imbalance into account. """ MACRO = "macro" diff --git a/supervision/metrics/f1_score.py b/supervision/metrics/f1_score.py index 2ca5bca5c..98cb5f265 100644 --- a/supervision/metrics/f1_score.py +++ b/supervision/metrics/f1_score.py @@ -9,7 +9,11 @@ from supervision.config import ORIENTED_BOX_COORDINATES from supervision.detection.core import Detections -from supervision.detection.utils import box_iou_batch, mask_iou_batch +from supervision.detection.utils import ( + box_iou_batch, + mask_iou_batch, + oriented_box_iou_batch, +) from supervision.draw.color import LEGACY_COLOR_PALETTE from supervision.metrics.core import AveragingMethod, Metric, MetricTarget from supervision.metrics.utils.object_size import ( @@ -23,23 +27,75 @@ class F1Score(Metric): + """ + F1 Score is a metric used to evaluate object detection models. It is the harmonic + mean of precision and recall, calculated at different IoU thresholds. + + In simple terms, F1 Score is a measure of a model's balance between precision and + recall (accuracy and completeness), calculated as: + + `F1 = 2 * (precision * recall) / (precision + recall)` + + Example: + ```python + import supervision as sv + from supervision.metrics import F1Score + + predictions = sv.Detections(...) + targets = sv.Detections(...) + + f1_metric = F1Score() + f1_result = f1_metric.update(predictions, targets).compute() + + print(f1_result.f1_50) + # 0.7618 + + print(f1_result) + # F1ScoreResult: + # Metric target: MetricTarget.BOXES + # Averaging method: AveragingMethod.WEIGHTED + # F1 @ 50: 0.7618 + # F1 @ 75: 0.7487 + # F1 @ thresh: [0.76175 0.76068 0.76068] + # IoU thresh: [0.5 0.55 0.6 ...] + # F1 per class: + # 0: [0.70968 0.70968 0.70968 ...] + # ... + # Small objects: ... + # Medium objects: ... + # Large objects: ... + + f1_result.plot() + ``` + + ![example_plot](\ + https://media.roboflow.com/supervision-docs/metrics/f1_plot_example.png\ + ){ align=center width="800" } + """ + def __init__( self, metric_target: MetricTarget = MetricTarget.BOXES, averaging_method: AveragingMethod = AveragingMethod.WEIGHTED, ): - self._metric_target = metric_target - if self._metric_target == MetricTarget.ORIENTED_BOUNDING_BOXES: - raise NotImplementedError( - "F1 score is not implemented for oriented bounding boxes." - ) + """ + Initialize the F1Score metric. + Args: + metric_target (MetricTarget): The type of detection data to use. + averaging_method (AveragingMethod): The averaging method used to compute the + F1 scores. Determines how the F1 scores are aggregated across classes. + """ self._metric_target = metric_target self.averaging_method = averaging_method + self._predictions_list: List[Detections] = [] self._targets_list: List[Detections] = [] def reset(self) -> None: + """ + Reset the metric to its initial state, clearing all stored data. 
+ """ self._predictions_list = [] self._targets_list = [] @@ -48,6 +104,16 @@ def update( predictions: Union[Detections, List[Detections]], targets: Union[Detections, List[Detections]], ) -> F1Score: + """ + Add new predictions and targets to the metric, but do not compute the result. + + Args: + predictions (Union[Detections, List[Detections]]): The predicted detections. + targets (Union[Detections, List[Detections]]): The target detections. + + Returns: + (F1Score): The updated metric instance. + """ if not isinstance(predictions, list): predictions = [predictions] if not isinstance(targets, list): @@ -65,6 +131,13 @@ def update( return self def compute(self) -> F1ScoreResult: + """ + Calculate the F1 score metric based on the stored predictions and ground-truth + data, at different IoU thresholds. + + Returns: + (F1ScoreResult): The F1 score metric result. + """ result = self._compute(self._predictions_list, self._targets_list) small_predictions, small_targets = self._filter_predictions_and_targets_by_size( @@ -112,8 +185,12 @@ def _compute( iou = box_iou_batch(target_contents, prediction_contents) elif self._metric_target == MetricTarget.MASKS: iou = mask_iou_batch(target_contents, prediction_contents) + elif self._metric_target == MetricTarget.ORIENTED_BOUNDING_BOXES: + iou = oriented_box_iou_batch( + target_contents, prediction_contents + ) else: - raise NotImplementedError( + raise ValueError( "Unsupported metric target for IoU calculation" ) @@ -312,12 +389,22 @@ def _detections_content(self, detections: Detections) -> np.ndarray: return ( detections.mask if detections.mask is not None - else np.empty((0, 0, 0), dtype=bool) + else self._make_empty_content() ) if self._metric_target == MetricTarget.ORIENTED_BOUNDING_BOXES: - if obb := detections.data.get(ORIENTED_BOX_COORDINATES): - return np.ndarray(obb, dtype=np.float32) - return np.empty((0, 8), dtype=np.float32) + obb = detections.data.get(ORIENTED_BOX_COORDINATES) + if obb is not None and len(obb) > 0: + return np.array(obb, dtype=np.float32) + return self._make_empty_content() + raise ValueError(f"Invalid metric target: {self._metric_target}") + + def _make_empty_content(self) -> np.ndarray: + if self._metric_target == MetricTarget.BOXES: + return np.empty((0, 4), dtype=np.float32) + if self._metric_target == MetricTarget.MASKS: + return np.empty((0, 0, 0), dtype=bool) + if self._metric_target == MetricTarget.ORIENTED_BOUNDING_BOXES: + return np.empty((0, 4, 2), dtype=np.float32) raise ValueError(f"Invalid metric target: {self._metric_target}") def _filter_detections_by_size( @@ -373,7 +460,6 @@ class F1ScoreResult: The results of the F1 score metric calculation. Defaults to `0` if no detections or targets were provided. - Provides a custom `__str__` method for pretty printing. Attributes: metric_target (MetricTarget): the type of data used for the metric - @@ -390,11 +476,11 @@ class F1ScoreResult: matched_classes (np.ndarray): the class IDs of all matched classes. Corresponds to the rows of `f1_per_class`. small_objects (Optional[F1ScoreResult]): the F1 metric results - for small objects. + for small objects (area < 32²). medium_objects (Optional[F1ScoreResult]): the F1 metric results - for medium objects. + for medium objects (32² ≤ area < 96²). large_objects (Optional[F1ScoreResult]): the F1 metric results - for large objects. + for large objects (area ≥ 96²). 
""" metric_target: MetricTarget @@ -424,6 +510,19 @@ def __str__(self) -> str: Example: ```python print(f1_result) + # F1ScoreResult: + # Metric target: MetricTarget.BOXES + # Averaging method: AveragingMethod.WEIGHTED + # F1 @ 50: 0.7618 + # F1 @ 75: 0.7487 + # F1 @ thresh: [0.76175 0.76068 0.76068] + # IoU thresh: [0.5 0.55 0.6 ...] + # F1 per class: + # 0: [0.70968 0.70968 0.70968 ...] + # ... + # Small objects: ... + # Medium objects: ... + # Large objects: ... ``` """ out_str = ( @@ -487,6 +586,10 @@ def to_pandas(self) -> "pd.DataFrame": def plot(self): """ Plot the F1 results. + + ![example_plot](\ + https://media.roboflow.com/supervision-docs/metrics/f1_plot_example.png\ + ){ align=center width="800" } """ labels = ["F1@50", "F1@75"] diff --git a/supervision/metrics/mean_average_precision.py b/supervision/metrics/mean_average_precision.py index 04a5fe9dd..9e7a30d0e 100644 --- a/supervision/metrics/mean_average_precision.py +++ b/supervision/metrics/mean_average_precision.py @@ -9,7 +9,11 @@ from supervision.config import ORIENTED_BOX_COORDINATES from supervision.detection.core import Detections -from supervision.detection.utils import box_iou_batch, mask_iou_batch +from supervision.detection.utils import ( + box_iou_batch, + mask_iou_batch, + oriented_box_iou_batch, +) from supervision.draw.color import LEGACY_COLOR_PALETTE from supervision.metrics.core import Metric, MetricTarget from supervision.metrics.utils.object_size import ( @@ -23,6 +27,48 @@ class MeanAveragePrecision(Metric): + """ + Mean Average Precision (mAP) is a metric used to evaluate object detection models. + It is the average of the precision-recall curves at different IoU thresholds. + + Example: + ```python + import supervision as sv + from supervision.metrics import MeanAveragePrecision + + predictions = sv.Detections(...) + targets = sv.Detections(...) + + map_metric = MeanAveragePrecision() + map_result = map_metric.update(predictions, targets).compute() + + print(map_result.map50_95) + # 0.4674 + + print(map_result) + # MeanAveragePrecisionResult: + # Metric target: MetricTarget.BOXES + # Class agnostic: False + # mAP @ 50:95: 0.4674 + # mAP @ 50: 0.5048 + # mAP @ 75: 0.4796 + # mAP scores: [0.50485 0.50377 0.50377 ...] + # IoU thresh: [0.5 0.55 0.6 ...] + # AP per class: + # 0: [0.67699 0.67699 0.67699 ...] + # ... + # Small objects: ... + # Medium objects: ... + # Large objects: ... + + map_result.plot() + ``` + + ![example_plot](\ + https://media.roboflow.com/supervision-docs/metrics/mAP_plot_example.png\ + ){ align=center width="800" } + """ + def __init__( self, metric_target: MetricTarget = MetricTarget.BOXES, @@ -36,17 +82,15 @@ def __init__( class_agnostic (bool): Whether to treat all data as a single class. """ self._metric_target = metric_target - if self._metric_target == MetricTarget.ORIENTED_BOUNDING_BOXES: - raise NotImplementedError( - "Mean Average Precision is not implemented for oriented bounding boxes." - ) - self._class_agnostic = class_agnostic self._predictions_list: List[Detections] = [] self._targets_list: List[Detections] = [] def reset(self) -> None: + """ + Reset the metric to its initial state, clearing all stored data. + """ self._predictions_list = [] self._targets_list = [] @@ -76,6 +120,15 @@ def update( f" targets ({len(targets)}) during the update must be the same." 
) + if self._class_agnostic: + predictions = deepcopy(predictions) + targets = deepcopy(targets) + + for prediction in predictions: + prediction.class_id[:] = -1 + for target in targets: + target.class_id[:] = -1 + self._predictions_list.extend(predictions) self._targets_list.extend(targets) @@ -86,26 +139,10 @@ def compute( ) -> MeanAveragePrecisionResult: """ Calculate Mean Average Precision based on predicted and ground-truth - detections at different thresholds. + detections at different thresholds. Returns: - (MeanAveragePrecisionResult): New instance of MeanAveragePrecision. - - Example: - ```python - import supervision as sv - from supervision.metrics import MeanAveragePrecision - - predictions = sv.Detections(...) - targets = sv.Detections(...) - - map_metric = MeanAveragePrecision() - map_result = map_metric.update(predictions, targets).compute() - - print(map_result) - print(map_result.map50_95) - map_result.plot() - ``` + (MeanAveragePrecisionResult): The Mean Average Precision result. """ result = self._compute(self._predictions_list, self._targets_list) @@ -172,14 +209,19 @@ def _compute( iou = box_iou_batch(target_contents, prediction_contents) elif self._metric_target == MetricTarget.MASKS: iou = mask_iou_batch(target_contents, prediction_contents) + elif self._metric_target == MetricTarget.ORIENTED_BOUNDING_BOXES: + iou = oriented_box_iou_batch( + target_contents, prediction_contents + ) else: - raise NotImplementedError( + raise ValueError( "Unsupported metric target for IoU calculation" ) matches = self._match_detection_batch( predictions.class_id, targets.class_id, iou, iou_thresholds ) + stats.append( ( matches, @@ -203,6 +245,7 @@ def _compute( return MeanAveragePrecisionResult( metric_target=self._metric_target, + is_class_agnostic=self._class_agnostic, mAP_scores=mAP_scores, iou_thresholds=iou_thresholds, matched_classes=unique_classes, @@ -230,7 +273,7 @@ def _compute_average_precision(recall: np.ndarray, precision: np.ndarray) -> flo for r, p in zip(recall[::-1], precision[::-1]): precision_levels[recall_levels <= r] = p - average_precision = (1 / 100 * precision_levels).sum() + average_precision = (1 / 101 * precision_levels).sum() return average_precision @staticmethod @@ -332,8 +375,9 @@ def _detections_content(self, detections: Detections) -> np.ndarray: else self._make_empty_content() ) if self._metric_target == MetricTarget.ORIENTED_BOUNDING_BOXES: - if obb := detections.data.get(ORIENTED_BOX_COORDINATES): - return np.ndarray(obb, dtype=np.float32) + obb = detections.data.get(ORIENTED_BOX_COORDINATES) + if obb is not None and len(obb) > 0: + return np.array(obb, dtype=np.float32) return self._make_empty_content() raise ValueError(f"Invalid metric target: {self._metric_target}") @@ -343,7 +387,7 @@ def _make_empty_content(self) -> np.ndarray: if self._metric_target == MetricTarget.MASKS: return np.empty((0, 0, 0), dtype=bool) if self._metric_target == MetricTarget.ORIENTED_BOUNDING_BOXES: - return np.empty((0, 8), dtype=np.float32) + return np.empty((0, 4, 2), dtype=np.float32) raise ValueError(f"Invalid metric target: {self._metric_target}") def _filter_detections_by_size( @@ -383,6 +427,8 @@ class MeanAveragePrecisionResult: Attributes: metric_target (MetricTarget): the type of data used for the metric - boxes, masks or oriented bounding boxes. + class_agnostic (bool): When computing class-agnostic results, class ID + is set to `-1`. mAP_map50_95 (float): the mAP score at IoU thresholds from `0.5` to `0.95`. 
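The `1 / 100` to `1 / 101` change in `_compute_average_precision` above matches COCO-style 101-point interpolation, assuming `recall_levels = np.linspace(0, 1, 101)` as in the surrounding code. A toy walk-through of that step:

```python
import numpy as np

recall_levels = np.linspace(0, 1, 101)
precision_levels = np.zeros_like(recall_levels)

# Toy monotonic PR curve: precision 1.0 up to recall 0.5, then 0.5.
recall = np.array([0.5, 1.0])
precision = np.array([1.0, 0.5])
for r, p in zip(recall[::-1], precision[::-1]):
    precision_levels[recall_levels <= r] = p

# Averaging over 101 sampled recall points, hence the 1 / 101 factor.
average_precision = (1 / 101 * precision_levels).sum()
print(round(average_precision, 3))  # 0.752
```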
mAP_map50 (float): the mAP score at IoU threshold of `0.5`. mAP_map75 (float): the mAP score at IoU threshold of `0.75`. @@ -394,14 +440,15 @@ class and IoU threshold. Shape: `(num_target_classes, num_iou_thresholds)` matched_classes (np.ndarray): the class IDs of all matched classes. Corresponds to the rows of `ap_per_class`. small_objects (Optional[MeanAveragePrecisionResult]): the mAP results - for small objects. + for small objects (area < 32²). medium_objects (Optional[MeanAveragePrecisionResult]): the mAP results - for medium objects. + for medium objects (32² ≤ area < 96²). large_objects (Optional[MeanAveragePrecisionResult]): the mAP results - for large objects. + for large objects (area ≥ 96²). """ metric_target: MetricTarget + is_class_agnostic: bool @property def map50_95(self) -> float: @@ -430,12 +477,27 @@ def __str__(self) -> str: Example: ```python print(map_result) + # MeanAveragePrecisionResult: + # Metric target: MetricTarget.BOXES + # Class agnostic: False + # mAP @ 50:95: 0.4674 + # mAP @ 50: 0.5048 + # mAP @ 75: 0.4796 + # mAP scores: [0.50485 0.50377 0.50377 ...] + # IoU thresh: [0.5 0.55 0.6 ...] + # AP per class: + # 0: [0.67699 0.67699 0.67699 ...] + # ... + # Small objects: ... + # Medium objects: ... + # Large objects: ... ``` """ out_str = ( f"{self.__class__.__name__}:\n" f"Metric target: {self.metric_target}\n" + f"Class agnostic: {self.is_class_agnostic}\n" f"mAP @ 50:95: {self.map50_95:.4f}\n" f"mAP @ 50: {self.map50:.4f}\n" f"mAP @ 75: {self.map75:.4f}\n" @@ -500,6 +562,10 @@ def to_pandas(self) -> "pd.DataFrame": def plot(self): """ Plot the mAP results. + + ![example_plot](\ + https://media.roboflow.com/supervision-docs/metrics/mAP_plot_example.png\ + ){ align=center width="800" } """ labels = ["mAP@50:95", "mAP@50", "mAP@75"] diff --git a/supervision/metrics/mean_average_recall.py b/supervision/metrics/mean_average_recall.py new file mode 100644 index 000000000..9c3a40718 --- /dev/null +++ b/supervision/metrics/mean_average_recall.py @@ -0,0 +1,697 @@ +from __future__ import annotations + +from copy import deepcopy +from dataclasses import dataclass +from typing import TYPE_CHECKING, List, Optional, Tuple, Union + +import numpy as np +from matplotlib import pyplot as plt + +from supervision.config import ORIENTED_BOX_COORDINATES +from supervision.detection.core import Detections +from supervision.detection.utils import ( + box_iou_batch, + mask_iou_batch, + oriented_box_iou_batch, +) +from supervision.draw.color import LEGACY_COLOR_PALETTE +from supervision.metrics.core import Metric, MetricTarget +from supervision.metrics.utils.object_size import ( + ObjectSizeCategory, + get_detection_size_category, +) +from supervision.metrics.utils.utils import ensure_pandas_installed + +if TYPE_CHECKING: + import pandas as pd + + +class MeanAverageRecall(Metric): + """ + Mean Average Recall (mAR) measures how well the model detects + and retrieves relevant objects by averaging recall over multiple + IoU thresholds, classes and detection limits. + + Intuitively, while Recall measures the ability to find all relevant + objects, mAR narrows down how many detections are considered for each + class. For example, mAR @ 100 considers the top 100 highest confidence + detections for each class. mAR @ 1 considers only the highest + confidence detection for each class. + + Example: + ```python + import supervision as sv + from supervision.metrics import MeanAverageRecall + + predictions = sv.Detections(...) + targets = sv.Detections(...) 
+
+        mar_metric = MeanAverageRecall()
+        mar_result = mar_metric.update(predictions, targets).compute()
+
+        print(mar_result.mAR_at_100)
+        # 0.5241
+
+        print(mar_result)
+        # MeanAverageRecallResult:
+        # Metric target: MetricTarget.BOXES
+        # mAR @ 1: 0.1362
+        # mAR @ 10: 0.4239
+        # mAR @ 100: 0.5241
+        # max detections: [1 10 100]
+        # IoU thresh: [0.5 0.55 0.6 ...]
+        # mAR per class:
+        #   0: [0.78571 0.78571 0.78571 ...]
+        #   ...
+        # Small objects: ...
+        # Medium objects: ...
+        # Large objects: ...
+
+        mar_result.plot()
+    ```
+
+    ![example_plot](\
+        https://media.roboflow.com/supervision-docs/metrics/mAR_plot_example.png\
+    ){ align=center width="800" }
+    """
+
+    def __init__(
+        self,
+        metric_target: MetricTarget = MetricTarget.BOXES,
+    ):
+        """
+        Initialize the Mean Average Recall metric.
+
+        Args:
+            metric_target (MetricTarget): The type of detection data to use.
+        """
+        self._metric_target = metric_target
+
+        self._predictions_list: List[Detections] = []
+        self._targets_list: List[Detections] = []
+
+        self.max_detections = np.array([1, 10, 100])
+
+    def reset(self) -> None:
+        """
+        Reset the metric to its initial state, clearing all stored data.
+        """
+        self._predictions_list = []
+        self._targets_list = []
+
+    def update(
+        self,
+        predictions: Union[Detections, List[Detections]],
+        targets: Union[Detections, List[Detections]],
+    ) -> MeanAverageRecall:
+        """
+        Add new predictions and targets to the metric, but do not compute the result.
+
+        Args:
+            predictions (Union[Detections, List[Detections]]): The predicted detections.
+            targets (Union[Detections, List[Detections]]): The target detections.
+
+        Returns:
+            (MeanAverageRecall): The updated metric instance.
+        """
+        if not isinstance(predictions, list):
+            predictions = [predictions]
+        if not isinstance(targets, list):
+            targets = [targets]
+
+        if len(predictions) != len(targets):
+            raise ValueError(
+                f"The number of predictions ({len(predictions)}) and"
+                f" targets ({len(targets)}) during the update must be the same."
+            )
+
+        self._predictions_list.extend(predictions)
+        self._targets_list.extend(targets)
+
+        return self
+
+    def compute(self) -> MeanAverageRecallResult:
+        """
+        Calculate the Mean Average Recall metric based on the stored predictions
+        and ground-truth data, at different IoU thresholds and maximum
+        detection counts.
+
+        Returns:
+            (MeanAverageRecallResult): The Mean Average Recall metric result.
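+
+        Example (illustrative; assumes `predictions` and `targets` are populated
+        `sv.Detections` objects):
+            ```python
+            mar_result = MeanAverageRecall().update(predictions, targets).compute()
+            print(mar_result.mAR_at_1, mar_result.mAR_at_10, mar_result.mAR_at_100)
+            ```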
+ """ + result = self._compute(self._predictions_list, self._targets_list) + + small_predictions, small_targets = self._filter_predictions_and_targets_by_size( + self._predictions_list, self._targets_list, ObjectSizeCategory.SMALL + ) + result.small_objects = self._compute(small_predictions, small_targets) + + medium_predictions, medium_targets = ( + self._filter_predictions_and_targets_by_size( + self._predictions_list, self._targets_list, ObjectSizeCategory.MEDIUM + ) + ) + result.medium_objects = self._compute(medium_predictions, medium_targets) + + large_predictions, large_targets = self._filter_predictions_and_targets_by_size( + self._predictions_list, self._targets_list, ObjectSizeCategory.LARGE + ) + result.large_objects = self._compute(large_predictions, large_targets) + + return result + + def _compute( + self, predictions_list: List[Detections], targets_list: List[Detections] + ) -> MeanAverageRecallResult: + iou_thresholds = np.linspace(0.5, 0.95, 10) + stats = [] + + for predictions, targets in zip(predictions_list, targets_list): + prediction_contents = self._detections_content(predictions) + target_contents = self._detections_content(targets) + + if len(targets) > 0: + if len(predictions) == 0: + stats.append( + ( + np.zeros((0, iou_thresholds.size), dtype=bool), + np.zeros((0,), dtype=np.float32), + np.zeros((0,), dtype=int), + targets.class_id, + ) + ) + + else: + if self._metric_target == MetricTarget.BOXES: + iou = box_iou_batch(target_contents, prediction_contents) + elif self._metric_target == MetricTarget.MASKS: + iou = mask_iou_batch(target_contents, prediction_contents) + elif self._metric_target == MetricTarget.ORIENTED_BOUNDING_BOXES: + iou = oriented_box_iou_batch( + target_contents, prediction_contents + ) + else: + raise ValueError( + "Unsupported metric target for IoU calculation" + ) + + matches = self._match_detection_batch( + predictions.class_id, targets.class_id, iou, iou_thresholds + ) + stats.append( + ( + matches, + predictions.confidence, + predictions.class_id, + targets.class_id, + ) + ) + + if not stats: + return MeanAverageRecallResult( + metric_target=self._metric_target, + recall_scores=np.zeros(iou_thresholds.shape[0]), + recall_per_class=np.zeros((0, iou_thresholds.shape[0])), + max_detections=self.max_detections, + iou_thresholds=iou_thresholds, + matched_classes=np.array([], dtype=int), + small_objects=None, + medium_objects=None, + large_objects=None, + ) + + concatenated_stats = [np.concatenate(items, 0) for items in zip(*stats)] + recall_scores_per_k, recall_per_class, unique_classes = ( + self._compute_average_recall_for_classes(*concatenated_stats) + ) + + return MeanAverageRecallResult( + metric_target=self._metric_target, + recall_scores=recall_scores_per_k, + recall_per_class=recall_per_class, + max_detections=self.max_detections, + iou_thresholds=iou_thresholds, + matched_classes=unique_classes, + small_objects=None, + medium_objects=None, + large_objects=None, + ) + + def _compute_average_recall_for_classes( + self, + matches: np.ndarray, + prediction_confidence: np.ndarray, + prediction_class_ids: np.ndarray, + true_class_ids: np.ndarray, + ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + sorted_indices = np.argsort(-prediction_confidence) + matches = matches[sorted_indices] + prediction_class_ids = prediction_class_ids[sorted_indices] + unique_classes, class_counts = np.unique(true_class_ids, return_counts=True) + + recalls_at_k = [] + for max_detections in self.max_detections: + # Shape: PxTh,P,C,C -> CxThx3 + confusion_matrix = 
self._compute_confusion_matrix( + matches, + prediction_class_ids, + unique_classes, + class_counts, + max_detections=max_detections, + ) + + # Shape: CxThx3 -> CxTh + recall_per_class = self._compute_recall(confusion_matrix) + recalls_at_k.append(recall_per_class) + + # Shape: KxCxTh -> KxC + recalls_at_k = np.array(recalls_at_k) + average_recall_per_class = np.mean(recalls_at_k, axis=2) + + # Shape: KxC -> K + recall_scores = np.mean(average_recall_per_class, axis=1) + + return recall_scores, recall_per_class, unique_classes + + @staticmethod + def _match_detection_batch( + predictions_classes: np.ndarray, + target_classes: np.ndarray, + iou: np.ndarray, + iou_thresholds: np.ndarray, + ) -> np.ndarray: + num_predictions, num_iou_levels = ( + predictions_classes.shape[0], + iou_thresholds.shape[0], + ) + correct = np.zeros((num_predictions, num_iou_levels), dtype=bool) + correct_class = target_classes[:, None] == predictions_classes + + for i, iou_level in enumerate(iou_thresholds): + matched_indices = np.where((iou >= iou_level) & correct_class) + + if matched_indices[0].shape[0]: + combined_indices = np.stack(matched_indices, axis=1) + iou_values = iou[matched_indices][:, None] + matches = np.hstack([combined_indices, iou_values]) + + if matched_indices[0].shape[0] > 1: + matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 1], return_index=True)[1]] + matches = matches[np.unique(matches[:, 0], return_index=True)[1]] + + correct[matches[:, 1].astype(int), i] = True + + return correct + + @staticmethod + def _compute_confusion_matrix( + sorted_matches: np.ndarray, + sorted_prediction_class_ids: np.ndarray, + unique_classes: np.ndarray, + class_counts: np.ndarray, + max_detections: Optional[int] = None, + ) -> np.ndarray: + """ + Compute the confusion matrix for each class and IoU threshold. + + Assumes the matches and prediction_class_ids are sorted by confidence + in descending order. + + Args: + sorted_matches: np.ndarray, bool, shape (P, Th), that is True + if the prediction is a true positive at the given IoU threshold. + sorted_prediction_class_ids: np.ndarray, int, shape (P,), containing + the class id for each prediction. + unique_classes: np.ndarray, int, shape (C,), containing the unique + class ids. + class_counts: np.ndarray, int, shape (C,), containing the number + of true instances for each class. + max_detections: Optional[int], the maximum number of detections to + consider for each class. Extra detections are considered false + positives. By default, all detections are considered. + + Returns: + np.ndarray, shape (C, Th, 3), containing the true positives, false + positives, and false negatives for each class and IoU threshold. 
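+
+        Example (illustrative): for a single class with 2 true instances and
+            sorted matches `[True, False, True]` at one IoU threshold, using
+            `max_detections=2` keeps only the top-2 predictions, giving
+            TP=1, FP=1 and FN=1.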
+        """
+        num_thresholds = sorted_matches.shape[1]
+        num_classes = unique_classes.shape[0]
+
+        confusion_matrix = np.zeros((num_classes, num_thresholds, 3))
+        for class_idx, class_id in enumerate(unique_classes):
+            is_class = sorted_prediction_class_ids == class_id
+            num_true = class_counts[class_idx]
+            num_predictions = is_class.sum()
+
+            if num_predictions == 0:
+                true_positives = np.zeros(num_thresholds)
+                false_positives = np.zeros(num_thresholds)
+                false_negatives = np.full(num_thresholds, num_true)
+            elif num_true == 0:
+                true_positives = np.zeros(num_thresholds)
+                false_positives = np.full(num_thresholds, num_predictions)
+                false_negatives = np.zeros(num_thresholds)
+            else:
+                # Consider only the top `max_detections` highest-confidence
+                # predictions for this class; the rest are dropped, which is
+                # equivalent for recall since it depends only on TP and FN.
+                limited_matches = sorted_matches[is_class][:max_detections]
+                true_positives = limited_matches.sum(0)
+                false_positives = (1 - limited_matches).sum(0)
+                false_negatives = num_true - true_positives
+            confusion_matrix[class_idx] = np.stack(
+                [true_positives, false_positives, false_negatives], axis=1
+            )
+
+        return confusion_matrix
+
+    @staticmethod
+    def _compute_recall(confusion_matrix: np.ndarray) -> np.ndarray:
+        """
+        Broadcastable function, computing the recall from the confusion matrix.
+
+        Arguments:
+            confusion_matrix: np.ndarray, shape (N, ..., 3), where the last dimension
+                contains the true positives, false positives, and false negatives.
+
+        Returns:
+            np.ndarray, shape (N, ...), containing the recall for each element.
+        """
+        if not confusion_matrix.shape[-1] == 3:
+            raise ValueError(
+                f"Confusion matrix must have shape (..., 3), got "
+                f"{confusion_matrix.shape}"
+            )
+        true_positives = confusion_matrix[..., 0]
+        false_negatives = confusion_matrix[..., 2]
+
+        denominator = true_positives + false_negatives
+        recall = np.where(denominator == 0, 0, true_positives / denominator)
+
+        return recall
+
+    def _detections_content(self, detections: Detections) -> np.ndarray:
+        """Return boxes, masks or oriented bounding boxes from detections."""
+        if self._metric_target == MetricTarget.BOXES:
+            return detections.xyxy
+        if self._metric_target == MetricTarget.MASKS:
+            return (
+                detections.mask
+                if detections.mask is not None
+                else self._make_empty_content()
+            )
+        if self._metric_target == MetricTarget.ORIENTED_BOUNDING_BOXES:
+            obb = detections.data.get(ORIENTED_BOX_COORDINATES)
+            if obb is not None and len(obb) > 0:
+                return np.array(obb, dtype=np.float32)
+            return self._make_empty_content()
+        raise ValueError(f"Invalid metric target: {self._metric_target}")
+
+    def _make_empty_content(self) -> np.ndarray:
+        if self._metric_target == MetricTarget.BOXES:
+            return np.empty((0, 4), dtype=np.float32)
+        if self._metric_target == MetricTarget.MASKS:
+            return np.empty((0, 0, 0), dtype=bool)
+        if self._metric_target == MetricTarget.ORIENTED_BOUNDING_BOXES:
+            return np.empty((0, 4, 2), dtype=np.float32)
+        raise ValueError(f"Invalid metric target: {self._metric_target}")
+
+    def _filter_detections_by_size(
+        self, detections: Detections, size_category: ObjectSizeCategory
+    ) -> Detections:
+        """Return a copy of detections with contents filtered by object size."""
+        new_detections = deepcopy(detections)
+        if detections.is_empty() or size_category == ObjectSizeCategory.ANY:
+            return new_detections
+
+        sizes = get_detection_size_category(new_detections, self._metric_target)
+        size_mask = sizes == size_category.value
+
+        new_detections.xyxy = new_detections.xyxy[size_mask]
+        if new_detections.mask is not None:
+            new_detections.mask = new_detections.mask[size_mask]
+        if new_detections.class_id is not None:
+            new_detections.class_id = new_detections.class_id[size_mask]
+        if new_detections.confidence is not None:
+            new_detections.confidence = new_detections.confidence[size_mask]
+        if new_detections.tracker_id is not None:
+            new_detections.tracker_id = new_detections.tracker_id[size_mask]
+        if new_detections.data is not None:
+            for key, value in new_detections.data.items():
+                new_detections.data[key] = np.array(value)[size_mask]
+
+        return new_detections
+
+    def _filter_predictions_and_targets_by_size(
+        self,
+        predictions_list: List[Detections],
+        targets_list: List[Detections],
+        size_category: ObjectSizeCategory,
+    ) -> Tuple[List[Detections], List[Detections]]:
+        """
+        Filter predictions and targets by object size category.
+        """
+        new_predictions_list = []
+        new_targets_list = []
+        for predictions, targets in zip(predictions_list, targets_list):
+            new_predictions_list.append(
+                self._filter_detections_by_size(predictions, size_category)
+            )
+            new_targets_list.append(
+                self._filter_detections_by_size(targets, size_category)
+            )
+        return new_predictions_list, new_targets_list
+
+
+@dataclass
+class MeanAverageRecallResult:
+    """
+    The results of the Mean Average Recall metric calculation.
+
+    Defaults to `0` if no detections or targets were provided.
+
+    Attributes:
+        metric_target (MetricTarget): the type of data used for the metric -
+            boxes, masks or oriented bounding boxes.
+        mAR_at_1 (float): the Mean Average Recall, when considering only the top
+            highest confidence detection for each class.
+        mAR_at_10 (float): the Mean Average Recall, when considering the top 10
+            highest confidence detections for each class.
+        mAR_at_100 (float): the Mean Average Recall, when considering the top 100
+            highest confidence detections for each class.
+        recall_per_class (np.ndarray): the recall scores per class and IoU threshold.
+            Shape: `(num_target_classes, num_iou_thresholds)`
+        max_detections (np.ndarray): the array with the maximum numbers of
+            detections considered.
+        iou_thresholds (np.ndarray): the IoU thresholds used in the calculations.
+        matched_classes (np.ndarray): the class IDs of all matched classes.
+            Corresponds to the rows of `recall_per_class`.
+        small_objects (Optional[MeanAverageRecallResult]): the Mean Average Recall
+            metric results for small objects (area < 32²).
+ medium_objects (Optional[MeanAverageRecallResult]): the Mean Average Recall + metric results for medium objects (32² ≤ area < 96²). + large_objects (Optional[MeanAverageRecallResult]): the Mean Average Recall + metric results for large objects (area ≥ 96²). + """ + + metric_target: MetricTarget + + @property + def mAR_at_1(self) -> float: + return self.recall_scores[0] + + @property + def mAR_at_10(self) -> float: + return self.recall_scores[1] + + @property + def mAR_at_100(self) -> float: + return self.recall_scores[2] + + recall_scores: np.ndarray + recall_per_class: np.ndarray + max_detections: np.ndarray + iou_thresholds: np.ndarray + matched_classes: np.ndarray + + small_objects: Optional[MeanAverageRecallResult] + medium_objects: Optional[MeanAverageRecallResult] + large_objects: Optional[MeanAverageRecallResult] + + def __str__(self) -> str: + """ + Format as a pretty string. + + Example: + ```python + # MeanAverageRecallResult: + # Metric target: MetricTarget.BOXES + # mAR @ 1: 0.1362 + # mAR @ 10: 0.4239 + # mAR @ 100: 0.5241 + # max detections: [1 10 100] + # IoU thresh: [0.5 0.55 0.6 ...] + # mAR per class: + # 0: [0.78571 0.78571 0.78571 ...] + # ... + # Small objects: ... + # Medium objects: ... + # Large objects: ... + ``` + """ + out_str = ( + f"{self.__class__.__name__}:\n" + f"Metric target: {self.metric_target}\n" + f"mAR @ 1: {self.mAR_at_1:.4f}\n" + f"mAR @ 10: {self.mAR_at_10:.4f}\n" + f"mAR @ 100: {self.mAR_at_100:.4f}\n" + f"max detections: {self.max_detections}\n" + f"IoU thresh: {self.iou_thresholds}\n" + f"mAR per class:\n" + ) + if self.recall_per_class.size == 0: + out_str += " No results\n" + for class_id, recall_of_class in zip( + self.matched_classes, self.recall_per_class + ): + out_str += f" {class_id}: {recall_of_class}\n" + + indent = " " + if self.small_objects is not None: + indented = indent + str(self.small_objects).replace("\n", f"\n{indent}") + out_str += f"\nSmall objects:\n{indented}" + if self.medium_objects is not None: + indented = indent + str(self.medium_objects).replace("\n", f"\n{indent}") + out_str += f"\nMedium objects:\n{indented}" + if self.large_objects is not None: + indented = indent + str(self.large_objects).replace("\n", f"\n{indent}") + out_str += f"\nLarge objects:\n{indented}" + + return out_str + + def to_pandas(self) -> "pd.DataFrame": + """ + Convert the result to a pandas DataFrame. + + Returns: + (pd.DataFrame): The result as a DataFrame. + """ + ensure_pandas_installed() + import pandas as pd + + pandas_data = { + "mAR @ 1": self.mAR_at_1, + "mAR @ 10": self.mAR_at_10, + "mAR @ 100": self.mAR_at_100, + } + + if self.small_objects is not None: + small_objects_df = self.small_objects.to_pandas() + for key, value in small_objects_df.items(): + pandas_data[f"small_objects_{key}"] = value + if self.medium_objects is not None: + medium_objects_df = self.medium_objects.to_pandas() + for key, value in medium_objects_df.items(): + pandas_data[f"medium_objects_{key}"] = value + if self.large_objects is not None: + large_objects_df = self.large_objects.to_pandas() + for key, value in large_objects_df.items(): + pandas_data[f"large_objects_{key}"] = value + + return pd.DataFrame(pandas_data, index=[0]) + + def plot(self): + """ + Plot the Mean Average Recall results. 
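+
+        Example (assumes `mar_result` is a computed `MeanAverageRecallResult`):
+            ```python
+            mar_result.plot()  # bar chart of mAR @ 1/10/100, grouped by object size
+            ```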
+ + ![example_plot](\ + https://media.roboflow.com/supervision-docs/metrics/mAR_plot_example.png\ + ){ align=center width="800" } + """ + labels = ["mAR @ 1", "mAR @ 10", "mAR @ 100"] + values = [self.mAR_at_1, self.mAR_at_10, self.mAR_at_100] + colors = [LEGACY_COLOR_PALETTE[0]] * 3 + + if self.small_objects is not None: + small_objects = self.small_objects + labels += ["Small: mAR @ 1", "Small: mAR @ 10", "Small: mAR @ 100"] + values += [ + small_objects.mAR_at_1, + small_objects.mAR_at_10, + small_objects.mAR_at_100, + ] + colors += [LEGACY_COLOR_PALETTE[3]] * 3 + + if self.medium_objects is not None: + medium_objects = self.medium_objects + labels += ["Medium: mAR @ 1", "Medium: mAR @ 10", "Medium: mAR @ 100"] + values += [ + medium_objects.mAR_at_1, + medium_objects.mAR_at_10, + medium_objects.mAR_at_100, + ] + colors += [LEGACY_COLOR_PALETTE[2]] * 3 + + if self.large_objects is not None: + large_objects = self.large_objects + labels += ["Large: mAR @ 1", "Large: mAR @ 10", "Large: mAR @ 100"] + values += [ + large_objects.mAR_at_1, + large_objects.mAR_at_10, + large_objects.mAR_at_100, + ] + colors += [LEGACY_COLOR_PALETTE[4]] * 3 + + plt.rcParams["font.family"] = "monospace" + + _, ax = plt.subplots(figsize=(10, 6)) + ax.set_ylim(0, 1) + ax.set_ylabel("Value", fontweight="bold") + title = ( + f"Mean Average Recall, by Object Size" + f"\n(target: {self.metric_target.value})" + ) + ax.set_title(title, fontweight="bold") + + x_positions = range(len(labels)) + bars = ax.bar(x_positions, values, color=colors, align="center") + + ax.set_xticks(x_positions) + ax.set_xticklabels(labels, rotation=45, ha="right") + + for bar in bars: + y_value = bar.get_height() + ax.text( + bar.get_x() + bar.get_width() / 2, + y_value + 0.02, + f"{y_value:.2f}", + ha="center", + va="bottom", + ) + + plt.rcParams["font.family"] = "sans-serif" + + plt.tight_layout() + plt.show() diff --git a/supervision/metrics/precision.py b/supervision/metrics/precision.py new file mode 100644 index 000000000..a5d4011e8 --- /dev/null +++ b/supervision/metrics/precision.py @@ -0,0 +1,653 @@ +from __future__ import annotations + +from copy import deepcopy +from dataclasses import dataclass +from typing import TYPE_CHECKING, List, Optional, Tuple, Union + +import numpy as np +from matplotlib import pyplot as plt + +from supervision.config import ORIENTED_BOX_COORDINATES +from supervision.detection.core import Detections +from supervision.detection.utils import ( + box_iou_batch, + mask_iou_batch, + oriented_box_iou_batch, +) +from supervision.draw.color import LEGACY_COLOR_PALETTE +from supervision.metrics.core import AveragingMethod, Metric, MetricTarget +from supervision.metrics.utils.object_size import ( + ObjectSizeCategory, + get_detection_size_category, +) +from supervision.metrics.utils.utils import ensure_pandas_installed + +if TYPE_CHECKING: + import pandas as pd + + +class Precision(Metric): + """ + Precision is a metric used to evaluate object detection models. It is the ratio of + true positive detections to the total number of predicted detections. We calculate + it at different IoU thresholds. + + In simple terms, Precision is a measure of a model's accuracy, calculated as: + + `Precision = TP / (TP + FP)` + + Here, `TP` is the number of true positives (correct detections), and `FP` is the + number of false positive detections (detected, but incorrectly). + + Example: + ```python + import supervision as sv + from supervision.metrics import Precision + + predictions = sv.Detections(...) 
+ targets = sv.Detections(...) + + precision_metric = Precision() + precision_result = precision_metric.update(predictions, targets).compute() + + print(precision_result.precision_at_50) + # 0.8099 + + print(precision_result) + # PrecisionResult: + # Metric target: MetricTarget.BOXES + # Averaging method: AveragingMethod.WEIGHTED + # P @ 50: 0.8099 + # P @ 75: 0.7969 + # P @ thresh: [0.80992 0.80905 0.80905 ...] + # IoU thresh: [0.5 0.55 0.6 ...] + # Precision per class: + # 0: [0.64706 0.64706 0.64706 ...] + # ... + # Small objects: ... + # Medium objects: ... + # Large objects: ... + + print(precision_result.small_objects.precision_at_50) + ``` + + ![example_plot](\ + https://media.roboflow.com/supervision-docs/metrics/precision_plot_example.png\ + ){ align=center width="800" } + """ + + def __init__( + self, + metric_target: MetricTarget = MetricTarget.BOXES, + averaging_method: AveragingMethod = AveragingMethod.WEIGHTED, + ): + """ + Initialize the Precision metric. + + Args: + metric_target (MetricTarget): The type of detection data to use. + averaging_method (AveragingMethod): The averaging method used to compute the + precision. Determines how the precision is aggregated across classes. + """ + self._metric_target = metric_target + self.averaging_method = averaging_method + + self._predictions_list: List[Detections] = [] + self._targets_list: List[Detections] = [] + + def reset(self) -> None: + """ + Reset the metric to its initial state, clearing all stored data. + """ + self._predictions_list = [] + self._targets_list = [] + + def update( + self, + predictions: Union[Detections, List[Detections]], + targets: Union[Detections, List[Detections]], + ) -> Precision: + """ + Add new predictions and targets to the metric, but do not compute the result. + + Args: + predictions (Union[Detections, List[Detections]]): The predicted detections. + targets (Union[Detections, List[Detections]]): The target detections. + + Returns: + (Precision): The updated metric instance. + """ + if not isinstance(predictions, list): + predictions = [predictions] + if not isinstance(targets, list): + targets = [targets] + + if len(predictions) != len(targets): + raise ValueError( + f"The number of predictions ({len(predictions)}) and" + f" targets ({len(targets)}) during the update must be the same." + ) + + self._predictions_list.extend(predictions) + self._targets_list.extend(targets) + + return self + + def compute(self) -> PrecisionResult: + """ + Calculate the precision metric based on the stored predictions and ground-truth + data, at different IoU thresholds. + + Returns: + (PrecisionResult): The precision metric result. 
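+
+        Example (illustrative; assumes `predictions` and `targets` are populated
+        `sv.Detections` objects):
+            ```python
+            precision_result = Precision().update(predictions, targets).compute()
+            print(precision_result.precision_at_50, precision_result.precision_at_75)
+            ```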
+ """ + result = self._compute(self._predictions_list, self._targets_list) + + small_predictions, small_targets = self._filter_predictions_and_targets_by_size( + self._predictions_list, self._targets_list, ObjectSizeCategory.SMALL + ) + result.small_objects = self._compute(small_predictions, small_targets) + + medium_predictions, medium_targets = ( + self._filter_predictions_and_targets_by_size( + self._predictions_list, self._targets_list, ObjectSizeCategory.MEDIUM + ) + ) + result.medium_objects = self._compute(medium_predictions, medium_targets) + + large_predictions, large_targets = self._filter_predictions_and_targets_by_size( + self._predictions_list, self._targets_list, ObjectSizeCategory.LARGE + ) + result.large_objects = self._compute(large_predictions, large_targets) + + return result + + def _compute( + self, predictions_list: List[Detections], targets_list: List[Detections] + ) -> PrecisionResult: + iou_thresholds = np.linspace(0.5, 0.95, 10) + stats = [] + + for predictions, targets in zip(predictions_list, targets_list): + prediction_contents = self._detections_content(predictions) + target_contents = self._detections_content(targets) + + if len(targets) > 0: + if len(predictions) == 0: + stats.append( + ( + np.zeros((0, iou_thresholds.size), dtype=bool), + np.zeros((0,), dtype=np.float32), + np.zeros((0,), dtype=int), + targets.class_id, + ) + ) + + else: + if self._metric_target == MetricTarget.BOXES: + iou = box_iou_batch(target_contents, prediction_contents) + elif self._metric_target == MetricTarget.MASKS: + iou = mask_iou_batch(target_contents, prediction_contents) + elif self._metric_target == MetricTarget.ORIENTED_BOUNDING_BOXES: + iou = oriented_box_iou_batch( + target_contents, prediction_contents + ) + else: + raise ValueError( + "Unsupported metric target for IoU calculation" + ) + + matches = self._match_detection_batch( + predictions.class_id, targets.class_id, iou, iou_thresholds + ) + stats.append( + ( + matches, + predictions.confidence, + predictions.class_id, + targets.class_id, + ) + ) + + if not stats: + return PrecisionResult( + metric_target=self._metric_target, + averaging_method=self.averaging_method, + precision_scores=np.zeros(iou_thresholds.shape[0]), + precision_per_class=np.zeros((0, iou_thresholds.shape[0])), + iou_thresholds=iou_thresholds, + matched_classes=np.array([], dtype=int), + small_objects=None, + medium_objects=None, + large_objects=None, + ) + + concatenated_stats = [np.concatenate(items, 0) for items in zip(*stats)] + precision_scores, precision_per_class, unique_classes = ( + self._compute_precision_for_classes(*concatenated_stats) + ) + + return PrecisionResult( + metric_target=self._metric_target, + averaging_method=self.averaging_method, + precision_scores=precision_scores, + precision_per_class=precision_per_class, + iou_thresholds=iou_thresholds, + matched_classes=unique_classes, + small_objects=None, + medium_objects=None, + large_objects=None, + ) + + def _compute_precision_for_classes( + self, + matches: np.ndarray, + prediction_confidence: np.ndarray, + prediction_class_ids: np.ndarray, + true_class_ids: np.ndarray, + ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + sorted_indices = np.argsort(-prediction_confidence) + matches = matches[sorted_indices] + prediction_class_ids = prediction_class_ids[sorted_indices] + unique_classes, class_counts = np.unique(true_class_ids, return_counts=True) + + # Shape: PxTh,P,C,C -> CxThx3 + confusion_matrix = self._compute_confusion_matrix( + matches, prediction_class_ids, 
unique_classes, class_counts + ) + + # Shape: CxThx3 -> CxTh + precision_per_class = self._compute_precision(confusion_matrix) + + # Shape: CxTh -> Th + if self.averaging_method == AveragingMethod.MACRO: + precision_scores = np.mean(precision_per_class, axis=0) + elif self.averaging_method == AveragingMethod.MICRO: + confusion_matrix_merged = confusion_matrix.sum(0) + precision_scores = self._compute_precision(confusion_matrix_merged) + elif self.averaging_method == AveragingMethod.WEIGHTED: + class_counts = class_counts.astype(np.float32) + precision_scores = np.average( + precision_per_class, axis=0, weights=class_counts + ) + + return precision_scores, precision_per_class, unique_classes + + @staticmethod + def _match_detection_batch( + predictions_classes: np.ndarray, + target_classes: np.ndarray, + iou: np.ndarray, + iou_thresholds: np.ndarray, + ) -> np.ndarray: + num_predictions, num_iou_levels = ( + predictions_classes.shape[0], + iou_thresholds.shape[0], + ) + correct = np.zeros((num_predictions, num_iou_levels), dtype=bool) + correct_class = target_classes[:, None] == predictions_classes + + for i, iou_level in enumerate(iou_thresholds): + matched_indices = np.where((iou >= iou_level) & correct_class) + + if matched_indices[0].shape[0]: + combined_indices = np.stack(matched_indices, axis=1) + iou_values = iou[matched_indices][:, None] + matches = np.hstack([combined_indices, iou_values]) + + if matched_indices[0].shape[0] > 1: + matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 1], return_index=True)[1]] + matches = matches[np.unique(matches[:, 0], return_index=True)[1]] + + correct[matches[:, 1].astype(int), i] = True + + return correct + + @staticmethod + def _compute_confusion_matrix( + sorted_matches: np.ndarray, + sorted_prediction_class_ids: np.ndarray, + unique_classes: np.ndarray, + class_counts: np.ndarray, + ) -> np.ndarray: + """ + Compute the confusion matrix for each class and IoU threshold. + + Assumes the matches and prediction_class_ids are sorted by confidence + in descending order. + + Arguments: + sorted_matches: np.ndarray, bool, shape (P, Th), that is True + if the prediction is a true positive at the given IoU threshold. + sorted_prediction_class_ids: np.ndarray, int, shape (P,), containing + the class id for each prediction. + unique_classes: np.ndarray, int, shape (C,), containing the unique + class ids. + class_counts: np.ndarray, int, shape (C,), containing the number + of true instances for each class. + + Returns: + np.ndarray, shape (C, Th, 3), containing the true positives, false + positives, and false negatives for each class and IoU threshold. 
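+
+        Example (illustrative): for a single class with 2 true instances and
+            sorted matches `[True, False, True]` at one IoU threshold, TP=2,
+            FP=1 and FN=0, so precision at that threshold is 2/3.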
+ """ + + num_thresholds = sorted_matches.shape[1] + num_classes = unique_classes.shape[0] + + confusion_matrix = np.zeros((num_classes, num_thresholds, 3)) + for class_idx, class_id in enumerate(unique_classes): + is_class = sorted_prediction_class_ids == class_id + num_true = class_counts[class_idx] + num_predictions = is_class.sum() + + if num_predictions == 0: + true_positives = np.zeros(num_thresholds) + false_positives = np.zeros(num_thresholds) + false_negatives = np.full(num_thresholds, num_true) + elif num_true == 0: + true_positives = np.zeros(num_thresholds) + false_positives = np.full(num_thresholds, num_predictions) + false_negatives = np.zeros(num_thresholds) + else: + true_positives = sorted_matches[is_class].sum(0) + false_positives = (1 - sorted_matches[is_class]).sum(0) + false_negatives = num_true - true_positives + confusion_matrix[class_idx] = np.stack( + [true_positives, false_positives, false_negatives], axis=1 + ) + + return confusion_matrix + + @staticmethod + def _compute_precision(confusion_matrix: np.ndarray) -> np.ndarray: + """ + Broadcastable function, computing the precision from the confusion matrix. + + Arguments: + confusion_matrix: np.ndarray, shape (N, ..., 3), where the last dimension + contains the true positives, false positives, and false negatives. + + Returns: + np.ndarray, shape (N, ...), containing the precision for each element. + """ + if not confusion_matrix.shape[-1] == 3: + raise ValueError( + f"Confusion matrix must have shape (..., 3), got " + f"{confusion_matrix.shape}" + ) + true_positives = confusion_matrix[..., 0] + false_positives = confusion_matrix[..., 1] + + denominator = true_positives + false_positives + precision = np.where(denominator == 0, 0, true_positives / denominator) + + return precision + + def _detections_content(self, detections: Detections) -> np.ndarray: + """Return boxes, masks or oriented bounding boxes from detections.""" + if self._metric_target == MetricTarget.BOXES: + return detections.xyxy + if self._metric_target == MetricTarget.MASKS: + return ( + detections.mask + if detections.mask is not None + else self._make_empty_content() + ) + if self._metric_target == MetricTarget.ORIENTED_BOUNDING_BOXES: + obb = detections.data.get(ORIENTED_BOX_COORDINATES) + if obb is not None and len(obb) > 0: + return np.array(obb, dtype=np.float32) + return self._make_empty_content() + raise ValueError(f"Invalid metric target: {self._metric_target}") + + def _make_empty_content(self) -> np.ndarray: + if self._metric_target == MetricTarget.BOXES: + return np.empty((0, 4), dtype=np.float32) + if self._metric_target == MetricTarget.MASKS: + return np.empty((0, 0, 0), dtype=bool) + if self._metric_target == MetricTarget.ORIENTED_BOUNDING_BOXES: + return np.empty((0, 4, 2), dtype=np.float32) + raise ValueError(f"Invalid metric target: {self._metric_target}") + + def _filter_detections_by_size( + self, detections: Detections, size_category: ObjectSizeCategory + ) -> Detections: + """Return a copy of detections with contents filtered by object size.""" + new_detections = deepcopy(detections) + if detections.is_empty() or size_category == ObjectSizeCategory.ANY: + return new_detections + + sizes = get_detection_size_category(new_detections, self._metric_target) + size_mask = sizes == size_category.value + + new_detections.xyxy = new_detections.xyxy[size_mask] + if new_detections.mask is not None: + new_detections.mask = new_detections.mask[size_mask] + if new_detections.class_id is not None: + new_detections.class_id = 
new_detections.class_id[size_mask] + if new_detections.confidence is not None: + new_detections.confidence = new_detections.confidence[size_mask] + if new_detections.tracker_id is not None: + new_detections.tracker_id = new_detections.tracker_id[size_mask] + if new_detections.data is not None: + for key, value in new_detections.data.items(): + new_detections.data[key] = np.array(value)[size_mask] + + return new_detections + + def _filter_predictions_and_targets_by_size( + self, + predictions_list: List[Detections], + targets_list: List[Detections], + size_category: ObjectSizeCategory, + ) -> Tuple[List[Detections], List[Detections]]: + """ + Filter predictions and targets by object size category. + """ + new_predictions_list = [] + new_targets_list = [] + for predictions, targets in zip(predictions_list, targets_list): + new_predictions_list.append( + self._filter_detections_by_size(predictions, size_category) + ) + new_targets_list.append( + self._filter_detections_by_size(targets, size_category) + ) + return new_predictions_list, new_targets_list + + +@dataclass +class PrecisionResult: + """ + The results of the precision metric calculation. + + Defaults to `0` if no detections or targets were provided. + + Attributes: + metric_target (MetricTarget): the type of data used for the metric - + boxes, masks or oriented bounding boxes. + averaging_method (AveragingMethod): the averaging method used to compute the + precision. Determines how the precision is aggregated across classes. + precision_at_50 (float): the precision at IoU threshold of `0.5`. + precision_at_75 (float): the precision at IoU threshold of `0.75`. + precision_scores (np.ndarray): the precision scores at each IoU threshold. + Shape: `(num_iou_thresholds,)` + precision_per_class (np.ndarray): the precision scores per class and + IoU threshold. Shape: `(num_target_classes, num_iou_thresholds)` + iou_thresholds (np.ndarray): the IoU thresholds used in the calculations. + matched_classes (np.ndarray): the class IDs of all matched classes. + Corresponds to the rows of `precision_per_class`. + small_objects (Optional[PrecisionResult]): the Precision metric results + for small objects (area < 32²). + medium_objects (Optional[PrecisionResult]): the Precision metric results + for medium objects (32² ≤ area < 96²). + large_objects (Optional[PrecisionResult]): the Precision metric results + for large objects (area ≥ 96²). + """ + + metric_target: MetricTarget + averaging_method: AveragingMethod + + @property + def precision_at_50(self) -> float: + return self.precision_scores[0] + + @property + def precision_at_75(self) -> float: + return self.precision_scores[5] + + precision_scores: np.ndarray + precision_per_class: np.ndarray + iou_thresholds: np.ndarray + matched_classes: np.ndarray + + small_objects: Optional[PrecisionResult] + medium_objects: Optional[PrecisionResult] + large_objects: Optional[PrecisionResult] + + def __str__(self) -> str: + """ + Format as a pretty string. + + Example: + ```python + print(precision_result) + # PrecisionResult: + # Metric target: MetricTarget.BOXES + # Averaging method: AveragingMethod.WEIGHTED + # P @ 50: 0.8099 + # P @ 75: 0.7969 + # P @ thresh: [0.80992 0.80905 0.80905 ...] + # IoU thresh: [0.5 0.55 0.6 ...] + # Precision per class: + # 0: [0.64706 0.64706 0.64706 ...] + # ... + # Small objects: ... + # Medium objects: ... + # Large objects: ... 
+ ``` + """ + out_str = ( + f"{self.__class__.__name__}:\n" + f"Metric target: {self.metric_target}\n" + f"Averaging method: {self.averaging_method}\n" + f"P @ 50: {self.precision_at_50:.4f}\n" + f"P @ 75: {self.precision_at_75:.4f}\n" + f"P @ thresh: {self.precision_scores}\n" + f"IoU thresh: {self.iou_thresholds}\n" + f"Precision per class:\n" + ) + if self.precision_per_class.size == 0: + out_str += " No results\n" + for class_id, precision_of_class in zip( + self.matched_classes, self.precision_per_class + ): + out_str += f" {class_id}: {precision_of_class}\n" + + indent = " " + if self.small_objects is not None: + indented = indent + str(self.small_objects).replace("\n", f"\n{indent}") + out_str += f"\nSmall objects:\n{indented}" + if self.medium_objects is not None: + indented = indent + str(self.medium_objects).replace("\n", f"\n{indent}") + out_str += f"\nMedium objects:\n{indented}" + if self.large_objects is not None: + indented = indent + str(self.large_objects).replace("\n", f"\n{indent}") + out_str += f"\nLarge objects:\n{indented}" + + return out_str + + def to_pandas(self) -> "pd.DataFrame": + """ + Convert the result to a pandas DataFrame. + + Returns: + (pd.DataFrame): The result as a DataFrame. + """ + ensure_pandas_installed() + import pandas as pd + + pandas_data = { + "P@50": self.precision_at_50, + "P@75": self.precision_at_75, + } + + if self.small_objects is not None: + small_objects_df = self.small_objects.to_pandas() + for key, value in small_objects_df.items(): + pandas_data[f"small_objects_{key}"] = value + if self.medium_objects is not None: + medium_objects_df = self.medium_objects.to_pandas() + for key, value in medium_objects_df.items(): + pandas_data[f"medium_objects_{key}"] = value + if self.large_objects is not None: + large_objects_df = self.large_objects.to_pandas() + for key, value in large_objects_df.items(): + pandas_data[f"large_objects_{key}"] = value + + return pd.DataFrame(pandas_data, index=[0]) + + def plot(self): + """ + Plot the precision results. 
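+
+        Example (assumes `precision_result` is a computed `PrecisionResult`):
+            ```python
+            precision_result.plot()  # bar chart of P @ 50/75, grouped by object size
+            ```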
+ + ![example_plot](\ + https://media.roboflow.com/supervision-docs/metrics/precision_plot_example.png\ + ){ align=center width="800" } + """ + + labels = ["Precision@50", "Precision@75"] + values = [self.precision_at_50, self.precision_at_75] + colors = [LEGACY_COLOR_PALETTE[0]] * 2 + + if self.small_objects is not None: + small_objects = self.small_objects + labels += ["Small: P@50", "Small: P@75"] + values += [small_objects.precision_at_50, small_objects.precision_at_75] + colors += [LEGACY_COLOR_PALETTE[3]] * 2 + + if self.medium_objects is not None: + medium_objects = self.medium_objects + labels += ["Medium: P@50", "Medium: P@75"] + values += [medium_objects.precision_at_50, medium_objects.precision_at_75] + colors += [LEGACY_COLOR_PALETTE[2]] * 2 + + if self.large_objects is not None: + large_objects = self.large_objects + labels += ["Large: P@50", "Large: P@75"] + values += [large_objects.precision_at_50, large_objects.precision_at_75] + colors += [LEGACY_COLOR_PALETTE[4]] * 2 + + plt.rcParams["font.family"] = "monospace" + + _, ax = plt.subplots(figsize=(10, 6)) + ax.set_ylim(0, 1) + ax.set_ylabel("Value", fontweight="bold") + title = ( + f"Precision, by Object Size" + f"\n(target: {self.metric_target.value}," + f" averaging: {self.averaging_method.value})" + ) + ax.set_title(title, fontweight="bold") + + x_positions = range(len(labels)) + bars = ax.bar(x_positions, values, color=colors, align="center") + + ax.set_xticks(x_positions) + ax.set_xticklabels(labels, rotation=45, ha="right") + + for bar in bars: + y_value = bar.get_height() + ax.text( + bar.get_x() + bar.get_width() / 2, + y_value + 0.02, + f"{y_value:.2f}", + ha="center", + va="bottom", + ) + + plt.rcParams["font.family"] = "sans-serif" + + plt.tight_layout() + plt.show() diff --git a/supervision/metrics/recall.py b/supervision/metrics/recall.py new file mode 100644 index 000000000..b3586ff7d --- /dev/null +++ b/supervision/metrics/recall.py @@ -0,0 +1,652 @@ +from __future__ import annotations + +from copy import deepcopy +from dataclasses import dataclass +from typing import TYPE_CHECKING, List, Optional, Tuple, Union + +import numpy as np +from matplotlib import pyplot as plt + +from supervision.config import ORIENTED_BOX_COORDINATES +from supervision.detection.core import Detections +from supervision.detection.utils import ( + box_iou_batch, + mask_iou_batch, + oriented_box_iou_batch, +) +from supervision.draw.color import LEGACY_COLOR_PALETTE +from supervision.metrics.core import AveragingMethod, Metric, MetricTarget +from supervision.metrics.utils.object_size import ( + ObjectSizeCategory, + get_detection_size_category, +) +from supervision.metrics.utils.utils import ensure_pandas_installed + +if TYPE_CHECKING: + import pandas as pd + + +class Recall(Metric): + """ + Recall is a metric used to evaluate object detection models. It is the ratio of + true positive detections to the total number of ground truth instances. We calculate + it at different IoU thresholds. + + In simple terms, Recall is a measure of a model's completeness, calculated as: + + `Recall = TP / (TP + FN)` + + Here, `TP` is the number of true positives (correct detections), and `FN` is the + number of false negatives (missed detections). + + Example: + ```python + import supervision as sv + from supervision.metrics import Recall + + predictions = sv.Detections(...) + targets = sv.Detections(...) 
+ + recall_metric = Recall() + recall_result = recall_metric.update(predictions, targets).compute() + + print(recall_result.recall_at_50) + # 0.7615 + + print(recall_result) + # RecallResult: + # Metric target: MetricTarget.BOXES + # Averaging method: AveragingMethod.WEIGHTED + # R @ 50: 0.7615 + # R @ 75: 0.7462 + # R @ thresh: [0.76151 0.76011 0.76011 0.75732 ...] + # IoU thresh: [0.5 0.55 0.6 ...] + # Recall per class: + # 0: [0.78571 0.78571 0.78571 ...] + # ... + # Small objects: ... + # Medium objects: ... + # Large objects: ... + + recall_result.plot() + + ``` + + ![example_plot](\ + https://media.roboflow.com/supervision-docs/metrics/recall_plot_example.png\ + ){ align=center width="800" } + """ + + def __init__( + self, + metric_target: MetricTarget = MetricTarget.BOXES, + averaging_method: AveragingMethod = AveragingMethod.WEIGHTED, + ): + """ + Initialize the Recall metric. + + Args: + metric_target (MetricTarget): The type of detection data to use. + averaging_method (AveragingMethod): The averaging method used to compute the + recall. Determines how the recall is aggregated across classes. + """ + self._metric_target = metric_target + self.averaging_method = averaging_method + + self._predictions_list: List[Detections] = [] + self._targets_list: List[Detections] = [] + + def reset(self) -> None: + """ + Reset the metric to its initial state, clearing all stored data. + """ + self._predictions_list = [] + self._targets_list = [] + + def update( + self, + predictions: Union[Detections, List[Detections]], + targets: Union[Detections, List[Detections]], + ) -> Recall: + """ + Add new predictions and targets to the metric, but do not compute the result. + + Args: + predictions (Union[Detections, List[Detections]]): The predicted detections. + targets (Union[Detections, List[Detections]]): The target detections. + + Returns: + (Recall): The updated metric instance. + """ + if not isinstance(predictions, list): + predictions = [predictions] + if not isinstance(targets, list): + targets = [targets] + + if len(predictions) != len(targets): + raise ValueError( + f"The number of predictions ({len(predictions)}) and" + f" targets ({len(targets)}) during the update must be the same." + ) + + self._predictions_list.extend(predictions) + self._targets_list.extend(targets) + + return self + + def compute(self) -> RecallResult: + """ + Calculate the recall metric based on the stored predictions and ground-truth + data, at different IoU thresholds. + + Returns: + (RecallResult): The recall metric result. 
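+
+        Example (illustrative; assumes `predictions` and `targets` are populated
+        `sv.Detections` objects):
+            ```python
+            recall_result = Recall().update(predictions, targets).compute()
+            print(recall_result.recall_at_50, recall_result.recall_at_75)
+            ```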
+ """ + result = self._compute(self._predictions_list, self._targets_list) + + small_predictions, small_targets = self._filter_predictions_and_targets_by_size( + self._predictions_list, self._targets_list, ObjectSizeCategory.SMALL + ) + result.small_objects = self._compute(small_predictions, small_targets) + + medium_predictions, medium_targets = ( + self._filter_predictions_and_targets_by_size( + self._predictions_list, self._targets_list, ObjectSizeCategory.MEDIUM + ) + ) + result.medium_objects = self._compute(medium_predictions, medium_targets) + + large_predictions, large_targets = self._filter_predictions_and_targets_by_size( + self._predictions_list, self._targets_list, ObjectSizeCategory.LARGE + ) + result.large_objects = self._compute(large_predictions, large_targets) + + return result + + def _compute( + self, predictions_list: List[Detections], targets_list: List[Detections] + ) -> RecallResult: + iou_thresholds = np.linspace(0.5, 0.95, 10) + stats = [] + + for predictions, targets in zip(predictions_list, targets_list): + prediction_contents = self._detections_content(predictions) + target_contents = self._detections_content(targets) + + if len(targets) > 0: + if len(predictions) == 0: + stats.append( + ( + np.zeros((0, iou_thresholds.size), dtype=bool), + np.zeros((0,), dtype=np.float32), + np.zeros((0,), dtype=int), + targets.class_id, + ) + ) + + else: + if self._metric_target == MetricTarget.BOXES: + iou = box_iou_batch(target_contents, prediction_contents) + elif self._metric_target == MetricTarget.MASKS: + iou = mask_iou_batch(target_contents, prediction_contents) + elif self._metric_target == MetricTarget.ORIENTED_BOUNDING_BOXES: + iou = oriented_box_iou_batch( + target_contents, prediction_contents + ) + else: + raise ValueError( + "Unsupported metric target for IoU calculation" + ) + + matches = self._match_detection_batch( + predictions.class_id, targets.class_id, iou, iou_thresholds + ) + stats.append( + ( + matches, + predictions.confidence, + predictions.class_id, + targets.class_id, + ) + ) + + if not stats: + return RecallResult( + metric_target=self._metric_target, + averaging_method=self.averaging_method, + recall_scores=np.zeros(iou_thresholds.shape[0]), + recall_per_class=np.zeros((0, iou_thresholds.shape[0])), + iou_thresholds=iou_thresholds, + matched_classes=np.array([], dtype=int), + small_objects=None, + medium_objects=None, + large_objects=None, + ) + + concatenated_stats = [np.concatenate(items, 0) for items in zip(*stats)] + recall_scores, recall_per_class, unique_classes = ( + self._compute_recall_for_classes(*concatenated_stats) + ) + + return RecallResult( + metric_target=self._metric_target, + averaging_method=self.averaging_method, + recall_scores=recall_scores, + recall_per_class=recall_per_class, + iou_thresholds=iou_thresholds, + matched_classes=unique_classes, + small_objects=None, + medium_objects=None, + large_objects=None, + ) + + def _compute_recall_for_classes( + self, + matches: np.ndarray, + prediction_confidence: np.ndarray, + prediction_class_ids: np.ndarray, + true_class_ids: np.ndarray, + ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]: + sorted_indices = np.argsort(-prediction_confidence) + matches = matches[sorted_indices] + prediction_class_ids = prediction_class_ids[sorted_indices] + unique_classes, class_counts = np.unique(true_class_ids, return_counts=True) + + # Shape: PxTh,P,C,C -> CxThx3 + confusion_matrix = self._compute_confusion_matrix( + matches, prediction_class_ids, unique_classes, class_counts + ) + + # Shape: CxThx3 
-> CxTh + recall_per_class = self._compute_recall(confusion_matrix) + + # Shape: CxTh -> Th + if self.averaging_method == AveragingMethod.MACRO: + recall_scores = np.mean(recall_per_class, axis=0) + elif self.averaging_method == AveragingMethod.MICRO: + confusion_matrix_merged = confusion_matrix.sum(0) + recall_scores = self._compute_recall(confusion_matrix_merged) + elif self.averaging_method == AveragingMethod.WEIGHTED: + class_counts = class_counts.astype(np.float32) + recall_scores = np.average(recall_per_class, axis=0, weights=class_counts) + + return recall_scores, recall_per_class, unique_classes + + @staticmethod + def _match_detection_batch( + predictions_classes: np.ndarray, + target_classes: np.ndarray, + iou: np.ndarray, + iou_thresholds: np.ndarray, + ) -> np.ndarray: + num_predictions, num_iou_levels = ( + predictions_classes.shape[0], + iou_thresholds.shape[0], + ) + correct = np.zeros((num_predictions, num_iou_levels), dtype=bool) + correct_class = target_classes[:, None] == predictions_classes + + for i, iou_level in enumerate(iou_thresholds): + matched_indices = np.where((iou >= iou_level) & correct_class) + + if matched_indices[0].shape[0]: + combined_indices = np.stack(matched_indices, axis=1) + iou_values = iou[matched_indices][:, None] + matches = np.hstack([combined_indices, iou_values]) + + if matched_indices[0].shape[0] > 1: + matches = matches[matches[:, 2].argsort()[::-1]] + matches = matches[np.unique(matches[:, 1], return_index=True)[1]] + matches = matches[np.unique(matches[:, 0], return_index=True)[1]] + + correct[matches[:, 1].astype(int), i] = True + + return correct + + @staticmethod + def _compute_confusion_matrix( + sorted_matches: np.ndarray, + sorted_prediction_class_ids: np.ndarray, + unique_classes: np.ndarray, + class_counts: np.ndarray, + ) -> np.ndarray: + """ + Compute the confusion matrix for each class and IoU threshold. + + Assumes the matches and prediction_class_ids are sorted by confidence + in descending order. + + Arguments: + sorted_matches: np.ndarray, bool, shape (P, Th), that is True + if the prediction is a true positive at the given IoU threshold. + sorted_prediction_class_ids: np.ndarray, int, shape (P,), containing + the class id for each prediction. + unique_classes: np.ndarray, int, shape (C,), containing the unique + class ids. + class_counts: np.ndarray, int, shape (C,), containing the number + of true instances for each class. + + Returns: + np.ndarray, shape (C, Th, 3), containing the true positives, false + positives, and false negatives for each class and IoU threshold. 
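+
+        Example (illustrative): for a single class with 3 true instances and
+            sorted matches `[True, False, True]` at one IoU threshold, TP=2,
+            FP=1 and FN=1, so recall at that threshold is 2/3.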
+ """ + + num_thresholds = sorted_matches.shape[1] + num_classes = unique_classes.shape[0] + + confusion_matrix = np.zeros((num_classes, num_thresholds, 3)) + for class_idx, class_id in enumerate(unique_classes): + is_class = sorted_prediction_class_ids == class_id + num_true = class_counts[class_idx] + num_predictions = is_class.sum() + + if num_predictions == 0: + true_positives = np.zeros(num_thresholds) + false_positives = np.zeros(num_thresholds) + false_negatives = np.full(num_thresholds, num_true) + elif num_true == 0: + true_positives = np.zeros(num_thresholds) + false_positives = np.full(num_thresholds, num_predictions) + false_negatives = np.zeros(num_thresholds) + else: + true_positives = sorted_matches[is_class].sum(0) + false_positives = (1 - sorted_matches[is_class]).sum(0) + false_negatives = num_true - true_positives + confusion_matrix[class_idx] = np.stack( + [true_positives, false_positives, false_negatives], axis=1 + ) + + return confusion_matrix + + @staticmethod + def _compute_recall(confusion_matrix: np.ndarray) -> np.ndarray: + """ + Broadcastable function, computing the recall from the confusion matrix. + + Arguments: + confusion_matrix: np.ndarray, shape (N, ..., 3), where the last dimension + contains the true positives, false positives, and false negatives. + + Returns: + np.ndarray, shape (N, ...), containing the recall for each element. + """ + if not confusion_matrix.shape[-1] == 3: + raise ValueError( + f"Confusion matrix must have shape (..., 3), got " + f"{confusion_matrix.shape}" + ) + true_positives = confusion_matrix[..., 0] + false_negatives = confusion_matrix[..., 2] + + denominator = true_positives + false_negatives + recall = np.where(denominator == 0, 0, true_positives / denominator) + + return recall + + def _detections_content(self, detections: Detections) -> np.ndarray: + """Return boxes, masks or oriented bounding boxes from detections.""" + if self._metric_target == MetricTarget.BOXES: + return detections.xyxy + if self._metric_target == MetricTarget.MASKS: + return ( + detections.mask + if detections.mask is not None + else self._make_empty_content() + ) + if self._metric_target == MetricTarget.ORIENTED_BOUNDING_BOXES: + obb = detections.data.get(ORIENTED_BOX_COORDINATES) + if obb is not None and len(obb) > 0: + return np.array(obb, dtype=np.float32) + return self._make_empty_content() + raise ValueError(f"Invalid metric target: {self._metric_target}") + + def _make_empty_content(self) -> np.ndarray: + if self._metric_target == MetricTarget.BOXES: + return np.empty((0, 4), dtype=np.float32) + if self._metric_target == MetricTarget.MASKS: + return np.empty((0, 0, 0), dtype=bool) + if self._metric_target == MetricTarget.ORIENTED_BOUNDING_BOXES: + return np.empty((0, 4, 2), dtype=np.float32) + raise ValueError(f"Invalid metric target: {self._metric_target}") + + def _filter_detections_by_size( + self, detections: Detections, size_category: ObjectSizeCategory + ) -> Detections: + """Return a copy of detections with contents filtered by object size.""" + new_detections = deepcopy(detections) + if detections.is_empty() or size_category == ObjectSizeCategory.ANY: + return new_detections + + sizes = get_detection_size_category(new_detections, self._metric_target) + size_mask = sizes == size_category.value + + new_detections.xyxy = new_detections.xyxy[size_mask] + if new_detections.mask is not None: + new_detections.mask = new_detections.mask[size_mask] + if new_detections.class_id is not None: + new_detections.class_id = 
new_detections.class_id[size_mask] + if new_detections.confidence is not None: + new_detections.confidence = new_detections.confidence[size_mask] + if new_detections.tracker_id is not None: + new_detections.tracker_id = new_detections.tracker_id[size_mask] + if new_detections.data is not None: + for key, value in new_detections.data.items(): + new_detections.data[key] = np.array(value)[size_mask] + + return new_detections + + def _filter_predictions_and_targets_by_size( + self, + predictions_list: List[Detections], + targets_list: List[Detections], + size_category: ObjectSizeCategory, + ) -> Tuple[List[Detections], List[Detections]]: + """ + Filter predictions and targets by object size category. + """ + new_predictions_list = [] + new_targets_list = [] + for predictions, targets in zip(predictions_list, targets_list): + new_predictions_list.append( + self._filter_detections_by_size(predictions, size_category) + ) + new_targets_list.append( + self._filter_detections_by_size(targets, size_category) + ) + return new_predictions_list, new_targets_list + + +@dataclass +class RecallResult: + """ + The results of the recall metric calculation. + + Defaults to `0` if no detections or targets were provided. + + Attributes: + metric_target (MetricTarget): the type of data used for the metric - + boxes, masks or oriented bounding boxes. + averaging_method (AveragingMethod): the averaging method used to compute the + recall. Determines how the recall is aggregated across classes. + recall_at_50 (float): the recall at IoU threshold of `0.5`. + recall_at_75 (float): the recall at IoU threshold of `0.75`. + recall_scores (np.ndarray): the recall scores at each IoU threshold. + Shape: `(num_iou_thresholds,)` + recall_per_class (np.ndarray): the recall scores per class and IoU threshold. + Shape: `(num_target_classes, num_iou_thresholds)` + iou_thresholds (np.ndarray): the IoU thresholds used in the calculations. + matched_classes (np.ndarray): the class IDs of all matched classes. + Corresponds to the rows of `recall_per_class`. + small_objects (Optional[RecallResult]): the Recall metric results + for small objects (area < 32²). + medium_objects (Optional[RecallResult]): the Recall metric results + for medium objects (32² ≤ area < 96²). + large_objects (Optional[RecallResult]): the Recall metric results + for large objects (area ≥ 96²). + """ + + metric_target: MetricTarget + averaging_method: AveragingMethod + + @property + def recall_at_50(self) -> float: + return self.recall_scores[0] + + @property + def recall_at_75(self) -> float: + return self.recall_scores[5] + + recall_scores: np.ndarray + recall_per_class: np.ndarray + iou_thresholds: np.ndarray + matched_classes: np.ndarray + + small_objects: Optional[RecallResult] + medium_objects: Optional[RecallResult] + large_objects: Optional[RecallResult] + + def __str__(self) -> str: + """ + Format as a pretty string. + + Example: + ```python + print(recall_result) + # RecallResult: + # Metric target: MetricTarget.BOXES + # Averaging method: AveragingMethod.WEIGHTED + # R @ 50: 0.7615 + # R @ 75: 0.7462 + # R @ thresh: [0.76151 0.76011 0.76011 0.75732 ...] + # IoU thresh: [0.5 0.55 0.6 ...] + # Recall per class: + # 0: [0.78571 0.78571 0.78571 ...] + # ... + # Small objects: ... + # Medium objects: ... + # Large objects: ... 
+ ``` + """ + out_str = ( + f"{self.__class__.__name__}:\n" + f"Metric target: {self.metric_target}\n" + f"Averaging method: {self.averaging_method}\n" + f"R @ 50: {self.recall_at_50:.4f}\n" + f"R @ 75: {self.recall_at_75:.4f}\n" + f"R @ thresh: {self.recall_scores}\n" + f"IoU thresh: {self.iou_thresholds}\n" + f"Recall per class:\n" + ) + if self.recall_per_class.size == 0: + out_str += " No results\n" + for class_id, recall_of_class in zip( + self.matched_classes, self.recall_per_class + ): + out_str += f" {class_id}: {recall_of_class}\n" + + indent = " " + if self.small_objects is not None: + indented = indent + str(self.small_objects).replace("\n", f"\n{indent}") + out_str += f"\nSmall objects:\n{indented}" + if self.medium_objects is not None: + indented = indent + str(self.medium_objects).replace("\n", f"\n{indent}") + out_str += f"\nMedium objects:\n{indented}" + if self.large_objects is not None: + indented = indent + str(self.large_objects).replace("\n", f"\n{indent}") + out_str += f"\nLarge objects:\n{indented}" + + return out_str + + def to_pandas(self) -> "pd.DataFrame": + """ + Convert the result to a pandas DataFrame. + + Returns: + (pd.DataFrame): The result as a DataFrame. + """ + ensure_pandas_installed() + import pandas as pd + + pandas_data = { + "R@50": self.recall_at_50, + "R@75": self.recall_at_75, + } + + if self.small_objects is not None: + small_objects_df = self.small_objects.to_pandas() + for key, value in small_objects_df.items(): + pandas_data[f"small_objects_{key}"] = value + if self.medium_objects is not None: + medium_objects_df = self.medium_objects.to_pandas() + for key, value in medium_objects_df.items(): + pandas_data[f"medium_objects_{key}"] = value + if self.large_objects is not None: + large_objects_df = self.large_objects.to_pandas() + for key, value in large_objects_df.items(): + pandas_data[f"large_objects_{key}"] = value + + return pd.DataFrame(pandas_data, index=[0]) + + def plot(self): + """ + Plot the recall results. 
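+
+ Example (a sketch assuming the `update`/`compute` pattern shared by the
+ other metrics in `supervision.metrics`; `predictions` and `targets` are
+ user-supplied `sv.Detections`):
+
+ ```python
+ from supervision.metrics import Recall
+
+ recall_metric = Recall()
+ recall_result = recall_metric.update(predictions, targets).compute()
+ recall_result.plot()
+ ```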
+ + ![example_plot](\ + https://media.roboflow.com/supervision-docs/metrics/recall_plot_example.png\ + ){ align=center width="800" } + """ + + labels = ["Recall@50", "Recall@75"] + values = [self.recall_at_50, self.recall_at_75] + colors = [LEGACY_COLOR_PALETTE[0]] * 2 + + if self.small_objects is not None: + small_objects = self.small_objects + labels += ["Small: R@50", "Small: R@75"] + values += [small_objects.recall_at_50, small_objects.recall_at_75] + colors += [LEGACY_COLOR_PALETTE[3]] * 2 + + if self.medium_objects is not None: + medium_objects = self.medium_objects + labels += ["Medium: R@50", "Medium: R@75"] + values += [medium_objects.recall_at_50, medium_objects.recall_at_75] + colors += [LEGACY_COLOR_PALETTE[2]] * 2 + + if self.large_objects is not None: + large_objects = self.large_objects + labels += ["Large: R@50", "Large: R@75"] + values += [large_objects.recall_at_50, large_objects.recall_at_75] + colors += [LEGACY_COLOR_PALETTE[4]] * 2 + + plt.rcParams["font.family"] = "monospace" + + _, ax = plt.subplots(figsize=(10, 6)) + ax.set_ylim(0, 1) + ax.set_ylabel("Value", fontweight="bold") + title = ( + f"Recall, by Object Size" + f"\n(target: {self.metric_target.value}," + f" averaging: {self.averaging_method.value})" + ) + ax.set_title(title, fontweight="bold") + + x_positions = range(len(labels)) + bars = ax.bar(x_positions, values, color=colors, align="center") + + ax.set_xticks(x_positions) + ax.set_xticklabels(labels, rotation=45, ha="right") + + for bar in bars: + y_value = bar.get_height() + ax.text( + bar.get_x() + bar.get_width() / 2, + y_value + 0.02, + f"{y_value:.2f}", + ha="center", + va="bottom", + ) + + plt.rcParams["font.family"] = "sans-serif" + + plt.tight_layout() + plt.show() diff --git a/supervision/py.typed b/supervision/py.typed new file mode 100644 index 000000000..e69de29bb diff --git a/supervision/tracker/byte_tracker/basetrack.py b/supervision/tracker/byte_tracker/basetrack.py deleted file mode 100644 index 806f75384..000000000 --- a/supervision/tracker/byte_tracker/basetrack.py +++ /dev/null @@ -1,63 +0,0 @@ -from collections import OrderedDict -from enum import Enum - -import numpy as np - - -class TrackState(Enum): - New = 0 - Tracked = 1 - Lost = 2 - Removed = 3 - - -class BaseTrack: - _count = 0 - - def __init__(self): - self.track_id = 0 - self.is_activated = False - self.state = TrackState.New - - self.history = OrderedDict() - self.features = [] - self.curr_feature = None - self.score = 0 - self.start_frame = 0 - self.frame_id = 0 - self.time_since_update = 0 - - # multi-camera - self.location = (np.inf, np.inf) - - @property - def end_frame(self) -> int: - return self.frame_id - - @staticmethod - def next_id() -> int: - BaseTrack._count += 1 - return BaseTrack._count - - @staticmethod - def reset_counter(): - BaseTrack._count = 0 - BaseTrack.track_id = 0 - BaseTrack.start_frame = 0 - BaseTrack.frame_id = 0 - BaseTrack.time_since_update = 0 - - def activate(self, *args): - raise NotImplementedError - - def predict(self): - raise NotImplementedError - - def update(self, *args, **kwargs): - raise NotImplementedError - - def mark_lost(self): - self.state = TrackState.Lost - - def mark_removed(self): - self.state = TrackState.Removed diff --git a/supervision/tracker/byte_tracker/core.py b/supervision/tracker/byte_tracker/core.py index 89e1e2f2c..cb46af733 100644 --- a/supervision/tracker/byte_tracker/core.py +++ b/supervision/tracker/byte_tracker/core.py @@ -5,186 +5,9 @@ from supervision.detection.core import Detections from 
supervision.detection.utils import box_iou_batch from supervision.tracker.byte_tracker import matching -from supervision.tracker.byte_tracker.basetrack import BaseTrack, TrackState from supervision.tracker.byte_tracker.kalman_filter import KalmanFilter - - -class STrack(BaseTrack): - shared_kalman = KalmanFilter() - _external_count = 0 - - def __init__(self, tlwh, score, class_ids, minimum_consecutive_frames): - # wait activate - self._tlwh = np.asarray(tlwh, dtype=np.float32) - self.kalman_filter = None - self.mean, self.covariance = None, None - self.is_activated = False - - self.score = score - self.class_ids = class_ids - self.tracklet_len = 0 - - self.external_track_id = -1 - - self.minimum_consecutive_frames = minimum_consecutive_frames - - def predict(self): - mean_state = self.mean.copy() - if self.state != TrackState.Tracked: - mean_state[7] = 0 - self.mean, self.covariance = self.kalman_filter.predict( - mean_state, self.covariance - ) - - @staticmethod - def multi_predict(stracks): - if len(stracks) > 0: - multi_mean = [] - multi_covariance = [] - for i, st in enumerate(stracks): - multi_mean.append(st.mean.copy()) - multi_covariance.append(st.covariance) - if st.state != TrackState.Tracked: - multi_mean[i][7] = 0 - - multi_mean, multi_covariance = STrack.shared_kalman.multi_predict( - np.asarray(multi_mean), np.asarray(multi_covariance) - ) - for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)): - stracks[i].mean = mean - stracks[i].covariance = cov - - def activate(self, kalman_filter, frame_id): - """Start a new tracklet""" - self.kalman_filter = kalman_filter - self.internal_track_id = self.next_id() - self.mean, self.covariance = self.kalman_filter.initiate( - self.tlwh_to_xyah(self._tlwh) - ) - - self.tracklet_len = 0 - self.state = TrackState.Tracked - if frame_id == 1: - self.is_activated = True - - if self.minimum_consecutive_frames == 1: - self.external_track_id = self.next_external_id() - - self.frame_id = frame_id - self.start_frame = frame_id - - def re_activate(self, new_track, frame_id, new_id=False): - self.mean, self.covariance = self.kalman_filter.update( - self.mean, self.covariance, self.tlwh_to_xyah(new_track.tlwh) - ) - self.tracklet_len = 0 - self.state = TrackState.Tracked - - self.frame_id = frame_id - if new_id: - self.internal_track_id = self.next_id() - self.score = new_track.score - - def update(self, new_track, frame_id): - """ - Update a matched track - :type new_track: STrack - :type frame_id: int - :type update_feature: bool - :return: - """ - self.frame_id = frame_id - self.tracklet_len += 1 - - new_tlwh = new_track.tlwh - self.mean, self.covariance = self.kalman_filter.update( - self.mean, self.covariance, self.tlwh_to_xyah(new_tlwh) - ) - self.state = TrackState.Tracked - if self.tracklet_len == self.minimum_consecutive_frames: - self.is_activated = True - if self.external_track_id == -1: - self.external_track_id = self.next_external_id() - - self.score = new_track.score - - @property - def tlwh(self): - """Get current position in bounding box format `(top left x, top left y, - width, height)`. - """ - if self.mean is None: - return self._tlwh.copy() - ret = self.mean[:4].copy() - ret[2] *= ret[3] - ret[:2] -= ret[2:] / 2 - return ret - - @property - def tlbr(self): - """Convert bounding box to format `(min x, min y, max x, max y)`, i.e., - `(top left, bottom right)`. 
- """ - ret = self.tlwh.copy() - ret[2:] += ret[:2] - return ret - - @staticmethod - def tlwh_to_xyah(tlwh): - """Convert bounding box to format `(center x, center y, aspect ratio, - height)`, where the aspect ratio is `width / height`. - """ - ret = np.asarray(tlwh).copy() - ret[:2] += ret[2:] / 2 - ret[2] /= ret[3] - return ret - - def to_xyah(self): - return self.tlwh_to_xyah(self.tlwh) - - @staticmethod - def next_external_id(): - STrack._external_count += 1 - return STrack._external_count - - @staticmethod - def reset_external_counter(): - STrack._external_count = 0 - - @staticmethod - def tlbr_to_tlwh(tlbr): - ret = np.asarray(tlbr).copy() - ret[2:] -= ret[:2] - return ret - - @staticmethod - def tlwh_to_tlbr(tlwh): - ret = np.asarray(tlwh).copy() - ret[2:] += ret[:2] - return ret - - def __repr__(self): - return "OT_{}_({}-{})".format( - self.internal_track_id, self.start_frame, self.end_frame - ) - - -def detections2boxes(detections: Detections) -> np.ndarray: - """ - Convert Supervision Detections to numpy tensors for further computation. - Args: - detections (Detections): Detections/Targets in the format of sv.Detections. - Returns: - (np.ndarray): Detections as numpy tensors as in - `(x_min, y_min, x_max, y_max, confidence, class_id)` order. - """ - return np.hstack( - ( - detections.xyxy, - detections.confidence[:, np.newaxis], - detections.class_id[:, np.newaxis], - ) - ) +from supervision.tracker.byte_tracker.single_object_track import STrack, TrackState +from supervision.tracker.byte_tracker.utils import IdCounter class ByteTrack: @@ -230,11 +53,17 @@ def __init__( self.max_time_lost = int(frame_rate / 30.0 * lost_track_buffer) self.minimum_consecutive_frames = minimum_consecutive_frames self.kalman_filter = KalmanFilter() + self.shared_kalman = KalmanFilter() self.tracked_tracks: List[STrack] = [] self.lost_tracks: List[STrack] = [] self.removed_tracks: List[STrack] = [] + # Warning, possible bug: If you also set internal_id to start at 1, + # all traces will be connected across objects. + self.internal_id_counter = IdCounter() + self.external_id_counter = IdCounter(start_id=1) + def update_with_detections(self, detections: Detections) -> Detections: """ Updates the tracker with the provided detections and returns the updated @@ -274,8 +103,12 @@ def callback(frame: np.ndarray, index: int) -> np.ndarray: ) ``` """ - - tensors = detections2boxes(detections=detections) + tensors = np.hstack( + ( + detections.xyxy, + detections.confidence[:, np.newaxis], + ) + ) tracks = self.update_with_tensors(tensors=tensors) if len(tracks) > 0: @@ -301,7 +134,7 @@ def callback(frame: np.ndarray, index: int) -> np.ndarray: return detections - def reset(self): + def reset(self) -> None: """ Resets the internal state of the ByteTrack tracker. @@ -311,11 +144,11 @@ def reset(self): ensuring the tracker starts with a clean state for each new video. 
""" self.frame_id = 0 - self.tracked_tracks: List[STrack] = [] - self.lost_tracks: List[STrack] = [] - self.removed_tracks: List[STrack] = [] - BaseTrack.reset_counter() - STrack.reset_external_counter() + self.internal_id_counter.reset() + self.external_id_counter.reset() + self.tracked_tracks = [] + self.lost_tracks = [] + self.removed_tracks = [] def update_with_tensors(self, tensors: np.ndarray) -> List[STrack]: """ @@ -333,7 +166,6 @@ def update_with_tensors(self, tensors: np.ndarray) -> List[STrack]: lost_stracks = [] removed_stracks = [] - class_ids = tensors[:, 5] scores = tensors[:, 4] bboxes = tensors[:, :4] @@ -347,14 +179,18 @@ def update_with_tensors(self, tensors: np.ndarray) -> List[STrack]: scores_keep = scores[remain_inds] scores_second = scores[inds_second] - class_ids_keep = class_ids[remain_inds] - class_ids_second = class_ids[inds_second] - if len(dets) > 0: """Detections""" detections = [ - STrack(STrack.tlbr_to_tlwh(tlbr), s, c, self.minimum_consecutive_frames) - for (tlbr, s, c) in zip(dets, scores_keep, class_ids_keep) + STrack( + STrack.tlbr_to_tlwh(tlbr), + score_keep, + self.minimum_consecutive_frames, + self.shared_kalman, + self.internal_id_counter, + self.external_id_counter, + ) + for (tlbr, score_keep) in zip(dets, scores_keep) ] else: detections = [] @@ -372,7 +208,7 @@ def update_with_tensors(self, tensors: np.ndarray) -> List[STrack]: """ Step 2: First association, with high score detection boxes""" strack_pool = joint_tracks(tracked_stracks, self.lost_tracks) # Predict the current location with KF - STrack.multi_predict(strack_pool) + STrack.multi_predict(strack_pool, self.shared_kalman) dists = matching.iou_distance(strack_pool, detections) dists = matching.fuse_score(dists, detections) @@ -387,7 +223,7 @@ def update_with_tensors(self, tensors: np.ndarray) -> List[STrack]: track.update(detections[idet], self.frame_id) activated_starcks.append(track) else: - track.re_activate(det, self.frame_id, new_id=False) + track.re_activate(det, self.frame_id) refind_stracks.append(track) """ Step 3: Second association, with low score detection boxes""" @@ -395,8 +231,15 @@ def update_with_tensors(self, tensors: np.ndarray) -> List[STrack]: if len(dets_second) > 0: """Detections""" detections_second = [ - STrack(STrack.tlbr_to_tlwh(tlbr), s, c, self.minimum_consecutive_frames) - for (tlbr, s, c) in zip(dets_second, scores_second, class_ids_second) + STrack( + STrack.tlbr_to_tlwh(tlbr), + score_second, + self.minimum_consecutive_frames, + self.shared_kalman, + self.internal_id_counter, + self.external_id_counter, + ) + for (tlbr, score_second) in zip(dets_second, scores_second) ] else: detections_second = [] @@ -416,13 +259,13 @@ def update_with_tensors(self, tensors: np.ndarray) -> List[STrack]: track.update(det, self.frame_id) activated_starcks.append(track) else: - track.re_activate(det, self.frame_id, new_id=False) + track.re_activate(det, self.frame_id) refind_stracks.append(track) for it in u_track: track = r_tracked_stracks[it] if not track.state == TrackState.Lost: - track.mark_lost() + track.state = TrackState.Lost lost_stracks.append(track) """Deal with unconfirmed tracks, usually tracks with only one beginning frame""" @@ -438,7 +281,7 @@ def update_with_tensors(self, tensors: np.ndarray) -> List[STrack]: activated_starcks.append(unconfirmed[itracked]) for it in u_unconfirmed: track = unconfirmed[it] - track.mark_removed() + track.state = TrackState.Removed removed_stracks.append(track) """ Step 4: Init new stracks""" @@ -450,8 +293,8 @@ def 
update_with_tensors(self, tensors: np.ndarray) -> List[STrack]: activated_starcks.append(track) """ Step 5: Update state""" for track in self.lost_tracks: - if self.frame_id - track.end_frame > self.max_time_lost: - track.mark_removed() + if self.frame_id - track.frame_id > self.max_time_lost: + track.state = TrackState.Removed removed_stracks.append(track) self.tracked_tracks = [ @@ -497,7 +340,7 @@ def joint_tracks( return result -def sub_tracks(track_list_a: List, track_list_b: List) -> List[int]: +def sub_tracks(track_list_a: List[STrack], track_list_b: List[STrack]) -> List[int]: """ Returns a list of tracks from track_list_a after removing any tracks that share the same internal_track_id with tracks in track_list_b. @@ -518,7 +361,9 @@ def sub_tracks(track_list_a: List, track_list_b: List) -> List[int]: return list(tracks.values()) -def remove_duplicate_tracks(tracks_a: List, tracks_b: List) -> Tuple[List, List]: +def remove_duplicate_tracks( + tracks_a: List[STrack], tracks_b: List[STrack] +) -> Tuple[List[STrack], List[STrack]]: pairwise_distance = matching.iou_distance(tracks_a, tracks_b) matching_pairs = np.where(pairwise_distance < 0.15) diff --git a/supervision/tracker/byte_tracker/matching.py b/supervision/tracker/byte_tracker/matching.py index 24abe224c..eb774d4c4 100644 --- a/supervision/tracker/byte_tracker/matching.py +++ b/supervision/tracker/byte_tracker/matching.py @@ -1,10 +1,15 @@ -from typing import List, Tuple +from __future__ import annotations + +from typing import TYPE_CHECKING, List, Tuple import numpy as np from scipy.optimize import linear_sum_assignment from supervision.detection.utils import box_iou_batch +if TYPE_CHECKING: + from supervision.tracker.byte_tracker.core import STrack + def indices_to_matches( cost_matrix: np.ndarray, indices: np.ndarray, thresh: float @@ -20,7 +25,7 @@ def indices_to_matches( def linear_assignment( cost_matrix: np.ndarray, thresh: float -) -> [np.ndarray, Tuple[int], Tuple[int, int]]: +) -> Tuple[np.ndarray, Tuple[int], Tuple[int, int]]: if cost_matrix.size == 0: return ( np.empty((0, 2), dtype=int), @@ -35,7 +40,7 @@ def linear_assignment( return indices_to_matches(cost_matrix, indices, thresh) -def iou_distance(atracks: List, btracks: List) -> np.ndarray: +def iou_distance(atracks: List[STrack], btracks: List[STrack]) -> np.ndarray: if (len(atracks) > 0 and isinstance(atracks[0], np.ndarray)) or ( len(btracks) > 0 and isinstance(btracks[0], np.ndarray) ): @@ -53,11 +58,11 @@ def iou_distance(atracks: List, btracks: List) -> np.ndarray: return cost_matrix -def fuse_score(cost_matrix: np.ndarray, detections: List) -> np.ndarray: +def fuse_score(cost_matrix: np.ndarray, stracks: List[STrack]) -> np.ndarray: if cost_matrix.size == 0: return cost_matrix iou_sim = 1 - cost_matrix - det_scores = np.array([det.score for det in detections]) + det_scores = np.array([strack.score for strack in stracks]) det_scores = np.expand_dims(det_scores, axis=0).repeat(cost_matrix.shape[0], axis=0) fuse_sim = iou_sim * det_scores fuse_cost = 1 - fuse_sim diff --git a/supervision/tracker/byte_tracker/single_object_track.py b/supervision/tracker/byte_tracker/single_object_track.py new file mode 100644 index 000000000..3b9bfdf2d --- /dev/null +++ b/supervision/tracker/byte_tracker/single_object_track.py @@ -0,0 +1,178 @@ +from __future__ import annotations + +from enum import Enum +from typing import List + +import numpy as np +import numpy.typing as npt + +from supervision.tracker.byte_tracker.kalman_filter import KalmanFilter +from 
supervision.tracker.byte_tracker.utils import IdCounter + + + class TrackState(Enum): + New = 0 + Tracked = 1 + Lost = 2 + Removed = 3 + + + class STrack: + def __init__( + self, + tlwh: npt.NDArray[np.float32], + score: float, + minimum_consecutive_frames: int, + shared_kalman: KalmanFilter, + internal_id_counter: IdCounter, + external_id_counter: IdCounter, + ): + self.state = TrackState.New + self.is_activated = False + self.start_frame = 0 + self.frame_id = 0 + + self._tlwh = np.asarray(tlwh, dtype=np.float32) + self.kalman_filter = None + self.shared_kalman = shared_kalman + self.mean, self.covariance = None, None + + self.score = score + self.tracklet_len = 0 + + self.minimum_consecutive_frames = minimum_consecutive_frames + + self.internal_id_counter = internal_id_counter + self.external_id_counter = external_id_counter + self.internal_track_id = self.internal_id_counter.NO_ID + self.external_track_id = self.external_id_counter.NO_ID + + def predict(self) -> None: + mean_state = self.mean.copy() + if self.state != TrackState.Tracked: + mean_state[7] = 0 + self.mean, self.covariance = self.kalman_filter.predict( + mean_state, self.covariance + ) + + @staticmethod + def multi_predict(stracks: List[STrack], shared_kalman: KalmanFilter) -> None: + if len(stracks) > 0: + multi_mean = [] + multi_covariance = [] + for i, st in enumerate(stracks): + multi_mean.append(st.mean.copy()) + multi_covariance.append(st.covariance) + if st.state != TrackState.Tracked: + multi_mean[i][7] = 0 + + multi_mean, multi_covariance = shared_kalman.multi_predict( + np.asarray(multi_mean), np.asarray(multi_covariance) + ) + for i, (mean, cov) in enumerate(zip(multi_mean, multi_covariance)): + stracks[i].mean = mean + stracks[i].covariance = cov + + def activate(self, kalman_filter: KalmanFilter, frame_id: int) -> None: + """Start a new tracklet""" + self.kalman_filter = kalman_filter + self.internal_track_id = self.internal_id_counter.new_id() + self.mean, self.covariance = self.kalman_filter.initiate( + self.tlwh_to_xyah(self._tlwh) + ) + + self.tracklet_len = 0 + self.state = TrackState.Tracked + if frame_id == 1: + self.is_activated = True + + if self.minimum_consecutive_frames == 1: + self.external_track_id = self.external_id_counter.new_id() + + self.frame_id = frame_id + self.start_frame = frame_id + + def re_activate(self, new_track: STrack, frame_id: int) -> None: + self.mean, self.covariance = self.kalman_filter.update( + self.mean, self.covariance, self.tlwh_to_xyah(new_track.tlwh) + ) + self.tracklet_len = 0 + self.state = TrackState.Tracked + + self.frame_id = frame_id + self.score = new_track.score + + def update(self, new_track: STrack, frame_id: int) -> None: + """ + Update a matched track. + + :type new_track: STrack + :type frame_id: int + """ + self.frame_id = frame_id + self.tracklet_len += 1 + + new_tlwh = new_track.tlwh + self.mean, self.covariance = self.kalman_filter.update( + self.mean, self.covariance, self.tlwh_to_xyah(new_tlwh) + ) + self.state = TrackState.Tracked + if self.tracklet_len == self.minimum_consecutive_frames: + self.is_activated = True + if self.external_track_id == self.external_id_counter.NO_ID: + self.external_track_id = self.external_id_counter.new_id() + + self.score = new_track.score + + @property + def tlwh(self) -> npt.NDArray[np.float32]: + """Get current position in bounding box format `(top left x, top left y, + width, height)`. 
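+
+ Example (a sketch of the Kalman state convention assumed above):
+ `mean[:4]` stores `(center x, center y, aspect ratio, height)`, so a
+ state of `(50, 50, 0.5, 20)` maps to the 10x20 box whose top-left
+ corner is `(45, 40)`.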
+ """ + if self.mean is None: + return self._tlwh.copy() + ret = self.mean[:4].copy() + ret[2] *= ret[3] + ret[:2] -= ret[2:] / 2 + return ret + + @property + def tlbr(self) -> npt.NDArray[np.float32]: + """Convert bounding box to format `(min x, min y, max x, max y)`, i.e., + `(top left, bottom right)`. + """ + ret = self.tlwh.copy() + ret[2:] += ret[:2] + return ret + + @staticmethod + def tlwh_to_xyah(tlwh) -> npt.NDArray[np.float32]: + """Convert bounding box to format `(center x, center y, aspect ratio, + height)`, where the aspect ratio is `width / height`. + """ + ret = np.asarray(tlwh).copy() + ret[:2] += ret[2:] / 2 + ret[2] /= ret[3] + return ret + + def to_xyah(self) -> npt.NDArray[np.float32]: + return self.tlwh_to_xyah(self.tlwh) + + @staticmethod + def tlbr_to_tlwh(tlbr) -> npt.NDArray[np.float32]: + ret = np.asarray(tlbr).copy() + ret[2:] -= ret[:2] + return ret + + @staticmethod + def tlwh_to_tlbr(tlwh) -> npt.NDArray[np.float32]: + ret = np.asarray(tlwh).copy() + ret[2:] += ret[:2] + return ret + + def __repr__(self) -> str: + return "OT_{}_({}-{})".format( + self.internal_track_id, self.start_frame, self.frame_id + ) diff --git a/supervision/tracker/byte_tracker/utils.py b/supervision/tracker/byte_tracker/utils.py new file mode 100644 index 000000000..cd2a1036b --- /dev/null +++ b/supervision/tracker/byte_tracker/utils.py @@ -0,0 +1,18 @@ +class IdCounter: + def __init__(self, start_id: int = 0): + self.start_id = start_id + if self.start_id <= self.NO_ID: + raise ValueError(f"start_id must be greater than {self.NO_ID}") + self.reset() + + def reset(self) -> None: + self._id = self.start_id + + def new_id(self) -> int: + returned_id = self._id + self._id += 1 + return returned_id + + @property + def NO_ID(self) -> int: + return -1 diff --git a/test/detection/test_core.py b/test/detection/test_core.py index b857250e0..61796bef2 100644 --- a/test/detection/test_core.py +++ b/test/detection/test_core.py @@ -106,6 +106,26 @@ "never_seen_key": [9], }, ) +TEST_DET_WITH_METADATA = Detections( + xyxy=np.array([[10, 10, 20, 20]]), + class_id=np.array([1]), + metadata={"source": "camera1"}, +) + +TEST_DET_WITH_METADATA_2 = Detections( + xyxy=np.array([[30, 30, 40, 40]]), + class_id=np.array([2]), + metadata={"source": "camera1"}, +) +TEST_DET_NO_METADATA = Detections( + xyxy=np.array([[10, 10, 20, 20]]), + class_id=np.array([1]), +) +TEST_DET_DIFFERENT_METADATA = Detections( + xyxy=np.array([[50, 50, 60, 60]]), + class_id=np.array([3]), + metadata={"source": "camera2"}, +) @pytest.mark.parametrize( @@ -258,6 +278,11 @@ def test_getitem( TEST_DET_1, DoesNotRaise(), ), # Single detection and empty-array fields + ( + [TEST_DET_ZERO_LENGTH, TEST_DET_ZERO_LENGTH], + TEST_DET_ZERO_LENGTH, + DoesNotRaise(), + ), # Zero-length fields across all Detections ( [ TEST_DET_1, @@ -287,12 +312,190 @@ def test_getitem( Detections.empty(), ], mock_detections( - xyxy=[[10, 10, 20, 20]], + xyxy=np.array([[10, 10, 20, 20]]), class_id=[1], mask=[np.zeros((4, 4), dtype=bool)], ), DoesNotRaise(), ), # Segmentation + Empty + # Metadata + ( + [ + Detections( + xyxy=np.array([[10, 10, 20, 20]]), + class_id=np.array([1]), + metadata={"source": "camera1"}, + ), + Detections.empty(), + ], + Detections( + xyxy=np.array([[10, 10, 20, 20]]), + class_id=np.array([1]), + metadata={"source": "camera1"}, + ), + DoesNotRaise(), + ), # Metadata merge with empty detections + ( + [ + Detections( + xyxy=np.array([[10, 10, 20, 20]]), + class_id=np.array([1]), + metadata={"source": "camera1"}, + ), + 
Detections(xyxy=np.array([[30, 30, 40, 40]]), class_id=np.array([2])), + ], + None, + pytest.raises(ValueError), + ), # Empty and non-empty metadata + ( + [ + Detections( + xyxy=np.array([[10, 10, 20, 20]]), + class_id=np.array([1]), + metadata={"source": "camera1"}, + ) + ], + Detections( + xyxy=np.array([[10, 10, 20, 20]]), + class_id=np.array([1]), + metadata={"source": "camera1"}, + ), + DoesNotRaise(), + ), # Single detection with metadata + ( + [ + Detections( + xyxy=np.array([[10, 10, 20, 20]]), + class_id=np.array([1]), + metadata={"source": "camera1"}, + ), + Detections( + xyxy=np.array([[30, 30, 40, 40]]), + class_id=np.array([2]), + metadata={"source": "camera1"}, + ), + ], + Detections( + xyxy=np.array([[10, 10, 20, 20], [30, 30, 40, 40]]), + class_id=np.array([1, 2]), + metadata={"source": "camera1"}, + ), + DoesNotRaise(), + ), # Multiple metadata entries with identical values + ( + [ + Detections( + xyxy=np.array([[10, 10, 20, 20]]), + class_id=np.array([1]), + metadata={"source": "camera1"}, + ), + Detections( + xyxy=np.array([[50, 50, 60, 60]]), + class_id=np.array([3]), + metadata={"source": "camera2"}, + ), + ], + None, + pytest.raises(ValueError), + ), # Different metadata values + ( + [ + Detections( + xyxy=np.array([[10, 10, 20, 20]]), + metadata={"source": "camera1", "resolution": "1080p"}, + ), + Detections( + xyxy=np.array([[30, 30, 40, 40]]), + metadata={"source": "camera1", "resolution": "1080p"}, + ), + ], + Detections( + xyxy=np.array([[10, 10, 20, 20], [30, 30, 40, 40]]), + metadata={"source": "camera1", "resolution": "1080p"}, + ), + DoesNotRaise(), + ), # Large metadata with multiple identical entries + ( + [ + Detections( + xyxy=np.array([[10, 10, 20, 20]]), metadata={"source": "camera1"} + ), + Detections( + xyxy=np.array([[30, 30, 40, 40]]), metadata={"source": ["camera1"]} + ), + ], + None, + pytest.raises(ValueError), + ), # Inconsistent types in metadata values + ( + [ + Detections( + xyxy=np.array([[10, 10, 20, 20]]), metadata={"source": "camera1"} + ), + Detections( + xyxy=np.array([[30, 30, 40, 40]]), metadata={"location": "indoor"} + ), + ], + None, + pytest.raises(ValueError), + ), # Metadata key mismatch + ( + [ + Detections( + xyxy=np.array([[10, 10, 20, 20]]), + metadata={ + "source": "camera1", + "settings": {"resolution": "1080p", "fps": 30}, + }, + ), + Detections( + xyxy=np.array([[30, 30, 40, 40]]), + metadata={ + "source": "camera1", + "settings": {"resolution": "1080p", "fps": 30}, + }, + ), + ], + Detections( + xyxy=np.array([[10, 10, 20, 20], [30, 30, 40, 40]]), + metadata={ + "source": "camera1", + "settings": {"resolution": "1080p", "fps": 30}, + }, + ), + DoesNotRaise(), + ), # multi-field metadata + ( + [ + Detections( + xyxy=np.array([[10, 10, 20, 20]]), + metadata={"calibration_matrix": np.array([[1, 0], [0, 1]])}, + ), + Detections( + xyxy=np.array([[30, 30, 40, 40]]), + metadata={"calibration_matrix": np.array([[1, 0], [0, 1]])}, + ), + ], + Detections( + xyxy=np.array([[10, 10, 20, 20], [30, 30, 40, 40]]), + metadata={"calibration_matrix": np.array([[1, 0], [0, 1]])}, + ), + DoesNotRaise(), + ), # Identical 2D numpy arrays in metadata + ( + [ + Detections( + xyxy=np.array([[10, 10, 20, 20]]), + metadata={"calibration_matrix": np.array([[1, 0], [0, 1]])}, + ), + Detections( + xyxy=np.array([[30, 30, 40, 40]]), + metadata={"calibration_matrix": np.array([[2, 0], [0, 2]])}, + ), + ], + None, + pytest.raises(ValueError), + ), # Mismatching 2D numpy arrays in metadata ], ) def test_merge( @@ -302,7 +505,7 @@ def test_merge( ) -> 
None: with exception: result = Detections.merge(detections_list=detections_list) - assert result == expected_result + assert result == expected_result, f"Expected: {expected_result}, Got: {result}" @pytest.mark.parametrize( diff --git a/test/detection/test_line_counter.py b/test/detection/test_line_counter.py index b7e4d33dd..a140add55 100644 --- a/test/detection/test_line_counter.py +++ b/test/detection/test_line_counter.py @@ -69,7 +69,7 @@ def test_calculate_region_of_interest_limits( exception: Exception, ) -> None: with exception: - result = LineZone.calculate_region_of_interest_limits(vector=vector) + result = LineZone._calculate_region_of_interest_limits(vector=vector) assert result == expected_result @@ -303,6 +303,19 @@ def test_line_zone_one_detection_default_anchors( [False, True, False, True], [False, False, True, False], ), + ( # Scrape line, left side, center anchor (along line point) + Vector(Point(0, 0), Point(10, 0)), + [ + [-2, 4, 2, 6], + [-2, 4 - 10, 2, 6 - 10], + [-2, 4, 2, 6], + [-2, 4 - 10, 2, 6 - 10], + [-2, 4 - 10, 2, 6 - 10], + ], + [Position.CENTER], + [False, True, False, True, False], + [False, False, True, False, False], + ), ( # Scrape line, right side, corner anchors Vector(Point(0, 0), Point(10, 0)), [ @@ -477,3 +490,289 @@ def test_line_zone_multiple_detections( assert crossed_in_list == expected_crossed_in assert crossed_out_list == expected_crossed_out + + +@pytest.mark.parametrize( + "vector, xyxy_sequence, triggering_anchors, minimum_crossing_threshold, " + "expected_crossed_in, expected_crossed_out", + [ + ( # Detection lingers around line, all crosses counted + Vector(Point(0, 0), Point(10, 0)), + [ + [2, 4, 3, 6], + [2, 4 - 10, 3, 6], + [2, 4, 3, 6], + [2, 4 - 10, 3, 6], + [2, 4 - 10, 3, 6], + ], + [ + Position.TOP_LEFT, + ], + 1, + [False, True, False, True, False], + [False, False, True, False, False], + ), + ( # Detection lingers around line, only final cross counted + Vector(Point(0, 0), Point(10, 0)), + [ + [2, 4, 3, 6], + [2, 4 - 10, 3, 6], + [2, 4, 3, 6], + [2, 4 - 10, 3, 6], + [2, 4 - 10, 3, 6], + ], + [ + Position.TOP_LEFT, + ], + 2, + [False, False, False, False, True], + [False, False, False, False, False], + ), + ( # Detection lingers around line for a long time + Vector(Point(0, 0), Point(10, 0)), + [ + [2, 4, 3, 6], + [2, 4 - 10, 3, 6], + [2, 4, 3, 6], + [2, 4 - 10, 3, 6], + [2, 4, 3, 6], + [2, 4 - 10, 3, 6], + [2, 4, 3, 6], + [2, 4 - 10, 3, 6], + [2, 4, 3, 6], + [2, 4 - 10, 3, 6], + [2, 4, 3, 6], + [2, 4 - 10, 3, 6], + [2, 4 - 10, 3, 6], + ], + [ + Position.TOP_LEFT, + ], + 2, + [False] * 12 + [True], + [False] * 13, + ), + ( # Detection lingers around line, longer cycle + Vector(Point(0, 0), Point(10, 0)), + [ + [2, 4, 3, 6], + [2, 4 - 10, 3, 6], + [2, 4, 3, 6], + [2, 4, 3, 6], + [2, 4, 3, 6], + [2, 4 - 10, 3, 6], + [2, 4 - 10, 3, 6], + [2, 4 - 10, 3, 6], + [2, 4 - 10, 3, 6], + ], + [ + Position.TOP_LEFT, + ], + 4, + [False] * 8 + [True], + [False] * 9, + ), + ], +) +def test_line_zone_one_detection_long_horizon( + vector: Vector, + xyxy_sequence: List[List[float]], + triggering_anchors: List[Position], + minimum_crossing_threshold: int, + expected_crossed_in: List[bool], + expected_crossed_out: List[bool], +) -> None: + line_zone = LineZone( + start=vector.start, + end=vector.end, + triggering_anchors=triggering_anchors, + minimum_crossing_threshold=minimum_crossing_threshold, + ) + + crossed_in_list = [] + crossed_out_list = [] + for i, bbox in enumerate(xyxy_sequence): + detections = mock_detections( + xyxy=[bbox], + 
tracker_id=[0], + ) + crossed_in, crossed_out = line_zone.trigger(detections) + crossed_in_list.append(crossed_in[0]) + crossed_out_list.append(crossed_out[0]) + + assert ( + crossed_in_list == expected_crossed_in + ), f"expected {expected_crossed_in}, got {crossed_in_list}" + assert ( + crossed_out_list == expected_crossed_out + ), f"expected {expected_crossed_out}, got {crossed_out_list}" + + +@pytest.mark.parametrize( + "vector, xyxy_sequence, anchors, minimum_crossing_threshold, " + "expected_crossed_in, expected_crossed_out, expected_count_in, " + "expected_count_out, exception", + [ + ( # One stays, one crosses, one disappears before crossing + Vector(Point(0, 0), Point(10, 0)), + [ + [[4, 4, 6, 6], [4, 4, 6, 6], [4, 4, 6, 6]], + [[4, 4, 6, 6], [4, 4 - 10, 6, 6 - 10], [4, 4, 6, 6]], + [[4, 4, 6, 6], [4, 4, 6, 6]], + [[4, 4, 6, 6], [4, 4 - 10, 6, 6 - 10]], + [[4, 4, 6, 6], [4, 4 - 10, 6, 6 - 10]], + ], + [ + Position.TOP_LEFT, + ], + 1, + [ + [False, False, False], + [False, True, False], + [False, False], + [False, True], + [False, False], + ], + [ + [False, False, False], + [False, False, False], + [False, True], + [False, False], + [False, False], + ], + [0, 1, 1, 2, 2], + [0, 0, 1, 1, 1], + DoesNotRaise(), + ), + ( # One stays, one crosses, one disappears immediately after crossing + Vector(Point(0, 0), Point(10, 0)), + [ + [[4, 4, 6, 6], [4, 4, 6, 6], [4, 4, 6, 6]], + [[4, 4, 6, 6], [4, 4 - 10, 6, 6 - 10], [4, 4, 6, 6]], + [[4, 4, 6, 6], [4, 4, 6, 6], [4, 4 - 10, 6, 6 - 10]], + [[4, 4, 6, 6], [4, 4 - 10, 6, 6 - 10]], + [[4, 4, 6, 6], [4, 4 - 10, 6, 6 - 10]], + ], + [ + Position.TOP_LEFT, + ], + 1, + [ + [False, False, False], + [False, True, False], + [False, False, True], + [False, True], + [False, False], + ], + [ + [False, False, False], + [False, False, False], + [False, True, False], + [False, False], + [False, False], + ], + [0, 1, 2, 3, 3], + [0, 0, 1, 1, 1], + DoesNotRaise(), + ), + ( # One stays, one crosses, one disappears before crossing + Vector(Point(0, 0), Point(10, 0)), + [ + [[4, 4, 6, 6], [4, 4, 6, 6], [4, 4, 6, 6]], + [[4, 4, 6, 6], [4, 4 - 10, 6, 6 - 10], [4, 4, 6, 6]], + [[4, 4, 6, 6], [4, 4, 6, 6]], + [[4, 4, 6, 6], [4, 4 - 10, 6, 6 - 10]], + [[4, 4, 6, 6], [4, 4 - 10, 6, 6 - 10]], + ], + [ + Position.TOP_LEFT, + ], + 2, + [ + [False, False, False], + [False, False, False], + [False, False], + [False, False], + [False, True], + ], + [ + [False, False, False], + [False, False, False], + [False, False], + [False, False], + [False, False], + ], + [0, 0, 0, 0, 1], + [0, 0, 0, 0, 0], + DoesNotRaise(), + ), + ( # One stays, one crosses, one disappears immediately after crossing + Vector(Point(0, 0), Point(10, 0)), + [ + [[4, 4, 6, 6], [4, 4, 6, 6], [4, 4, 6, 6]], + [[4, 4, 6, 6], [4, 4 - 10, 6, 6 - 10], [4, 4, 6, 6]], + [[4, 4, 6, 6], [4, 4, 6, 6], [4, 4 - 10, 6, 6 - 10]], + [[4, 4, 6, 6], [4, 4 - 10, 6, 6 - 10]], + [[4, 4, 6, 6], [4, 4 - 10, 6, 6 - 10]], + ], + [ + Position.TOP_LEFT, + ], + 2, + [ + [False, False, False], + [False, False, False], + [False, False, False], + [False, False], + [False, True], + ], + [ + [False, False, False], + [False, False, False], + [False, False, False], + [False, False], + [False, False], + ], + [0, 0, 0, 0, 1], + [0, 0, 0, 0, 0], + DoesNotRaise(), + ), + ], +) +def test_line_zone_long_horizon_disappearing_detections( + vector: Vector, + xyxy_sequence: List[List[Optional[List[float]]]], + anchors: List[Position], + minimum_crossing_threshold: int, + expected_crossed_in: List[List[bool]], + expected_crossed_out: List[List[bool]], 
+ expected_count_in: List[int], + expected_count_out: List[int], + exception: Exception, +) -> None: + with exception: + line_zone = LineZone( + start=vector.start, + end=vector.end, + triggering_anchors=anchors, + minimum_crossing_threshold=minimum_crossing_threshold, + ) + crossed_in_list = [] + crossed_out_list = [] + count_in_list = [] + count_out_list = [] + for bboxes in xyxy_sequence: + detections = mock_detections( + xyxy=bboxes, + tracker_id=[i for i in range(0, len(bboxes))], + ) + crossed_in, crossed_out = line_zone.trigger(detections) + crossed_in_list.append(list(crossed_in)) + crossed_out_list.append(list(crossed_out)) + count_in_list.append(line_zone.in_count) + count_out_list.append(line_zone.out_count) + + assert crossed_in_list == expected_crossed_in + assert crossed_out_list == expected_crossed_out + assert count_in_list == expected_count_in + assert count_out_list == expected_count_out diff --git a/test/detection/test_utils.py b/test/detection/test_utils.py index 77c4cea54..87e50f6a4 100644 --- a/test/detection/test_utils.py +++ b/test/detection/test_utils.py @@ -14,6 +14,7 @@ filter_polygons_by_area, get_data_item, merge_data, + merge_metadata, move_boxes, process_roboflow_result, scale_boxes, @@ -1138,3 +1139,163 @@ def test_xywh_to_xyxy(xywh: np.ndarray, expected_result: np.ndarray) -> None: def test_xcycwh_to_xyxy(xcycwh: np.ndarray, expected_result: np.ndarray) -> None: result = xcycwh_to_xyxy(xcycwh) np.testing.assert_array_equal(result, expected_result) + + +@pytest.mark.parametrize( + "metadata_list, expected_result, exception", + [ + # Identical metadata with a single key + ([{"key1": "value1"}, {"key1": "value1"}], {"key1": "value1"}, DoesNotRaise()), + # Identical metadata with multiple keys + ( + [ + {"key1": "value1", "key2": "value2"}, + {"key1": "value1", "key2": "value2"}, + ], + {"key1": "value1", "key2": "value2"}, + DoesNotRaise(), + ), + # Conflicting values for the same key + ([{"key1": "value1"}, {"key1": "value2"}], None, pytest.raises(ValueError)), + # Different sets of keys across dictionaries + ([{"key1": "value1"}, {"key2": "value2"}], None, pytest.raises(ValueError)), + # Empty metadata list + ([], {}, DoesNotRaise()), + # Empty metadata dictionaries + ([{}, {}], {}, DoesNotRaise()), + # Different declaration order for keys + ( + [ + {"key1": "value1", "key2": "value2"}, + {"key2": "value2", "key1": "value1"}, + ], + {"key1": "value1", "key2": "value2"}, + DoesNotRaise(), + ), + # Nested metadata dictionaries + ( + [{"key1": {"sub_key": "sub_value"}}, {"key1": {"sub_key": "sub_value"}}], + {"key1": {"sub_key": "sub_value"}}, + DoesNotRaise(), + ), + # Large metadata dictionaries with many keys + ( + [ + {f"key{i}": f"value{i}" for i in range(100)}, + {f"key{i}": f"value{i}" for i in range(100)}, + ], + {f"key{i}": f"value{i}" for i in range(100)}, + DoesNotRaise(), + ), + # Mixed types in list metadata values + ( + [{"key1": ["value1", 2, True]}, {"key1": ["value1", 2, True]}], + {"key1": ["value1", 2, True]}, + DoesNotRaise(), + ), + # Identical lists across metadata dictionaries + ( + [{"key1": [1, 2, 3]}, {"key1": [1, 2, 3]}], + {"key1": [1, 2, 3]}, + DoesNotRaise(), + ), + # Identical numpy arrays across metadata dictionaries + ( + [{"key1": np.array([1, 2, 3])}, {"key1": np.array([1, 2, 3])}], + {"key1": np.array([1, 2, 3])}, + DoesNotRaise(), + ), + # Identical numpy arrays across metadata dictionaries, different datatype + ( + [ + {"key1": np.array([1, 2, 3], dtype=np.int32)}, + {"key1": np.array([1, 2, 3], dtype=np.int64)}, + ], + 
{"key1": np.array([1, 2, 3])}, + DoesNotRaise(), + ), + # Conflicting lists for the same key + ([{"key1": [1, 2, 3]}, {"key1": [4, 5, 6]}], None, pytest.raises(ValueError)), + # Conflicting numpy arrays for the same key + ( + [{"key1": np.array([1, 2, 3])}, {"key1": np.array([4, 5, 6])}], + None, + pytest.raises(ValueError), + ), + # Mixed data types: list and numpy array for the same key + ( + [{"key1": [1, 2, 3]}, {"key1": np.array([1, 2, 3])}], + None, + pytest.raises(ValueError), + ), + # Empty lists and numpy arrays for the same key + ([{"key1": []}, {"key1": np.array([])}], None, pytest.raises(ValueError)), + # Identical multi-dimensional lists across metadata dictionaries + ( + [{"key1": [[1, 2], [3, 4]]}, {"key1": [[1, 2], [3, 4]]}], + {"key1": [[1, 2], [3, 4]]}, + DoesNotRaise(), + ), + # Identical multi-dimensional numpy arrays across metadata dictionaries + ( + [ + {"key1": np.arange(4).reshape(2, 2)}, + {"key1": np.arange(4).reshape(2, 2)}, + ], + {"key1": np.arange(4).reshape(2, 2)}, + DoesNotRaise(), + ), + # Conflicting multi-dimensional lists for the same key + ( + [{"key1": [[1, 2], [3, 4]]}, {"key1": [[5, 6], [7, 8]]}], + None, + pytest.raises(ValueError), + ), + # Conflicting multi-dimensional numpy arrays for the same key + ( + [ + {"key1": np.arange(4).reshape(2, 2)}, + {"key1": np.arange(4, 8).reshape(2, 2)}, + ], + None, + pytest.raises(ValueError), + ), + # Mixed types with multi-dimensional list and array for the same key + ( + [{"key1": [[1, 2], [3, 4]]}, {"key1": np.arange(4).reshape(2, 2)}], + None, + pytest.raises(ValueError), + ), + # Identical higher-dimensional (3D) numpy arrays across + # metadata dictionaries + ( + [ + {"key1": np.arange(8).reshape(2, 2, 2)}, + {"key1": np.arange(8).reshape(2, 2, 2)}, + ], + {"key1": np.arange(8).reshape(2, 2, 2)}, + DoesNotRaise(), + ), + # Differently-shaped higher-dimensional (3D) numpy arrays + # across metadata dictionaries + ( + [ + {"key1": np.arange(8).reshape(2, 2, 2)}, + {"key1": np.arange(8).reshape(4, 1, 2)}, + ], + None, + pytest.raises(ValueError), + ), + ], +) +def test_merge_metadata(metadata_list, expected_result, exception): + with exception: + result = merge_metadata(metadata_list) + if expected_result is None: + assert result is None, f"Expected an error, but got a result {result}" + for key, value in result.items(): + assert key in expected_result + if isinstance(value, np.ndarray): + np.testing.assert_array_equal(value, expected_result[key]) + else: + assert value == expected_result[key] diff --git a/test/tracker/__init__.py b/test/tracker/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/test/tracker/test_byte_tracker.py b/test/tracker/test_byte_tracker.py new file mode 100644 index 000000000..98efeb093 --- /dev/null +++ b/test/tracker/test_byte_tracker.py @@ -0,0 +1,40 @@ +from typing import List + +import numpy as np +import pytest + +import supervision as sv + + +@pytest.mark.parametrize( + "detections, expected_results", + [ + ( + [ + sv.Detections( + xyxy=np.array([[10, 10, 20, 20], [30, 30, 40, 40]]), + class_id=np.array([1, 1]), + confidence=np.array([1, 1]), + ), + sv.Detections( + xyxy=np.array([[10, 10, 20, 20], [30, 30, 40, 40]]), + class_id=np.array([1, 1]), + confidence=np.array([1, 1]), + ), + ], + sv.Detections( + xyxy=np.array([[10, 10, 20, 20], [30, 30, 40, 40]]), + class_id=np.array([1, 1]), + confidence=np.array([1, 1]), + tracker_id=np.array([1, 2]), + ), + ), + ], +) +def test_byte_tracker( + detections: List[sv.Detections], + expected_results: 
sv.Detections, +) -> None: + byte_tracker = sv.ByteTrack() + tracked_detections = [byte_tracker.update_with_detections(d) for d in detections] + assert tracked_detections[-1] == expected_results diff --git a/test/utils/test_internal.py b/test/utils/test_internal.py index eee614e6c..872822a7c 100644 --- a/test/utils/test_internal.py +++ b/test/utils/test_internal.py @@ -121,7 +121,15 @@ def __private_property(self): ( Detections.empty(), False, - {"xyxy", "class_id", "confidence", "mask", "tracker_id", "data"}, + { + "xyxy", + "class_id", + "confidence", + "mask", + "tracker_id", + "data", + "metadata", + }, DoesNotRaise(), ), ( @@ -134,6 +142,7 @@ def __private_property(self): "mask", "tracker_id", "data", + "metadata", "area", "box_area", }, @@ -149,6 +158,7 @@ def __private_property(self): "mask", "tracker_id", "data", + "metadata", }, DoesNotRaise(), ), @@ -169,13 +179,22 @@ def __private_property(self): "mask", "tracker_id", "data", + "metadata", }, DoesNotRaise(), ), ( Detections.empty(), False, - {"xyxy", "class_id", "confidence", "mask", "tracker_id", "data"}, + { + "xyxy", + "class_id", + "confidence", + "mask", + "tracker_id", + "data", + "metadata", + }, DoesNotRaise(), ), ], diff --git a/tox.ini b/tox.ini index 46886c13c..3f44d215e 100644 --- a/tox.ini +++ b/tox.ini @@ -1,5 +1,5 @@ [tox] -envlist = py38,py39,py310,py311,py312 +envlist = py38,py39,py310,py311,py312,py313 [testenv] changedir = test
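
A minimal end-to-end sketch of the refactored tracker (mirroring the new `test_byte_tracker.py` above; box coordinates are made up for illustration):

```python
import numpy as np
import supervision as sv

tracker = sv.ByteTrack(minimum_consecutive_frames=1)

detections = sv.Detections(
    xyxy=np.array([[10, 10, 20, 20], [30, 30, 40, 40]]),
    class_id=np.array([1, 1]),
    confidence=np.array([0.9, 0.8]),
)

# Frame 1 activates both tracks; external IDs come from
# IdCounter(start_id=1), so the first two tracks get IDs 1 and 2.
tracked = tracker.update_with_detections(detections)
print(tracked.tracker_id)  # [1 2]

# reset() now clears both per-instance IdCounters, so a new video
# starts numbering from 1 again instead of mutating class-level state.
tracker.reset()
```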