diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 059a5d37e..9f78edd3b 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -188,7 +188,7 @@ jobs: runs-on: windows-latest strategy: matrix: - python-version: [3.7, 3.8, 3.9, '3.10', '3.11', '3.12'] + python-version: [3.7, 3.8, 3.9, '3.10', '3.11', '3.12', '3.13'] python-architecture: [x64, x86] fail-fast: false steps: @@ -244,7 +244,7 @@ jobs: needs: build-docstrings strategy: matrix: - python-version: [3.8, 3.9, '3.10', '3.11', '3.12'] + python-version: [3.8, 3.9, '3.10', '3.11', '3.12', '3.13'] os: [macos-13, macos-14] # macos-13 is x64, macos-14 is arm64 runs-on: ${{ matrix.os }} steps: @@ -352,7 +352,7 @@ jobs: /opt/python/cp38-cp38/bin/python3.8 setup.py sdist --formats=gztar mv dist/* wheelhouse/audited/ - name: Build wheels - run: for PYBIN in /opt/python/cp3{7..12}*/bin; do "${PYBIN}/pip" wheel . -w ./wheelhouse/ --verbose; done + run: for PYBIN in /opt/python/cp3{7..13}*/bin; do "${PYBIN}/pip" wheel . -w ./wheelhouse/ --verbose; done - name: Audit wheels run: for whl in wheelhouse/*.whl; do auditwheel repair "$whl" --plat $PLAT -w wheelhouse/audited/; done - name: Archive wheel artifacts @@ -413,7 +413,7 @@ jobs: if: startsWith(github.ref, 'refs/tags/v') != true run: echo "BUILD_COMMIT_HASH=${{github.sha}}" >> $GITHUB_ENV - name: Building wheels - run: for PYBIN in /opt/python/cp3{7..12}*/bin; do "${PYBIN}/pip" wheel . -w ./wheelhouse/ --verbose; done + run: for PYBIN in /opt/python/cp3{7..13}*/bin; do "${PYBIN}/pip" wheel . -w ./wheelhouse/ --verbose; done - name: Auditing wheels run: for whl in wheelhouse/*.whl; do auditwheel repair "$whl" --plat $PLAT -w wheelhouse/audited/; done - name: Archive wheel artifacts diff --git a/.github/workflows/test-install-dependencies.yml b/.github/workflows/test-install-dependencies.yml index 51546588f..5635a594b 100644 --- a/.github/workflows/test-install-dependencies.yml +++ b/.github/workflows/test-install-dependencies.yml @@ -16,7 +16,7 @@ runs-on: ubuntu-latest strategy: matrix: - container_image: ["fedora:34", "fedora:35", "fedora:36", "ubuntu:18.04", "ubuntu:20.04", "ubuntu:22.04", "ubuntu:rolling"] + container_image: ["fedora:34", "fedora:35", "fedora:36", "ubuntu:20.04", "ubuntu:22.04", "ubuntu:24.04", "ubuntu:rolling"] container: image: ${{ matrix.container_image }} steps: diff --git a/depthai-core b/depthai-core index 12158a516..d6a37a5ba 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit 12158a5166e0162ee94ae7a86aaac48eafd2a15f +Subproject commit d6a37a5ba8ba2ee187fabc9208b813db5f75d4a7 diff --git a/examples/Camera/camera_undistort.py b/examples/Camera/camera_undistort.py old mode 100644 new mode 100755 index 0d78f1d9e..965d3cef0 --- a/examples/Camera/camera_undistort.py +++ b/examples/Camera/camera_undistort.py @@ -1,3 +1,5 @@ +#!/usr/bin/env python3 + import depthai as dai import cv2 diff --git a/examples/Camera/thermal_cam.py b/examples/Camera/thermal_cam.py old mode 100644 new mode 100755 index 51a6d62ef..d6f7384ce --- a/examples/Camera/thermal_cam.py +++ b/examples/Camera/thermal_cam.py @@ -1,3 +1,5 @@ +#!/usr/bin/env python3 + import depthai as dai import cv2 import numpy as np diff --git a/examples/Cast/cast_blur.py b/examples/Cast/cast_blur.py old mode 100644 new mode 100755 index 733a491dd..68c988e46 --- a/examples/Cast/cast_blur.py +++ b/examples/Cast/cast_blur.py @@ -1,3 +1,5 @@ +#!/usr/bin/env python3 + import depthai as dai import cv2 from pathlib import Path diff --git a/examples/Cast/cast_concat.py 
b/examples/Cast/cast_concat.py old mode 100644 new mode 100755 index e4e3bcbb8..decbb5b08 --- a/examples/Cast/cast_concat.py +++ b/examples/Cast/cast_concat.py @@ -1,3 +1,5 @@ +#!/usr/bin/env python3 + import numpy as np import cv2 import depthai as dai diff --git a/examples/Cast/cast_diff.py b/examples/Cast/cast_diff.py old mode 100644 new mode 100755 index aaaaf750d..471f05fea --- a/examples/Cast/cast_diff.py +++ b/examples/Cast/cast_diff.py @@ -1,3 +1,5 @@ +#!/usr/bin/env python3 + import cv2 import depthai as dai from pathlib import Path diff --git a/examples/ColorCamera/rgb_scene.py b/examples/ColorCamera/rgb_scene.py index 7bd550b1d..1815b7db7 100755 --- a/examples/ColorCamera/rgb_scene.py +++ b/examples/ColorCamera/rgb_scene.py @@ -1,3 +1,5 @@ +#!/usr/bin/env python3 + import cv2 import depthai as dai from itertools import cycle diff --git a/examples/ColorCamera/rgb_undistort.py b/examples/ColorCamera/rgb_undistort.py index 97e71b184..f617f180f 100755 --- a/examples/ColorCamera/rgb_undistort.py +++ b/examples/ColorCamera/rgb_undistort.py @@ -1,3 +1,5 @@ +#!/usr/bin/env python3 + import cv2 import depthai as dai import numpy as np diff --git a/examples/CrashReport/capture_diagnostic.py b/examples/CrashReport/capture_diagnostic.py old mode 100644 new mode 100755 index 5962406f0..363d40e4c --- a/examples/CrashReport/capture_diagnostic.py +++ b/examples/CrashReport/capture_diagnostic.py @@ -1,3 +1,5 @@ +#!/usr/bin/env python3 + import depthai as dai import zipfile from json import dump, JSONEncoder diff --git a/examples/FeatureTracker/feature_motion_estimation.py b/examples/FeatureTracker/feature_motion_estimation.py old mode 100644 new mode 100755 index ae0162f9c..efbdfc24f --- a/examples/FeatureTracker/feature_motion_estimation.py +++ b/examples/FeatureTracker/feature_motion_estimation.py @@ -1,3 +1,5 @@ +#!/usr/bin/env python3 + import numpy as np import cv2 from collections import deque diff --git a/examples/ImageAlign/depth_align.py b/examples/ImageAlign/depth_align.py old mode 100644 new mode 100755 index 9e13fcf58..df773e6c5 --- a/examples/ImageAlign/depth_align.py +++ b/examples/ImageAlign/depth_align.py @@ -1,3 +1,5 @@ +#!/usr/bin/env python3 + import numpy as np import cv2 import depthai as dai @@ -32,7 +34,6 @@ def getFps(self): device = dai.Device() calibrationHandler = device.readCalibration() -rgbIntrinsics = calibrationHandler.getCameraIntrinsics(RGB_SOCKET, int(1920 / ISP_SCALE), int(1080 / ISP_SCALE)) rgbDistortion = calibrationHandler.getDistortionCoefficients(RGB_SOCKET) distortionModel = calibrationHandler.getDistortionModel(RGB_SOCKET) if distortionModel != dai.CameraModel.Perspective: @@ -158,6 +159,8 @@ def updateBlendWeights(percentRgb): if frameDepth is not None: cvFrame = frameRgb.getCvFrame() + rgbIntrinsics = calibrationHandler.getCameraIntrinsics(RGB_SOCKET, int(cvFrame.shape[1]), int(cvFrame.shape[0])) + # Undistort the rgb frame cvFrameUndistorted = cv2.undistort( cvFrame, @@ -170,7 +173,7 @@ def updateBlendWeights(percentRgb): cv2.imshow("Depth aligned", alignedDepthColorized) blended = cv2.addWeighted( - cvFrame, rgbWeight, alignedDepthColorized, depthWeight, 0 + cvFrameUndistorted, rgbWeight, alignedDepthColorized, depthWeight, 0 ) cv2.putText( blended, diff --git a/examples/ImageAlign/image_align.py b/examples/ImageAlign/image_align.py old mode 100644 new mode 100755 index 2d257e61b..4e555f205 --- a/examples/ImageAlign/image_align.py +++ b/examples/ImageAlign/image_align.py @@ -1,6 +1,9 @@ +#!/usr/bin/env python3 + import cv2 import depthai as dai from 
datetime import timedelta +import numpy as np # This is an interactive example that shows how two frame sources without depth information. FPS = 30.0 @@ -12,7 +15,16 @@ COLOR_RESOLUTION = dai.ColorCameraProperties.SensorResolution.THE_1080_P LEFT_RIGHT_RESOLUTION = dai.MonoCameraProperties.SensorResolution.THE_720_P +ISP_SCALE = 3 + device = dai.Device() + +calibrationHandler = device.readCalibration() +rgbDistortion = calibrationHandler.getDistortionCoefficients(RGB_SOCKET) +distortionModel = calibrationHandler.getDistortionModel(RGB_SOCKET) +if distortionModel != dai.CameraModel.Perspective: + raise RuntimeError("Unsupported distortion model for RGB camera. This example supports only Perspective model.") + pipeline = dai.Pipeline() # Define sources and outputs @@ -30,7 +42,7 @@ camRgb.setBoardSocket(RGB_SOCKET) camRgb.setResolution(COLOR_RESOLUTION) camRgb.setFps(FPS) -camRgb.setIspScale(1, 3) +camRgb.setIspScale(1, ISP_SCALE) out.setStreamName("out") @@ -107,12 +119,20 @@ def updateDepthPlane(depth): # Colorize the aligned depth leftCv = leftAligned.getCvFrame() + rgbIntrinsics = calibrationHandler.getCameraIntrinsics(RGB_SOCKET, int(frameRgbCv.shape[1]), int(frameRgbCv.shape[0])) + + cvFrameUndistorted = cv2.undistort( + frameRgbCv, + np.array(rgbIntrinsics), + np.array(rgbDistortion), + ) + if len(leftCv.shape) == 2: leftCv = cv2.cvtColor(leftCv, cv2.COLOR_GRAY2BGR) - if leftCv.shape != frameRgbCv.shape: - leftCv = cv2.resize(leftCv, (frameRgbCv.shape[1], frameRgbCv.shape[0])) + if leftCv.shape != cvFrameUndistorted.shape: + leftCv = cv2.resize(leftCv, (cvFrameUndistorted.shape[1], cvFrameUndistorted.shape[0])) - blended = cv2.addWeighted(frameRgbCv, rgbWeight, leftCv, leftWeight, 0) + blended = cv2.addWeighted(cvFrameUndistorted, rgbWeight, leftCv, leftWeight, 0) cv2.imshow(windowName, blended) key = cv2.waitKey(1) diff --git a/examples/ImageAlign/thermal_align.py b/examples/ImageAlign/thermal_align.py old mode 100644 new mode 100755 index c353f2e9a..3e2673dc2 --- a/examples/ImageAlign/thermal_align.py +++ b/examples/ImageAlign/thermal_align.py @@ -1,3 +1,5 @@ +#!/usr/bin/env python3 + import cv2 import depthai as dai import numpy as np @@ -38,6 +40,14 @@ def getFps(self): raise RuntimeError("No thermal camera found!") +ISP_SCALE = 3 + +calibrationHandler = device.readCalibration() +rgbDistortion = calibrationHandler.getDistortionCoefficients(RGB_SOCKET) +distortionModel = calibrationHandler.getDistortionModel(RGB_SOCKET) +if distortionModel != dai.CameraModel.Perspective: + raise RuntimeError("Unsupported distortion model for RGB camera. 
This example supports only Perspective model.") + pipeline = dai.Pipeline() # Define sources and outputs @@ -55,7 +65,7 @@ def getFps(self): camRgb.setBoardSocket(RGB_SOCKET) camRgb.setResolution(COLOR_RESOLUTION) camRgb.setFps(FPS) -camRgb.setIspScale(1,3) +camRgb.setIspScale(1,ISP_SCALE) out.setStreamName("out") @@ -130,6 +140,14 @@ def updateDepthPlane(depth): frameRgbCv = frameRgb.getCvFrame() fpsCounter.tick() + rgbIntrinsics = calibrationHandler.getCameraIntrinsics(RGB_SOCKET, int(frameRgbCv.shape[1]), int(frameRgbCv.shape[0])) + + cvFrameUndistorted = cv2.undistort( + frameRgbCv, + np.array(rgbIntrinsics), + np.array(rgbDistortion), + ) + # Colorize the aligned depth thermalFrame = thermalAligned.getCvFrame().astype(np.float32) # Create a mask for nan values @@ -141,7 +159,7 @@ def updateDepthPlane(depth): # Apply the mask back with black pixels (0) colormappedFrame[mask] = 0 - blended = cv2.addWeighted(frameRgbCv, rgbWeight, colormappedFrame, thermalWeight, 0) + blended = cv2.addWeighted(cvFrameUndistorted, rgbWeight, colormappedFrame, thermalWeight, 0) cv2.putText( blended, diff --git a/examples/ImageAlign/tof_align.py b/examples/ImageAlign/tof_align.py old mode 100644 new mode 100755 index e4d0664be..070a37d78 --- a/examples/ImageAlign/tof_align.py +++ b/examples/ImageAlign/tof_align.py @@ -1,3 +1,5 @@ +#!/usr/bin/env python3 + import numpy as np import cv2 import depthai as dai @@ -27,8 +29,18 @@ def getFps(self): return (len(self.frameTimes) - 1) / (self.frameTimes[-1] - self.frameTimes[0]) +ISP_SCALE = 2 + +device = dai.Device() + +calibrationHandler = device.readCalibration() +rgbDistortion = calibrationHandler.getDistortionCoefficients(RGB_SOCKET) +distortionModel = calibrationHandler.getDistortionModel(RGB_SOCKET) +if distortionModel != dai.CameraModel.Perspective: + raise RuntimeError("Unsupported distortion model for RGB camera. 
This example supports only Perspective model.") pipeline = dai.Pipeline() + # Define sources and outputs camRgb = pipeline.create(dai.node.ColorCamera) tof = pipeline.create(dai.node.ToF) @@ -46,7 +58,7 @@ def getFps(self): camRgb.setBoardSocket(RGB_SOCKET) camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_800_P) camRgb.setFps(FPS) -camRgb.setIspScale(1, 2) +camRgb.setIspScale(1, ISP_SCALE) out.setStreamName("out") @@ -107,7 +119,8 @@ def updateBlendWeights(percentRgb): # Connect to device and start pipeline -with dai.Device(pipeline) as device: +with device: + device.startPipeline(pipeline) queue = device.getOutputQueue("out", 8, False) # Configure windows; trackbar adjusts blending ratio of rgb/depth @@ -136,6 +149,12 @@ def updateBlendWeights(percentRgb): # Blend when both received if frameDepth is not None: cvFrame = frameRgb.getCvFrame() + rgbIntrinsics = calibrationHandler.getCameraIntrinsics(RGB_SOCKET, int(cvFrame.shape[1]), int(cvFrame.shape[0])) + cvFrameUndistorted = cv2.undistort( + cvFrame, + np.array(rgbIntrinsics), + np.array(rgbDistortion), + ) # Colorize the aligned depth alignedDepthColorized = colorizeDepth(frameDepth.getFrame()) # Resize depth to match the rgb frame @@ -151,7 +170,7 @@ def updateBlendWeights(percentRgb): cv2.imshow("depth", alignedDepthColorized) blended = cv2.addWeighted( - cvFrame, rgbWeight, alignedDepthColorized, depthWeight, 0 + cvFrameUndistorted, rgbWeight, alignedDepthColorized, depthWeight, 0 ) cv2.imshow(rgbDepthWindowName, blended) diff --git a/examples/NeuralNetwork/thermal_nnet.py b/examples/NeuralNetwork/thermal_nnet.py old mode 100644 new mode 100755 index bdca65c5d..5293f37d5 --- a/examples/NeuralNetwork/thermal_nnet.py +++ b/examples/NeuralNetwork/thermal_nnet.py @@ -1,3 +1,5 @@ +#!/usr/bin/env python3 + import depthai as dai import cv2 from pathlib import Path diff --git a/examples/PointCloud/pointcloud_control.py b/examples/PointCloud/pointcloud_control.py old mode 100644 new mode 100755 index 6ebe5b9a5..c03154223 --- a/examples/PointCloud/pointcloud_control.py +++ b/examples/PointCloud/pointcloud_control.py @@ -1,3 +1,5 @@ +#!/usr/bin/env python3 + import depthai as dai import numpy as np import cv2 diff --git a/examples/PointCloud/visualize_pointcloud.py b/examples/PointCloud/visualize_pointcloud.py old mode 100644 new mode 100755 index 564329e4f..c2655eff9 --- a/examples/PointCloud/visualize_pointcloud.py +++ b/examples/PointCloud/visualize_pointcloud.py @@ -1,3 +1,5 @@ +#!/usr/bin/env python3 + import depthai as dai from time import sleep import numpy as np diff --git a/examples/Script/script_emmc_access.py b/examples/Script/script_emmc_access.py old mode 100644 new mode 100755 index a93485976..37d85dd1b --- a/examples/Script/script_emmc_access.py +++ b/examples/Script/script_emmc_access.py @@ -1,3 +1,5 @@ +#!/usr/bin/env python3 + import depthai as dai import cv2 diff --git a/examples/Script/script_read_calibration.py b/examples/Script/script_read_calibration.py old mode 100644 new mode 100755 diff --git a/examples/Script/script_uart.py b/examples/Script/script_uart.py old mode 100644 new mode 100755 diff --git a/examples/StereoDepth/stereo_depth_custom_mesh.py b/examples/StereoDepth/stereo_depth_custom_mesh.py old mode 100644 new mode 100755 diff --git a/examples/StereoDepth/stereo_runtime_calibration_update.py b/examples/StereoDepth/stereo_runtime_calibration_update.py new file mode 100755 index 000000000..4053336f2 --- /dev/null +++ b/examples/StereoDepth/stereo_runtime_calibration_update.py @@ -0,0 +1,83 @@ 
+#!/usr/bin/env python3 + +import cv2 +import depthai as dai +import numpy as np + +# Create pipeline +pipeline = dai.Pipeline() + +# Define sources and outputs +monoLeft = pipeline.create(dai.node.MonoCamera) +monoRight = pipeline.create(dai.node.MonoCamera) +stereo = pipeline.create(dai.node.StereoDepth) +xout = pipeline.create(dai.node.XLinkOut) +xoutLeft = pipeline.create(dai.node.XLinkOut) + +xout.setStreamName("disparity") +xoutLeft.setStreamName("left") + +# Properties +monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) +monoLeft.setCamera("left") +monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) +monoRight.setCamera("right") + +stereo.enableDistortionCorrection(True) + +# Linking +monoLeft.out.link(stereo.left) +monoRight.out.link(stereo.right) +stereo.disparity.link(xout.input) +stereo.rectifiedLeft.link(xoutLeft.input) + +cvColorMap = cv2.applyColorMap(np.arange(256, dtype=np.uint8), cv2.COLORMAP_JET) +cvColorMap[0] = [0, 0, 0] + +# Connect to device and start pipeline +with dai.Device(pipeline) as device: + + try: + calibration = device.readCalibration() + except: + print("Device is not calibrated!") + exit() + + print("Press 'u' to update distortion coefficients with random values") + + # Output queue will be used to get the disparity frames from the outputs defined above + q = device.getOutputQueue(name="disparity", maxSize=4, blocking=False) + qLeft = device.getOutputQueue(name="left", maxSize=4, blocking=False) + + while True: + inDisparity = q.get() # blocking call, will wait until a new data has arrived + frame = inDisparity.getFrame() + # Normalization for better visualization + frame = (frame * (255 / stereo.initialConfig.getMaxDisparity())).astype(np.uint8) + + cv2.imshow("disparity", frame) + + frame = cv2.applyColorMap(frame, cvColorMap) + cv2.imshow("disparity_color", frame) + + inLeft = qLeft.get() + frame = inLeft.getCvFrame() + cv2.imshow("rectified left", frame) + + key = cv2.waitKey(1) + if key == ord('q'): + break + elif key == ord('u'): + randomDistortionCoeffs = np.random.rand(14) + calibration.setDistortionCoefficients(dai.CameraBoardSocket.LEFT, randomDistortionCoeffs) + try: + device.setCalibration(calibration) + except: + print("Failed to update calibration!") + try: + updatedCalib = device.getCalibration() + distortionCoeffs = updatedCalib.getDistortionCoefficients(dai.CameraBoardSocket.LEFT) + print("Updated distortion coefficients: ", distortionCoeffs) + except: + pass + diff --git a/examples/Sync/demux_message_group.py b/examples/Sync/demux_message_group.py old mode 100644 new mode 100755 diff --git a/examples/Sync/depth_video_synced.py b/examples/Sync/depth_video_synced.py old mode 100644 new mode 100755 diff --git a/examples/Sync/imu_video_synced.py b/examples/Sync/imu_video_synced.py old mode 100644 new mode 100755 diff --git a/examples/Sync/sync_scripts.py b/examples/Sync/sync_scripts.py old mode 100644 new mode 100755 diff --git a/examples/ToF/tof_depth.py b/examples/ToF/tof_depth.py old mode 100644 new mode 100755 diff --git a/examples/UVC/uvc_disparity.py b/examples/UVC/uvc_disparity.py old mode 100644 new mode 100755 diff --git a/examples/UVC/uvc_mono.py b/examples/UVC/uvc_mono.py old mode 100644 new mode 100755 diff --git a/examples/device/device_all_boot_bootloader.py b/examples/device/device_all_boot_bootloader.py index 4c2e9a56b..4ac04cb4e 100755 --- a/examples/device/device_all_boot_bootloader.py +++ b/examples/device/device_all_boot_bootloader.py @@ -1,3 +1,5 @@ +#!/usr/bin/env python3 
+
 import depthai as dai
 
 devices = dai.Device.getAllConnectedDevices()
diff --git a/examples/device/device_boot_non_exclusive.py b/examples/device/device_boot_non_exclusive.py
index 898b292f6..3a89acea7 100755
--- a/examples/device/device_boot_non_exclusive.py
+++ b/examples/device/device_boot_non_exclusive.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env python3
+
 import depthai as dai
 import time
 
diff --git a/examples/mixed/collision_avoidance.py b/examples/mixed/collision_avoidance.py
old mode 100644
new mode 100755
index c5683cb8a..d0df84ba3
--- a/examples/mixed/collision_avoidance.py
+++ b/examples/mixed/collision_avoidance.py
@@ -1,3 +1,5 @@
+#!/usr/bin/env python3
+
 import depthai as dai
 import cv2
 import numpy as np
diff --git a/setup.py b/setup.py
index f9ebe5167..596c31c39 100644
--- a/setup.py
+++ b/setup.py
@@ -244,6 +244,7 @@ def build_extension(self, ext):
         "Programming Language :: Python :: 3.10",
         "Programming Language :: Python :: 3.11",
         "Programming Language :: Python :: 3.12",
+        "Programming Language :: Python :: 3.13",
         "Programming Language :: C++",
         "Programming Language :: Python :: Implementation :: CPython",
         "Topic :: Scientific/Engineering",
diff --git a/src/DeviceBindings.cpp b/src/DeviceBindings.cpp
index 6c27b4d6a..7f94027d1 100644
--- a/src/DeviceBindings.cpp
+++ b/src/DeviceBindings.cpp
@@ -504,6 +504,8 @@ void DeviceBindings::bind(pybind11::module& m, void* pCallstack){
         .def("getProfilingData", [](DeviceBase& d) { py::gil_scoped_release release; return d.getProfilingData(); }, DOC(dai, DeviceBase, getProfilingData))
         .def("readCalibration", [](DeviceBase& d) { py::gil_scoped_release release; return d.readCalibration(); }, DOC(dai, DeviceBase, readCalibration))
         .def("flashCalibration", [](DeviceBase& d, CalibrationHandler calibrationDataHandler) { py::gil_scoped_release release; return d.flashCalibration(calibrationDataHandler); }, py::arg("calibrationDataHandler"), DOC(dai, DeviceBase, flashCalibration))
+        .def("setCalibration", [](DeviceBase& d, CalibrationHandler calibrationDataHandler) { py::gil_scoped_release release; return d.setCalibration(calibrationDataHandler); }, py::arg("calibrationDataHandler"), DOC(dai, DeviceBase, setCalibration))
+        .def("getCalibration", [](DeviceBase& d) { py::gil_scoped_release release; return d.getCalibration(); }, DOC(dai, DeviceBase, getCalibration))
         .def("setXLinkChunkSize", [](DeviceBase& d, int s) { py::gil_scoped_release release; d.setXLinkChunkSize(s); }, py::arg("sizeBytes"), DOC(dai, DeviceBase, setXLinkChunkSize))
         .def("getXLinkChunkSize", [](DeviceBase& d) { py::gil_scoped_release release; return d.getXLinkChunkSize(); }, DOC(dai, DeviceBase, getXLinkChunkSize))
         .def("setIrLaserDotProjectorBrightness", [](DeviceBase& d, float mA, int mask) {
diff --git a/src/pipeline/datatype/CameraControlBindings.cpp b/src/pipeline/datatype/CameraControlBindings.cpp
index ebaf0e751..d75541ee7 100644
--- a/src/pipeline/datatype/CameraControlBindings.cpp
+++ b/src/pipeline/datatype/CameraControlBindings.cpp
@@ -242,8 +242,13 @@ std::vector camCtrlAttr;
         .def("setEffectMode", &CameraControl::setEffectMode, py::arg("mode"), DOC(dai, CameraControl, setEffectMode))
         .def("setControlMode", &CameraControl::setControlMode, py::arg("mode"), DOC(dai, CameraControl, setControlMode))
         .def("setCaptureIntent", &CameraControl::setCaptureIntent, py::arg("mode"), DOC(dai, CameraControl, setCaptureIntent))
+        .def("setMisc", py::overload_cast<std::string, std::string>(&CameraControl::setMisc), py::arg("control"), py::arg("value"), DOC(dai, CameraControl, setMisc))
+        .def("setMisc",
py::overload_cast<std::string, int>(&CameraControl::setMisc), py::arg("control"), py::arg("value"), DOC(dai, CameraControl, setMisc, 2))
+        .def("setMisc", py::overload_cast<std::string, float>(&CameraControl::setMisc), py::arg("control"), py::arg("value"), DOC(dai, CameraControl, setMisc, 3))
+        .def("clearMiscControls", &CameraControl::clearMiscControls, DOC(dai, CameraControl, clearMiscControls))
         .def("set", &CameraControl::set, py::arg("config"), DOC(dai, CameraControl, set))
         // getters
+        .def("getMiscControls", &CameraControl::getMiscControls, DOC(dai, CameraControl, getMiscControls))
         .def("getCaptureStill", &CameraControl::getCaptureStill, DOC(dai, CameraControl, getCaptureStill))
         .def("getExposureTime", &CameraControl::getExposureTime, DOC(dai, CameraControl, getExposureTime))
         .def("getSensitivity", &CameraControl::getSensitivity, DOC(dai, CameraControl, getSensitivity))
diff --git a/src/pipeline/datatype/EncodedFrameBindings.cpp b/src/pipeline/datatype/EncodedFrameBindings.cpp
index 546fb93eb..912c51848 100644
--- a/src/pipeline/datatype/EncodedFrameBindings.cpp
+++ b/src/pipeline/datatype/EncodedFrameBindings.cpp
@@ -46,6 +46,8 @@ void bind_encodedframe(pybind11::module &m, void *pCallstack) {
       .def_readwrite("lossless", &RawEncodedFrame::lossless)
       .def_readwrite("type", &RawEncodedFrame::type)
       .def_readwrite("instanceNum", &RawEncodedFrame::instanceNum)
+      .def_readwrite("width", &RawEncodedFrame::width)
+      .def_readwrite("height", &RawEncodedFrame::height)
       .def_readwrite("sequenceNum", &RawEncodedFrame::sequenceNum)
       .def_property(
           "ts",
@@ -85,10 +87,15 @@ void bind_encodedframe(pybind11::module &m, void *pCallstack) {
           py::overload_cast<>(&EncodedFrame::Buffer::getTimestamp, py::const_),
           DOC(dai, Buffer, getTimestamp))
      .def("getTimestampDevice",
-          py::overload_cast<>(&EncodedFrame::Buffer::getTimestampDevice, py::const_),
+          py::overload_cast<>(&EncodedFrame::Buffer::getTimestampDevice,
+                              py::const_),
           DOC(dai, Buffer, getTimestampDevice))
      .def("getInstanceNum", &EncodedFrame::getInstanceNum,
           DOC(dai, EncodedFrame, getInstanceNum))
+     .def("getWidth", &EncodedFrame::getWidth,
+          DOC(dai, EncodedFrame, getWidth))
+     .def("getHeight", &EncodedFrame::getHeight,
+          DOC(dai, EncodedFrame, getHeight))
      .def("getSequenceNum", &EncodedFrame::Buffer::getSequenceNum,
           DOC(dai, Buffer, getSequenceNum))
      .def("getExposureTime", &EncodedFrame::getExposureTime,
@@ -99,7 +106,7 @@ void bind_encodedframe(pybind11::module &m, void *pCallstack) {
           DOC(dai, EncodedFrame, getColorTemperature))
      .def("getLensPosition", &EncodedFrame::getLensPosition,
           DOC(dai, EncodedFrame, getLensPosition))
-     .def("getLensPositionRaw", &EncodedFrame::getLensPositionRaw, 
+     .def("getLensPositionRaw", &EncodedFrame::getLensPositionRaw,
           DOC(dai, EncodedFrame, getLensPositionRaw))
      .def("getQuality", &EncodedFrame::getQuality,
           DOC(dai, EncodedFrame, getQuality))
@@ -119,6 +126,10 @@ void bind_encodedframe(pybind11::module &m, void *pCallstack) {
           DOC(dai, EncodedFrame, setTimestampDevice))
      .def("setSequenceNum", &EncodedFrame::setSequenceNum,
           DOC(dai, EncodedFrame, setSequenceNum))
+     .def("setWidth", &EncodedFrame::setWidth,
+          DOC(dai, EncodedFrame, setWidth))
+     .def("setHeight", &EncodedFrame::setHeight,
+          DOC(dai, EncodedFrame, setHeight))
      .def("setQuality", &EncodedFrame::setQuality,
           DOC(dai, EncodedFrame, getQuality))
      .def("setBitrate", &EncodedFrame::setBitrate,
diff --git a/src/pipeline/datatype/StereoDepthConfigBindings.cpp b/src/pipeline/datatype/StereoDepthConfigBindings.cpp
index 3e202908a..2c07e0bf5 100644
--- a/src/pipeline/datatype/StereoDepthConfigBindings.cpp
+++ b/src/pipeline/datatype/StereoDepthConfigBindings.cpp
@@ -22,6 +22,7 @@ void bind_stereodepthconfig(pybind11::module& m, void* pCallstack){
     py::enum_<RawStereoDepthConfig::AlgorithmControl::DepthAlign> depthAlign(algorithmControl, "DepthAlign", DOC(dai, RawStereoDepthConfig, AlgorithmControl, DepthAlign));
     py::enum_<RawStereoDepthConfig::AlgorithmControl::DepthUnit> depthUnit(algorithmControl, "DepthUnit", DOC(dai, RawStereoDepthConfig, AlgorithmControl, DepthUnit));
     py::class_<RawStereoDepthConfig::PostProcessing> postProcessing(rawStereoDepthConfig, "PostProcessing", DOC(dai, RawStereoDepthConfig, PostProcessing));
+    py::enum_<RawStereoDepthConfig::PostProcessing::Filter> filter(postProcessing, "Filter", DOC(dai, RawStereoDepthConfig, PostProcessing, Filter));
     py::class_<RawStereoDepthConfig::PostProcessing::SpatialFilter> spatialFilter(postProcessing, "SpatialFilter", DOC(dai, RawStereoDepthConfig, PostProcessing, SpatialFilter));
     py::class_<RawStereoDepthConfig::PostProcessing::TemporalFilter> temporalFilter(postProcessing, "TemporalFilter", DOC(dai, RawStereoDepthConfig, PostProcessing, TemporalFilter));
     py::enum_<RawStereoDepthConfig::PostProcessing::TemporalFilter::PersistencyMode> persistencyMode(temporalFilter, "PersistencyMode", DOC(dai, RawStereoDepthConfig, PostProcessing, TemporalFilter, PersistencyMode));
@@ -51,7 +52,15 @@ void bind_stereodepthconfig(pybind11::module& m, void* pCallstack){
     ///////////////////////////////////////////////////////////////////////
     ///////////////////////////////////////////////////////////////////////
 
-    // Metadata / raw
+    filter
+        .value("DECIMATION", RawStereoDepthConfig::PostProcessing::Filter::DECIMATION, DOC(dai, RawStereoDepthConfig, PostProcessing, Filter, DECIMATION))
+        .value("SPECKLE", RawStereoDepthConfig::PostProcessing::Filter::SPECKLE, DOC(dai, RawStereoDepthConfig, PostProcessing, Filter, SPECKLE))
+        .value("MEDIAN", RawStereoDepthConfig::PostProcessing::Filter::MEDIAN, DOC(dai, RawStereoDepthConfig, PostProcessing, Filter, MEDIAN))
+        .value("TEMPORAL", RawStereoDepthConfig::PostProcessing::Filter::TEMPORAL, DOC(dai, RawStereoDepthConfig, PostProcessing, Filter, TEMPORAL))
+        .value("SPATIAL", RawStereoDepthConfig::PostProcessing::Filter::SPATIAL, DOC(dai, RawStereoDepthConfig, PostProcessing, Filter, SPATIAL))
+        .value("FILTER_COUNT", RawStereoDepthConfig::PostProcessing::Filter::FILTER_COUNT, DOC(dai, RawStereoDepthConfig, PostProcessing, Filter, FILTER_COUNT))
+        ;
+
     medianFilter
         .value("MEDIAN_OFF", MedianFilter::MEDIAN_OFF)
         .value("KERNEL_3x3", MedianFilter::KERNEL_3x3)
@@ -137,6 +146,7 @@ void bind_stereodepthconfig(pybind11::module& m, void* pCallstack){
         .def(py::init<>())
        .def_readwrite("enable", &RawStereoDepthConfig::PostProcessing::SpeckleFilter::enable, DOC(dai, RawStereoDepthConfig, PostProcessing, SpeckleFilter, enable))
        .def_readwrite("speckleRange", &RawStereoDepthConfig::PostProcessing::SpeckleFilter::speckleRange, DOC(dai, RawStereoDepthConfig, PostProcessing, SpeckleFilter, speckleRange))
+       .def_readwrite("differenceThreshold", &RawStereoDepthConfig::PostProcessing::SpeckleFilter::differenceThreshold, DOC(dai, RawStereoDepthConfig, PostProcessing, SpeckleFilter, differenceThreshold))
         ;
 
     decimationMode
@@ -153,6 +163,7 @@ void bind_stereodepthconfig(pybind11::module& m, void* pCallstack){
 
     postProcessing
         .def(py::init<>())
+        .def_readwrite("filteringOrder", &RawStereoDepthConfig::PostProcessing::filteringOrder, DOC(dai, RawStereoDepthConfig, PostProcessing, filteringOrder))
         .def_readwrite("median", &RawStereoDepthConfig::PostProcessing::median, DOC(dai, RawStereoDepthConfig, PostProcessing, median))
         .def_readwrite("bilateralSigmaValue", &RawStereoDepthConfig::PostProcessing::bilateralSigmaValue, DOC(dai, RawStereoDepthConfig, PostProcessing, bilateralSigmaValue))
         .def_readwrite("spatialFilter", &RawStereoDepthConfig::PostProcessing::spatialFilter, DOC(dai,
RawStereoDepthConfig, PostProcessing, spatialFilter)) diff --git a/src/pipeline/node/StereoDepthBindings.cpp b/src/pipeline/node/StereoDepthBindings.cpp index 674a591a7..60ae02a86 100644 --- a/src/pipeline/node/StereoDepthBindings.cpp +++ b/src/pipeline/node/StereoDepthBindings.cpp @@ -64,8 +64,25 @@ void bind_stereodepth(pybind11::module& m, void* pCallstack){ ; stereoDepthPresetMode - .value("HIGH_ACCURACY", StereoDepth::PresetMode::HIGH_ACCURACY, DOC(dai, node, StereoDepth, PresetMode, HIGH_ACCURACY)) - .value("HIGH_DENSITY", StereoDepth::PresetMode::HIGH_DENSITY, DOC(dai, node, StereoDepth, PresetMode, HIGH_DENSITY)) + .value("HIGH_ACCURACY", StereoDepth::PresetMode::HIGH_ACCURACY, "**Deprecated:** Will be removed in future releases and replaced with DEFAULT") + .value("HIGH_DENSITY", StereoDepth::PresetMode::HIGH_DENSITY, "**Deprecated:** Will be removed in future releases and replaced with DEFAULT") + + .value("DEFAULT", StereoDepth::PresetMode::DEFAULT) + .value("FACE", StereoDepth::PresetMode::FACE) + .value("HIGH_DETAIL", StereoDepth::PresetMode::HIGH_DETAIL) + .value("ROBOTICS", StereoDepth::PresetMode::ROBOTICS) + + // Deprecated overriden + .def_property_readonly_static("HIGH_ACCURACY", [](py::object){ + PyErr_WarnEx(PyExc_DeprecationWarning, "HIGH_ACCURACY is deprecated, will be removed in future releases and replaced with DEFAULT.", 1); + return StereoDepth::PresetMode::HIGH_ACCURACY; + }) + + .def_property_readonly_static("HIGH_DENSITY", [](py::object){ + PyErr_WarnEx(PyExc_DeprecationWarning, "HIGH_DENSITY is deprecated, will be removed in future releases and replaced with DEFAULT.", 1); + return StereoDepth::PresetMode::HIGH_DENSITY; + }) + ; // Node diff --git a/utilities/cam_test.py b/utilities/cam_test.py index 58dda0579..482e9ac4d 100755 --- a/utilities/cam_test.py +++ b/utilities/cam_test.py @@ -50,6 +50,7 @@ from pathlib import Path import sys import signal +import math from stress_test import stress_test, YOLO_LABELS, create_yolo @@ -66,6 +67,8 @@ def socket_type_pair(arg): is_thermal = True if type in ['th', 'thermal'] else False return [socket, is_color, is_tof, is_thermal] +def string_pair(arg): + return arg.split('=') parser = argparse.ArgumentParser(add_help=False) parser.add_argument('-cams', '--cameras', type=socket_type_pair, nargs='+', @@ -104,6 +107,10 @@ def socket_type_pair(arg): help="Show RGB `preview` stream instead of full size `isp`") parser.add_argument('-show', '--show-meta', action='store_true', help="List frame metadata (seqno, timestamp, exp, iso etc). 
Can also toggle with `\`") +parser.add_argument('-misc', '--misc-controls', type=string_pair, nargs='+', + default=[], + help="List of miscellaneous camera controls to set initially, " + "as pairs: key1=value1 key2=value2 ...") parser.add_argument('-d', '--device', default="", type=str, help="Optional MX ID of the device to connect to.") @@ -198,8 +205,6 @@ def clamp(num, v0, v1): return max(v0, min(num, v1)) # Calculates FPS over a moving window, configurable - - class FPS: def __init__(self, window_size=30): self.dq = collections.deque(maxlen=window_size) @@ -360,6 +365,15 @@ def socket_to_socket_opt(socket: dai.CameraBoardSocket) -> str: # cam[c].initialControl.setManualExposure(15000, 400) # exposure [us], iso # When set, takes effect after the first 2 frames # cam[c].initialControl.setManualWhiteBalance(4000) # light temperature in K, 1000..12000 + # cam[c].initialControl.setAutoExposureLimit(5000) # can also be updated at runtime + # cam[c].initialControl.setMisc("downsampling-mode", "binning") # default: "scaling" + # cam[c].initialControl.setMisc("binning-mode", "sum") # default: "avg" + # cam[c].initialControl.setMisc("manual-exposure-handling", "fast") # default: "default" + # cam[c].initialControl.setMisc("hdr-exposure-ratio", 4) # enables HDR when set `> 1`, current options: 2, 4, 8 + # cam[c].initialControl.setMisc("hdr-local-tone-weight", 75) # default 75, range 0..100 + # cam[c].initialControl.setMisc("high-conversion-gain", 0) # 1 to enable (default on supported sensors) + for kvPair in args.misc_controls: + cam[c].initialControl.setMisc(*kvPair) control.out.link(cam[c].inputControl) if rotate[c]: cam[c].setImageOrientation( @@ -534,6 +548,13 @@ def socket_to_socket_opt(socket: dai.CameraBoardSocket) -> str: chroma_denoise = 0 control = 'none' show = args.show_meta + high_conversion_gain = 1 + print(args.misc_controls) + args_misc_dict = dict(args.misc_controls) + + hdr_exp_ratio = int(math.log2(float(args_misc_dict.get('hdr-exposure-ratio', 1)))) + hdr_local_tone_weight = int(32 * float(args_misc_dict.get('hdr-local-tone-weight', 0.75))) + hdr_on = (hdr_exp_ratio > 0) jet_custom = cv2.applyColorMap( np.arange(256, dtype=np.uint8), cv2.COLORMAP_JET) @@ -664,6 +685,12 @@ def socket_to_socket_opt(socket: dai.CameraBoardSocket) -> str: elif key == ord('c'): capture_list = streams.copy() capture_time = time.strftime('%Y%m%d_%H%M%S') + elif key == ord('h'): + high_conversion_gain = 1 - high_conversion_gain + print("High conversion gain:", high_conversion_gain) + ctrl = dai.CameraControl() + ctrl.setMisc("high-conversion-gain", high_conversion_gain) + controlQueue.send(ctrl) elif key == ord('t'): print("Autofocus trigger (and disable continuous)") ctrl = dai.CameraControl() @@ -742,7 +769,7 @@ def socket_to_socket_opt(socket: dai.CameraBoardSocket) -> str: floodIntensity = 0 device.setIrFloodLightIntensity(floodIntensity) print(f'IR Flood intensity:', floodIntensity) - elif key >= 0 and chr(key) in '34567890[]p\\;\'': + elif key >= 0 and chr(key) in '34567890[]\\;\'rg': if key == ord('3'): control = 'awb_mode' elif key == ord('4'): @@ -771,6 +798,11 @@ def socket_to_socket_opt(socket: dai.CameraBoardSocket) -> str: control = 'chroma_denoise' elif key == ord('p'): control = 'tof_amplitude_min' + elif key == ord('r') or key == ord('g'): + if hdr_on: + control = 'hdr_exp_ratio' if key == ord('r') else 'hdr_local_tone_weight' + else: + print("HDR was not enabled, start with `-misc hdr-exposure-ratio=2` or higher to enable") print("Selected control:", control) elif key in [ord('-'), 
ord('_'), ord('+'), ord('=')]: change = 0 @@ -780,7 +812,7 @@ def socket_to_socket_opt(socket: dai.CameraBoardSocket) -> str: change = 1 ctrl = dai.CameraControl() if control == 'none': - print("Please select a control first using keys 3..9 0 [ ] \\ ; \'") + print("Please select a control first using keys 3..9 0 [ ] \\ ; \' r g") elif control == 'ae_comp': ae_comp = clamp(ae_comp + change, -9, 9) print("Auto exposure compensation:", ae_comp) @@ -833,6 +865,16 @@ def socket_to_socket_opt(socket: dai.CameraBoardSocket) -> str: chroma_denoise = clamp(chroma_denoise + change, 0, 4) print("Chroma denoise:", chroma_denoise) ctrl.setChromaDenoise(chroma_denoise) + elif control == 'hdr_exp_ratio': + hdr_exp_ratio = clamp(hdr_exp_ratio + change, 0, 3) + value = pow(2, hdr_exp_ratio) + print("HDR exposure ratio:", value) + ctrl.setMisc("hdr-exposure-ratio", value) + elif control == 'hdr_local_tone_weight': + hdr_local_tone_weight = clamp(hdr_local_tone_weight + change, 0, 32) + value = hdr_local_tone_weight / 32 + print(f"HDR local tone weight (normalized): {value:.2f}") + ctrl.setMisc("hdr-local-tone-weight", value) controlQueue.send(ctrl) print()
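
The workflow and setup.py changes at the top of this diff extend the build matrix to CPython 3.13. A minimal smoke test for a freshly built wheel (assumes depthai is already installed into the interpreter under test):

import sys
import depthai as dai

# Confirm which interpreter and bindings build are actually in use, e.g. when checking a cp313 wheel.
print(f"Python {sys.version_info.major}.{sys.version_info.minor}.{sys.version_info.micro}")
print(f"depthai {dai.__version__}")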
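
The ImageAlign example hunks (depth_align.py, image_align.py, thermal_align.py) all move to the same pattern: query the RGB intrinsics at the actual frame resolution and undistort before blending. A minimal sketch of that pattern, assuming a connected device with the RGB camera on CAM_A:

import cv2
import numpy as np
import depthai as dai

RGB_SOCKET = dai.CameraBoardSocket.CAM_A

device = dai.Device()
calib = device.readCalibration()
if calib.getDistortionModel(RGB_SOCKET) != dai.CameraModel.Perspective:
    raise RuntimeError("This sketch only handles the Perspective distortion model")
distortion = np.array(calib.getDistortionCoefficients(RGB_SOCKET))

def undistort_rgb(cvFrame):
    # Requesting intrinsics at the frame's own width/height accounts for any ISP scaling.
    K = np.array(calib.getCameraIntrinsics(RGB_SOCKET, cvFrame.shape[1], cvFrame.shape[0]))
    return cv2.undistort(cvFrame, K, distortion)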
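
thermal_align.py masks NaN pixels before colormapping the aligned thermal frame; the masking step in isolation, assuming thermalFrame is a float32 array with NaNs for invalid pixels (COLORMAP_MAGMA is an arbitrary choice here):

import cv2
import numpy as np

def colorize_thermal(thermalFrame):
    mask = np.isnan(thermalFrame)
    thermalFrame[mask] = 0
    norm = cv2.normalize(thermalFrame, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U)
    colored = cv2.applyColorMap(norm, cv2.COLORMAP_MAGMA)
    colored[mask] = 0  # keep pixels without thermal data black
    return colored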
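
tof_align.py now creates the device before the pipeline (so calibration can be read while the pipeline is built) and then starts the pipeline explicitly. The pattern in isolation:

import depthai as dai

device = dai.Device()              # connect first, no pipeline yet
calib = device.readCalibration()   # available while the pipeline is being built

pipeline = dai.Pipeline()
# ... create and link nodes here ...

with device:
    device.startPipeline(pipeline)  # replaces the old `with dai.Device(pipeline) as device:` form
    # queue = device.getOutputQueue("out", 8, False)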
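
The new DeviceBindings entries expose setCalibration()/getCalibration(), which the stereo_runtime_calibration_update.py example uses to swap calibration on a running device (flashCalibration() remains the persistent variant). A sketch that applies modified distortion coefficients and then restores the originals, assuming a connected device:

import depthai as dai
import numpy as np

device = dai.Device()
original = device.readCalibration()

modified = device.readCalibration()
modified.setDistortionCoefficients(dai.CameraBoardSocket.LEFT, list(np.random.rand(14)))
device.setCalibration(modified)    # applied to the running device, not flashed
print(device.getCalibration().getDistortionCoefficients(dai.CameraBoardSocket.LEFT))

device.setCalibration(original)    # put the original values back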
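
CameraControl gains setMisc()/getMiscControls()/clearMiscControls(); the keys listed in the cam_test.py comments can be applied either on a node's initialControl or at runtime. A sketch, assuming camRgb is a ColorCamera node and controlQueue is the queue feeding its inputControl:

import depthai as dai

# At pipeline build time:
camRgb.initialControl.setMisc("downsampling-mode", "binning")  # string value
camRgb.initialControl.setMisc("hdr-exposure-ratio", 4)         # int value, > 1 enables HDR
print(camRgb.initialControl.getMiscControls())                 # misc controls set so far

# At runtime, via the control queue:
ctrl = dai.CameraControl()
ctrl.setMisc("hdr-local-tone-weight", 0.75)                    # float value
controlQueue.send(ctrl)
# ctrl.clearMiscControls() would drop any pending misc controls from `ctrl`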
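
EncodedFrame now carries width and height. A small sketch reading them from a VideoEncoder bitstream (the stream name and MJPEG profile are arbitrary choices):

import depthai as dai

pipeline = dai.Pipeline()
camRgb = pipeline.create(dai.node.ColorCamera)
videoEnc = pipeline.create(dai.node.VideoEncoder)
xout = pipeline.create(dai.node.XLinkOut)
xout.setStreamName("bitstream")

videoEnc.setDefaultProfilePreset(30, dai.VideoEncoderProperties.Profile.MJPEG)
camRgb.video.link(videoEnc.input)
videoEnc.bitstream.link(xout.input)

with dai.Device(pipeline) as device:
    q = device.getOutputQueue(name="bitstream", maxSize=30, blocking=True)
    while True:
        encFrame = q.get()  # dai.EncodedFrame
        print(encFrame.getWidth(), encFrame.getHeight(), encFrame.getSequenceNum())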
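
StereoDepthConfig exposes the PostProcessing.Filter enum, a filteringOrder field and a differenceThreshold for the speckle filter. A sketch of configuring them through a StereoDepth node's initialConfig (the particular order and threshold are arbitrary):

import depthai as dai

Filter = dai.RawStereoDepthConfig.PostProcessing.Filter

cfg = stereo.initialConfig.get()   # assumes `stereo` is a dai.node.StereoDepth instance
cfg.postProcessing.filteringOrder = [
    Filter.DECIMATION,
    Filter.MEDIAN,
    Filter.SPECKLE,
    Filter.SPATIAL,
    Filter.TEMPORAL,
]
cfg.postProcessing.speckleFilter.enable = True
cfg.postProcessing.speckleFilter.differenceThreshold = 2
stereo.initialConfig.set(cfg)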
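
With HIGH_ACCURACY and HIGH_DENSITY deprecated, stereo presets are selected from the new values (DEFAULT, FACE, HIGH_DETAIL, ROBOTICS):

import depthai as dai

pipeline = dai.Pipeline()
stereo = pipeline.create(dai.node.StereoDepth)
stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.DEFAULT)
# The old names still resolve but now emit a DeprecationWarning:
# stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY)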
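
cam_test.py stores the HDR controls as step counters: the exposure ratio as a power-of-two exponent and the local tone weight in 1/32 steps. The arithmetic in isolation:

import math

def clamp(num, v0, v1):
    return max(v0, min(num, v1))

# hdr-exposure-ratio 1/2/4/8 maps to exponent 0..3 (0 means HDR off)
hdr_exp_ratio = int(math.log2(4))               # e.g. started with `-misc hdr-exposure-ratio=4`
hdr_exp_ratio = clamp(hdr_exp_ratio + 1, 0, 3)  # one '+' keypress
print("hdr-exposure-ratio:", pow(2, hdr_exp_ratio))          # 8

# hdr-local-tone-weight is sent as a 0..1 float in 1/32 steps (default 0.75)
hdr_local_tone_weight = int(32 * 0.75)
print("hdr-local-tone-weight:", hdr_local_tone_weight / 32)  # 0.75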