diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml index 95b45658a..141d96f7b 100644 --- a/.github/workflows/main.yml +++ b/.github/workflows/main.yml @@ -72,7 +72,9 @@ jobs: steps: - name: Print home directory run: echo Home directory inside container $HOME - + - name: Setup cmake + if: matrix.os == 'macos-latest' + uses: jwlawson/actions-setup-cmake@v1.13 - name: Cache .hunter folder if: matrix.os != 'windows-latest' uses: actions/cache@v3 @@ -230,13 +232,13 @@ jobs: ARTIFACTORY_PASS: ${{ secrets.ARTIFACTORY_PASS }} # This job builds wheels for macOS x86_64 arch - build-macos-x86_64: + build-macos: needs: build-docstrings runs-on: macos-latest strategy: matrix: - python-version: [3.6, 3.7, 3.8, 3.9, '3.10', '3.11', '3.12'] - fail-fast: false + python-version: [3.8, 3.9, '3.10', '3.11', '3.12'] + os: [macos-13, macos-14] # macos-13 is x64, macos-14 is arm64 steps: - name: Cache .hunter folder uses: actions/cache@v3 @@ -290,63 +292,6 @@ jobs: ARTIFACTORY_USER: ${{ secrets.ARTIFACTORY_USER }} ARTIFACTORY_PASS: ${{ secrets.ARTIFACTORY_PASS }} - # This job builds wheels for macOS arm64 arch - build-macos-arm64: - needs: build-docstrings - runs-on: [self-hosted, macOS, ARM64] - steps: - # Cached locally on runner - # - name: Cache .hunter folder - # uses: actions/cache@v3 - # with: - # path: ~/.hunter - # key: hunter-macos-latest - - name: List .hunter cache directory - run: | - ls -a -l ~/.hunter/_Base/ || true - echo "PATH=$PATH" - - - uses: actions/checkout@v3 - with: - submodules: 'recursive' - - - uses: actions/download-artifact@v3 - with: - name: 'docstrings' - path: docstrings - - name: Specify docstring to use while building the wheel - run: echo "DEPTHAI_PYTHON_DOCSTRINGS_INPUT=$PWD/docstrings/depthai_python_docstring.hpp" >> $GITHUB_ENV - - - name: Append build hash if not a tagged commit - if: startsWith(github.ref, 'refs/tags/v') != true - run: echo "BUILD_COMMIT_HASH=${{github.sha}}" >> $GITHUB_ENV - - # - name: Build and install depthai-core - # run: | - # echo "MACOSX_DEPLOYMENT_TARGET=11.0" >> $GITHUB_ENV - # cmake -S depthai-core/ -B build_core -D CMAKE_BUILD_TYPE=Release -D CMAKE_TOOLCHAIN_FILE=$PWD/cmake/toolchain/pic.cmake - # cmake --build build_core --target install --parallel 4 - # echo "DEPTHAI_INSTALLATION_DIR=$PWD/build_core/install/" >> $GITHUB_ENV - - - name: Build wheels - run: for PYBIN in {9..12}; do "python3.${PYBIN}" -m pip wheel . 
-w wheelhouse/ --verbose; done - - - name: Auditing wheels - run: delocate-wheel -v -w wheelhouse/audited wheelhouse/*.whl - - - name: Archive wheel artifacts - uses: actions/upload-artifact@v3 - with: - name: audited-wheels - path: wheelhouse/audited/ - - name: Deploy wheels to artifactory (if not a release) - if: startsWith(github.ref, 'refs/tags/v') != true - run: bash ./ci/upload-artifactory.sh - env: - ARTIFACTORY_URL: ${{ secrets.ARTIFACTORY_URL }} - ARTIFACTORY_USER: ${{ secrets.ARTIFACTORY_USER }} - ARTIFACTORY_PASS: ${{ secrets.ARTIFACTORY_PASS }} - # This job builds wheels for x86_64 arch build-linux-x86_64: needs: build-docstrings @@ -470,7 +415,7 @@ jobs: release: if: startsWith(github.ref, 'refs/tags/v') - needs: [pytest, build-linux-armhf, build-windows-x86_64, build-macos-x86_64, build-macos-arm64, build-linux-x86_64, build-linux-arm64] + needs: [pytest, build-linux-armhf, build-windows-x86_64, build-macos, build-linux-x86_64, build-linux-arm64] runs-on: ubuntu-latest steps: diff --git a/CMakeLists.txt b/CMakeLists.txt index 0f58e9e45..c766c88bc 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -132,7 +132,8 @@ pybind11_add_module(${TARGET_NAME} src/pipeline/node/PointCloudBindings.cpp src/pipeline/node/SyncBindings.cpp src/pipeline/node/MessageDemuxBindings.cpp - + src/pipeline/node/CastBindings.cpp + src/pipeline/node/ImageAlignBindings.cpp src/pipeline/datatype/ADatatypeBindings.cpp src/pipeline/datatype/AprilTagConfigBindings.cpp src/pipeline/datatype/AprilTagsBindings.cpp @@ -157,6 +158,7 @@ pybind11_add_module(${TARGET_NAME} src/pipeline/datatype/TrackletsBindings.cpp src/pipeline/datatype/PointCloudConfigBindings.cpp src/pipeline/datatype/PointCloudDataBindings.cpp + src/pipeline/datatype/ImageAlignConfigBindings.cpp ) if(WIN32) diff --git a/depthai-core b/depthai-core index c21bdd374..90487456c 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit c21bdd3746c4ad7134b67c2c75a11fbdd0a01c40 +Subproject commit 90487456cb92d15bcf0129a2f54c332250f1df31 diff --git a/examples/Camera/thermal_cam.py b/examples/Camera/thermal_cam.py index 4fff60ca9..51a6d62ef 100644 --- a/examples/Camera/thermal_cam.py +++ b/examples/Camera/thermal_cam.py @@ -16,6 +16,7 @@ def onMouse(event, x, y, *args): # Thermal camera thermalCam = pipeline.create(dai.node.Camera) +thermalCam.setFps(25) # Limit to 25 to match what the sensor can do, capped even if left at default, but warns. 
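# Note (annotation, not part of the patch): thermal sensors are exposed through the generic
# dai.node.Camera node created just above; the loop below probes device.getConnectedCameraFeatures()
# and, as in thermal_align.py further down, looks for a dai.CameraSensorType.THERMAL entry to find
# the socket and sensor resolution.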
width, height = -1, -1 thermalFound = False for features in device.getConnectedCameraFeatures(): diff --git a/examples/Cast/blur.py b/examples/Cast/blur.py new file mode 100644 index 000000000..733a491dd --- /dev/null +++ b/examples/Cast/blur.py @@ -0,0 +1,49 @@ +import depthai as dai +import cv2 +from pathlib import Path + +SHAPE = 300 + +p = dai.Pipeline() + +camRgb = p.create(dai.node.ColorCamera) +nn = p.create(dai.node.NeuralNetwork) +rgbOut = p.create(dai.node.XLinkOut) +cast = p.create(dai.node.Cast) +castXout = p.create(dai.node.XLinkOut) + +camRgb.setPreviewSize(SHAPE, SHAPE) +camRgb.setInterleaved(False) + +nnBlobPath = (Path(__file__).parent / Path('../models/blur_simplified_openvino_2021.4_6shave.blob')).resolve().absolute() + +nn.setBlobPath(nnBlobPath) + +rgbOut.setStreamName("rgb") + +castXout.setStreamName("cast") + +cast.setOutputFrameType(dai.RawImgFrame.Type.BGR888p) + +# Linking +camRgb.preview.link(nn.input) +camRgb.preview.link(rgbOut.input) +nn.out.link(cast.input) +cast.output.link(castXout.input) + +with dai.Device(p) as device: + qCam = device.getOutputQueue(name="rgb", maxSize=4, blocking=False) + qCast = device.getOutputQueue(name="cast", maxSize=4, blocking=False) + + + while True: + inCast = qCast.get() + assert isinstance(inCast, dai.ImgFrame) + inRgb = qCam.get() + assert isinstance(inRgb, dai.ImgFrame) + cv2.imshow("Blur", inCast.getCvFrame()) + cv2.imshow("Original", inRgb.getCvFrame()) + + + if cv2.waitKey(1) == ord('q'): + break diff --git a/examples/Cast/concat.py b/examples/Cast/concat.py new file mode 100644 index 000000000..e4e3bcbb8 --- /dev/null +++ b/examples/Cast/concat.py @@ -0,0 +1,59 @@ +import numpy as np +import cv2 +import depthai as dai +from pathlib import Path + +SHAPE = 300 + +p = dai.Pipeline() + +camRgb = p.create(dai.node.ColorCamera) +left = p.create(dai.node.MonoCamera) +right = p.create(dai.node.MonoCamera) +manipLeft = p.create(dai.node.ImageManip) +manipRight = p.create(dai.node.ImageManip) +nn = p.create(dai.node.NeuralNetwork) +cast = p.create(dai.node.Cast) +castXout = p.create(dai.node.XLinkOut) + +camRgb.setPreviewSize(SHAPE, SHAPE) +camRgb.setInterleaved(False) +camRgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR) + +left.setCamera("left") +left.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) +manipLeft.initialConfig.setResize(SHAPE, SHAPE) +manipLeft.initialConfig.setFrameType(dai.ImgFrame.Type.BGR888p) + +right.setCamera("right") +right.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) +manipRight.initialConfig.setResize(SHAPE, SHAPE) +manipRight.initialConfig.setFrameType(dai.ImgFrame.Type.BGR888p) + +nnBlobPath = (Path(__file__).parent / Path('../models/concat_openvino_2021.4_6shave.blob')).resolve().absolute() +nn.setBlobPath(nnBlobPath) +nn.setNumInferenceThreads(2) + +castXout.setStreamName("cast") +cast.setOutputFrameType(dai.ImgFrame.Type.BGR888p) + +# Linking +left.out.link(manipLeft.inputImage) +right.out.link(manipRight.inputImage) +manipLeft.out.link(nn.inputs['img1']) +camRgb.preview.link(nn.inputs['img2']) +manipRight.out.link(nn.inputs['img3']) +nn.out.link(cast.input) +cast.output.link(castXout.input) + +# Pipeline is defined, now we can connect to the device +with dai.Device(p) as device: + qCast = device.getOutputQueue(name="cast", maxSize=4, blocking=False) + + while True: + inCast = qCast.get() + assert isinstance(inCast, dai.ImgFrame) + cv2.imshow("Concated frames", inCast.getCvFrame()) + + if cv2.waitKey(1) == ord('q'): + break diff --git 
a/examples/Cast/diff.py b/examples/Cast/diff.py new file mode 100644 index 000000000..aaaaf750d --- /dev/null +++ b/examples/Cast/diff.py @@ -0,0 +1,60 @@ +import cv2 +import depthai as dai +from pathlib import Path + +SHAPE = 720 + +p = dai.Pipeline() + +camRgb = p.create(dai.node.ColorCamera) +nn = p.create(dai.node.NeuralNetwork) +script = p.create(dai.node.Script) +rgbXout = p.create(dai.node.XLinkOut) +cast = p.create(dai.node.Cast) +castXout = p.create(dai.node.XLinkOut) + +camRgb.setVideoSize(SHAPE, SHAPE) +camRgb.setPreviewSize(SHAPE, SHAPE) +camRgb.setInterleaved(False) + +nnBlobPath = (Path(__file__).parent / Path('../models/diff_openvino_2022.1_6shave.blob')).resolve().absolute() +nn.setBlobPath(nnBlobPath) + +script.setScript(""" +old = node.io['in'].get() +while True: + frame = node.io['in'].get() + node.io['img1'].send(old) + node.io['img2'].send(frame) + old = frame +""") + +rgbXout.setStreamName("rgb") +castXout.setStreamName("cast") +cast.setOutputFrameType(dai.RawImgFrame.Type.GRAY8) + +# Linking +camRgb.preview.link(script.inputs['in']) +script.outputs['img1'].link(nn.inputs['img1']) +script.outputs['img2'].link(nn.inputs['img2']) +camRgb.video.link(rgbXout.input) +nn.out.link(cast.input) +cast.output.link(castXout.input) + +# Pipeline is defined, now we can connect to the device +with dai.Device(p) as device: + qCam = device.getOutputQueue(name="rgb", maxSize=4, blocking=False) + qCast = device.getOutputQueue(name="cast", maxSize=4, blocking=False) + + + while True: + colorFrame = qCam.get() + assert isinstance(colorFrame, dai.ImgFrame) + cv2.imshow("Color", colorFrame.getCvFrame()) + + inCast = qCast.get() + assert isinstance(inCast, dai.ImgFrame) + cv2.imshow("Diff", inCast.getCvFrame()) + + if cv2.waitKey(1) == ord('q'): + break diff --git a/examples/ImageAlign/depth_align.py b/examples/ImageAlign/depth_align.py new file mode 100644 index 000000000..29475daa8 --- /dev/null +++ b/examples/ImageAlign/depth_align.py @@ -0,0 +1,188 @@ +import numpy as np +import cv2 +import depthai as dai +import time +from datetime import timedelta +FPS = 30.0 + +RGB_SOCKET = dai.CameraBoardSocket.CAM_A +LEFT_SOCKET = dai.CameraBoardSocket.CAM_B +RIGHT_SOCKET = dai.CameraBoardSocket.CAM_C +ALIGN_SOCKET = LEFT_SOCKET + +COLOR_RESOLUTION = dai.ColorCameraProperties.SensorResolution.THE_1080_P +LEFT_RIGHT_RESOLUTION = dai.MonoCameraProperties.SensorResolution.THE_400_P + +ISP_SCALE = 3 + +class FPSCounter: + def __init__(self): + self.frameTimes = [] + + def tick(self): + now = time.time() + self.frameTimes.append(now) + self.frameTimes = self.frameTimes[-10:] + + def getFps(self): + if len(self.frameTimes) <= 1: + return 0 + return (len(self.frameTimes) - 1) / (self.frameTimes[-1] - self.frameTimes[0]) + +device = dai.Device() + +calibrationHandler = device.readCalibration() +rgbIntrinsics = calibrationHandler.getCameraIntrinsics(RGB_SOCKET, int(1920 / ISP_SCALE), int(1080 / ISP_SCALE)) +rgbDistortion = calibrationHandler.getDistortionCoefficients(RGB_SOCKET) +distortionModel = calibrationHandler.getDistortionModel(RGB_SOCKET) +if distortionModel != dai.CameraModel.Perspective: + raise RuntimeError("Unsupported distortion model for RGB camera. 
This example supports only Perspective model.") + +pipeline = dai.Pipeline() + +# Define sources and outputs +camRgb = pipeline.create(dai.node.ColorCamera) +left = pipeline.create(dai.node.MonoCamera) +right = pipeline.create(dai.node.MonoCamera) +stereo = pipeline.create(dai.node.StereoDepth) +sync = pipeline.create(dai.node.Sync) +out = pipeline.create(dai.node.XLinkOut) +align = pipeline.create(dai.node.ImageAlign) + +left.setResolution(LEFT_RIGHT_RESOLUTION) +left.setBoardSocket(LEFT_SOCKET) +left.setFps(FPS) + +right.setResolution(LEFT_RIGHT_RESOLUTION) +right.setBoardSocket(RIGHT_SOCKET) +right.setFps(FPS) + +camRgb.setBoardSocket(RGB_SOCKET) +camRgb.setResolution(COLOR_RESOLUTION) +camRgb.setFps(FPS) +camRgb.setIspScale(1, ISP_SCALE) + + +stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY) +stereo.setDepthAlign(dai.CameraBoardSocket.LEFT) + +out.setStreamName("out") + +sync.setSyncThreshold(timedelta(seconds=(1 / FPS) * 0.5)) + +# Linking +camRgb.isp.link(sync.inputs["rgb"]) +left.out.link(stereo.left) +right.out.link(stereo.right) +stereo.depth.link(align.input) +align.outputAligned.link(sync.inputs["depth_aligned"]) +camRgb.isp.link(align.inputAlignTo) +sync.out.link(out.input) + + +def colorizeDepth(frameDepth): + invalidMask = frameDepth == 0 + # Log the depth, minDepth and maxDepth + try: + minDepth = np.percentile(frameDepth[frameDepth != 0], 3) + maxDepth = np.percentile(frameDepth[frameDepth != 0], 95) + logDepth = np.log(frameDepth, where=frameDepth != 0) + logMinDepth = np.log(minDepth) + logMaxDepth = np.log(maxDepth) + np.nan_to_num(logDepth, copy=False, nan=logMinDepth) + # Clip the values to be in the 0-255 range + logDepth = np.clip(logDepth, logMinDepth, logMaxDepth) + + # Interpolate only valid logDepth values, setting the rest based on the mask + depthFrameColor = np.interp(logDepth, (logMinDepth, logMaxDepth), (0, 255)) + depthFrameColor = np.nan_to_num(depthFrameColor) + depthFrameColor = depthFrameColor.astype(np.uint8) + depthFrameColor = cv2.applyColorMap(depthFrameColor, cv2.COLORMAP_JET) + # Set invalid depth pixels to black + depthFrameColor[invalidMask] = 0 + except IndexError: + # Frame is likely empty + depthFrameColor = np.zeros((frameDepth.shape[0], frameDepth.shape[1], 3), dtype=np.uint8) + except Exception as e: + raise e + return depthFrameColor + + +rgbWeight = 0.4 +depthWeight = 0.6 + + +def updateBlendWeights(percentRgb): + """ + Update the rgb and depth weights used to blend depth/rgb image + + @param[in] percent_rgb The rgb weight expressed as a percentage (0..100) + """ + global depthWeight + global rgbWeight + rgbWeight = float(percentRgb) / 100.0 + depthWeight = 1.0 - rgbWeight + + +# Connect to device and start pipeline +with device: + device.startPipeline(pipeline) + queue = device.getOutputQueue("out", 8, False) + + # Configure windows; trackbar adjusts blending ratio of rgb/depth + windowName = "rgb-depth" + + # Set the window to be resizable and the initial size + cv2.namedWindow(windowName, cv2.WINDOW_NORMAL) + cv2.resizeWindow(windowName, 1280, 720) + cv2.createTrackbar( + "RGB Weight %", + windowName, + int(rgbWeight * 100), + 100, + updateBlendWeights, + ) + fpsCounter = FPSCounter() + while True: + messageGroup = queue.get() + fpsCounter.tick() + assert isinstance(messageGroup, dai.MessageGroup) + frameRgb = messageGroup["rgb"] + assert isinstance(frameRgb, dai.ImgFrame) + frameDepth = messageGroup["depth_aligned"] + assert isinstance(frameDepth, dai.ImgFrame) + + sizeRgb = frameRgb.getData().size + 
sizeDepth = frameDepth.getData().size + # Blend when both received + if frameDepth is not None: + cvFrame = frameRgb.getCvFrame() + + # Undistort the rgb frame + cvFrameUndistorted = cv2.undistort( + cvFrame, + np.array(rgbIntrinsics), + np.array(rgbDistortion), + ) + # Colorize the aligned depth + alignedDepthColorized = colorizeDepth(frameDepth.getFrame()) + # Resize depth to match the rgb frame + cv2.imshow("Depth aligned", alignedDepthColorized) + + blended = cv2.addWeighted( + cvFrame, rgbWeight, alignedDepthColorized, depthWeight, 0 + ) + cv2.putText( + blended, + f"FPS: {fpsCounter.getFps():.2f}", + (10, 30), + cv2.FONT_HERSHEY_SIMPLEX, + 1, + (255, 255, 255), + 2, + ) + cv2.imshow(windowName, blended) + + key = cv2.waitKey(1) + if key == ord("q"): + break diff --git a/examples/ImageAlign/image_align.py b/examples/ImageAlign/image_align.py new file mode 100644 index 000000000..0d5751a42 --- /dev/null +++ b/examples/ImageAlign/image_align.py @@ -0,0 +1,129 @@ +import cv2 +import depthai as dai +from datetime import timedelta + +# This is an interactive example that shows how two frame sources without depth information. +FPS = 30.0 + +RGB_SOCKET = dai.CameraBoardSocket.CAM_A +LEFT_SOCKET = dai.CameraBoardSocket.CAM_B +RIGHT_SOCKET = dai.CameraBoardSocket.CAM_C +ALIGN_SOCKET = LEFT_SOCKET + +COLOR_RESOLUTION = dai.ColorCameraProperties.SensorResolution.THE_1080_P +LEFT_RIGHT_RESOLUTION = dai.MonoCameraProperties.SensorResolution.THE_720_P + +device = dai.Device() +pipeline = dai.Pipeline() + +# Define sources and outputs +camRgb = pipeline.create(dai.node.ColorCamera) +left = pipeline.create(dai.node.MonoCamera) +right = pipeline.create(dai.node.MonoCamera) +sync = pipeline.create(dai.node.Sync) +out = pipeline.create(dai.node.XLinkOut) +align = pipeline.create(dai.node.ImageAlign) +cfgIn = pipeline.create(dai.node.XLinkIn) + +left.setResolution(LEFT_RIGHT_RESOLUTION) +left.setBoardSocket(LEFT_SOCKET) +left.setFps(FPS) + +right.setResolution(LEFT_RIGHT_RESOLUTION) +right.setBoardSocket(RIGHT_SOCKET) +right.setFps(FPS) + +camRgb.setBoardSocket(RGB_SOCKET) +camRgb.setResolution(COLOR_RESOLUTION) +camRgb.setFps(FPS) +camRgb.setIspScale(1, 3) + +out.setStreamName("out") + +sync.setSyncThreshold(timedelta(seconds=(1 / FPS) * 0.5)) + +cfgIn.setStreamName("config") + +cfg = align.initialConfig.get() +staticDepthPlane = cfg.staticDepthPlane + +# Linking +align.outputAligned.link(sync.inputs["aligned"]) +camRgb.isp.link(sync.inputs["rgb"]) +camRgb.isp.link(align.inputAlignTo) +left.out.link(align.input) +sync.out.link(out.input) +cfgIn.out.link(align.inputConfig) + + +rgbWeight = 0.4 +depthWeight = 0.6 + + +def updateBlendWeights(percentRgb): + """ + Update the rgb and depth weights used to blend depth/rgb image + + @param[in] percent_rgb The rgb weight expressed as a percentage (0..100) + """ + global depthWeight + global rgbWeight + rgbWeight = float(percentRgb) / 100.0 + depthWeight = 1.0 - rgbWeight + +def updateDepthPlane(depth): + global staticDepthPlane + staticDepthPlane = depth + +# Connect to device and start pipeline +with device: + device.startPipeline(pipeline) + queue = device.getOutputQueue("out", 8, False) + cfgQ = device.getInputQueue("config") + + # Configure windows; trackbar adjusts blending ratio of rgb/depth + windowName = "rgb-left" + + # Set the window to be resizable and the initial size + cv2.namedWindow(windowName, cv2.WINDOW_NORMAL) + cv2.resizeWindow(windowName, 1280, 720) + cv2.createTrackbar( + "RGB Weight %", + windowName, + int(rgbWeight * 100), + 100, + 
updateBlendWeights, + ) + cv2.createTrackbar( + "Static Depth Plane [mm]", + windowName, + 0, + 2000, + updateDepthPlane, + ) + while True: + messageGroup = queue.get() + assert isinstance(messageGroup, dai.MessageGroup) + frameRgb = messageGroup["rgb"] + assert isinstance(frameRgb, dai.ImgFrame) + leftAligned = messageGroup["aligned"] + assert isinstance(leftAligned, dai.ImgFrame) + + frameRgbCv = frameRgb.getCvFrame() + # Colorize the aligned depth + leftCv = leftAligned.getCvFrame() + + if len(leftCv.shape) == 2: + leftCv = cv2.cvtColor(leftCv, cv2.COLOR_GRAY2BGR) + if leftCv.shape != frameRgbCv.shape: + leftCv = cv2.resize(leftCv, (frameRgbCv.shape[1], frameRgbCv.shape[0])) + + blended = cv2.addWeighted(frameRgbCv, rgbWeight, leftCv, depthWeight, 0) + cv2.imshow(windowName, blended) + + key = cv2.waitKey(1) + if key == ord("q"): + break + + cfg.staticDepthPlane = staticDepthPlane + cfgQ.send(cfg) diff --git a/examples/ImageAlign/thermal_align.py b/examples/ImageAlign/thermal_align.py new file mode 100644 index 000000000..91d49b25f --- /dev/null +++ b/examples/ImageAlign/thermal_align.py @@ -0,0 +1,163 @@ +import cv2 +import depthai as dai +import numpy as np +import time +from datetime import timedelta + +FPS = 25.0 + +RGB_SOCKET = dai.CameraBoardSocket.CAM_A +COLOR_RESOLUTION = dai.ColorCameraProperties.SensorResolution.THE_1080_P + +class FPSCounter: + def __init__(self): + self.frameTimes = [] + + def tick(self): + now = time.time() + self.frameTimes.append(now) + self.frameTimes = self.frameTimes[-100:] + + def getFps(self): + if len(self.frameTimes) <= 1: + return 0 + # Calculate the FPS + return (len(self.frameTimes) - 1) / (self.frameTimes[-1] - self.frameTimes[0]) + +device = dai.Device() + +thermalWidth, thermalHeight = -1, -1 +thermalFound = False +for features in device.getConnectedCameraFeatures(): + if dai.CameraSensorType.THERMAL in features.supportedTypes: + thermalFound = True + thermalSocket = features.socket + thermalWidth, thermalHeight = features.width, features.height + break +if not thermalFound: + raise RuntimeError("No thermal camera found!") + + +pipeline = dai.Pipeline() + +# Define sources and outputs +camRgb = pipeline.create(dai.node.ColorCamera) +thermalCam = pipeline.create(dai.node.Camera) +thermalCam.setBoardSocket(thermalSocket) +thermalCam.setFps(FPS) + +sync = pipeline.create(dai.node.Sync) +out = pipeline.create(dai.node.XLinkOut) +align = pipeline.create(dai.node.ImageAlign) +cfgIn = pipeline.create(dai.node.XLinkIn) + + +camRgb.setBoardSocket(RGB_SOCKET) +camRgb.setResolution(COLOR_RESOLUTION) +camRgb.setFps(FPS) +camRgb.setIspScale(1,3) + +out.setStreamName("out") + +sync.setSyncThreshold(timedelta(seconds=1/FPS * 0.5)) + +cfgIn.setStreamName("config") + +cfg = align.initialConfig.get() +staticDepthPlane = cfg.staticDepthPlane + +# Linking +align.outputAligned.link(sync.inputs["aligned"]) +camRgb.isp.link(sync.inputs["rgb"]) +camRgb.isp.link(align.inputAlignTo) +thermalCam.raw.link(align.input) +sync.out.link(out.input) +cfgIn.out.link(align.inputConfig) + + +rgbWeight = 0.4 +thermalWeight = 0.6 + + +def updateBlendWeights(percentRgb): + """ + Update the rgb and depth weights used to blend depth/rgb image + @param[in] percent_rgb The rgb weight expressed as a percentage (0..100) + """ + global thermalWeight + global rgbWeight + rgbWeight = float(percentRgb) / 100.0 + thermalWeight = 1.0 - rgbWeight + +def updateDepthPlane(depth): + global staticDepthPlane + staticDepthPlane = depth + +# Connect to device and start pipeline +with device: + 
device.startPipeline(pipeline) + queue = device.getOutputQueue("out", 8, False) + cfgQ = device.getInputQueue("config") + + # Configure windows; trackbar adjusts blending ratio of rgb/depth + windowName = "rgb-thermal" + + # Set the window to be resizable and the initial size + cv2.namedWindow(windowName, cv2.WINDOW_NORMAL) + cv2.resizeWindow(windowName, 1280, 720) + cv2.createTrackbar( + "RGB Weight %", + windowName, + int(rgbWeight * 100), + 100, + updateBlendWeights, + ) + cv2.createTrackbar( + "Static Depth Plane [mm]", + windowName, + 0, + 2000, + updateDepthPlane, + ) + fpsCounter = FPSCounter() + while True: + messageGroup = queue.get() + assert isinstance(messageGroup, dai.MessageGroup) + frameRgb = messageGroup["rgb"] + assert isinstance(frameRgb, dai.ImgFrame) + thermalAligned = messageGroup["aligned"] + assert isinstance(thermalAligned, dai.ImgFrame) + frameRgbCv = frameRgb.getCvFrame() + fpsCounter.tick() + + # Colorize the aligned depth + thermalFrame = thermalAligned.getCvFrame().astype(np.float32) + # Create a mask for nan values + mask = np.isnan(thermalFrame) + # Replace nan values with a mean for visualization + thermalFrame[mask] = np.nanmean(thermalFrame) + thermalFrame = cv2.normalize(thermalFrame, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U) + colormappedFrame = cv2.applyColorMap(thermalFrame, cv2.COLORMAP_MAGMA) + # Apply the mask back with black pixels (0) + colormappedFrame[mask] = 0 + + blended = cv2.addWeighted(frameRgbCv, rgbWeight, colormappedFrame, thermalWeight, 0) + + cv2.putText( + blended, + f"FPS: {fpsCounter.getFps():.2f}", + (10, 30), + cv2.FONT_HERSHEY_SIMPLEX, + 1, + (255, 255, 255), + 2, + ) + + cv2.imshow(windowName, blended) + + key = cv2.waitKey(1) + if key == ord("q"): + break + + cfg.staticDepthPlane = staticDepthPlane + cfgQ.send(cfg) diff --git a/examples/ImageAlign/tof_align.py b/examples/ImageAlign/tof_align.py new file mode 100644 index 000000000..1c8797d7e --- /dev/null +++ b/examples/ImageAlign/tof_align.py @@ -0,0 +1,160 @@ +import numpy as np +import cv2 +import depthai as dai +import time +from datetime import timedelta + +# This example is intended to run unchanged on an OAK-D-SR-PoE camera +FPS = 30.0 + +RGB_SOCKET = dai.CameraBoardSocket.CAM_C +TOF_SOCKET = dai.CameraBoardSocket.CAM_A +ALIGN_SOCKET = RGB_SOCKET + +class FPSCounter: + def __init__(self): + self.frameTimes = [] + + def tick(self): + now = time.time() + self.frameTimes.append(now) + self.frameTimes = self.frameTimes[-100:] + + def getFps(self): + if len(self.frameTimes) <= 1: + return 0 + # Calculate the FPS + return (len(self.frameTimes) - 1) / (self.frameTimes[-1] - self.frameTimes[0]) + + + +pipeline = dai.Pipeline() +# Define sources and outputs +camRgb = pipeline.create(dai.node.ColorCamera) +tof = pipeline.create(dai.node.ToF) +camTof = pipeline.create(dai.node.Camera) +sync = pipeline.create(dai.node.Sync) +align = pipeline.create(dai.node.ImageAlign) +out = pipeline.create(dai.node.XLinkOut) + +# ToF settings +camTof.setFps(FPS) +camTof.setImageOrientation(dai.CameraImageOrientation.ROTATE_180_DEG) +camTof.setBoardSocket(TOF_SOCKET) + +# rgb settings +camRgb.setBoardSocket(RGB_SOCKET) +camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_800_P) +camRgb.setFps(FPS) +camRgb.setIspScale(1, 2) + +out.setStreamName("out") + +sync.setSyncThreshold(timedelta(seconds=(1 / FPS))) + +# Linking +camRgb.isp.link(sync.inputs["rgb"]) +camTof.raw.link(tof.input) +tof.depth.link(align.input) +align.outputAligned.link(sync.inputs["depth_aligned"]) 
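# Dataflow at this point in tof_align.py (annotation): camTof.raw feeds the ToF node, tof.depth feeds
# ImageAlign, and the aligned depth is synced with the RGB stream under the "depth_aligned" key.
# The camRgb.isp link to align.inputAlignTo just below is what tells ImageAlign which frame
# geometry to align the depth into.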
+sync.inputs["rgb"].setBlocking(False) +camRgb.isp.link(align.inputAlignTo) +sync.out.link(out.input) + +def colorizeDepth(frameDepth): + invalidMask = frameDepth == 0 + # Log the depth, minDepth and maxDepth + try: + minDepth = np.percentile(frameDepth[frameDepth != 0], 3) + maxDepth = np.percentile(frameDepth[frameDepth != 0], 95) + logDepth = np.log(frameDepth, where=frameDepth != 0) + logMinDepth = np.log(minDepth) + logMaxDepth = np.log(maxDepth) + np.nan_to_num(logDepth, copy=False, nan=logMinDepth) + # Clip the values to be in the 0-255 range + logDepth = np.clip(logDepth, logMinDepth, logMaxDepth) + + # Interpolate only valid logDepth values, setting the rest based on the mask + depthFrameColor = np.interp(logDepth, (logMinDepth, logMaxDepth), (0, 255)) + depthFrameColor = np.nan_to_num(depthFrameColor) + depthFrameColor = depthFrameColor.astype(np.uint8) + depthFrameColor = cv2.applyColorMap(depthFrameColor, cv2.COLORMAP_JET) + # Set invalid depth pixels to black + depthFrameColor[invalidMask] = 0 + except IndexError: + # Frame is likely empty + depthFrameColor = np.zeros((frameDepth.shape[0], frameDepth.shape[1], 3), dtype=np.uint8) + except Exception as e: + raise e + return depthFrameColor + + +rgbWeight = 0.4 +depthWeight = 0.6 + + +def updateBlendWeights(percentRgb): + """ + Update the rgb and depth weights used to blend depth/rgb image + + @param[in] percent_rgb The rgb weight expressed as a percentage (0..100) + """ + global depthWeight + global rgbWeight + rgbWeight = float(percentRgb) / 100.0 + depthWeight = 1.0 - rgbWeight + + + +# Connect to device and start pipeline +with dai.Device(pipeline) as device: + queue = device.getOutputQueue("out", 8, False) + + # Configure windows; trackbar adjusts blending ratio of rgb/depth + rgbDepthWindowName = "rgb-depth" + + cv2.namedWindow(rgbDepthWindowName) + cv2.createTrackbar( + "RGB Weight %", + rgbDepthWindowName, + int(rgbWeight * 100), + 100, + updateBlendWeights, + ) + fpsCounter = FPSCounter() + while True: + messageGroup = queue.get() + fpsCounter.tick() + assert isinstance(messageGroup, dai.MessageGroup) + frameRgb = messageGroup["rgb"] + assert isinstance(frameRgb, dai.ImgFrame) + frameDepth = messageGroup["depth_aligned"] + assert isinstance(frameDepth, dai.ImgFrame) + + sizeRgb = frameRgb.getData().size + sizeDepth = frameDepth.getData().size + # Blend when both received + if frameDepth is not None: + cvFrame = frameRgb.getCvFrame() + # Colorize the aligned depth + alignedDepthColorized = colorizeDepth(frameDepth.getFrame()) + # Resize depth to match the rgb frame + cv2.putText( + alignedDepthColorized, + f"FPS: {fpsCounter.getFps():.2f}", + (10, 30), + cv2.FONT_HERSHEY_SIMPLEX, + 1, + (255, 255, 255), + 2, + ) + cv2.imshow("depth", alignedDepthColorized) + + blended = cv2.addWeighted( + cvFrame, rgbWeight, alignedDepthColorized, depthWeight, 0 + ) + cv2.imshow(rgbDepthWindowName, blended) + + key = cv2.waitKey(1) + if key == ord("q"): + break \ No newline at end of file diff --git a/examples/SpatialDetection/spatial_tiny_yolo_tof.py b/examples/SpatialDetection/spatial_tiny_yolo_tof.py new file mode 100755 index 000000000..fdcba91ba --- /dev/null +++ b/examples/SpatialDetection/spatial_tiny_yolo_tof.py @@ -0,0 +1,201 @@ +#!/usr/bin/env python3 + +from pathlib import Path +import sys +import cv2 +import depthai as dai +import numpy as np +import time + +FPS = 15 + +''' +Spatial Tiny-yolo example for ToF camera + Performs inference on RGB camera and retrieves spatial location coordinates: x,y,z relative to the center of 
depth map. + Can be used for tiny-yolo-v3 or tiny-yolo-v4 networks +''' + +# Get argument first +nnBlobPath = (Path(__file__).parent / Path('../models/yolo-v4-tiny-tf_openvino_2021.4_6shave.blob')).resolve().absolute() +if 1 < len(sys.argv): + arg = sys.argv[1] + if arg == "yolo3": + nnBlobPath = (Path(__file__).parent / Path('../models/yolo-v3-tiny-tf_openvino_2021.4_6shave.blob')).resolve().absolute() + elif arg == "yolo4": + nnBlobPath = (Path(__file__).parent / Path('../models/yolo-v4-tiny-tf_openvino_2021.4_6shave.blob')).resolve().absolute() + else: + nnBlobPath = Path(arg) +else: + print("Using Tiny YoloV4 model. If you wish to use Tiny YOLOv3, call 'tiny_yolo.py yolo3'") + +if not Path(nnBlobPath).exists(): + import sys + raise FileNotFoundError(f'Required file/s not found, please run "{sys.executable} install_requirements.py"') + +# Tiny yolo v3/4 label texts +labelMap = [ + "person", "bicycle", "car", "motorbike", "aeroplane", "bus", "train", + "truck", "boat", "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", + "bird", "cat", "dog", "horse", "sheep", "cow", "elephant", + "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", + "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite", "baseball bat", + "baseball glove", "skateboard", "surfboard", "tennis racket", "bottle", "wine glass", "cup", + "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich", + "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", + "chair", "sofa", "pottedplant", "bed", "diningtable", "toilet", "tvmonitor", + "laptop", "mouse", "remote", "keyboard", "cell phone", "microwave", "oven", + "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", + "teddy bear", "hair drier", "toothbrush" +] + +syncNN = True + +# Create pipeline +pipeline = dai.Pipeline() + +# Define sources and outputs +camRgb = pipeline.create(dai.node.ColorCamera) +spatialDetectionNetwork = pipeline.create(dai.node.YoloSpatialDetectionNetwork) + +tof = pipeline.create(dai.node.ToF) +camTof = pipeline.create(dai.node.Camera) +imageAlign = pipeline.create(dai.node.ImageAlign) + +xoutRgb = pipeline.create(dai.node.XLinkOut) +xoutNN = pipeline.create(dai.node.XLinkOut) +xoutDepth = pipeline.create(dai.node.XLinkOut) + +xoutRgb.setStreamName("rgb") +xoutNN.setStreamName("detections") +xoutDepth.setStreamName("depth") + +# Properties +camRgb.setPreviewSize(416, 416) +camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_800_P) +camRgb.setInterleaved(False) +camRgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR) +camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_B) +camRgb.setFps(FPS) + +# ToF settings +camTof.setFps(FPS) +camTof.setImageOrientation(dai.CameraImageOrientation.ROTATE_180_DEG) +camTof.setBoardSocket(dai.CameraBoardSocket.CAM_A) + + +# Image align +imageAlign.setOutputSize(640, 400) + +spatialDetectionNetwork.setBlobPath(nnBlobPath) +spatialDetectionNetwork.setConfidenceThreshold(0.5) +spatialDetectionNetwork.input.setBlocking(False) +spatialDetectionNetwork.setBoundingBoxScaleFactor(0.5) +spatialDetectionNetwork.setDepthLowerThreshold(100) +spatialDetectionNetwork.setDepthUpperThreshold(5000) + +# Yolo specific parameters +spatialDetectionNetwork.setNumClasses(80) +spatialDetectionNetwork.setCoordinateSize(4) +spatialDetectionNetwork.setAnchors([10,14, 23,27, 37,58, 81,82, 135,169, 344,319]) +spatialDetectionNetwork.setAnchorMasks({ "side26": [1,2,3], "side13": [3,4,5] }) +spatialDetectionNetwork.setIouThreshold(0.5) + +# 
Linking +camTof.raw.link(tof.input) +tof.depth.link(imageAlign.input) + +camRgb.preview.link(spatialDetectionNetwork.input) +if syncNN: + spatialDetectionNetwork.passthrough.link(xoutRgb.input) +else: + camRgb.preview.link(xoutRgb.input) + +spatialDetectionNetwork.out.link(xoutNN.input) + +# stereo.depth.link(imageAlign.input) +camRgb.isp.link(imageAlign.inputAlignTo) +imageAlign.outputAligned.link(spatialDetectionNetwork.inputDepth) +spatialDetectionNetwork.passthroughDepth.link(xoutDepth.input) + +# Connect to device and start pipeline +with dai.Device(pipeline) as device: + + # Output queues will be used to get the rgb frames and nn data from the outputs defined above + previewQueue = device.getOutputQueue(name="rgb", maxSize=4, blocking=False) + detectionNNQueue = device.getOutputQueue(name="detections", maxSize=4, blocking=False) + depthQueue = device.getOutputQueue(name="depth", maxSize=4, blocking=False) + + startTime = time.monotonic() + counter = 0 + fps = 0 + color = (255, 255, 255) + printOutputLayersOnce = True + + while True: + inPreview = previewQueue.get() + assert isinstance(inPreview, dai.ImgFrame) + inDet = detectionNNQueue.get() + assert isinstance(inDet, dai.SpatialImgDetections) + depth = depthQueue.get() + assert isinstance(depth, dai.ImgFrame) + + + frame = inPreview.getCvFrame() + depthFrame = depth.getFrame() # depthFrame values are in millimeters + + depth_downscaled = depthFrame[::4] + if np.all(depth_downscaled == 0): + min_depth = 0 # Set a default minimum depth value when all elements are zero + else: + min_depth = np.percentile(depth_downscaled[depth_downscaled != 0], 1) + max_depth = np.percentile(depth_downscaled, 99) + depthFrameColor = np.interp(depthFrame, (min_depth, max_depth), (0, 255)).astype(np.uint8) + depthFrameColor = cv2.applyColorMap(depthFrameColor, cv2.COLORMAP_HOT) + + counter+=1 + current_time = time.monotonic() + if (current_time - startTime) > 1 : + fps = counter / (current_time - startTime) + counter = 0 + startTime = current_time + + detections = inDet.detections + + # If the frame is available, draw bounding boxes on it and show the frame + height = frame.shape[0] + width = frame.shape[1] + for detection in detections: + roiData = detection.boundingBoxMapping + roi = roiData.roi + roi = roi.denormalize(depthFrameColor.shape[1], depthFrameColor.shape[0]) + topLeft = roi.topLeft() + bottomRight = roi.bottomRight() + xmin = int(topLeft.x) + ymin = int(topLeft.y) + xmax = int(bottomRight.x) + ymax = int(bottomRight.y) + cv2.rectangle(depthFrameColor, (xmin, ymin), (xmax, ymax), color, 1) + + # Denormalize bounding box + x1 = int(detection.xmin * width) + x2 = int(detection.xmax * width) + y1 = int(detection.ymin * height) + y2 = int(detection.ymax * height) + try: + label = labelMap[detection.label] + except: + label = detection.label + cv2.putText(frame, str(label), (x1 + 10, y1 + 20), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255) + cv2.putText(frame, "{:.2f}".format(detection.confidence*100), (x1 + 10, y1 + 35), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255) + cv2.putText(frame, f"X: {int(detection.spatialCoordinates.x)} mm", (x1 + 10, y1 + 50), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255) + cv2.putText(frame, f"Y: {int(detection.spatialCoordinates.y)} mm", (x1 + 10, y1 + 65), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255) + cv2.putText(frame, f"Z: {int(detection.spatialCoordinates.z)} mm", (x1 + 10, y1 + 80), cv2.FONT_HERSHEY_TRIPLEX, 0.5, 255) + + cv2.rectangle(frame, (x1, y1), (x2, y2), color, cv2.FONT_HERSHEY_SIMPLEX) + + cv2.putText(frame, "NN fps: {:.2f}".format(fps), (2, 
frame.shape[0] - 4), cv2.FONT_HERSHEY_TRIPLEX, 0.4, color) + cv2.imshow("depth", depthFrameColor) + cv2.imshow("rgb", frame) + + if cv2.waitKey(1) == ord('q'): + break diff --git a/examples/models/cast-models/model.yml b/examples/models/cast-models/model.yml new file mode 100644 index 000000000..968204178 --- /dev/null +++ b/examples/models/cast-models/model.yml @@ -0,0 +1,29 @@ +# Copyright (c) 2021 Intel Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +description: >- + cast models +task_type: image_translation +files: + - name: blur_simplified_openvino_2021.4_6shave.blob + size: 5760 + sha256: 6a37c12917a41c13344ff1370588dc72736a2cf129fddb1a94a3af8c3ba8cf20 + source: https://artifacts.luxonis.com/artifactory/luxonis-depthai-data-local/network/blur_simplified_openvino_2021.4_6shave.blob + - name: diff_openvino_2022.1_6shave.blob + size: 2010 + sha256: 3178020fede75b1e63da944790fd2030c4a16fe5b05b62191657a3f18fed3c54 + source: https://artifacts.luxonis.com/artifactory/luxonis-depthai-data-local/network/diff_openvino_2022.1_6shave.blob + +framework: dldt +license: https://raw.githubusercontent.com/openvinotoolkit/open_model_zoo/master/LICENSE diff --git a/generate_stubs.py b/generate_stubs.py index 66386b4c9..e65a90fa5 100644 --- a/generate_stubs.py +++ b/generate_stubs.py @@ -29,7 +29,18 @@ print(f'Could not import depthai: {ex}') print(f'PYTHONPATH set to {env["PYTHONPATH"]}') - subprocess.check_call(['stubgen', '-p', MODULE_NAME, '-o', f'{DIRECTORY}'], cwd=DIRECTORY, env=env) + # Check if stubgen has the `--include-docstrings` flag + includeDocstrings = False + output = subprocess.check_output(['stubgen', '--help'], env=env) + if b'--include-docstrings' in output: + includeDocstrings = True + print("Will include docstrings in stubs") + else: + print("Will not include docstrings in stubs") + parameters = ['stubgen', '-p', MODULE_NAME, '-o', f'{DIRECTORY}'] + if includeDocstrings: + parameters.insert(1, '--include-docstrings') + subprocess.check_call(parameters, cwd=DIRECTORY, env=env) # Add py.typed open(f'{DIRECTORY}/depthai/py.typed', 'a').close() @@ -49,7 +60,8 @@ import typing json = dict from pathlib import Path - from typing import Set + from typing import Set, Type, TypeVar + T = TypeVar('T') ''') + contents # Create 'create' overloads @@ -59,6 +71,20 @@ overloads = overloads + f'\\1@overload\\1def create(self, arg0: typing.Type[node.{node}]) -> node.{node}: ...' 
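# For reference (annotation): the stub post-processing here and in the lines added just below aims to
# leave the generated depthai stubs with roughly this shape (an illustrative sketch, not the literal
# stubgen output):
#
#     T = TypeVar('T')
#     class Pipeline:
#         @overload
#         def create(self, arg0: Type[T], *args, **kwargs) -> T: ...
#         @overload
#         def create(self, arg0: typing.Type[node.ColorCamera]) -> node.ColorCamera: ...
#     class ImgFrame(Buffer):
#         def getCvFrame(self) -> numpy.ndarray: ...
#
# so that e.g. pipeline.create(dai.node.ColorCamera) type-checks as ColorCamera and getCvFrame()
# is typed as a numpy array instead of object.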
final_stubs = re.sub(r"([\s]*)def create\(self, arg0: object\) -> Node: ...", f'{overloads}', stubs_import) + final_lines = [] + for line in final_stubs.split('\n'): + if 'class Pipeline:' in line: + final_lines.append(line) + final_lines.append(' @overload') + final_lines.append(' def create(self, arg0: Type[T], *args, **kwargs) -> T: ...') + continue + match = re.match(r'^( def getCvFrame\(self\) -> )object(:.*)$', line) + if match: + final_lines.append(f"{match.group(1)}numpy.ndarray{match.group(2)}") + continue + final_lines.append(line) + + final_stubs = '\n'.join(final_lines) # Writeout changes file.seek(0) file.truncate(0) diff --git a/pyproject.toml b/pyproject.toml index d770236f7..90ac84a91 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,2 +1,2 @@ [build-system] -requires = ["setuptools", "wheel", "mypy<=1.3.0", "cmake==3.25"] \ No newline at end of file +requires = ["setuptools", "wheel", "mypy", "typing-extensions", "cmake==3.25"] diff --git a/src/DatatypeBindings.cpp b/src/DatatypeBindings.cpp index 1e4ea63a6..68d3dec47 100644 --- a/src/DatatypeBindings.cpp +++ b/src/DatatypeBindings.cpp @@ -27,6 +27,7 @@ void bind_tofconfig(pybind11::module& m, void* pCallstack); void bind_tracklets(pybind11::module& m, void* pCallstack); void bind_pointcloudconfig(pybind11::module& m, void* pCallstack); void bind_pointclouddata(pybind11::module& m, void* pCallstack); +void bind_imagealignconfig(pybind11::module& m, void* pCallstack); void DatatypeBindings::addToCallstack(std::deque& callstack) { // Bind common datatypebindings @@ -57,6 +58,7 @@ void DatatypeBindings::addToCallstack(std::deque& callstack) { callstack.push_front(bind_tracklets); callstack.push_front(bind_pointcloudconfig); callstack.push_front(bind_pointclouddata); + callstack.push_front(bind_imagealignconfig); } void DatatypeBindings::bind(pybind11::module& m, void* pCallstack){ diff --git a/src/DeviceBindings.cpp b/src/DeviceBindings.cpp index b92831cf3..197e89cee 100644 --- a/src/DeviceBindings.cpp +++ b/src/DeviceBindings.cpp @@ -682,6 +682,7 @@ void DeviceBindings::bind(pybind11::module& m, void* pCallstack){ bindConstructors(device); // Bind the rest device + .def("__enter__", [](Device& d) -> Device& { return d; }) .def("getOutputQueue", static_cast(Device::*)(const std::string&)>(&Device::getOutputQueue), py::arg("name"), DOC(dai, Device, getOutputQueue)) .def("getOutputQueue", static_cast(Device::*)(const std::string&, unsigned int, bool)>(&Device::getOutputQueue), py::arg("name"), py::arg("maxSize"), py::arg("blocking") = true, DOC(dai, Device, getOutputQueue, 2)) .def("getOutputQueueNames", &Device::getOutputQueueNames, DOC(dai, Device, getOutputQueueNames)) diff --git a/src/pipeline/datatype/ImageAlignConfigBindings.cpp b/src/pipeline/datatype/ImageAlignConfigBindings.cpp new file mode 100644 index 000000000..5021eb627 --- /dev/null +++ b/src/pipeline/datatype/ImageAlignConfigBindings.cpp @@ -0,0 +1,49 @@ +#include "DatatypeBindings.hpp" +#include "pipeline/CommonBindings.hpp" +#include +#include + +// depthai +#include "depthai/pipeline/datatype/ImageAlignConfig.hpp" + +//pybind +#include +#include + +// #include "spdlog/spdlog.h" + +void bind_imagealignconfig(pybind11::module& m, void* pCallstack){ + + using namespace dai; + + py::class_> rawImageAlignConfig(m, "RawImageAlignConfig", DOC(dai, RawImageAlignConfig)); + py::class_> imageAlignConfig(m, "ImageAlignConfig", DOC(dai, ImageAlignConfig)); + + /////////////////////////////////////////////////////////////////////// + 
/////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + // Call the rest of the type defines, then perform the actual bindings + Callstack* callstack = (Callstack*) pCallstack; + auto cb = callstack->top(); + callstack->pop(); + cb(m, pCallstack); + // Actual bindings + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + + // Metadata / raw + + rawImageAlignConfig + .def(py::init<>()) + .def_readwrite("staticDepthPlane", &RawImageAlignConfig::staticDepthPlane, DOC(dai, RawImageAlignConfig, staticDepthPlane)) + ; + + // Message + imageAlignConfig + .def(py::init<>()) + .def("get", &ImageAlignConfig::get, DOC(dai, ImageAlignConfig, get)) + .def("set", &ImageAlignConfig::set, py::arg("config"), DOC(dai, ImageAlignConfig, set)) + ; + +} diff --git a/src/pipeline/datatype/ToFConfigBindings.cpp b/src/pipeline/datatype/ToFConfigBindings.cpp index 3e2aaf3c7..1657a58e1 100644 --- a/src/pipeline/datatype/ToFConfigBindings.cpp +++ b/src/pipeline/datatype/ToFConfigBindings.cpp @@ -17,8 +17,6 @@ void bind_tofconfig(pybind11::module& m, void* pCallstack){ using namespace dai; py::class_> rawToFConfig(m, "RawToFConfig", DOC(dai, RawToFConfig)); - py::class_ depthParams(rawToFConfig, "DepthParams", DOC(dai, RawToFConfig, DepthParams)); - py::enum_ depthParamsTypeFMod(depthParams, "TypeFMod", DOC(dai, RawToFConfig, DepthParams, TypeFMod)); py::class_> toFConfig(m, "ToFConfig", DOC(dai, ToFConfig)); /////////////////////////////////////////////////////////////////////// @@ -37,32 +35,23 @@ void bind_tofconfig(pybind11::module& m, void* pCallstack){ // Metadata / raw rawToFConfig .def(py::init<>()) - .def_readwrite("depthParams", &RawToFConfig::depthParams, DOC(dai, RawToFConfig, depthParams)) - ; - - depthParamsTypeFMod - .value("ALL", RawToFConfig::DepthParams::TypeFMod::F_MOD_ALL) - .value("MIN", RawToFConfig::DepthParams::TypeFMod::F_MOD_MIN) - .value("MAX", RawToFConfig::DepthParams::TypeFMod::F_MOD_MAX) - ; - - depthParams - .def(py::init<>()) - .def_readwrite("freqModUsed", &RawToFConfig::DepthParams::freqModUsed, DOC(dai, RawToFConfig, DepthParams, freqModUsed)) - .def_readwrite("avgPhaseShuffle", &RawToFConfig::DepthParams::avgPhaseShuffle, DOC(dai, RawToFConfig, DepthParams, avgPhaseShuffle)) - .def_readwrite("minimumAmplitude", &RawToFConfig::DepthParams::minimumAmplitude, DOC(dai, RawToFConfig, DepthParams, minimumAmplitude)) - .def_readwrite("median", &RawToFConfig::DepthParams::median, DOC(dai, RawToFConfig, DepthParams, median)) + .def_readwrite("median", &RawToFConfig::median, DOC(dai, RawToFConfig, median)) + .def_readwrite("enablePhaseShuffleTemporalFilter", &RawToFConfig::enablePhaseShuffleTemporalFilter, DOC(dai, RawToFConfig, enablePhaseShuffleTemporalFilter)) + .def_readwrite("enableBurstMode", &RawToFConfig::enableBurstMode, DOC(dai, RawToFConfig, enableBurstMode)) + .def_readwrite("phaseUnwrappingLevel", &RawToFConfig::phaseUnwrappingLevel, DOC(dai, RawToFConfig, phaseUnwrappingLevel)) + .def_readwrite("enableFPPNCorrection", &RawToFConfig::enableFPPNCorrection, DOC(dai, RawToFConfig, enableFPPNCorrection)) + .def_readwrite("enableOpticalCorrection", &RawToFConfig::enableOpticalCorrection, DOC(dai, RawToFConfig, enableOpticalCorrection)) + .def_readwrite("enableTemperatureCorrection", &RawToFConfig::enableTemperatureCorrection, 
DOC(dai, RawToFConfig, enableTemperatureCorrection)) + .def_readwrite("enableWiggleCorrection", &RawToFConfig::enableWiggleCorrection, DOC(dai, RawToFConfig, enableWiggleCorrection)) + .def_readwrite("enablePhaseUnwrapping", &RawToFConfig::enablePhaseUnwrapping, DOC(dai, RawToFConfig, enablePhaseUnwrapping)) + .def_readwrite("phaseUnwrapErrorThreshold", &RawToFConfig::phaseUnwrapErrorThreshold, DOC(dai, RawToFConfig, phaseUnwrapErrorThreshold)) ; // Message toFConfig .def(py::init<>()) .def(py::init>()) - - .def("setDepthParams", static_cast(&ToFConfig::setDepthParams), py::arg("config"), DOC(dai, ToFConfig, setDepthParams)) - .def("setFreqModUsed", static_cast(&ToFConfig::setFreqModUsed), DOC(dai, ToFConfig, setFreqModUsed)) - .def("setAvgPhaseShuffle", &ToFConfig::setAvgPhaseShuffle, DOC(dai, ToFConfig, setAvgPhaseShuffle)) - .def("setMinAmplitude", &ToFConfig::setMinAmplitude, DOC(dai, ToFConfig, setMinAmplitude)) + .def("setMedianFilter", &ToFConfig::setMedianFilter, DOC(dai, ToFConfig, setMedianFilter)) .def("set", &ToFConfig::set, py::arg("config"), DOC(dai, ToFConfig, set)) @@ -70,6 +59,5 @@ void bind_tofconfig(pybind11::module& m, void* pCallstack){ ; // add aliases - m.attr("ToFConfig").attr("DepthParams") = m.attr("RawToFConfig").attr("DepthParams"); } diff --git a/src/pipeline/node/CastBindings.cpp b/src/pipeline/node/CastBindings.cpp new file mode 100644 index 000000000..92498d82d --- /dev/null +++ b/src/pipeline/node/CastBindings.cpp @@ -0,0 +1,51 @@ +#include "NodeBindings.hpp" +#include "Common.hpp" + +#include "depthai/pipeline/Pipeline.hpp" +#include "depthai/pipeline/Node.hpp" +#include "depthai/pipeline/node/Cast.hpp" + +void bind_cast(pybind11::module& m, void* pCallstack){ + + using namespace dai; + using namespace dai::node; + + // Node and Properties declare upfront + py::class_> castProperties(m, "CastProperties", DOC(dai, CastProperties)); + auto cast = ADD_NODE(Cast); + + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + // Call the rest of the type defines, then perform the actual bindings + Callstack* callstack = (Callstack*) pCallstack; + auto cb = callstack->top(); + callstack->pop(); + cb(m, pCallstack); + // Actual bindings + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + + // Properties + + castProperties + .def_readwrite("numFramesPool", &CastProperties::numFramesPool, DOC(dai, CastProperties, numFramesPool)) + .def_readwrite("outputType", &CastProperties::outputType, DOC(dai, CastProperties, outputType)) + .def_readwrite("scale", &CastProperties::scale, DOC(dai, CastProperties, scale)) + .def_readwrite("offset", &CastProperties::offset, DOC(dai, CastProperties, offset)) + ; + + // Node + cast + .def_readonly("input", &Cast::input, DOC(dai, node, Cast, input)) + .def_readonly("output", &Cast::output, DOC(dai, node, Cast, output)) + .def_readonly("passthroughInput", &Cast::passthroughInput, DOC(dai, node, Cast, passthroughInput)) + .def("setNumFramesPool", &Cast::setNumFramesPool, DOC(dai, node, Cast, setNumFramesPool)) + .def("setOutputFrameType", &Cast::setOutputFrameType, DOC(dai, node, Cast, setOutputFrameType)) + .def("setScale", &Cast::setScale, DOC(dai, node, Cast, setScale)) + .def("setOffset", 
&Cast::setOffset, DOC(dai, node, Cast, setOffset)) + ; + daiNodeModule.attr("Cast").attr("Properties") = castProperties; + +} diff --git a/src/pipeline/node/ImageAlignBindings.cpp b/src/pipeline/node/ImageAlignBindings.cpp new file mode 100644 index 000000000..95c89bcbc --- /dev/null +++ b/src/pipeline/node/ImageAlignBindings.cpp @@ -0,0 +1,74 @@ +#include "NodeBindings.hpp" +#include "Common.hpp" + +#include "depthai/pipeline/Pipeline.hpp" +#include "depthai/pipeline/Node.hpp" +#include "depthai/pipeline/node/ImageAlign.hpp" + +void bind_imagealign(pybind11::module &m, void *pCallstack) { + + using namespace dai; + using namespace dai::node; + + // Node and Properties declare upfront + py::class_ properties(m, "ImageAlignProperties", + DOC(dai, ImageAlignProperties)); + auto node = ADD_NODE(ImageAlign); + + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + // Call the rest of the type defines, then perform the actual bindings + Callstack *callstack = (Callstack *)pCallstack; + auto cb = callstack->top(); + callstack->pop(); + cb(m, pCallstack); + // Actual bindings + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + + // Properties + properties + .def_readwrite("initialConfig", &ImageAlignProperties::initialConfig, + DOC(dai, ImageAlignProperties, initialConfig)) + .def_readwrite("numFramesPool", &ImageAlignProperties::numFramesPool, + DOC(dai, ImageAlignProperties, numFramesPool)) + .def_readwrite("alignWidth", &ImageAlignProperties::alignWidth, + DOC(dai, ImageAlignProperties, alignWidth)) + .def_readwrite("alignHeight", &ImageAlignProperties::alignHeight, + DOC(dai, ImageAlignProperties, alignHeight)) + .def_readwrite("warpHwIds", &ImageAlignProperties::warpHwIds, + DOC(dai, ImageAlignProperties, warpHwIds)) + .def_readwrite("interpolation", &ImageAlignProperties::interpolation, + DOC(dai, ImageAlignProperties, interpolation)) + .def_readwrite("outKeepAspectRatio", + &ImageAlignProperties::outKeepAspectRatio, + DOC(dai, ImageAlignProperties, outKeepAspectRatio)) + .def_readwrite("numShaves", &ImageAlignProperties::numShaves, + DOC(dai, ImageAlignProperties, numShaves)); + // Node + node.def_readonly("inputConfig", &ImageAlign::inputConfig, + DOC(dai, node, ImageAlign, inputConfig)) + .def_readonly("input", &ImageAlign::input, + DOC(dai, node, ImageAlign, input)) + .def_readonly("inputAlignTo", &ImageAlign::inputAlignTo, + DOC(dai, node, ImageAlign, inputAlignTo)) + .def_readonly("outputAligned", &ImageAlign::outputAligned, + DOC(dai, node, ImageAlign, outputAligned)) + .def_readonly("passthroughInput", &ImageAlign::passthroughInput, + DOC(dai, node, ImageAlign, passthroughInput)) + .def_readonly("initialConfig", &ImageAlign::initialConfig, + DOC(dai, node, ImageAlign, initialConfig)) + .def("setOutputSize", &ImageAlign::setOutputSize, + DOC(dai, node, ImageAlign, setOutputSize)) + .def("setOutKeepAspectRatio", &ImageAlign::setOutKeepAspectRatio, + DOC(dai, node, ImageAlign, setOutKeepAspectRatio)) + .def("setInterpolation", &ImageAlign::setInterpolation, + DOC(dai, node, ImageAlign, setInterpolation)) + .def("setNumShaves", &ImageAlign::setNumShaves, + DOC(dai, node, ImageAlign, setNumShaves)) + .def("setNumFramesPool", 
&ImageAlign::setNumFramesPool, + DOC(dai, node, ImageAlign, setNumFramesPool)); + daiNodeModule.attr("ImageAlign").attr("Properties") = properties; +} diff --git a/src/pipeline/node/NodeBindings.cpp b/src/pipeline/node/NodeBindings.cpp index 543a78a9f..cedf46669 100644 --- a/src/pipeline/node/NodeBindings.cpp +++ b/src/pipeline/node/NodeBindings.cpp @@ -119,6 +119,8 @@ void bind_tof(pybind11::module& m, void* pCallstack); void bind_pointcloud(pybind11::module& m, void* pCallstack); void bind_sync(pybind11::module& m, void* pCallstack); void bind_messagedemux(pybind11::module& m, void* pCallstack); +void bind_cast(pybind11::module& m, void* pCallstack); +void bind_imagealign(pybind11::module &m, void *pCallstack); void NodeBindings::addToCallstack(std::deque& callstack) { // Bind Node et al @@ -153,6 +155,8 @@ void NodeBindings::addToCallstack(std::deque& callstack) { callstack.push_front(bind_pointcloud); callstack.push_front(bind_sync); callstack.push_front(bind_messagedemux); + callstack.push_front(bind_cast); + callstack.push_front(bind_imagealign); } void NodeBindings::bind(pybind11::module& m, void* pCallstack){ diff --git a/src/pipeline/node/ToFBindings.cpp b/src/pipeline/node/ToFBindings.cpp index 21199a3fc..06b79348e 100644 --- a/src/pipeline/node/ToFBindings.cpp +++ b/src/pipeline/node/ToFBindings.cpp @@ -30,6 +30,8 @@ void bind_tof(pybind11::module& m, void* pCallstack){ // Properties tofProperties .def_readwrite("initialConfig", &ToFProperties::initialConfig, DOC(dai, ToFProperties, initialConfig)) + .def_readwrite("numFramesPool", &ToFProperties::numFramesPool, DOC(dai, ToFProperties, numFramesPool)) + .def_readwrite("numShaves", &ToFProperties::numShaves, DOC(dai, ToFProperties, numShaves)) ; // Node @@ -39,8 +41,11 @@ void bind_tof(pybind11::module& m, void* pCallstack){ .def_readonly("depth", &ToF::depth, DOC(dai, node, ToF, depth), DOC(dai, node, ToF, depth)) .def_readonly("amplitude", &ToF::amplitude, DOC(dai, node, ToF, amplitude), DOC(dai, node, ToF, amplitude)) .def_readonly("intensity", &ToF::intensity, DOC(dai, node, ToF, intensity), DOC(dai, node, ToF, intensity)) - .def_readonly("error", &ToF::error, DOC(dai, node, ToF, error), DOC(dai, node, ToF, error)) + .def_readonly("phase", &ToF::phase, DOC(dai, node, ToF, phase), DOC(dai, node, ToF, phase)) .def_readonly("initialConfig", &ToF::initialConfig, DOC(dai, node, ToF, initialConfig), DOC(dai, node, ToF, initialConfig)) + + .def("setNumShaves", &ToF::setNumShaves, DOC(dai, node, ToF, setNumShaves)) + .def("setNumFramesPool", &ToF::setNumFramesPool, DOC(dai, node, ToF, setNumFramesPool)) ; // ALIAS daiNodeModule.attr("ToF").attr("Properties") = tofProperties; diff --git a/utilities/cam_test.py b/utilities/cam_test.py index c12b79cec..58dda0579 100755 --- a/utilities/cam_test.py +++ b/utilities/cam_test.py @@ -92,6 +92,8 @@ def socket_type_pair(arg): help='Enable the RAW camera streams') parser.add_argument('-tofraw', '--tof-raw', action='store_true', help="Show just ToF raw output instead of post-processed depth") +parser.add_argument('-tofint', '--tof-intensity', action='store_true', + help="Show also ToF intensity output alongside depth") parser.add_argument('-tofamp', '--tof-amplitude', action='store_true', help="Show also ToF amplitude output alongside depth") parser.add_argument('-tofcm', '--tof-cm', action='store_true', @@ -285,8 +287,8 @@ def socket_to_socket_opt(socket: dai.CameraBoardSocket) -> str: xout = {} xout_raw = {} xout_tof_amp = {} + xout_tof_int = {} streams = [] - tofConfig = {} 
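# Note (annotation): with the ToFConfig rework in this patch, the old DepthParams block is gone. The
# per-camera setup below calls tof.initialConfig.setMedianFilter(...) directly, and the remaining
# knobs live as flat fields on RawToFConfig, roughly:
#
#     cfg = tof[c].initialConfig.get()
#     cfg.phaseUnwrappingLevel = 1
#     cfg.enablePhaseShuffleTemporalFilter = True
#     tof[c].initialConfig.set(cfg)
#
# (field names as exposed by ToFConfigBindings.cpp above; the values here are only examples)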
yolo_passthrough_q_name = None for c in cam_list: print("CAM: ", c) @@ -303,27 +305,27 @@ def socket_to_socket_opt(socket: dai.CameraBoardSocket) -> str: cam[c].raw.link(tof[c].input) tof[c].depth.link(xout[c].input) xinTofConfig.out.link(tof[c].inputConfig) - tofConfig = tof[c].initialConfig.get() - tofConfig.depthParams.freqModUsed = dai.RawToFConfig.DepthParams.TypeFMod.MIN - tofConfig.depthParams.avgPhaseShuffle = False - tofConfig.depthParams.minimumAmplitude = 3.0 - tof[c].initialConfig.set(tofConfig) - if args.tof_median == 0: - tofConfig.depthParams.median = dai.MedianFilter.MEDIAN_OFF + tof[c].initialConfig.setMedianFilter(dai.MedianFilter.MEDIAN_OFF) elif args.tof_median == 3: - tofConfig.depthParams.median = dai.MedianFilter.KERNEL_3x3 + tof[c].initialConfig.setMedianFilter(dai.MedianFilter.KERNEL_3x3) elif args.tof_median == 5: - tofConfig.depthParams.median = dai.MedianFilter.KERNEL_5x5 + tof[c].initialConfig.setMedianFilter(dai.MedianFilter.KERNEL_5x5) elif args.tof_median == 7: - tofConfig.depthParams.median = dai.MedianFilter.KERNEL_7x7 - tof[c].initialConfig.set(tofConfig) + tof[c].initialConfig.setMedianFilter(dai.MedianFilter.KERNEL_7x7) + tofConfig = tof[c].initialConfig.get() # TODO multiple instances if args.tof_amplitude: amp_name = 'tof_amplitude_' + c xout_tof_amp[c] = pipeline.create(dai.node.XLinkOut) xout_tof_amp[c].setStreamName(amp_name) streams.append(amp_name) tof[c].amplitude.link(xout_tof_amp[c].input) + if args.tof_intensity: + int_name = 'tof_intensity_' + c + xout_tof_int[c] = pipeline.create(dai.node.XLinkOut) + xout_tof_int[c].setStreamName(int_name) + streams.append(int_name) + tof[c].intensity.link(xout_tof_int[c].input) elif cam_type_thermal[c]: cam[c] = pipeline.create(dai.node.Camera) cam[c].setBoardSocket(cam_socket_opts[c]) @@ -467,6 +469,8 @@ def socket_to_socket_opt(socket: dai.CameraBoardSocket) -> str: cam_name['raw_'+p.socket.name] = p.sensorName if args.tof_amplitude: cam_name['tof_amplitude_'+p.socket.name] = p.sensorName + if args.tof_intensity: + cam_name['tof_intensity_'+p.socket.name] = p.sensorName print('USB speed:', device.getUsbSpeed().name) @@ -578,14 +582,13 @@ def socket_to_socket_opt(socket: dai.CameraBoardSocket) -> str: continue - if cam_type_tof.get(cam_skt, None) and not (c.startswith('raw_') or c.startswith('tof_amplitude_')): + if cam_type_tof.get(cam_skt, None) and not (c.startswith('raw_') or c.startswith('tof_amplitude_') or c.startswith('tof_intensity_')): if args.tof_cm: # pixels represent `cm`, capped to 255. Value can be checked hovering the mouse frame = (frame // 10).clip(0, 255).astype(np.uint8) else: - frame = (frame.view(np.int16).astype(float)) - frame = cv2.normalize( - frame, frame, alpha=255, beta=0, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U) + max_depth = (tofConfig.phaseUnwrappingLevel + 1) * 1874 # 80MHz modulation freq. 
TODO slider + frame = np.interp(frame, (0, max_depth), (0, 255)).astype(np.uint8) frame = cv2.applyColorMap(frame, jet_custom) elif cam_type_thermal[cam_skt] and c.startswith('cam'): frame = frame.astype(np.float32) @@ -614,7 +617,7 @@ def socket_to_socket_opt(socket: dai.CameraBoardSocket) -> str: ) capture_list.remove(c) print() - if c.startswith('raw_') or c.startswith('tof_amplitude_'): + if c.startswith('raw_') or c.startswith('tof_amplitude_') or c.startswith('tof_intensity_'): if capture: filename = capture_file_info + '_10bit.bw' print('Saving:', filename) @@ -661,16 +664,6 @@ def socket_to_socket_opt(socket: dai.CameraBoardSocket) -> str: elif key == ord('c'): capture_list = streams.copy() capture_time = time.strftime('%Y%m%d_%H%M%S') - elif key == ord('g') and tof: - f_mod = dai.RawToFConfig.DepthParams.TypeFMod.MAX if tofConfig.depthParams.freqModUsed == dai.RawToFConfig.DepthParams.TypeFMod.MIN else dai.RawToFConfig.DepthParams.TypeFMod.MIN - print("ToF toggling f_mod value to:", f_mod) - tofConfig.depthParams.freqModUsed = f_mod - tofCfgQueue.send(tofConfig) - elif key == ord('h') and tof: - tofConfig.depthParams.avgPhaseShuffle = not tofConfig.depthParams.avgPhaseShuffle - print("ToF toggling avgPhaseShuffle value to:", - tofConfig.depthParams.avgPhaseShuffle) - tofCfgQueue.send(tofConfig) elif key == ord('t'): print("Autofocus trigger (and disable continuous)") ctrl = dai.CameraControl() @@ -840,12 +833,6 @@ def socket_to_socket_opt(socket: dai.CameraBoardSocket) -> str: chroma_denoise = clamp(chroma_denoise + change, 0, 4) print("Chroma denoise:", chroma_denoise) ctrl.setChromaDenoise(chroma_denoise) - elif control == 'tof_amplitude_min' and tof: - amp_min = clamp( - tofConfig.depthParams.minimumAmplitude + change, 0, 50) - print("Setting min amplitude(confidence) to:", amp_min) - tofConfig.depthParams.minimumAmplitude = amp_min - tofCfgQueue.send(tofConfig) controlQueue.send(ctrl) print() diff --git a/utilities/stress_test.py b/utilities/stress_test.py index 0dc43320e..7d7a1e002 100644 --- a/utilities/stress_test.py +++ b/utilities/stress_test.py @@ -390,11 +390,6 @@ def build_pipeline(device: dai.Device, args) -> Tuple[dai.Pipeline, List[Tuple[s tof_xout = pipeline.createXLinkOut() tof_xout.setStreamName("tof") tof.depth.link(tof_xout.input) - tofConfig = tof.initialConfig.get() - tofConfig.depthParams.freqModUsed = dai.RawToFConfig.DepthParams.TypeFMod.MIN - tofConfig.depthParams.avgPhaseShuffle = False - tofConfig.depthParams.minimumAmplitude = 3.0 - tof.initialConfig.set(tofConfig) xlink_outs.append(("tof", 4)) continue # No video encoder and edge detector for TOF else:
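A note on the depth scaling change in utilities/cam_test.py above: at the 80 MHz ToF modulation frequency mentioned in the comment, the unambiguous range per phase-unwrapping level is c / (2 * f_mod) ≈ 1874 mm, which is where the (tofConfig.phaseUnwrappingLevel + 1) * 1874 cap comes from. A minimal check of that constant, assuming the 80 MHz figure from the comment:

    c_mm_per_s = 299_792_458 * 1000                    # speed of light in mm/s
    f_mod_hz = 80e6                                    # ToF modulation frequency, per the comment
    range_per_level_mm = c_mm_per_s / (2 * f_mod_hz)   # ~1873.7 mm, rounded to 1874 in cam_test.py

    def max_depth_mm(phase_unwrapping_level: int) -> int:
        return (phase_unwrapping_level + 1) * 1874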
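The __enter__ binding added in src/DeviceBindings.cpp supports the pattern used by the ImageAlign examples above: open dai.Device() first (to read calibration or connected camera features), build the pipeline afterwards, and still use the device as a context manager. A minimal sketch of that pattern, assuming an attached OAK device with a color camera:

    import depthai as dai

    device = dai.Device()                      # open first, e.g. to read calibration
    calib = device.readCalibration()

    pipeline = dai.Pipeline()
    cam = pipeline.create(dai.node.ColorCamera)
    xout = pipeline.create(dai.node.XLinkOut)
    xout.setStreamName("preview")
    cam.preview.link(xout.input)

    with device:                               # context-manager use via the new __enter__ binding
        device.startPipeline(pipeline)
        q = device.getOutputQueue("preview", 4, False)
        frame = q.get()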