diff --git a/CMakeLists.txt b/CMakeLists.txt index 2183df54f..4bdd118e6 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -127,6 +127,8 @@ pybind11_add_module(${TARGET_NAME} src/pipeline/node/AprilTagBindings.cpp src/pipeline/node/DetectionParserBindings.cpp src/pipeline/node/WarpBindings.cpp + src/pipeline/node/UVCBindings.cpp + src/pipeline/node/ToFBindings.cpp src/pipeline/datatype/ADatatypeBindings.cpp src/pipeline/datatype/AprilTagConfigBindings.cpp @@ -135,6 +137,7 @@ pybind11_add_module(${TARGET_NAME} src/pipeline/datatype/CameraControlBindings.cpp src/pipeline/datatype/EdgeDetectorConfigBindings.cpp src/pipeline/datatype/FeatureTrackerConfigBindings.cpp + src/pipeline/datatype/ToFConfigBindings.cpp src/pipeline/datatype/ImageManipConfigBindings.cpp src/pipeline/datatype/ImgDetectionsBindings.cpp src/pipeline/datatype/ImgFrameBindings.cpp diff --git a/depthai-core b/depthai-core index 125feb8c2..82ab07d03 160000 --- a/depthai-core +++ b/depthai-core @@ -1 +1 @@ -Subproject commit 125feb8c2e16ee4bf71b7873a7b990f1c5f17b18 +Subproject commit 82ab07d037c02f56042d1d2d55a718f379651ed9 diff --git a/docs/source/components/nodes/color_camera.rst b/docs/source/components/nodes/color_camera.rst index 6323af090..d9718f8f7 100644 --- a/docs/source/components/nodes/color_camera.rst +++ b/docs/source/components/nodes/color_camera.rst @@ -87,7 +87,7 @@ Usage pipeline = dai.Pipeline() cam = pipeline.create(dai.node.ColorCamera) cam.setPreviewSize(300, 300) - cam.setBoardSocket(dai.CameraBoardSocket.RGB) + cam.setBoardSocket(dai.CameraBoardSocket.CAM_A) cam.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P) cam.setInterleaved(False) cam.setColorOrder(dai.ColorCameraProperties.ColorOrder.RGB) @@ -97,7 +97,7 @@ Usage dai::Pipeline pipeline; auto cam = pipeline.create<dai::node::ColorCamera>(); cam->setPreviewSize(300, 300); - cam->setBoardSocket(dai::CameraBoardSocket::RGB); + cam->setBoardSocket(dai::CameraBoardSocket::CAM_A); cam->setResolution(dai::ColorCameraProperties::SensorResolution::THE_1080_P); cam->setInterleaved(false); cam->setColorOrder(dai::ColorCameraProperties::ColorOrder::RGB); diff --git a/docs/source/components/nodes/mono_camera.rst b/docs/source/components/nodes/mono_camera.rst index 3995de685..e5b278337 100644 --- a/docs/source/components/nodes/mono_camera.rst +++ b/docs/source/components/nodes/mono_camera.rst @@ -48,14 +48,14 @@ Usage pipeline = dai.Pipeline() mono = pipeline.create(dai.node.MonoCamera) - mono.setBoardSocket(dai.CameraBoardSocket.RIGHT) + mono.setCamera("right") mono.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P) .. code-tab:: c++ dai::Pipeline pipeline; auto mono = pipeline.create<dai::node::MonoCamera>(); - mono->setBoardSocket(dai::CameraBoardSocket::RIGHT); + mono->setCamera("right"); mono->setResolution(dai::MonoCameraProperties::SensorResolution::THE_720_P); Examples of functionality diff --git a/docs/source/components/nodes/stereo_depth.rst b/docs/source/components/nodes/stereo_depth.rst index 87d2d3041..6947eaec5 100644 --- a/docs/source/components/nodes/stereo_depth.rst +++ b/docs/source/components/nodes/stereo_depth.rst @@ -288,6 +288,221 @@ as: For the final disparity map, a filtering is applied based on the confidence threshold value: the pixels that have their confidence score larger than the threshold get invalidated, i.e. their disparity value is set to zero. You can set the confidence threshold with :code:`stereo.initialConfig.setConfidenceThreshold()`.
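As a minimal sketch of where this is set (the threshold value of 240 is illustrative; the valid range is 0..255):

.. code-block:: python

    import depthai as dai

    pipeline = dai.Pipeline()
    stereo = pipeline.create(dai.node.StereoDepth)
    # Disparity pixels whose confidence score is larger than this
    # threshold get invalidated (their disparity is set to 0)
    stereo.initialConfig.setConfidenceThreshold(240)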
+Calculate depth using disparity map +=================================== + +Disparity and depth are inversely related. As disparity decreases, depth increases non-linearly (as 1/disparity), with the scale set by baseline and focal length. This means that when disparity is close to zero, a small change in disparity produces a large change in depth, while at large disparity values even a large change in disparity barely changes the depth. + +By considering this fact, depth can be calculated using this formula: + +.. code-block:: python + + depth = focal_length_in_pixels * baseline / disparity_in_pixels + +Where baseline is the distance between the two mono cameras. Note that baseline and depth use the same unit. + +To get the focal length in pixels, you can :ref:`read camera calibration <Calibration Reader>`, as the focal length in pixels is +written in the camera intrinsics (``intrinsics[0][0]``): + +.. code-block:: python + + import depthai as dai + + with dai.Device() as device: + calibData = device.readCalibration() + intrinsics = calibData.getCameraIntrinsics(dai.CameraBoardSocket.CAM_C) + print('Right mono camera focal length in pixels:', intrinsics[0][0]) + +Here's the theoretical calculation of the focal length in pixels: + +.. code-block:: python + + focal_length_in_pixels = image_width_in_pixels * 0.5 / tan(HFOV * 0.5 * PI/180) + + # With 400P mono camera resolution where HFOV=71.9 degrees + focal_length_in_pixels = 640 * 0.5 / tan(71.9 * 0.5 * PI / 180) = 441.25 + + # With 800P mono camera resolution where HFOV=71.9 degrees + focal_length_in_pixels = 1280 * 0.5 / tan(71.9 * 0.5 * PI / 180) = 882.5 + +Examples of calculating the depth value, using the OAK-D (7.5cm baseline): + +.. code-block:: python + + # For OAK-D @ 400P mono cameras and disparity of eg. 50 pixels + depth = 441.25 * 7.5 / 50 = 66.19 # cm + + # For OAK-D @ 800P mono cameras and disparity of eg. 10 pixels + depth = 882.5 * 7.5 / 10 = 661.88 # cm + +Note that disparity/depth values are stored in :code:`uint16`, where 0 is a special value, meaning that the distance is unknown.
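As a quick sanity check, the formula can be wrapped in a small helper (a sketch; the numbers are the OAK-D 400P values from above):

.. code-block:: python

    def disparity_to_depth_cm(disparity_px, focal_length_px, baseline_cm):
        if disparity_px == 0:
            return None  # 0 is the special 'unknown distance' value
        return focal_length_px * baseline_cm / disparity_px

    print(disparity_to_depth_cm(50, 441.25, 7.5))  # ~66.19 cm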
+Min stereo depth distance +========================= + +If the depth results for close-in objects look weird, this is likely because they are below the minimum depth-perception distance of the device. + +To calculate this minimum distance, use the :ref:`depth formula <Calculate depth using disparity map>` and choose the maximum value for the disparity_in_pixels parameter (keep in mind depth is inversely related to disparity, so the maximum disparity yields the smallest depth). + +For example, OAK-D has a baseline of **7.5cm**, a focal_length_in_pixels of **882.5 pixels**, and the default maximum value for disparity_in_pixels is **95**. By using the :ref:`depth formula <Calculate depth using disparity map>` we get: + +.. code-block:: python + + min_distance = focal_length_in_pixels * baseline / disparity_in_pixels = 882.5 * 7.5cm / 95 = 69.67cm + +or roughly 70cm. + +However, this distance can be cut in half (to around 35cm for the OAK-D) with the following options: + +1. Changing the resolution to 640x400, instead of the standard 1280x800. + +2. Enabling Extended Disparity. + +Extended Disparity mode increases the levels of disparity from the standard 96 to 191, thereby halving the minimum depth. It does so by computing the 96-pixel disparities on the original 1280x720 and on the downscaled 640x360 image, which are then merged into a 191-level disparity. For more information see the Extended Disparity tab in :ref:`this table `. + +Using the previous OAK-D example, disparity_in_pixels now becomes **190** and the minimum distance is: + +.. code-block:: python + + min_distance = focal_length_in_pixels * baseline / disparity_in_pixels = 882.5 * 7.5cm / 190 = 34.84cm + +or roughly 35cm. + +.. note:: + + Applying both of those options is possible, which would set the minimum depth to 1/4 of the standard settings, but at such short distances the minimum depth is limited by the focal length, which is 19.6cm, since OAK-D mono cameras have a fixed focus distance: 19.6cm - infinity. + +See `these examples `__ for how to enable Extended Disparity. + +Disparity shift to lower min depth perception +--------------------------------------------- + +Another option to perceive a closer depth range is to use disparity shift. Disparity shift shifts the starting point +of the disparity search, which significantly decreases max depth (MaxZ) perception, but also decreases min depth (MinZ) perception. +Disparity shift can be combined with extended/subpixel/LR-check modes. + +.. image:: https://user-images.githubusercontent.com/18037362/189375017-2fa137d2-ad6b-46de-8899-6304bbc6c9d7.png + +**Left graph** shows min and max disparity and depth for OAK-D (7.5cm baseline, 800P resolution, ~70° HFOV) by default (disparity shift=0). See :ref:`Calculate depth using disparity map`. +Since the hardware (stereo block) has a fixed 95-pixel disparity search, DepthAI will search from 0 pixels (depth=INF) to 95 pixels (depth=71cm). + +**Right graph** shows the same, but with disparity shift set to 30 pixels. This means that the disparity search will be from 30 pixels (depth=2.2m) to 125 pixels (depth=50cm). +This also means that depth will be very accurate at short range (**theoretically** below 5mm depth error). + +**Limitations**: + +- Because of the inverse relationship between disparity and depth, MaxZ will decrease much faster than MinZ as the disparity shift is increased. Therefore, it is **advised not to use a larger than necessary disparity shift**. +- The tradeoff in reducing MinZ this way is that objects at **distances farther away than MaxZ will not be seen**. +- Because of the point above, **we only recommend using disparity shift when MaxZ is known**, such as having a depth camera mounted above a table pointing down at the table surface. +- The output disparity map is not expanded, only the depth map. So if the disparity shift is set to 50 and the disparity value obtained is 90, the real disparity is 140. + +**Compared to Extended disparity**, disparity shift: + +- **(+)** Is faster, as it doesn't require an extra computation, which means there's also no extra latency +- **(-)** Reduces MaxZ (significantly), while extended disparity only reduces MinZ. + +Disparity shift can be combined with extended disparity. + +.. doxygenfunction:: dai::StereoDepthConfig::setDisparityShift + :project: depthai-core + :no-link:
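As a minimal usage sketch (the 30-pixel value matches the right graph above; ``stereo`` is a StereoDepth node created as in the other examples):

.. code-block:: python

    stereo = pipeline.create(dai.node.StereoDepth)
    # Start the fixed 95-pixel disparity search at 30 px instead of 0 px,
    # lowering MinZ at the cost of a (much) lower MaxZ
    stereo.initialConfig.setDisparityShift(30)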
+Max stereo depth distance +========================= + +The maximum depth perception distance depends on the :ref:`accuracy of the depth perception <Depth perception accuracy>`. The formula used to calculate this distance is an approximation, but is as follows: + +.. code-block:: python + + Dm = (baseline/2) * tan((90 - HFOV / HPixels)*pi/180) + +So using this formula for existing models the *theoretical* max distance is: + +.. code-block:: python + + # For OAK-D (7.5cm baseline) + Dm = (7.5/2) * tan((90 - 71.9/1280)*pi/180) = 3825.03cm = 38.25 meters + + # For OAK-D-CM4 (9cm baseline) + Dm = (9/2) * tan((90 - 71.9/1280)*pi/180) = 4590.04cm = 45.9 meters + +If greater precision for long-range measurements is required, consider enabling Subpixel Disparity or using a larger baseline distance between the mono cameras. For a custom baseline, you could consider using an `OAK-FFC `__ device or designing your own baseboard PCB with the required baseline. For more information see Subpixel Disparity under the Stereo Mode tab in :ref:`this table `.
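The same arithmetic in Python (a sketch; the HFOV and width values are the OAK-D numbers from above):

.. code-block:: python

    from math import tan, pi

    def max_depth_cm(baseline_cm, hfov_deg, h_pixels):
        # Approximate distance at which disparity drops below 1 pixel
        return (baseline_cm / 2) * tan((90 - hfov_deg / h_pixels) * pi / 180)

    print(max_depth_cm(7.5, 71.9, 1280))  # ~3825 cm (38.25 m) for OAK-D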
+Depth perception accuracy +========================= + +Disparity depth works by matching features from one image to the other; its accuracy depends on multiple parameters: + +* Texture of objects / backgrounds + +Backgrounds may interfere with object detection, since backgrounds are objects too, which will make depth perception less accurate. So disparity depth works very well outdoors, as perfectly clean/blank surfaces are rare there - but these are relatively commonplace indoors (in clean buildings at least). + +* Lighting + +If the illumination is low, the disparity map will be of low confidence, which will result in a noisy depth map. + +* Baseline / distance to objects + +A lower baseline enables us to detect depth at a closer distance, as long as the object is visible in both frames. However, this reduces the accuracy at large distances, due to fewer pixels representing the object and disparity decreasing towards 0 much faster. +So the common norm is to adjust the baseline according to how far/close we want to be able to detect objects. + +Limitation +========== + +Since depth is calculated from disparity, which requires the pixels to overlap, there is inherently a vertical +band on the left side of the left mono camera and on the right side of the right mono camera, where depth +cannot be calculated, since it is seen by only 1 camera. That band is marked with :code:`B` +on the following picture. + +.. image:: https://user-images.githubusercontent.com/59799831/135310921-67726c28-07e7-4ffa-bc8d-74861049517e.png + +Meaning of variables on the picture: + +- ``BL [cm]`` - Baseline of stereo cameras. +- ``Dv [cm]`` - Minimum distance where both cameras see an object (thus where depth can be calculated). +- ``B [pixels]`` - Width of the band where depth cannot be calculated. +- ``W [pixels]`` - Width of the mono camera in pixels, i.e. the number of horizontal pixels, also noted as :code:`HPixels` in other formulas. +- ``D [cm]`` - Distance from the **camera plane** to an object (see image :ref:`here `). +- ``F [cm]`` - Width of the image at the distance ``D``. + +.. image:: https://user-images.githubusercontent.com/59799831/135310972-c37ba40b-20ad-4967-92a7-c71078bcef99.png + +With the use of the :code:`tan` function, the following formulas can be obtained: + +- :code:`F = 2 * D * tan(HFOV/2)` +- :code:`Dv = (BL/2) * tan(90 - HFOV/2)` + +In order to obtain :code:`B`, we can use the :code:`tan` function again (same as for :code:`F`), but this time +we must also multiply it by the ratio between :code:`W` and :code:`F` in order to convert the units to pixels. +That gives the following formula: + +.. code-block:: python + + B = 2 * Dv * tan(HFOV/2) * W / F + B = 2 * Dv * tan(HFOV/2) * W / (2 * D * tan(HFOV/2)) + B = W * Dv / D # pixels + +Example: We are using an OAK-D, which has an :code:`HFOV` of 72° and a baseline (:code:`BL`) of 7.5 cm, at +:code:`640x400 (400P)` resolution, therefore :code:`W = 640`, with an object :code:`D = 100` cm away. We can +calculate :code:`B` in the following way: + +.. code-block:: + + Dv = 7.5 / 2 * tan(90 - 72/2) = 3.75 * tan(54°) = 5.16 cm + B = 640 * 5.16 / 100 = 33 # pixels
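The same calculation as a runnable sketch:

.. code-block:: python

    from math import tan, radians

    BL, HFOV, W, D = 7.5, 72, 640, 100   # cm, degrees, pixels, cm
    Dv = (BL / 2) * tan(radians(90 - HFOV / 2))
    B = W * Dv / D
    print(round(Dv, 2), round(B))        # ~5.16 cm, ~33 pixels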
Credit for the calculations and images goes to our community member gregflurry, who made them in +`this `__ +forum post. + +.. note:: + + OAK-D-PRO will include both an IR dot projector and an IR LED, which will enable operation in no light. + The IR LED is used to illuminate the whole area (for mono/color frames), while the IR dot projector is mostly + for accurate disparity matching - to have good quality depth maps on blank surfaces as well. For outdoors, + the IR laser dot projector is only relevant at night. For more information see the development progress + `here `__. + Measuring real-world object dimensions ====================================== diff --git a/docs/source/samples/host_side/device_information.rst b/docs/source/samples/host_side/device_information.rst index 265c5c7f7..d19302bef 100644 --- a/docs/source/samples/host_side/device_information.rst +++ b/docs/source/samples/host_side/device_information.rst @@ -24,7 +24,7 @@ Demo Found device '192.168.33.192', MxId: '1844301011F4C51200', State: 'BOOTLOADER' Booting the first available camera (1.3)... - Available camera sensors: {<CameraBoardSocket.LEFT: 1>: 'OV9282', <CameraBoardSocket.RGB: 0>: 'IMX378', <CameraBoardSocket.RIGHT: 2>: 'OV9282'} + Available camera sensors: {<CameraBoardSocket.CAM_B: 1>: 'OV9282', <CameraBoardSocket.CAM_A: 0>: 'IMX378', <CameraBoardSocket.CAM_C: 2>: 'OV9282'} Product name: OAK-D Pro AF, board name DM9098 diff --git a/examples/AprilTag/apriltag.py b/examples/AprilTag/apriltag.py index 2141ca132..bb41d5e70 100755 --- a/examples/AprilTag/apriltag.py +++ b/examples/AprilTag/apriltag.py @@ -19,7 +19,7 @@ # Properties monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") aprilTag.initialConfig.setFamily(dai.AprilTagConfig.Family.TAG_36H11) diff --git a/examples/AprilTag/apriltag_rgb.py b/examples/AprilTag/apriltag_rgb.py index 5d2858749..fa4f5ad31 100755 --- a/examples/AprilTag/apriltag_rgb.py +++ b/examples/AprilTag/apriltag_rgb.py @@ -20,7 +20,7 @@ # Properties camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P) -camRgb.setBoardSocket(dai.CameraBoardSocket.RGB) +camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_A) manip.initialConfig.setResize(480, 270) manip.initialConfig.setFrameType(dai.ImgFrame.Type.GRAY8) diff --git a/examples/ColorCamera/rgb_isp_scale.py b/examples/ColorCamera/rgb_isp_scale.py index 78f5370a8..fd8d725ea 100755 --- a/examples/ColorCamera/rgb_isp_scale.py +++ b/examples/ColorCamera/rgb_isp_scale.py @@ -13,7 +13,7 @@ xoutVideo.setStreamName("video") # Properties -camRgb.setBoardSocket(dai.CameraBoardSocket.RGB) +camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_A) camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_4_K) camRgb.setIspScale(1, 2) camRgb.setVideoSize(1920, 1080) diff --git a/examples/ColorCamera/rgb_undistort.py b/examples/ColorCamera/rgb_undistort.py index 5111bbca6..97e71b184 100755 --- a/examples/ColorCamera/rgb_undistort.py +++ b/examples/ColorCamera/rgb_undistort.py @@ -3,7 +3,7 @@ import numpy as np camRes = dai.ColorCameraProperties.SensorResolution.THE_1080_P -camSocket = dai.CameraBoardSocket.RGB +camSocket = dai.CameraBoardSocket.CAM_A ispScale = (1,2) def getMesh(calibData, ispSize): diff --git a/examples/ColorCamera/rgb_uvc.py b/examples/ColorCamera/rgb_uvc.py new file mode 100755 index 000000000..78a27129b --- /dev/null +++ b/examples/ColorCamera/rgb_uvc.py @@ -0,0 +1,98 @@ +#!/usr/bin/env python3 + +import time +import argparse + +parser = argparse.ArgumentParser() +parser.add_argument('-fb', '--flash-bootloader', default=False, action="store_true") +parser.add_argument('-f', '--flash-app', default=False, action="store_true") +parser.add_argument('-l', '--load-and-exit', default=False, action="store_true") +args = parser.parse_args() + +if args.load_and_exit: + import os + # Disabling device watchdog, so it doesn't need the host to ping periodically. + # Note: this is done before importing `depthai` + os.environ["DEPTHAI_WATCHDOG"] = "0" + +import depthai as dai + +def getPipeline(): + enable_4k = True # Will downscale 4K -> 1080p + + pipeline = dai.Pipeline() + + # Define a source - color camera + cam_rgb = pipeline.createColorCamera() + cam_rgb.setBoardSocket(dai.CameraBoardSocket.CAM_A) + cam_rgb.setInterleaved(False) + #cam_rgb.initialControl.setManualFocus(130) + + if enable_4k: + cam_rgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_4_K) + cam_rgb.setIspScale(1, 2) + else: + cam_rgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P) + + # Create a UVC (USB Video Class) output node + uvc = pipeline.createUVC() + cam_rgb.video.link(uvc.input) + + # Note: if the pipeline is sent later to the device (using startPipeline()), + # it is important to pass the device config separately when creating the device + config = dai.Device.Config() + # config.board.uvc = dai.BoardConfig.UVC() # enable default 1920x1080 NV12 + config.board.uvc = dai.BoardConfig.UVC(1920, 1080) + config.board.uvc.frameType = dai.ImgFrame.Type.NV12 + # config.board.uvc.cameraName = "My Custom Cam" + pipeline.setBoardConfig(config.board) + + return pipeline + +# Will flash the bootloader if no pipeline is provided as an argument +def flash(pipeline=None): + (f, bl) = dai.DeviceBootloader.getFirstAvailableDevice() + bootloader = dai.DeviceBootloader(bl, True) + + # Create a progress callback lambda + progress = lambda p : print(f'Flashing progress: {p*100:.1f}%') + + startTime = time.monotonic() + if pipeline is None: + print("Flashing bootloader...") + bootloader.flashBootloader(progress) + else: + print("Flashing application pipeline...") + bootloader.flash(progress, pipeline) + + elapsedTime = round(time.monotonic() - startTime, 2) + print("Done in", elapsedTime, "seconds") + +if args.flash_bootloader or args.flash_app: + if args.flash_bootloader: flash() + if args.flash_app: flash(getPipeline()) + print("Flashing successful. Please power-cycle the device") + quit() + +if args.load_and_exit: + device = dai.Device(getPipeline()) + print("\nDevice started. Attempting to force-terminate this process...") + print("Open a UVC viewer to check the camera stream.") + print("To reconnect with depthai, a device power-cycle may be required in some cases") + # We do not want the device to be closed, so terminate the process uncleanly.
+ # (TODO add depthai API to be able to cleanly exit without closing device) + import signal + os.kill(os.getpid(), signal.SIGTERM) + +# Standard UVC load with depthai +with dai.Device(getPipeline()) as device: + print("\nDevice started, please keep this process running") + print("and open a UVC viewer to check the camera stream.") + print("\nTo close: Ctrl+C") + + # Doing nothing here, just keeping the host feeding the watchdog + while True: + try: + time.sleep(0.1) + except KeyboardInterrupt: + break diff --git a/examples/ColorCamera/rgb_video.py b/examples/ColorCamera/rgb_video.py index c808e333b..66a32d2ae 100755 --- a/examples/ColorCamera/rgb_video.py +++ b/examples/ColorCamera/rgb_video.py @@ -13,7 +13,7 @@ xoutVideo.setStreamName("video") # Properties -camRgb.setBoardSocket(dai.CameraBoardSocket.RGB) +camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_A) camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P) camRgb.setVideoSize(1920, 1080) diff --git a/examples/EdgeDetector/edge_detector.py b/examples/EdgeDetector/edge_detector.py index db699e404..b23dfd1a3 100755 --- a/examples/EdgeDetector/edge_detector.py +++ b/examples/EdgeDetector/edge_detector.py @@ -32,13 +32,13 @@ xinEdgeCfg.setStreamName(edgeCfgStr) # Properties -camRgb.setBoardSocket(dai.CameraBoardSocket.RGB) +camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_A) camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P) monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") edgeDetectorRgb.setMaxOutputFrameSize(camRgb.getVideoWidth() * camRgb.getVideoHeight()) diff --git a/examples/FeatureTracker/feature_detector.py b/examples/FeatureTracker/feature_detector.py index c9a5642cc..ebad14411 100755 --- a/examples/FeatureTracker/feature_detector.py +++ b/examples/FeatureTracker/feature_detector.py @@ -27,9 +27,9 @@ # Properties monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") # Disable optical flow featureTrackerLeft.initialConfig.setMotionEstimator(False) diff --git a/examples/FeatureTracker/feature_tracker.py b/examples/FeatureTracker/feature_tracker.py index 61ddf4ce7..4be337e91 100755 --- a/examples/FeatureTracker/feature_tracker.py +++ b/examples/FeatureTracker/feature_tracker.py @@ -94,9 +94,9 @@ def __init__(self, trackbarName, windowName): # Properties monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") # Linking monoLeft.out.link(featureTrackerLeft.inputImage) diff --git a/examples/ImageManip/image_manip_rotate.py b/examples/ImageManip/image_manip_rotate.py index f986919e4..37b09733d 100755 --- a/examples/ImageManip/image_manip_rotate.py +++ b/examples/ImageManip/image_manip_rotate.py @@ -27,7 +27,7 @@ # Rotate mono frames monoLeft = pipeline.create(dai.node.MonoCamera)
monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") manipLeft = pipeline.create(dai.node.ImageManip) rr = dai.RotatedRect() diff --git a/examples/MobileNet/mono_mobilenet.py b/examples/MobileNet/mono_mobilenet.py index 635dfc6af..49ccae728 100755 --- a/examples/MobileNet/mono_mobilenet.py +++ b/examples/MobileNet/mono_mobilenet.py @@ -33,7 +33,7 @@ nnOut.setStreamName("nn") # Properties -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P) # Convert the grayscale frame into the nn-acceptable form diff --git a/examples/MonoCamera/mono_camera_control.py b/examples/MonoCamera/mono_camera_control.py index 059ae8c66..b70c59441 100755 --- a/examples/MonoCamera/mono_camera_control.py +++ b/examples/MonoCamera/mono_camera_control.py @@ -48,8 +48,8 @@ def clamp(num, v0, v1): bottomRight = dai.Point2f(0.8, 0.8) # Properties -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoRight.setCamera("right") +monoLeft.setCamera("left") monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P) monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P) manipRight.initialConfig.setCropRect(topLeft.x, topLeft.y, bottomRight.x, bottomRight.y) diff --git a/examples/MonoCamera/mono_full_resolution_saver.py b/examples/MonoCamera/mono_full_resolution_saver.py index 42a489361..06e263418 100755 --- a/examples/MonoCamera/mono_full_resolution_saver.py +++ b/examples/MonoCamera/mono_full_resolution_saver.py @@ -15,7 +15,7 @@ xoutRight.setStreamName("right") # Properties -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P) # Linking diff --git a/examples/MonoCamera/mono_preview.py b/examples/MonoCamera/mono_preview.py index 204164609..2a7936d5b 100755 --- a/examples/MonoCamera/mono_preview.py +++ b/examples/MonoCamera/mono_preview.py @@ -16,9 +16,9 @@ xoutRight.setStreamName('right') # Properties -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P) # Linking diff --git a/examples/MonoCamera/mono_preview_alternate_pro.py b/examples/MonoCamera/mono_preview_alternate_pro.py index 826d5d927..6f5997150 100755 --- a/examples/MonoCamera/mono_preview_alternate_pro.py +++ b/examples/MonoCamera/mono_preview_alternate_pro.py @@ -19,11 +19,11 @@ monoL = pipeline.create(dai.node.MonoCamera) monoR = pipeline.create(dai.node.MonoCamera) -monoL.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoL.setCamera("left") monoL.setResolution(res) monoL.setFps(fps) monoL.setNumFramesPool(poolSize) -monoR.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoR.setCamera("right") monoR.setResolution(res) monoR.setFps(fps) monoR.setNumFramesPool(poolSize) diff --git a/examples/NeuralNetwork/concat_multi_input.py b/examples/NeuralNetwork/concat_multi_input.py index 93e9d03d0..08b65a5f7 100755 --- a/examples/NeuralNetwork/concat_multi_input.py +++ b/examples/NeuralNetwork/concat_multi_input.py @@ -42,8 +42,8 @@ def create_mono(p, socket): nn.setNumInferenceThreads(2) 
camRgb.preview.link(nn.inputs['img2']) -create_mono(p, dai.CameraBoardSocket.LEFT).link(nn.inputs['img1']) -create_mono(p, dai.CameraBoardSocket.RIGHT).link(nn.inputs['img3']) +create_mono(p, dai.CameraBoardSocket.CAM_B).link(nn.inputs['img1']) +create_mono(p, dai.CameraBoardSocket.CAM_C).link(nn.inputs['img3']) # Send bouding box from the NN to the host via XLink nn_xout = p.createXLinkOut() diff --git a/examples/ObjectTracker/spatial_object_tracker.py b/examples/ObjectTracker/spatial_object_tracker.py index a181563cc..09053b09b 100755 --- a/examples/ObjectTracker/spatial_object_tracker.py +++ b/examples/ObjectTracker/spatial_object_tracker.py @@ -43,14 +43,14 @@ camRgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR) monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") # setting node configs stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY) # Align depth map to the perspective of RGB camera, on which inference is done -stereo.setDepthAlign(dai.CameraBoardSocket.RGB) +stereo.setDepthAlign(dai.CameraBoardSocket.CAM_A) stereo.setOutputSize(monoLeft.getResolutionWidth(), monoLeft.getResolutionHeight()) spatialDetectionNetwork.setBlobPath(args.nnPath) diff --git a/examples/Script/script_change_pipeline_flow.py b/examples/Script/script_change_pipeline_flow.py index 3774faf50..b528b99d7 100755 --- a/examples/Script/script_change_pipeline_flow.py +++ b/examples/Script/script_change_pipeline_flow.py @@ -10,7 +10,7 @@ pipeline = dai.Pipeline() cam = pipeline.createColorCamera() -cam.setBoardSocket(dai.CameraBoardSocket.RGB) +cam.setBoardSocket(dai.CameraBoardSocket.CAM_A) cam.setInterleaved(False) cam.setIspScale(2,3) cam.setVideoSize(720,720) diff --git a/examples/SpatialDetection/spatial_calculator_multi_roi.py b/examples/SpatialDetection/spatial_calculator_multi_roi.py index 4752f5bfd..24d29020f 100755 --- a/examples/SpatialDetection/spatial_calculator_multi_roi.py +++ b/examples/SpatialDetection/spatial_calculator_multi_roi.py @@ -24,9 +24,9 @@ # Properties monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY) stereo.setLeftRightCheck(True) diff --git a/examples/SpatialDetection/spatial_location_calculator.py b/examples/SpatialDetection/spatial_location_calculator.py index 4f2779dfd..2e35e409f 100755 --- a/examples/SpatialDetection/spatial_location_calculator.py +++ b/examples/SpatialDetection/spatial_location_calculator.py @@ -26,9 +26,9 @@ # Properties monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY) stereo.setLeftRightCheck(True) diff --git a/examples/SpatialDetection/spatial_mobilenet.py 
b/examples/SpatialDetection/spatial_mobilenet.py index b67757b44..42643b207 100755 --- a/examples/SpatialDetection/spatial_mobilenet.py +++ b/examples/SpatialDetection/spatial_mobilenet.py @@ -52,14 +52,14 @@ camRgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR) monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") # Setting node configs stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY) # Align depth map to the perspective of RGB camera, on which inference is done -stereo.setDepthAlign(dai.CameraBoardSocket.RGB) +stereo.setDepthAlign(dai.CameraBoardSocket.CAM_A) stereo.setSubpixel(True) stereo.setOutputSize(monoLeft.getResolutionWidth(), monoLeft.getResolutionHeight()) diff --git a/examples/SpatialDetection/spatial_mobilenet_mono.py b/examples/SpatialDetection/spatial_mobilenet_mono.py index f6c265071..8c7634a72 100755 --- a/examples/SpatialDetection/spatial_mobilenet_mono.py +++ b/examples/SpatialDetection/spatial_mobilenet_mono.py @@ -53,9 +53,9 @@ imageManip.initialConfig.setFrameType(dai.ImgFrame.Type.BGR888p) monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") # StereoDepth stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY) diff --git a/examples/SpatialDetection/spatial_tiny_yolo.py b/examples/SpatialDetection/spatial_tiny_yolo.py index 0158f2b9e..6350ca9f1 100755 --- a/examples/SpatialDetection/spatial_tiny_yolo.py +++ b/examples/SpatialDetection/spatial_tiny_yolo.py @@ -75,14 +75,14 @@ camRgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR) monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") # setting node configs stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY) # Align depth map to the perspective of RGB camera, on which inference is done -stereo.setDepthAlign(dai.CameraBoardSocket.RGB) +stereo.setDepthAlign(dai.CameraBoardSocket.CAM_A) stereo.setOutputSize(monoLeft.getResolutionWidth(), monoLeft.getResolutionHeight()) stereo.setSubpixel(True) diff --git a/examples/StereoDepth/depth_colormap.py b/examples/StereoDepth/depth_colormap.py index 05d86f85c..26de44431 100755 --- a/examples/StereoDepth/depth_colormap.py +++ b/examples/StereoDepth/depth_colormap.py @@ -24,9 +24,9 @@ # Properties monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") # Create a node that will produce the depth map (using disparity output as it's easier to visualize depth this way) depth.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY) diff --git 
a/examples/StereoDepth/depth_crop_control.py b/examples/StereoDepth/depth_crop_control.py index a9a2c7a69..70dcb5a11 100755 --- a/examples/StereoDepth/depth_crop_control.py +++ b/examples/StereoDepth/depth_crop_control.py @@ -32,8 +32,8 @@ bottomRight = dai.Point2f(0.8, 0.8) # Properties -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoRight.setCamera("right") +monoLeft.setCamera("left") monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) diff --git a/examples/StereoDepth/depth_post_processing.py b/examples/StereoDepth/depth_post_processing.py index b1844b1ca..732e91232 100755 --- a/examples/StereoDepth/depth_post_processing.py +++ b/examples/StereoDepth/depth_post_processing.py @@ -24,9 +24,9 @@ # Properties monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") # Create a node that will produce the depth map (using disparity output as it's easier to visualize depth this way) depth.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY) diff --git a/examples/StereoDepth/depth_preview.py b/examples/StereoDepth/depth_preview.py index 017d3a403..b916ecb55 100755 --- a/examples/StereoDepth/depth_preview.py +++ b/examples/StereoDepth/depth_preview.py @@ -24,9 +24,9 @@ # Properties monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") # Create a node that will produce the depth map (using disparity output as it's easier to visualize depth this way) depth.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY) diff --git a/examples/StereoDepth/depth_preview_lr.py b/examples/StereoDepth/depth_preview_lr.py index 5cc37c063..8bf6fff6d 100755 --- a/examples/StereoDepth/depth_preview_lr.py +++ b/examples/StereoDepth/depth_preview_lr.py @@ -51,7 +51,7 @@ # Properties left.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1200_P) -left.setBoardSocket(dai.CameraBoardSocket.LEFT) +left.setCamera("left") left.setIspScale(2, 3) center.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1200_P) @@ -59,7 +59,7 @@ center.setIspScale(2, 3) right.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1200_P) -right.setBoardSocket(dai.CameraBoardSocket.RIGHT) +right.setCamera("right") right.setIspScale(2, 3) LC_depth.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY) diff --git a/examples/StereoDepth/rgb_depth_aligned.py b/examples/StereoDepth/rgb_depth_aligned.py index c76dd7b79..3e72fc023 100755 --- a/examples/StereoDepth/rgb_depth_aligned.py +++ b/examples/StereoDepth/rgb_depth_aligned.py @@ -48,7 +48,7 @@ def updateBlendWeights(percent_rgb): queueNames.append("disp") #Properties -camRgb.setBoardSocket(dai.CameraBoardSocket.RGB) +camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_A) camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P) camRgb.setFps(fps) if downscaleColor: camRgb.setIspScale(2, 3) @@ -56,22 +56,22 @@ def 
updateBlendWeights(percent_rgb): # This value was used during calibration try: calibData = device.readCalibration2() - lensPosition = calibData.getLensPosition(dai.CameraBoardSocket.RGB) + lensPosition = calibData.getLensPosition(dai.CameraBoardSocket.CAM_A) if lensPosition: camRgb.initialControl.setManualFocus(lensPosition) except: raise left.setResolution(monoResolution) -left.setBoardSocket(dai.CameraBoardSocket.LEFT) +left.setCamera("left") left.setFps(fps) right.setResolution(monoResolution) -right.setBoardSocket(dai.CameraBoardSocket.RIGHT) +right.setCamera("right") right.setFps(fps) stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY) # LR-check is required for depth alignment stereo.setLeftRightCheck(True) -stereo.setDepthAlign(dai.CameraBoardSocket.RGB) +stereo.setDepthAlign(dai.CameraBoardSocket.CAM_A) # Linking camRgb.isp.link(rgbOut.input) diff --git a/examples/StereoDepth/rgb_depth_confidence_aligned.py b/examples/StereoDepth/rgb_depth_confidence_aligned.py index e1487c9ac..226707a81 100755 --- a/examples/StereoDepth/rgb_depth_confidence_aligned.py +++ b/examples/StereoDepth/rgb_depth_confidence_aligned.py @@ -68,7 +68,7 @@ def updateConfBlendWeights(percent): queueNames.append("disp") #Properties -camRgb.setBoardSocket(dai.CameraBoardSocket.RGB) +camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_A) camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P) camRgb.setFps(fps) if downscaleColor: camRgb.setIspScale(2, 3) @@ -76,24 +76,24 @@ def updateConfBlendWeights(percent): # This value was used during calibration try: calibData = device.readCalibration2() - lensPosition = calibData.getLensPosition(dai.CameraBoardSocket.RGB) + lensPosition = calibData.getLensPosition(dai.CameraBoardSocket.CAM_A) if lensPosition: camRgb.initialControl.setManualFocus(lensPosition) except: raise left.setResolution(monoResolution) -left.setBoardSocket(dai.CameraBoardSocket.LEFT) +left.setCamera("left") left.setFps(fps) right.setResolution(monoResolution) -right.setBoardSocket(dai.CameraBoardSocket.RIGHT) +right.setCamera("right") right.setFps(fps) stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY) # LR-check is required for depth alignment stereo.setLeftRightCheck(True) if 0: stereo.setSubpixel(True) # TODO enable for test -stereo.setDepthAlign(dai.CameraBoardSocket.RGB) +stereo.setDepthAlign(dai.CameraBoardSocket.CAM_A) xoutConfMap = pipeline.create(dai.node.XLinkOut) xoutConfMap.setStreamName('confidence_map') diff --git a/examples/StereoDepth/stereo_depth_from_host.py b/examples/StereoDepth/stereo_depth_from_host.py index d6082df2e..bf5328785 100755 --- a/examples/StereoDepth/stereo_depth_from_host.py +++ b/examples/StereoDepth/stereo_depth_from_host.py @@ -281,7 +281,7 @@ def trackbarDisparityShift(value): for tr in StereoConfigHandler.trDisparityShift: tr.set(value) - def trackbarCenterAlignmentShift(value): + def trackbarCenterAlignmentShift(value): if StereoConfigHandler.config.algorithmControl.depthAlign != dai.StereoDepthConfig.AlgorithmControl.DepthAlign.CENTER: print("Center alignment shift factor requires CENTER alignment enabled!") return @@ -641,7 +641,7 @@ def convertToCv2Frame(name, image, config): stereoDepthConfigInQueue = device.getInputQueue("stereoDepthConfig") inStreams = ['in_left', 'in_right'] - inStreamsCameraID = [dai.CameraBoardSocket.LEFT, dai.CameraBoardSocket.RIGHT] + inStreamsCameraID = [dai.CameraBoardSocket.CAM_B, dai.CameraBoardSocket.CAM_C] in_q_list = [] for s in inStreams: q = 
device.getInputQueue(s) diff --git a/examples/StereoDepth/stereo_depth_video.py b/examples/StereoDepth/stereo_depth_video.py index 3289c0202..2c59d65dd 100755 --- a/examples/StereoDepth/stereo_depth_video.py +++ b/examples/StereoDepth/stereo_depth_video.py @@ -126,11 +126,11 @@ def getMesh(calibData): - M1 = np.array(calibData.getCameraIntrinsics(dai.CameraBoardSocket.LEFT, resolution[0], resolution[1])) - d1 = np.array(calibData.getDistortionCoefficients(dai.CameraBoardSocket.LEFT)) + M1 = np.array(calibData.getCameraIntrinsics(dai.CameraBoardSocket.CAM_B, resolution[0], resolution[1])) + d1 = np.array(calibData.getDistortionCoefficients(dai.CameraBoardSocket.CAM_B)) R1 = np.array(calibData.getStereoLeftRectificationRotation()) - M2 = np.array(calibData.getCameraIntrinsics(dai.CameraBoardSocket.RIGHT, resolution[0], resolution[1])) - d2 = np.array(calibData.getDistortionCoefficients(dai.CameraBoardSocket.RIGHT)) + M2 = np.array(calibData.getCameraIntrinsics(dai.CameraBoardSocket.CAM_C, resolution[0], resolution[1])) + d2 = np.array(calibData.getDistortionCoefficients(dai.CameraBoardSocket.CAM_C)) R2 = np.array(calibData.getStereoRightRectificationRotation()) mapXL, mapYL = cv2.initUndistortRectifyMap(M1, d1, R1, M2, resolution, cv2.CV_32FC1) mapXR, mapYR = cv2.initUndistortRectifyMap(M2, d2, R2, M2, resolution, cv2.CV_32FC1) @@ -209,11 +209,11 @@ def getDisparityFrame(frame, cvColorMap): xoutRectifRight = pipeline.create(dai.node.XLinkOut) if args.swap_left_right: - camLeft.setBoardSocket(dai.CameraBoardSocket.RIGHT) - camRight.setBoardSocket(dai.CameraBoardSocket.LEFT) + camLeft.setCamera("right") + camRight.setCamera("left") else: - camLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) - camRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) + camLeft.setCamera("left") + camRight.setCamera("right") for monoCam in (camLeft, camRight): # Common config monoCam.setResolution(resolution['res']) diff --git a/examples/VideoEncoder/disparity_colormap_encoding.py b/examples/VideoEncoder/disparity_colormap_encoding.py index 09d602b10..c0d2c8ce1 100755 --- a/examples/VideoEncoder/disparity_colormap_encoding.py +++ b/examples/VideoEncoder/disparity_colormap_encoding.py @@ -8,11 +8,11 @@ # Create left/right mono cameras for Stereo depth monoLeft = pipeline.create(dai.node.MonoCamera) monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") monoRight = pipeline.create(dai.node.MonoCamera) monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") # Create a node that will produce the depth map depth = pipeline.create(dai.node.StereoDepth) diff --git a/examples/VideoEncoder/disparity_encoding.py b/examples/VideoEncoder/disparity_encoding.py index 951e1c1b8..1e9483d4f 100755 --- a/examples/VideoEncoder/disparity_encoding.py +++ b/examples/VideoEncoder/disparity_encoding.py @@ -10,11 +10,11 @@ # Create left/right mono cameras for Stereo depth monoLeft = pipeline.create(dai.node.MonoCamera) monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") monoRight = pipeline.create(dai.node.MonoCamera) monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") # Create a node that will produce the depth map depth = 
pipeline.create(dai.node.StereoDepth) diff --git a/examples/VideoEncoder/encoding_max_limit.py b/examples/VideoEncoder/encoding_max_limit.py index b01f430ed..94c0b555d 100755 --- a/examples/VideoEncoder/encoding_max_limit.py +++ b/examples/VideoEncoder/encoding_max_limit.py @@ -22,10 +22,10 @@ ve3Out.setStreamName('ve3Out') # Properties -camRgb.setBoardSocket(dai.CameraBoardSocket.RGB) +camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_A) camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_4_K) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoLeft.setCamera("left") +monoRight.setCamera("right") # Setting to 26fps will trigger error ve1.setDefaultProfilePreset(25, dai.VideoEncoderProperties.Profile.H264_MAIN) diff --git a/examples/VideoEncoder/rgb_encoding.py b/examples/VideoEncoder/rgb_encoding.py index 45b51f991..3d5eadcdd 100755 --- a/examples/VideoEncoder/rgb_encoding.py +++ b/examples/VideoEncoder/rgb_encoding.py @@ -13,7 +13,7 @@ xout.setStreamName('h265') # Properties -camRgb.setBoardSocket(dai.CameraBoardSocket.RGB) +camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_A) camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_4_K) videoEnc.setDefaultProfilePreset(30, dai.VideoEncoderProperties.Profile.H265_MAIN) diff --git a/examples/VideoEncoder/rgb_full_resolution_saver.py b/examples/VideoEncoder/rgb_full_resolution_saver.py index 381784f46..d2c06640a 100755 --- a/examples/VideoEncoder/rgb_full_resolution_saver.py +++ b/examples/VideoEncoder/rgb_full_resolution_saver.py @@ -9,7 +9,7 @@ pipeline = dai.Pipeline() camRgb = pipeline.create(dai.node.ColorCamera) -camRgb.setBoardSocket(dai.CameraBoardSocket.RGB) +camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_A) camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_4_K) xoutRgb = pipeline.create(dai.node.XLinkOut) @@ -56,7 +56,7 @@ with open(fName, "wb") as f: f.write(qStill.get().getData()) print('Image saved to', fName) - + key = cv2.waitKey(1) if key == ord('q'): break diff --git a/examples/VideoEncoder/rgb_mono_encoding.py b/examples/VideoEncoder/rgb_mono_encoding.py index 09ec1c036..5aeffb92e 100755 --- a/examples/VideoEncoder/rgb_mono_encoding.py +++ b/examples/VideoEncoder/rgb_mono_encoding.py @@ -22,9 +22,9 @@ ve3Out.setStreamName('ve3Out') # Properties -camRgb.setBoardSocket(dai.CameraBoardSocket.RGB) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_A) +monoLeft.setCamera("left") +monoRight.setCamera("right") # Create encoders, one for each camera, consuming the frames and encoding them using H.264 / H.265 encoding ve1.setDefaultProfilePreset(30, dai.VideoEncoderProperties.Profile.H264_MAIN) ve2.setDefaultProfilePreset(30, dai.VideoEncoderProperties.Profile.H265_MAIN) diff --git a/examples/calibration/calibration_load.py b/examples/calibration/calibration_load.py index 9a2995a22..44e8996dc 100755 --- a/examples/calibration/calibration_load.py +++ b/examples/calibration/calibration_load.py @@ -28,10 +28,10 @@ # MonoCamera monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") # monoLeft.setFps(5.0) monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") # monoRight.setFps(5.0) # Linking diff --git 
a/examples/calibration/calibration_reader.py b/examples/calibration/calibration_reader.py index c49b93b59..9f3d82564 100755 --- a/examples/calibration/calibration_reader.py +++ b/examples/calibration/calibration_reader.py @@ -14,66 +14,66 @@ calibData = device.readCalibration() calibData.eepromToJsonFile(calibFile) - M_rgb, width, height = calibData.getDefaultIntrinsics(dai.CameraBoardSocket.RGB) + M_rgb, width, height = calibData.getDefaultIntrinsics(dai.CameraBoardSocket.CAM_A) print("RGB Camera Default intrinsics...") print(M_rgb) print(width) print(height) if "OAK-1" in calibData.getEepromData().boardName or "BW1093OAK" in calibData.getEepromData().boardName: - M_rgb = np.array(calibData.getCameraIntrinsics(dai.CameraBoardSocket.RGB, 1280, 720)) + M_rgb = np.array(calibData.getCameraIntrinsics(dai.CameraBoardSocket.CAM_A, 1280, 720)) print("RGB Camera resized intrinsics...") print(M_rgb) - D_rgb = np.array(calibData.getDistortionCoefficients(dai.CameraBoardSocket.RGB)) + D_rgb = np.array(calibData.getDistortionCoefficients(dai.CameraBoardSocket.CAM_A)) print("RGB Distortion Coefficients...") [print(name + ": " + value) for (name, value) in zip(["k1", "k2", "p1", "p2", "k3", "k4", "k5", "k6", "s1", "s2", "s3", "s4", "τx", "τy"], [str(data) for data in D_rgb])] - print(f'RGB FOV {calibData.getFov(dai.CameraBoardSocket.RGB)}') + print(f'RGB FOV {calibData.getFov(dai.CameraBoardSocket.CAM_A)}') else: - M_rgb, width, height = calibData.getDefaultIntrinsics(dai.CameraBoardSocket.RGB) + M_rgb, width, height = calibData.getDefaultIntrinsics(dai.CameraBoardSocket.CAM_A) print("RGB Camera Default intrinsics...") print(M_rgb) print(width) print(height) - M_rgb = np.array(calibData.getCameraIntrinsics(dai.CameraBoardSocket.RGB, 3840, 2160)) + M_rgb = np.array(calibData.getCameraIntrinsics(dai.CameraBoardSocket.CAM_A, 3840, 2160)) print("RGB Camera resized intrinsics... 3840 x 2160 ") print(M_rgb) - M_rgb = np.array(calibData.getCameraIntrinsics(dai.CameraBoardSocket.RGB, 4056, 3040 )) + M_rgb = np.array(calibData.getCameraIntrinsics(dai.CameraBoardSocket.CAM_A, 4056, 3040 )) print("RGB Camera resized intrinsics... 4056 x 3040 ") print(M_rgb) - M_left, width, height = calibData.getDefaultIntrinsics(dai.CameraBoardSocket.LEFT) + M_left, width, height = calibData.getDefaultIntrinsics(dai.CameraBoardSocket.CAM_B) print("LEFT Camera Default intrinsics...") print(M_left) print(width) print(height) - M_left = np.array(calibData.getCameraIntrinsics(dai.CameraBoardSocket.LEFT, 1280, 720)) + M_left = np.array(calibData.getCameraIntrinsics(dai.CameraBoardSocket.CAM_B, 1280, 720)) print("LEFT Camera resized intrinsics... 1280 x 720") print(M_left) - M_right = np.array(calibData.getCameraIntrinsics(dai.CameraBoardSocket.RIGHT, 1280, 720)) + M_right = np.array(calibData.getCameraIntrinsics(dai.CameraBoardSocket.CAM_C, 1280, 720)) print("RIGHT Camera resized intrinsics... 
1280 x 720") print(M_right) - D_left = np.array(calibData.getDistortionCoefficients(dai.CameraBoardSocket.LEFT)) + D_left = np.array(calibData.getDistortionCoefficients(dai.CameraBoardSocket.CAM_B)) print("LEFT Distortion Coefficients...") [print(name+": "+value) for (name, value) in zip(["k1","k2","p1","p2","k3","k4","k5","k6","s1","s2","s3","s4","τx","τy"],[str(data) for data in D_left])] - D_right = np.array(calibData.getDistortionCoefficients(dai.CameraBoardSocket.RIGHT)) + D_right = np.array(calibData.getDistortionCoefficients(dai.CameraBoardSocket.CAM_C)) print("RIGHT Distortion Coefficients...") [print(name+": "+value) for (name, value) in zip(["k1","k2","p1","p2","k3","k4","k5","k6","s1","s2","s3","s4","τx","τy"],[str(data) for data in D_right])] - print(f"RGB FOV {calibData.getFov(dai.CameraBoardSocket.RGB)}, Mono FOV {calibData.getFov(dai.CameraBoardSocket.LEFT)}") + print(f"RGB FOV {calibData.getFov(dai.CameraBoardSocket.CAM_A)}, Mono FOV {calibData.getFov(dai.CameraBoardSocket.CAM_B)}") R1 = np.array(calibData.getStereoLeftRectificationRotation()) R2 = np.array(calibData.getStereoRightRectificationRotation()) @@ -87,10 +87,10 @@ print("RIGHT Camera stereo rectification matrix...") print(H_right) - lr_extrinsics = np.array(calibData.getCameraExtrinsics(dai.CameraBoardSocket.LEFT, dai.CameraBoardSocket.RIGHT)) + lr_extrinsics = np.array(calibData.getCameraExtrinsics(dai.CameraBoardSocket.CAM_B, dai.CameraBoardSocket.CAM_C)) print("Transformation matrix of where left Camera is W.R.T right Camera's optical center") print(lr_extrinsics) - l_rgb_extrinsics = np.array(calibData.getCameraExtrinsics(dai.CameraBoardSocket.LEFT, dai.CameraBoardSocket.RGB)) + l_rgb_extrinsics = np.array(calibData.getCameraExtrinsics(dai.CameraBoardSocket.CAM_B, dai.CameraBoardSocket.CAM_A)) print("Transformation matrix of where left Camera is W.R.T RGB Camera's optical center") print(l_rgb_extrinsics) diff --git a/examples/host_side/opencv_support.py b/examples/host_side/opencv_support.py index f439c3fe3..ae98a9b02 100755 --- a/examples/host_side/opencv_support.py +++ b/examples/host_side/opencv_support.py @@ -16,7 +16,7 @@ # Properties camRgb.setPreviewSize(300, 300) -camRgb.setBoardSocket(dai.CameraBoardSocket.RGB) +camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_A) camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P) camRgb.setInterleaved(True) camRgb.setColorOrder(dai.ColorCameraProperties.ColorOrder.BGR) diff --git a/examples/host_side/queue_add_callback.py b/examples/host_side/queue_add_callback.py index e9c21d0f3..6feb194d0 100755 --- a/examples/host_side/queue_add_callback.py +++ b/examples/host_side/queue_add_callback.py @@ -17,9 +17,9 @@ # Properties camRgb.setPreviewSize(300, 300) -left.setBoardSocket(dai.CameraBoardSocket.LEFT) +left.setCamera("left") left.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -right.setBoardSocket(dai.CameraBoardSocket.RIGHT) +right.setCamera("right") right.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) # Stream all the camera streams through the same XLink node diff --git a/examples/mixed/frame_sync.py b/examples/mixed/frame_sync.py index 2fff31edc..29226e285 100755 --- a/examples/mixed/frame_sync.py +++ b/examples/mixed/frame_sync.py @@ -15,12 +15,12 @@ left = pipeline.create(dai.node.MonoCamera) left.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -left.setBoardSocket(dai.CameraBoardSocket.LEFT) +left.setCamera("left") left.setFps(FPS) right = pipeline.create(dai.node.MonoCamera) 
right.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -right.setBoardSocket(dai.CameraBoardSocket.RIGHT) +right.setCamera("right") right.setFps(FPS) stereo = pipeline.createStereoDepth() diff --git a/examples/mixed/mono_depth_mobilenetssd.py b/examples/mixed/mono_depth_mobilenetssd.py index 145b1f915..d7bc31127 100755 --- a/examples/mixed/mono_depth_mobilenetssd.py +++ b/examples/mixed/mono_depth_mobilenetssd.py @@ -38,9 +38,9 @@ nnOut.setStreamName("nn") # Properties -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) # Produce the depth map (using disparity output as it's easier to visualize depth this way) diff --git a/examples/mixed/multiple_devices.py b/examples/mixed/multiple_devices.py index 7c3252823..789694d63 100755 --- a/examples/mixed/multiple_devices.py +++ b/examples/mixed/multiple_devices.py @@ -11,7 +11,7 @@ def createPipeline(): camRgb = pipeline.create(dai.node.ColorCamera) camRgb.setPreviewSize(300, 300) - camRgb.setBoardSocket(dai.CameraBoardSocket.RGB) + camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_A) camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P) camRgb.setInterleaved(False) @@ -47,7 +47,7 @@ def createPipeline(): print(" >>> Board name:", eepromData.boardName) if eepromData.productName != "": print(" >>> Product name:", eepromData.productName) - + pipeline = createPipeline() device.startPipeline(pipeline) @@ -55,7 +55,7 @@ def createPipeline(): q_rgb = device.getOutputQueue(name="rgb", maxSize=4, blocking=False) stream_name = "rgb-" + mxId + "-" + eepromData.productName qRgbMap.append((q_rgb, stream_name)) - + while True: for q_rgb, stream_name in qRgbMap: if q_rgb.has(): diff --git a/examples/mixed/report_camera_settings.py b/examples/mixed/report_camera_settings.py index 25b1780fc..78283f09d 100755 --- a/examples/mixed/report_camera_settings.py +++ b/examples/mixed/report_camera_settings.py @@ -16,7 +16,7 @@ camLeft = pipeline.create(dai.node.MonoCamera) camLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -camLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +camLeft.setCamera("left") xoutLeft = pipeline.create(dai.node.XLinkOut) xoutLeft.setStreamName("left") diff --git a/examples/mixed/rgb_encoding_mobilenet.py b/examples/mixed/rgb_encoding_mobilenet.py index fbcb10192..bf00c8bb8 100755 --- a/examples/mixed/rgb_encoding_mobilenet.py +++ b/examples/mixed/rgb_encoding_mobilenet.py @@ -36,7 +36,7 @@ nnOut.setStreamName("nn") # Properties -camRgb.setBoardSocket(dai.CameraBoardSocket.RGB) +camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_A) camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P) camRgb.setPreviewSize(300, 300) camRgb.setInterleaved(False) diff --git a/examples/mixed/rgb_encoding_mono_mobilenet.py b/examples/mixed/rgb_encoding_mono_mobilenet.py index f4822ab0e..850635b21 100755 --- a/examples/mixed/rgb_encoding_mono_mobilenet.py +++ b/examples/mixed/rgb_encoding_mono_mobilenet.py @@ -40,9 +40,9 @@ nnOut.setStreamName("nn") # Properties -camRgb.setBoardSocket(dai.CameraBoardSocket.RGB) +camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_A) camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") 
monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_720_P) videoEncoder.setDefaultProfilePreset(30, dai.VideoEncoderProperties.Profile.H265_MAIN) diff --git a/examples/mixed/rgb_encoding_mono_mobilenet_depth.py b/examples/mixed/rgb_encoding_mono_mobilenet_depth.py index 5b08ce42f..f6a0f627d 100755 --- a/examples/mixed/rgb_encoding_mono_mobilenet_depth.py +++ b/examples/mixed/rgb_encoding_mono_mobilenet_depth.py @@ -44,11 +44,11 @@ nnOut.setStreamName('nn') # Properties -camRgb.setBoardSocket(dai.CameraBoardSocket.RGB) +camRgb.setBoardSocket(dai.CameraBoardSocket.CAM_A) camRgb.setResolution(dai.ColorCameraProperties.SensorResolution.THE_1080_P) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) videoEncoder.setDefaultProfilePreset(30, dai.VideoEncoderProperties.Profile.H265_MAIN) diff --git a/examples/mixed/rotated_spatial_detections.py b/examples/mixed/rotated_spatial_detections.py index b3ad05d80..c22dd057b 100755 --- a/examples/mixed/rotated_spatial_detections.py +++ b/examples/mixed/rotated_spatial_detections.py @@ -50,14 +50,14 @@ camRgb.setImageOrientation(dai.CameraImageOrientation.ROTATE_180_DEG) monoLeft.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoLeft.setBoardSocket(dai.CameraBoardSocket.LEFT) +monoLeft.setCamera("left") monoRight.setResolution(dai.MonoCameraProperties.SensorResolution.THE_400_P) -monoRight.setBoardSocket(dai.CameraBoardSocket.RIGHT) +monoRight.setCamera("right") # Setting node configs stereo.setDefaultProfilePreset(dai.node.StereoDepth.PresetMode.HIGH_DENSITY) # Align depth map to the perspective of RGB camera, on which inference is done -stereo.setDepthAlign(dai.CameraBoardSocket.RGB) +stereo.setDepthAlign(dai.CameraBoardSocket.CAM_A) stereo.setSubpixel(True) stereo.setOutputSize(monoLeft.getResolutionWidth(), monoLeft.getResolutionHeight()) diff --git a/src/CalibrationHandlerBindings.cpp b/src/CalibrationHandlerBindings.cpp index 734692c07..52f7bf3ae 100644 --- a/src/CalibrationHandlerBindings.cpp +++ b/src/CalibrationHandlerBindings.cpp @@ -47,7 +47,7 @@ void CalibrationHandlerBindings::bind(pybind11::module& m, void* pCallstack){ .def("getCameraExtrinsics", &CalibrationHandler::getCameraExtrinsics, py::arg("srcCamera"), py::arg("dstCamera"), py::arg("useSpecTranslation") = false, DOC(dai, CalibrationHandler, getCameraExtrinsics)) .def("getCameraTranslationVector", &CalibrationHandler::getCameraTranslationVector, py::arg("srcCamera"), py::arg("dstCamera"), py::arg("useSpecTranslation") = true, DOC(dai, CalibrationHandler, getCameraTranslationVector)) - .def("getBaselineDistance", &CalibrationHandler::getBaselineDistance, py::arg("cam1") = dai::CameraBoardSocket::RIGHT, py::arg("cam2") = dai::CameraBoardSocket::LEFT, py::arg("useSpecTranslation") = true, DOC(dai, CalibrationHandler, getBaselineDistance)) + .def("getBaselineDistance", &CalibrationHandler::getBaselineDistance, py::arg("cam1") = dai::CameraBoardSocket::CAM_C, py::arg("cam2") = dai::CameraBoardSocket::CAM_B, py::arg("useSpecTranslation") = true, DOC(dai, CalibrationHandler, getBaselineDistance)) .def("getCameraToImuExtrinsics", &CalibrationHandler::getCameraToImuExtrinsics, py::arg("cameraId"), py::arg("useSpecTranslation") = false, DOC(dai, CalibrationHandler, getCameraToImuExtrinsics)) 
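With the getBaselineDistance defaults switched from the deprecated RIGHT/LEFT constants to CAM_C/CAM_B, a bare call still returns the right-to-left stereo baseline (in centimeters), now without triggering the new deprecation warnings:

.. code-block:: python

    import depthai as dai

    with dai.Device() as device:
        calibData = device.readCalibration()
        print(calibData.getBaselineDistance(), "cm")  # defaults: cam1=CAM_C, cam2=CAM_B
        # Equivalent explicit call:
        print(calibData.getBaselineDistance(dai.CameraBoardSocket.CAM_C, dai.CameraBoardSocket.CAM_B), "cm")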
.def("getImuToCameraExtrinsics", &CalibrationHandler::getImuToCameraExtrinsics, py::arg("cameraId"), py::arg("useSpecTranslation") = false, DOC(dai, CalibrationHandler, getImuToCameraExtrinsics)) diff --git a/src/DatatypeBindings.cpp b/src/DatatypeBindings.cpp index deae33696..5ff683f3d 100644 --- a/src/DatatypeBindings.cpp +++ b/src/DatatypeBindings.cpp @@ -21,6 +21,7 @@ void bind_spatiallocationcalculatordata(pybind11::module& m, void* pCallstack); void bind_stereodepthconfig(pybind11::module& m, void* pCallstack); void bind_systeminformation(pybind11::module& m, void* pCallstack); void bind_trackedfeatures(pybind11::module& m, void* pCallstack); +void bind_tofconfig(pybind11::module& m, void* pCallstack); void bind_tracklets(pybind11::module& m, void* pCallstack); void DatatypeBindings::addToCallstack(std::deque& callstack) { @@ -46,6 +47,7 @@ void DatatypeBindings::addToCallstack(std::deque& callstack) { callstack.push_front(bind_stereodepthconfig); callstack.push_front(bind_systeminformation); callstack.push_front(bind_trackedfeatures); + callstack.push_front(bind_tofconfig); callstack.push_front(bind_tracklets); } diff --git a/src/DeviceBindings.cpp b/src/DeviceBindings.cpp index 673bac270..afd3c99eb 100644 --- a/src/DeviceBindings.cpp +++ b/src/DeviceBindings.cpp @@ -219,6 +219,7 @@ static void bindConstructors(ARG& arg){ return std::make_unique(pipeline, dev); }), py::arg("pipeline"), DOC(dai, DeviceBase, DeviceBase)) .def(py::init([](const Pipeline& pipeline, bool usb2Mode){ + PyErr_WarnEx(PyExc_DeprecationWarning, "Use constructor taking 'UsbSpeed' instead", 1); auto dev = deviceSearchHelper(); py::gil_scoped_release release; return std::make_unique(pipeline, dev, usb2Mode); @@ -234,6 +235,7 @@ static void bindConstructors(ARG& arg){ return std::make_unique(pipeline, dev, pathToCmd); }), py::arg("pipeline"), py::arg("pathToCmd"), DOC(dai, DeviceBase, DeviceBase, 4)) .def(py::init([](const Pipeline& pipeline, const DeviceInfo& deviceInfo, bool usb2Mode){ + PyErr_WarnEx(PyExc_DeprecationWarning, "Use constructor taking 'UsbSpeed' instead", 1); py::gil_scoped_release release; return std::make_unique(pipeline, deviceInfo, usb2Mode); }), py::arg("pipeline"), py::arg("devInfo"), py::arg("usb2Mode") = false, DOC(dai, DeviceBase, DeviceBase, 6)) @@ -253,6 +255,7 @@ static void bindConstructors(ARG& arg){ return std::make_unique(version, dev); }), py::arg("version") = OpenVINO::VERSION_UNIVERSAL, DOC(dai, DeviceBase, DeviceBase, 10)) .def(py::init([](OpenVINO::Version version, bool usb2Mode){ + PyErr_WarnEx(PyExc_DeprecationWarning, "Use constructor taking 'UsbSpeed' instead", 1); auto dev = deviceSearchHelper(); py::gil_scoped_release release; return std::make_unique(version, dev, usb2Mode); @@ -268,6 +271,7 @@ static void bindConstructors(ARG& arg){ return std::make_unique(version, dev, pathToCmd); }), py::arg("version"), py::arg("pathToCmd"), DOC(dai, DeviceBase, DeviceBase, 13)) .def(py::init([](OpenVINO::Version version, const DeviceInfo& deviceInfo, bool usb2Mode){ + PyErr_WarnEx(PyExc_DeprecationWarning, "Use constructor taking 'UsbSpeed' instead", 1); py::gil_scoped_release release; return std::make_unique(version, deviceInfo, usb2Mode); }), py::arg("version"), py::arg("deviceInfo"), py::arg("usb2Mode") = false, DOC(dai, DeviceBase, DeviceBase, 15)) @@ -339,6 +343,7 @@ void DeviceBindings::bind(pybind11::module& m, void* pCallstack){ py::enum_ boardConfigGpioPull(boardConfigGpio, "Pull", DOC(dai, BoardConfig, GPIO, Pull)); py::enum_ boardConfigGpioDrive(boardConfigGpio, "Drive", 
DOC(dai, BoardConfig, GPIO, Drive)); py::class_ boardConfigUart(boardConfig, "UART", DOC(dai, BoardConfig, UART)); + py::class_ boardConfigUvc(boardConfig, "UVC", DOC(dai, BoardConfig, UVC)); struct PyClock{}; py::class_ clock(m, "Clock"); @@ -375,6 +380,8 @@ void DeviceBindings::bind(pybind11::module& m, void* pCallstack){ .def_readwrite("flashBootedVid", &BoardConfig::USB::flashBootedVid) .def_readwrite("flashBootedPid", &BoardConfig::USB::flashBootedPid) .def_readwrite("maxSpeed", &BoardConfig::USB::maxSpeed) + .def_readwrite("productName", &BoardConfig::USB::productName) + .def_readwrite("manufacturer", &BoardConfig::USB::manufacturer) ; // Bind BoardConfig::Network @@ -452,6 +459,17 @@ void DeviceBindings::bind(pybind11::module& m, void* pCallstack){ .def_readwrite("tmp", &BoardConfig::UART::tmp) ; + // Bind BoardConfig::UVC + boardConfigUvc + .def(py::init<>()) + .def(py::init()) + .def_readwrite("cameraName", &BoardConfig::UVC::cameraName) + .def_readwrite("width", &BoardConfig::UVC::width) + .def_readwrite("height", &BoardConfig::UVC::height) + .def_readwrite("frameType", &BoardConfig::UVC::frameType) + .def_readwrite("enable", &BoardConfig::UVC::enable) + ; + // Bind BoardConfig boardConfig .def(py::init<>()) @@ -470,6 +488,7 @@ void DeviceBindings::bind(pybind11::module& m, void* pCallstack){ .def_readwrite("logSizeMax", &BoardConfig::logSizeMax, DOC(dai, BoardConfig, logSizeMax)) .def_readwrite("logVerbosity", &BoardConfig::logVerbosity, DOC(dai, BoardConfig, logVerbosity)) .def_readwrite("logDevicePrints", &BoardConfig::logDevicePrints, DOC(dai, BoardConfig, logDevicePrints)) + .def_readwrite("uvc", &BoardConfig::uvc, DOC(dai, BoardConfig, uvc)) ; // Bind Device::Config @@ -478,13 +497,15 @@ void DeviceBindings::bind(pybind11::module& m, void* pCallstack){ .def_readwrite("version", &Device::Config::version) .def_readwrite("board", &Device::Config::board) .def_readwrite("nonExclusiveMode", &Device::Config::nonExclusiveMode) + .def_readwrite("outputLogLevel", &Device::Config::outputLogLevel) + .def_readwrite("logLevel", &Device::Config::logLevel) ; // Bind CrashDump crashDump .def(py::init<>()) .def("serializeToJson", &CrashDump::serializeToJson, DOC(dai, CrashDump, serializeToJson)) - + .def_readwrite("crashReports", &CrashDump::crashReports, DOC(dai, CrashDump, crashReports)) .def_readwrite("depthaiCommitHash", &CrashDump::depthaiCommitHash, DOC(dai, CrashDump, depthaiCommitHash)) .def_readwrite("deviceId", &CrashDump::deviceId, DOC(dai, CrashDump, deviceId)) @@ -497,7 +518,7 @@ void DeviceBindings::bind(pybind11::module& m, void* pCallstack){ .def_readwrite("crashedThreadId", &CrashDump::CrashReport::crashedThreadId, DOC(dai, CrashDump, CrashReport, crashedThreadId)) .def_readwrite("threadCallstack", &CrashDump::CrashReport::threadCallstack, DOC(dai, CrashDump, CrashReport, threadCallstack)) ; - + errorSourceInfo .def(py::init<>()) .def_readwrite("assertContext", &CrashDump::CrashReport::ErrorSourceInfo::assertContext, DOC(dai, CrashDump, CrashReport, ErrorSourceInfo, assertContext)) @@ -562,6 +583,7 @@ void DeviceBindings::bind(pybind11::module& m, void* pCallstack){ .def_static("getEmbeddedDeviceBinary", py::overload_cast(&DeviceBase::getEmbeddedDeviceBinary), py::arg("config"), DOC(dai, DeviceBase, getEmbeddedDeviceBinary, 2)) .def_static("getDeviceByMxId", &DeviceBase::getDeviceByMxId, py::arg("mxId"), DOC(dai, DeviceBase, getDeviceByMxId)) .def_static("getAllConnectedDevices", &DeviceBase::getAllConnectedDevices, DOC(dai, DeviceBase, getAllConnectedDevices)) + 
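The new BoardConfig::UVC binding exposes the UVC parameters to Python. A sketch of how a device config might be assembled with it, assuming the two-argument constructor takes width and height (matching the py::init overload above) and that NV12 is a valid frame type for the sensor:

.. code-block:: python

    import depthai as dai

    config = dai.Device.Config()
    uvc = dai.BoardConfig.UVC(1920, 1080)   # assumed (width, height) constructor
    uvc.frameType = dai.ImgFrame.Type.NV12  # illustrative frame type
    uvc.cameraName = "color"
    config.board.uvc = uvc
    # config can then be passed to a Device/DeviceBootloader when booting or flashing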
.def_static("getGlobalProfilingData", &DeviceBase::getGlobalProfilingData, DOC(dai, DeviceBase, getGlobalProfilingData)) // methods .def("getBootloaderVersion", &DeviceBase::getBootloaderVersion, DOC(dai, DeviceBase, getBootloaderVersion)) @@ -586,7 +608,7 @@ void DeviceBindings::bind(pybind11::module& m, void* pCallstack){ .def("getLogLevel", [](DeviceBase& d) { py::gil_scoped_release release; return d.getLogLevel(); }, DOC(dai, DeviceBase, getLogLevel)) .def("setSystemInformationLoggingRate", [](DeviceBase& d, float hz) { py::gil_scoped_release release; d.setSystemInformationLoggingRate(hz); }, py::arg("rateHz"), DOC(dai, DeviceBase, setSystemInformationLoggingRate)) .def("getSystemInformationLoggingRate", [](DeviceBase& d) { py::gil_scoped_release release; return d.getSystemInformationLoggingRate(); }, DOC(dai, DeviceBase, getSystemInformationLoggingRate)) - .def("getCrashDump", [](DeviceBase& d) { py::gil_scoped_release release; return d.getCrashDump(); }, DOC(dai, DeviceBase, getCrashDump)) + .def("getCrashDump", [](DeviceBase& d, bool clearCrashDump) { py::gil_scoped_release release; return d.getCrashDump(clearCrashDump); }, py::arg("clearCrashDump") = true, DOC(dai, DeviceBase, getCrashDump)) .def("hasCrashDump", [](DeviceBase& d) { py::gil_scoped_release release; return d.hasCrashDump(); }, DOC(dai, DeviceBase, hasCrashDump)) .def("getConnectedCameras", [](DeviceBase& d) { py::gil_scoped_release release; return d.getConnectedCameras(); }, DOC(dai, DeviceBase, getConnectedCameras)) .def("getConnectedCameraFeatures", [](DeviceBase& d) { py::gil_scoped_release release; return d.getConnectedCameraFeatures(); }, DOC(dai, DeviceBase, getConnectedCameraFeatures)) @@ -608,6 +630,7 @@ void DeviceBindings::bind(pybind11::module& m, void* pCallstack){ .def("getUsbSpeed", [](DeviceBase& d) { py::gil_scoped_release release; return d.getUsbSpeed(); }, DOC(dai, DeviceBase, getUsbSpeed)) .def("getDeviceInfo", [](DeviceBase& d) { py::gil_scoped_release release; return d.getDeviceInfo(); }, DOC(dai, DeviceBase, getDeviceInfo)) .def("getMxId", [](DeviceBase& d) { py::gil_scoped_release release; return d.getMxId(); }, DOC(dai, DeviceBase, getMxId)) + .def("getProfilingData", [](DeviceBase& d) { py::gil_scoped_release release; return d.getProfilingData(); }, DOC(dai, DeviceBase, getProfilingData)) .def("readCalibration", [](DeviceBase& d) { py::gil_scoped_release release; return d.readCalibration(); }, DOC(dai, DeviceBase, readCalibration)) .def("flashCalibration", [](DeviceBase& d, CalibrationHandler calibrationDataHandler) { py::gil_scoped_release release; return d.flashCalibration(calibrationDataHandler); }, py::arg("calibrationDataHandler"), DOC(dai, DeviceBase, flashCalibration)) .def("setXLinkChunkSize", [](DeviceBase& d, int s) { py::gil_scoped_release release; d.setXLinkChunkSize(s); }, py::arg("sizeBytes"), DOC(dai, DeviceBase, setXLinkChunkSize)) diff --git a/src/XLinkBindings.cpp b/src/XLinkBindings.cpp index 080a0deb8..bb5475fd3 100644 --- a/src/XLinkBindings.cpp +++ b/src/XLinkBindings.cpp @@ -137,6 +137,7 @@ void XLinkBindings::bind(pybind11::module &m, void *pCallstack) .def_static("getFirstDevice", &XLinkConnection::getFirstDevice, py::arg("state") = X_LINK_ANY_STATE, py::arg("skipInvalidDevice") = true) .def_static("getDeviceByMxId", &XLinkConnection::getDeviceByMxId, py::arg("mxId"), py::arg("state") = X_LINK_ANY_STATE, py::arg("skipInvalidDevice") = true) .def_static("bootBootloader", &XLinkConnection::bootBootloader, py::arg("devInfo")) + .def_static("getGlobalProfilingData", 
&XLinkConnection::getGlobalProfilingData, DOC(dai, XLinkConnection, getGlobalProfilingData)) ; xLinkError diff --git a/src/pipeline/CommonBindings.cpp b/src/pipeline/CommonBindings.cpp index abb40023c..cca7e769d 100644 --- a/src/pipeline/CommonBindings.cpp +++ b/src/pipeline/CommonBindings.cpp @@ -1,5 +1,8 @@ #include "CommonBindings.hpp" +// Libraries +#include "hedley/hedley.h" + // depthai-shared #include "depthai-shared/common/CameraBoardSocket.hpp" #include "depthai-shared/common/EepromData.hpp" @@ -25,6 +28,7 @@ // depthai #include "depthai/common/CameraFeatures.hpp" #include "depthai/common/CameraExposureOffset.hpp" +#include "depthai/utility/ProfilingData.hpp" void CommonBindings::bind(pybind11::module& m, void* pCallstack){ @@ -57,6 +61,7 @@ void CommonBindings::bind(pybind11::module& m, void* pCallstack){ py::enum_ cameraExposureOffset(m, "CameraExposureOffset"); py::enum_ colormap(m, "Colormap", DOC(dai, Colormap)); py::enum_ frameEvent(m, "FrameEvent", DOC(dai, FrameEvent)); + py::class_ profilingData(m, "ProfilingData", DOC(dai, ProfilingData)); /////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////// @@ -129,12 +134,12 @@ void CommonBindings::bind(pybind11::module& m, void* pCallstack){ ; // CameraBoardSocket enum bindings + + // Deprecated + HEDLEY_DIAGNOSTIC_PUSH + HEDLEY_DIAGNOSTIC_DISABLE_DEPRECATED cameraBoardSocket .value("AUTO", CameraBoardSocket::AUTO) - .value("RGB", CameraBoardSocket::RGB) - .value("LEFT", CameraBoardSocket::LEFT) - .value("RIGHT", CameraBoardSocket::RIGHT) - .value("CENTER", CameraBoardSocket::CENTER) .value("CAM_A", CameraBoardSocket::CAM_A) .value("CAM_B", CameraBoardSocket::CAM_B) .value("CAM_C", CameraBoardSocket::CAM_C) @@ -143,7 +148,31 @@ void CommonBindings::bind(pybind11::module& m, void* pCallstack){ .value("CAM_F", CameraBoardSocket::CAM_F) .value("CAM_G", CameraBoardSocket::CAM_G) .value("CAM_H", CameraBoardSocket::CAM_H) + + .value("RGB", CameraBoardSocket::RGB, "**Deprecated:** Use CAM_A or address camera by name instead") + .value("LEFT", CameraBoardSocket::LEFT, "**Deprecated:** Use CAM_B or address camera by name instead") + .value("RIGHT", CameraBoardSocket::RIGHT, "**Deprecated:** Use CAM_C or address camera by name instead") + .value("CENTER", CameraBoardSocket::CENTER, "**Deprecated:** Use CAM_A or address camera by name instead") + + // Deprecated overridden + .def_property_readonly_static("RGB", [](py::object){ + PyErr_WarnEx(PyExc_DeprecationWarning, "RGB is deprecated, use CAM_A or address camera by name instead.", 1); + return CameraBoardSocket::CAM_A; + }) + .def_property_readonly_static("CENTER", [](py::object){ + PyErr_WarnEx(PyExc_DeprecationWarning, "CENTER is deprecated, use CAM_A or address camera by name instead.", 1); + return CameraBoardSocket::CAM_A; + }) + .def_property_readonly_static("LEFT", [](py::object){ + PyErr_WarnEx(PyExc_DeprecationWarning, "LEFT is deprecated, use CAM_B or address camera by name instead.", 1); + return CameraBoardSocket::CAM_B; + }) + .def_property_readonly_static("RIGHT", [](py::object){ + PyErr_WarnEx(PyExc_DeprecationWarning, "RIGHT is deprecated, use CAM_C or address camera by name instead.", 1); + return CameraBoardSocket::CAM_C; + }) ; + HEDLEY_DIAGNOSTIC_POP // CameraSensorType enum bindings cameraSensorType @@ -348,4 +377,9 @@ void CommonBindings::bind(pybind11::module& m, void* pCallstack){ .value("READOUT_END", FrameEvent::READOUT_END) ; + profilingData + .def_readwrite("numBytesWritten",
&ProfilingData::numBytesWritten, DOC(dai, ProfilingData, numBytesWritten)) + .def_readwrite("numBytesRead", &ProfilingData::numBytesRead, DOC(dai, ProfilingData, numBytesRead)) + ; + } diff --git a/src/pipeline/PipelineBindings.cpp b/src/pipeline/PipelineBindings.cpp index 4068d81ae..3b4248710 100644 --- a/src/pipeline/PipelineBindings.cpp +++ b/src/pipeline/PipelineBindings.cpp @@ -28,6 +28,7 @@ #include "depthai/pipeline/node/FeatureTracker.hpp" #include "depthai/pipeline/node/AprilTag.hpp" #include "depthai/pipeline/node/DetectionParser.hpp" +#include "depthai/pipeline/node/UVC.hpp" // depthai-shared #include "depthai-shared/properties/GlobalProperties.hpp" @@ -142,6 +143,7 @@ void PipelineBindings::bind(pybind11::module& m, void* pCallstack){ .def("createFeatureTracker", &Pipeline::create) .def("createAprilTag", &Pipeline::create) .def("createDetectionParser", &Pipeline::create) + .def("createUVC", &Pipeline::create) ; diff --git a/src/pipeline/datatype/CameraControlBindings.cpp b/src/pipeline/datatype/CameraControlBindings.cpp index 9fb47a62d..06ebb4ab4 100644 --- a/src/pipeline/datatype/CameraControlBindings.cpp +++ b/src/pipeline/datatype/CameraControlBindings.cpp @@ -214,11 +214,13 @@ std::vector camCtrlAttr; .def("setChromaDenoise", &CameraControl::setChromaDenoise, py::arg("value"), DOC(dai, CameraControl, setChromaDenoise)) .def("setSceneMode", &CameraControl::setSceneMode, py::arg("mode"), DOC(dai, CameraControl, setSceneMode)) .def("setEffectMode", &CameraControl::setEffectMode, py::arg("mode"), DOC(dai, CameraControl, setEffectMode)) + .def("set", &CameraControl::set, py::arg("config"), DOC(dai, CameraControl, set)) // getters .def("getCaptureStill", &CameraControl::getCaptureStill, DOC(dai, CameraControl, getCaptureStill)) .def("getExposureTime", &CameraControl::getExposureTime, DOC(dai, CameraControl, getExposureTime)) .def("getSensitivity", &CameraControl::getSensitivity, DOC(dai, CameraControl, getSensitivity)) .def("getLensPosition", &CameraControl::getLensPosition, DOC(dai, CameraControl, getLensPosition)) + .def("get", &CameraControl::get, DOC(dai, CameraControl, get)) ; // Add also enum attributes from RawCameraControl for (const auto& a : camCtrlAttr) { diff --git a/src/pipeline/datatype/EdgeDetectorConfigBindings.cpp b/src/pipeline/datatype/EdgeDetectorConfigBindings.cpp index fd5e39a11..eaefb6825 100644 --- a/src/pipeline/datatype/EdgeDetectorConfigBindings.cpp +++ b/src/pipeline/datatype/EdgeDetectorConfigBindings.cpp @@ -50,6 +50,8 @@ void bind_edgedetectorconfig(pybind11::module& m, void* pCallstack){ .def(py::init<>()) .def("setSobelFilterKernels", &EdgeDetectorConfig::setSobelFilterKernels, py::arg("horizontalKernel"), py::arg("verticalKernel"), DOC(dai, EdgeDetectorConfig, setSobelFilterKernels)) .def("getConfigData", &EdgeDetectorConfig::getConfigData, DOC(dai, EdgeDetectorConfig, getConfigData)) + .def("get", &EdgeDetectorConfig::get, DOC(dai, EdgeDetectorConfig, get)) + .def("set", &EdgeDetectorConfig::set, py::arg("config"), DOC(dai, EdgeDetectorConfig, set)) ; } diff --git a/src/pipeline/datatype/ImageManipConfigBindings.cpp b/src/pipeline/datatype/ImageManipConfigBindings.cpp index e94ced202..694912999 100644 --- a/src/pipeline/datatype/ImageManipConfigBindings.cpp +++ b/src/pipeline/datatype/ImageManipConfigBindings.cpp @@ -120,6 +120,7 @@ void bind_imagemanipconfig(pybind11::module& m, void* pCallstack){ .def("setReusePreviousImage", &ImageManipConfig::setReusePreviousImage, py::arg("reuse"), DOC(dai, ImageManipConfig, setReusePreviousImage)) 
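The set/get pair newly bound on CameraControl (and mirrored on EdgeDetectorConfig, ImageManipConfig, SpatialLocationCalculatorConfig and ToFConfig elsewhere in this diff) exposes the underlying raw struct, which makes copying settings between messages straightforward. A small sketch:

.. code-block:: python

    import depthai as dai

    ctrl = dai.CameraControl()
    ctrl.setManualExposure(10000, 400)  # 10 ms exposure, ISO 400

    raw = ctrl.get()   # underlying RawCameraControl with the fields set above
    ctrl2 = dai.CameraControl()
    ctrl2.set(raw)     # rebuild an equivalent message from the raw struct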
.def("setSkipCurrentImage", &ImageManipConfig::setSkipCurrentImage, py::arg("skip"), DOC(dai, ImageManipConfig, setSkipCurrentImage)) .def("setKeepAspectRatio", &ImageManipConfig::setKeepAspectRatio, py::arg("keep"), DOC(dai, ImageManipConfig, setKeepAspectRatio)) + .def("set", &ImageManipConfig::set, py::arg("config"), DOC(dai, ImageManipConfig, set)) // getters .def("getCropXMin", &ImageManipConfig::getCropXMin, DOC(dai, ImageManipConfig, getCropXMin)) @@ -133,6 +134,7 @@ void bind_imagemanipconfig(pybind11::module& m, void* pCallstack){ .def("getFormatConfig", &ImageManipConfig::getFormatConfig, DOC(dai, ImageManipConfig, getFormatConfig)) .def("isResizeThumbnail", &ImageManipConfig::isResizeThumbnail, DOC(dai, ImageManipConfig, isResizeThumbnail)) .def("getColormap", &ImageManipConfig::getColormap, DOC(dai, ImageManipConfig, getColormap)) + .def("get", &ImageManipConfig::get, DOC(dai, ImageManipConfig, get)) ; diff --git a/src/pipeline/datatype/ImgFrameBindings.cpp b/src/pipeline/datatype/ImgFrameBindings.cpp index bc2c8b768..a89ff4230 100644 --- a/src/pipeline/datatype/ImgFrameBindings.cpp +++ b/src/pipeline/datatype/ImgFrameBindings.cpp @@ -202,6 +202,9 @@ void bind_imgframe(pybind11::module& m, void* pCallstack){ break; case ImgFrame::Type::RAW16: + case ImgFrame::Type::RAW14: + case ImgFrame::Type::RAW12: + case ImgFrame::Type::RAW10: shape = {img.getHeight(), img.getWidth()}; dtype = py::dtype::of(); break; @@ -234,7 +237,7 @@ void bind_imgframe(pybind11::module& m, void* pCallstack){ + ", actual " + std::to_string(actualSize) + ". Maybe metadataOnly transfer was made?"); } else if(actualSize > requiredSize) { // FIXME check build on Windows - // spdlog::warn("ImgFrame has excess data: actual {}, expected {}", actualSize, requiredSize); + // logger::warn("ImgFrame has excess data: actual {}, expected {}", actualSize, requiredSize); } if(img.getWidth() <= 0 || img.getHeight() <= 0){ throw std::runtime_error("ImgFrame size invalid (width: " + std::to_string(img.getWidth()) + ", height: " + std::to_string(img.getHeight()) + ")"); @@ -303,6 +306,9 @@ void bind_imgframe(pybind11::module& m, void* pCallstack){ case ImgFrame::Type::RAW8: case ImgFrame::Type::RAW16: + case ImgFrame::Type::RAW14: + case ImgFrame::Type::RAW12: + case ImgFrame::Type::RAW10: case ImgFrame::Type::GRAY8: case ImgFrame::Type::GRAYF16: default: diff --git a/src/pipeline/datatype/SpatialLocationCalculatorConfigBindings.cpp b/src/pipeline/datatype/SpatialLocationCalculatorConfigBindings.cpp index 84b6a4851..7d56a7d5e 100644 --- a/src/pipeline/datatype/SpatialLocationCalculatorConfigBindings.cpp +++ b/src/pipeline/datatype/SpatialLocationCalculatorConfigBindings.cpp @@ -64,6 +64,8 @@ void bind_spatiallocationcalculatorconfig(pybind11::module& m, void* pCallstack) .def("setROIs", &SpatialLocationCalculatorConfig::setROIs, py::arg("ROIs"), DOC(dai, SpatialLocationCalculatorConfig, setROIs)) .def("addROI", &SpatialLocationCalculatorConfig::addROI, py::arg("ROI"), DOC(dai, SpatialLocationCalculatorConfig, addROI)) .def("getConfigData", &SpatialLocationCalculatorConfig::getConfigData, DOC(dai, SpatialLocationCalculatorConfig, getConfigData)) + .def("set", &SpatialLocationCalculatorConfig::set, py::arg("config"), DOC(dai, SpatialLocationCalculatorConfig, set)) + .def("get", &SpatialLocationCalculatorConfig::get, DOC(dai, SpatialLocationCalculatorConfig, get)) ; } diff --git a/src/pipeline/datatype/ToFConfigBindings.cpp b/src/pipeline/datatype/ToFConfigBindings.cpp new file mode 100644 index 000000000..a51fae8ac --- 
/dev/null +++ b/src/pipeline/datatype/ToFConfigBindings.cpp @@ -0,0 +1,74 @@ +#include "DatatypeBindings.hpp" +#include "pipeline/CommonBindings.hpp" +#include +#include + +// depthai +#include "depthai/pipeline/datatype/ToFConfig.hpp" + +//pybind +#include +#include + +// #include "spdlog/spdlog.h" + +void bind_tofconfig(pybind11::module& m, void* pCallstack){ + + using namespace dai; + + py::class_> rawToFConfig(m, "RawToFConfig", DOC(dai, RawToFConfig)); + py::class_ depthParams(rawToFConfig, "DepthParams", DOC(dai, RawToFConfig, DepthParams)); + py::enum_ depthParamsTypeFMod(depthParams, "TypeFMod", DOC(dai, RawToFConfig, DepthParams, TypeFMod)); + py::class_> toFConfig(m, "ToFConfig", DOC(dai, ToFConfig)); + + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + // Call the rest of the type defines, then perform the actual bindings + Callstack* callstack = (Callstack*) pCallstack; + auto cb = callstack->top(); + callstack->pop(); + cb(m, pCallstack); + // Actual bindings + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + + // Metadata / raw + rawToFConfig + .def(py::init<>()) + .def_readwrite("depthParams", &RawToFConfig::depthParams, DOC(dai, RawToFConfig, depthParams)) + ; + + depthParamsTypeFMod + .value("ALL", RawToFConfig::DepthParams::TypeFMod::F_MOD_ALL) + .value("MIN", RawToFConfig::DepthParams::TypeFMod::F_MOD_MIN) + .value("MAX", RawToFConfig::DepthParams::TypeFMod::F_MOD_MAX) + ; + + depthParams + .def(py::init<>()) + .def_readwrite("enable", &RawToFConfig::DepthParams::enable, DOC(dai, RawToFConfig, DepthParams, enable)) + .def_readwrite("freqModUsed", &RawToFConfig::DepthParams::freqModUsed, DOC(dai, RawToFConfig, DepthParams, freqModUsed)) + .def_readwrite("avgPhaseShuffle", &RawToFConfig::DepthParams::avgPhaseShuffle, DOC(dai, RawToFConfig, DepthParams, avgPhaseShuffle)) + .def_readwrite("minimumAmplitude", &RawToFConfig::DepthParams::minimumAmplitude, DOC(dai, RawToFConfig, DepthParams, minimumAmplitude)) + ; + + // Message + toFConfig + .def(py::init<>()) + .def(py::init>()) + + .def("setDepthParams", static_cast(&ToFConfig::setDepthParams), py::arg("config"), DOC(dai, ToFConfig, setDepthParams)) + .def("setFreqModUsed", static_cast(&ToFConfig::setFreqModUsed), DOC(dai, ToFConfig, setFreqModUsed)) + .def("setAvgPhaseShuffle", &ToFConfig::setAvgPhaseShuffle, DOC(dai, ToFConfig, setAvgPhaseShuffle)) + .def("setMinAmplitude", &ToFConfig::setMinAmplitude, DOC(dai, ToFConfig, setMinAmplitude)) + + .def("set", &ToFConfig::set, py::arg("config"), DOC(dai, ToFConfig, set)) + .def("get", &ToFConfig::get, DOC(dai, ToFConfig, get)) + ; + + // add aliases + m.attr("ToFConfig").attr("DepthParams") = m.attr("RawToFConfig").attr("DepthParams"); + +} diff --git a/src/pipeline/node/CameraBindings.cpp b/src/pipeline/node/CameraBindings.cpp index d99d0eab0..2c52bb282 100644 --- a/src/pipeline/node/CameraBindings.cpp +++ b/src/pipeline/node/CameraBindings.cpp @@ -159,6 +159,7 @@ void bind_camera(pybind11::module& m, void* pCallstack){ .def("setCalibrationAlpha", &Camera::setCalibrationAlpha, py::arg("alpha"), DOC(dai, node, Camera, setCalibrationAlpha)) .def("getCalibrationAlpha", &Camera::getCalibrationAlpha, DOC(dai, node, Camera, 
getCalibrationAlpha)) + .def("setRawOutputPacked", &Camera::setRawOutputPacked, py::arg("packed"), DOC(dai, node, Camera, setRawOutputPacked)) ; // ALIAS daiNodeModule.attr("Camera").attr("Properties") = cameraProperties; diff --git a/src/pipeline/node/ColorCameraBindings.cpp b/src/pipeline/node/ColorCameraBindings.cpp index 0816ffefd..fb345d221 100644 --- a/src/pipeline/node/ColorCameraBindings.cpp +++ b/src/pipeline/node/ColorCameraBindings.cpp @@ -188,6 +188,8 @@ void bind_colorcamera(pybind11::module& m, void* pCallstack){ .def("getIspNumFramesPool", &ColorCamera::getIspNumFramesPool, DOC(dai, node, ColorCamera, getIspNumFramesPool)) .def("setCamera", &ColorCamera::setCamera, py::arg("name"), DOC(dai, node, ColorCamera, setCamera)) .def("getCamera", &ColorCamera::getCamera, DOC(dai, node, ColorCamera, getCamera)) + + .def("setRawOutputPacked", &ColorCamera::setRawOutputPacked, py::arg("packed"), DOC(dai, node, ColorCamera, setRawOutputPacked)) ; // ALIAS daiNodeModule.attr("ColorCamera").attr("Properties") = colorCameraProperties; diff --git a/src/pipeline/node/MonoCameraBindings.cpp b/src/pipeline/node/MonoCameraBindings.cpp index 41c3b981f..51365fda0 100644 --- a/src/pipeline/node/MonoCameraBindings.cpp +++ b/src/pipeline/node/MonoCameraBindings.cpp @@ -92,6 +92,8 @@ void bind_monocamera(pybind11::module& m, void* pCallstack){ .def("getRawNumFramesPool", &MonoCamera::getRawNumFramesPool, DOC(dai, node, MonoCamera, getRawNumFramesPool)) .def("setCamera", &MonoCamera::setCamera, py::arg("name"), DOC(dai, node, MonoCamera, setCamera)) .def("getCamera", &MonoCamera::getCamera, DOC(dai, node, MonoCamera, getCamera)) + + .def("setRawOutputPacked", &MonoCamera::setRawOutputPacked, py::arg("packed"), DOC(dai, node, MonoCamera, setRawOutputPacked)) ; // ALIAS daiNodeModule.attr("MonoCamera").attr("Properties") = monoCameraProperties; diff --git a/src/pipeline/node/NodeBindings.cpp b/src/pipeline/node/NodeBindings.cpp index ef2bd8c2e..06e790377 100644 --- a/src/pipeline/node/NodeBindings.cpp +++ b/src/pipeline/node/NodeBindings.cpp @@ -114,6 +114,8 @@ void bind_edgedetector(pybind11::module& m, void* pCallstack); void bind_featuretracker(pybind11::module& m, void* pCallstack); void bind_apriltag(pybind11::module& m, void* pCallstack); void bind_detectionparser(pybind11::module& m, void* pCallstack); +void bind_uvc(pybind11::module& m, void* pCallstack); +void bind_tof(pybind11::module& m, void* pCallstack); void NodeBindings::addToCallstack(std::deque& callstack) { // Bind Node et al @@ -143,6 +145,8 @@ void NodeBindings::addToCallstack(std::deque& callstack) { callstack.push_front(bind_featuretracker); callstack.push_front(bind_apriltag); callstack.push_front(bind_detectionparser); + callstack.push_front(bind_uvc); + callstack.push_front(bind_tof); } void NodeBindings::bind(pybind11::module& m, void* pCallstack){ @@ -209,6 +213,9 @@ void NodeBindings::bind(pybind11::module& m, void* pCallstack){ .def_readwrite("group", &Node::Input::group, DOC(dai, Node, Input, group)) .def_readwrite("name", &Node::Input::name, DOC(dai, Node, Input, name)) .def_readwrite("type", &Node::Input::type, DOC(dai, Node, Input, type)) + .def_readwrite("possibleDatatypes", &Node::Input::possibleDatatypes, DOC(dai, Node, Input, possibleDatatypes)) + .def("getParent", static_cast(&Node::Input::getParent), py::return_value_policy::reference_internal, DOC(dai, Node, Input, getParent)) + .def("getParent", static_cast(&Node::Input::getParent), py::return_value_policy::reference_internal, DOC(dai, Node, Input, getParent)) 
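Together with the RAW10/RAW12/RAW14 support added to the ImgFrame bindings above, setRawOutputPacked(False) lets the host receive RAW frames in an unpacked, one-pixel-per-uint16 layout that maps directly onto a numpy array. A minimal sketch (assuming a device is attached):

.. code-block:: python

    import depthai as dai

    pipeline = dai.Pipeline()
    cam = pipeline.create(dai.node.ColorCamera)
    cam.setRawOutputPacked(False)  # unpacked RAW: one pixel per 16-bit word

    xoutRaw = pipeline.create(dai.node.XLinkOut)
    xoutRaw.setStreamName("raw")
    cam.raw.link(xoutRaw.input)

    with dai.Device(pipeline) as device:
        frame = device.getOutputQueue("raw").get().getCvFrame()  # uint16 ndarray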
.def("setBlocking", &Node::Input::setBlocking, py::arg("blocking"), DOC(dai, Node, Input, setBlocking)) .def("getBlocking", &Node::Input::getBlocking, DOC(dai, Node, Input, getBlocking)) .def("setQueueSize", &Node::Input::setQueueSize, py::arg("size"), DOC(dai, Node, Input, setQueueSize)) diff --git a/src/pipeline/node/ToFBindings.cpp b/src/pipeline/node/ToFBindings.cpp new file mode 100644 index 000000000..ae67b149e --- /dev/null +++ b/src/pipeline/node/ToFBindings.cpp @@ -0,0 +1,47 @@ +#include "NodeBindings.hpp" +#include "Common.hpp" + +#include "depthai/pipeline/Pipeline.hpp" +#include "depthai/pipeline/Node.hpp" +#include "depthai/pipeline/node/ToF.hpp" + +void bind_tof(pybind11::module& m, void* pCallstack){ + + using namespace dai; + using namespace dai::node; + + // Node and Properties declare upfront + py::class_ tofProperties(m, "ToFProperties", DOC(dai, ToFProperties)); + auto tof = ADD_NODE(ToF); + + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + // Call the rest of the type defines, then perform the actual bindings + Callstack* callstack = (Callstack*) pCallstack; + auto cb = callstack->top(); + callstack->pop(); + cb(m, pCallstack); + // Actual bindings + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + + // Properties + tofProperties + .def_readwrite("initialConfig", &ToFProperties::initialConfig, DOC(dai, ToFProperties, initialConfig)) + ; + + // Node + tof + .def_readonly("inputConfig", &ToF::inputConfig, DOC(dai, node, ToF, inputConfig), DOC(dai, node, ToF, inputConfig)) + .def_readonly("input", &ToF::input, DOC(dai, node, ToF, input), DOC(dai, node, ToF, input)) + .def_readonly("depth", &ToF::depth, DOC(dai, node, ToF, depth), DOC(dai, node, ToF, depth)) + .def_readonly("amplitude", &ToF::amplitude, DOC(dai, node, ToF, amplitude), DOC(dai, node, ToF, amplitude)) + .def_readonly("error", &ToF::error, DOC(dai, node, ToF, error), DOC(dai, node, ToF, error)) + .def_readonly("initialConfig", &ToF::initialConfig, DOC(dai, node, ToF, initialConfig), DOC(dai, node, ToF, initialConfig)) + ; + // ALIAS + daiNodeModule.attr("ToF").attr("Properties") = tofProperties; + +} diff --git a/src/pipeline/node/UVCBindings.cpp b/src/pipeline/node/UVCBindings.cpp new file mode 100644 index 000000000..81f180658 --- /dev/null +++ b/src/pipeline/node/UVCBindings.cpp @@ -0,0 +1,50 @@ + +#include "NodeBindings.hpp" +#include "Common.hpp" + +#include "depthai/pipeline/Pipeline.hpp" +#include "depthai/pipeline/Node.hpp" +#include "depthai/pipeline/node/UVC.hpp" + +void bind_uvc(pybind11::module& m, void* pCallstack){ + + using namespace dai; + using namespace dai::node; + + // Node and Properties declare upfront + py::class_ uvcProperties(m, "UVCProperties", DOC(dai, UVCProperties)); + auto uvc = ADD_NODE(UVC); + + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + // Call the rest of the type defines, then perform the actual bindings + Callstack* callstack = (Callstack*) pCallstack; + auto cb = callstack->top(); + callstack->pop(); + cb(m, pCallstack); + // Actual bindings + 
/////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + /////////////////////////////////////////////////////////////////////// + + // Properties + uvcProperties + .def_readwrite("gpioInit", &UVCProperties::gpioInit) + .def_readwrite("gpioStreamOn", &UVCProperties::gpioStreamOn) + .def_readwrite("gpioStreamOff", &UVCProperties::gpioStreamOff) + ; + + // UVC node + uvc + .def_readonly("input", &UVC::input, DOC(dai, node, UVC, input)) + .def("setGpiosOnInit", &UVC::setGpiosOnInit, py::arg("list"), DOC(dai, node, UVC, setGpiosOnInit)) + .def("setGpiosOnStreamOn", &UVC::setGpiosOnStreamOn, py::arg("list"), DOC(dai, node, UVC, setGpiosOnStreamOn)) + .def("setGpiosOnStreamOff", &UVC::setGpiosOnStreamOff, py::arg("list"), DOC(dai, node, UVC, setGpiosOnStreamOff)) + ; + + // ALIAS + daiNodeModule.attr("UVC").attr("Properties") = uvcProperties; + +} + diff --git a/utilities/cam_test.py b/utilities/cam_test.py index e54b676dc..240007de3 100755 --- a/utilities/cam_test.py +++ b/utilities/cam_test.py @@ -30,37 +30,40 @@ For the 'Select control: ...' options, use these keys to modify the value: '-' or '_' to decrease '+' or '=' to increase + +'/' to toggle printing camera settings: exposure, ISO, lens position, color temperature """ import os #os.environ["DEPTHAI_LEVEL"] = "debug" import cv2 +import numpy as np import argparse import collections import time from itertools import cycle from pathlib import Path import sys -import cam_test_gui import signal def socket_type_pair(arg): socket, type = arg.split(',') - if not (socket in ['rgb', 'left', 'right', 'camd']): + if not (socket in ['rgb', 'left', 'right', 'cama', 'camb', 'camc', 'camd']): raise ValueError("") - if not (type in ['m', 'mono', 'c', 'color']): + if not (type in ['m', 'mono', 'c', 'color', 't', 'tof']): raise ValueError("") is_color = True if type in ['c', 'color'] else False - return [socket, is_color] + is_tof = True if type in ['t', 'tof'] else False + return [socket, is_color, is_tof] parser = argparse.ArgumentParser() parser.add_argument('-cams', '--cameras', type=socket_type_pair, nargs='+', - default=[['rgb', True], ['left', False], - ['right', False], ['camd', True]], - help="Which camera sockets to enable, and type: c[olor] / m[ono]. " + default=[['rgb', True, False], ['left', False, False], + ['right', False, False], ['camd', True, False]], + help="Which camera sockets to enable, and type: c[olor] / m[ono] / t[of]. " "E.g: -cams rgb,m right,c . Default: rgb,c left,m right,m camd,c") parser.add_argument('-mres', '--mono-resolution', type=int, default=800, choices={480, 400, 720, 800}, help="Select mono camera resolution (height). Default: %(default)s") @@ -78,6 +81,17 @@ def socket_type_pair(arg): help="Make OpenCV windows resizable. 
Note: may introduce some artifacts") parser.add_argument('-tun', '--camera-tuning', type=Path, help="Path to custom camera tuning database") +parser.add_argument('-raw', '--enable-raw', default=False, action="store_true", + help='Enable the RAW camera streams') +parser.add_argument('-tofraw', '--tof-raw', action='store_true', + help="Show just ToF raw output instead of post-processed depth") +parser.add_argument('-tofamp', '--tof-amplitude', action='store_true', + help="Show also ToF amplitude output alongside depth") +parser.add_argument('-tofcm', '--tof-cm', action='store_true', + help="Show ToF depth output in centimeters, capped to 255") +parser.add_argument('-rgbprev', '--rgb-preview', action='store_true', + help="Show RGB `preview` stream instead of full size `isp`") + parser.add_argument('-d', '--device', default="", type=str, help="Optional MX ID of the device to connect to.") @@ -95,37 +109,39 @@ def socket_type_pair(arg): import depthai as dai if len(sys.argv) == 1: + import cam_test_gui cam_test_gui.main() cam_list = [] cam_type_color = {} +cam_type_tof = {} print("Enabled cameras:") -for socket, is_color in args.cameras: +for socket, is_color, is_tof in args.cameras: cam_list.append(socket) cam_type_color[socket] = is_color - print(socket.rjust(7), ':', 'color' if is_color else 'mono') + cam_type_tof[socket] = is_tof + print(socket.rjust(7), ':', 'tof' if is_tof else 'color' if is_color else 'mono') print("DepthAI version:", dai.__version__) print("DepthAI path:", dai.__file__) cam_socket_opts = { - 'rgb': dai.CameraBoardSocket.RGB, # Or CAM_A - 'left': dai.CameraBoardSocket.LEFT, # Or CAM_B - 'right': dai.CameraBoardSocket.RIGHT, # Or CAM_C - 'camd': dai.CameraBoardSocket.CAM_D, -} - -cam_socket_to_name = { - 'RGB': 'rgb', - 'LEFT': 'left', - 'RIGHT': 'right', - 'CAM_D': 'camd', + 'rgb' : dai.CameraBoardSocket.CAM_A, + 'left' : dai.CameraBoardSocket.CAM_B, + 'right': dai.CameraBoardSocket.CAM_C, + 'cama' : dai.CameraBoardSocket.CAM_A, + 'camb' : dai.CameraBoardSocket.CAM_B, + 'camc' : dai.CameraBoardSocket.CAM_C, + 'camd' : dai.CameraBoardSocket.CAM_D, } rotate = { 'rgb': args.rotate in ['all', 'rgb'], 'left': args.rotate in ['all', 'mono'], 'right': args.rotate in ['all', 'mono'], + 'cama': args.rotate in ['all', 'rgb'], + 'camb': args.rotate in ['all', 'mono'], + 'camc': args.rotate in ['all', 'mono'], 'camd': args.rotate in ['all', 'rgb'], } @@ -177,17 +193,50 @@ def get(self): control = pipeline.createXLinkIn() control.setStreamName('control') +xinTofConfig = pipeline.createXLinkIn() +xinTofConfig.setStreamName('tofConfig') + cam = {} +tof = {} xout = {} +xout_raw = {} +xout_tof_amp = {} +streams = [] +tofConfig = {} for c in cam_list: + tofEnableRaw = False xout[c] = pipeline.createXLinkOut() xout[c].setStreamName(c) - if cam_type_color[c]: + streams.append(c) + if cam_type_tof[c]: + cam[c] = pipeline.create(dai.node.ColorCamera) # .Camera + if args.tof_raw: + tofEnableRaw = True + else: + tof[c] = pipeline.create(dai.node.ToF) + cam[c].raw.link(tof[c].input) + tof[c].depth.link(xout[c].input) + xinTofConfig.out.link(tof[c].inputConfig) + tofConfig = tof[c].initialConfig.get() + tofConfig.depthParams.freqModUsed = dai.RawToFConfig.DepthParams.TypeFMod.MIN + tofConfig.depthParams.avgPhaseShuffle = False + tofConfig.depthParams.minimumAmplitude = 3.0 + tof[c].initialConfig.set(tofConfig) + if args.tof_amplitude: + amp_name = 'tof_amplitude_' + c + xout_tof_amp[c] = pipeline.create(dai.node.XLinkOut) + xout_tof_amp[c].setStreamName(amp_name) + streams.append(amp_name) + 
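Distilled from the pipeline wiring above, a standalone ToF depth pipeline looks roughly like this (as in cam_test.py, the ToF sensor enumerates as a ColorCamera node whose raw output feeds the ToF decoder):

.. code-block:: python

    import depthai as dai

    pipeline = dai.Pipeline()
    cam = pipeline.create(dai.node.ColorCamera)  # the ToF sensor enumerates as a camera
    tof = pipeline.create(dai.node.ToF)
    cam.raw.link(tof.input)

    xout = pipeline.create(dai.node.XLinkOut)
    xout.setStreamName("depth")
    tof.depth.link(xout.input)

    tofConfig = tof.initialConfig.get()
    tofConfig.depthParams.freqModUsed = dai.RawToFConfig.DepthParams.TypeFMod.MIN
    tofConfig.depthParams.avgPhaseShuffle = False
    tofConfig.depthParams.minimumAmplitude = 3.0
    tof.initialConfig.set(tofConfig)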
tof[c].amplitude.link(xout_tof_amp[c].input) + elif cam_type_color[c]: cam[c] = pipeline.createColorCamera() cam[c].setResolution(color_res_opts[args.color_resolution]) cam[c].setIspScale(1, args.isp_downscale) # cam[c].initialControl.setManualFocus(85) # TODO - cam[c].isp.link(xout[c].input) + if args.rgb_preview: + cam[c].preview.link(xout[c].input) + else: + cam[c].isp.link(xout[c].input) else: cam[c] = pipeline.createMonoCamera() cam[c].setResolution(mono_res_opts[args.mono_resolution]) @@ -205,7 +254,16 @@ def get(self): if rotate[c]: cam[c].setImageOrientation(dai.CameraImageOrientation.ROTATE_180_DEG) cam[c].setFps(args.fps) - cam[c].setIsp3aFps(args.isp3afps) + if args.isp3afps: + cam[c].setIsp3aFps(args.isp3afps) + + if args.enable_raw or tofEnableRaw: + raw_name = 'raw_' + c + xout_raw[c] = pipeline.create(dai.node.XLinkOut) + xout_raw[c].setStreamName(raw_name) + streams.append(raw_name) + cam[c].raw.link(xout_raw[c].input) + cam[c].setRawOutputPacked(False) if args.camera_tuning: pipeline.setCameraTuningBlobPath(str(args.camera_tuning)) @@ -232,7 +290,11 @@ def exit_cleanly(signum, frame): f' -socket {p.socket.name:6}: {p.sensorName:6} {p.width:4} x {p.height:4} focus:', end='') print('auto ' if p.hasAutofocus else 'fixed', '- ', end='') print(*[type.name for type in p.supportedTypes]) - cam_name[cam_socket_to_name[p.socket.name]] = p.sensorName + cam_name[p.socket.name] = p.sensorName + if args.enable_raw: + cam_name['raw_'+p.socket.name] = p.sensorName + if args.tof_amplitude: + cam_name['tof_amplitude_'+p.socket.name] = p.sensorName print('USB speed:', device.getUsbSpeed().name) @@ -241,7 +303,7 @@ def exit_cleanly(signum, frame): q = {} fps_host = {} # FPS computed based on the time we receive frames in app fps_capt = {} # FPS computed based on capture timestamps from device - for c in cam_list: + for c in streams: q[c] = device.getOutputQueue(name=c, maxSize=4, blocking=False) # The OpenCV window resize may produce some artifacts if args.resizable_windows: @@ -251,6 +313,7 @@ def exit_cleanly(signum, frame): fps_capt[c] = FPS() controlQueue = device.getInputQueue('control') + tofCfgQueue = device.getInputQueue('tofConfig') # Manual exposure/focus set step EXP_STEP = 500 # us @@ -294,13 +357,17 @@ def exit_cleanly(signum, frame): luma_denoise = 0 chroma_denoise = 0 control = 'none' + show = False + + jet_custom = cv2.applyColorMap(np.arange(256, dtype=np.uint8), cv2.COLORMAP_JET) + jet_custom[0] = [0, 0, 0] print("Cam:", *[' ' + c.ljust(8) for c in cam_list], "[host | capture timestamp]") capture_list = [] while True: - for c in cam_list: + for c in streams: try: pkt = q[c].tryGet() except Exception as e: @@ -309,37 +376,89 @@ def exit_cleanly(signum, frame): if pkt is not None: fps_host[c].update() fps_capt[c].update(pkt.getTimestamp().total_seconds()) + width, height = pkt.getWidth(), pkt.getHeight() frame = pkt.getCvFrame() - if c in capture_list: - width, height = pkt.getWidth(), pkt.getHeight() - capture_file_name = ('capture_' + c + '_' + cam_name[c] - + '_' + str(width) + 'x' + str(height) - + '_exp_' + - str(int( - pkt.getExposureTime().total_seconds()*1e6)) - + '_iso_' + str(pkt.getSensitivity()) - + '_lens_' + - str(pkt.getLensPosition()) - + '_' + capture_time - + '_' + str(pkt.getSequenceNum()) - + ".png" - ) - print("\nSaving:", capture_file_name) - cv2.imwrite(capture_file_name, frame) + cam_skt = c.split('_')[-1] + if cam_type_tof[cam_skt] and not (c.startswith('raw_') or c.startswith('tof_amplitude_')): + if args.tof_cm: + # pixels represent `cm`, capped to 
255. Value can be checked hovering the mouse + frame = (frame // 10).clip(0, 255).astype(np.uint8) + else: + frame = (frame.view(np.int16).astype(float)) + frame = cv2.normalize(frame, frame, alpha=255, beta=0, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U) + frame = cv2.applyColorMap(frame, jet_custom) + if show: + txt = f"[{c:5}, {pkt.getSequenceNum():4}] " + txt += f"Exp: {pkt.getExposureTime().total_seconds()*1000:6.3f} ms, " + txt += f"ISO: {pkt.getSensitivity():4}, " + txt += f"Lens pos: {pkt.getLensPosition():3}, " + txt += f"Color temp: {pkt.getColorTemperature()} K" + print(txt) + capture = c in capture_list + if capture: + capture_file_info = ('capture_' + c + '_' + cam_name[cam_socket_opts[cam_skt].name] + + '_' + str(width) + 'x' + str(height) + + '_exp_' + str(int(pkt.getExposureTime().total_seconds()*1e6)) + + '_iso_' + str(pkt.getSensitivity()) + + '_lens_' + str(pkt.getLensPosition()) + + '_' + capture_time + + '_' + str(pkt.getSequenceNum()) + ) capture_list.remove(c) - + print() + if c.startswith('raw_') or c.startswith('tof_amplitude_'): + if capture: + filename = capture_file_info + '_10bit.bw' + print('Saving:', filename) + frame.tofile(filename) + # Full range for display: shift the RAW data into the top bits of the 16-bit pixels + type = pkt.getType() + multiplier = 1 + if type == dai.ImgFrame.Type.RAW10: multiplier = (1 << (16-10)) + if type == dai.ImgFrame.Type.RAW12: multiplier = (1 << (16-12)) + frame = frame * multiplier + # Debayer as color for preview/png + if cam_type_color[cam_skt]: + # See this for the ordering, at the end of page: + # https://docs.opencv.org/4.5.1/de/d25/imgproc_color_conversions.html + # TODO add bayer order to ImgFrame getType() + frame = cv2.cvtColor(frame, cv2.COLOR_BayerGB2BGR) + else: + # Save YUV too, but only when RAW is also enabled (for tuning purposes) + if capture and args.enable_raw: + payload = pkt.getData() + filename = capture_file_info + '_P420.yuv' + print('Saving:', filename) + payload.tofile(filename) + if capture: + filename = capture_file_info + '.png' + print('Saving:', filename) + cv2.imwrite(filename, frame) cv2.imshow(c, frame) print("\rFPS:", - *["{:6.2f}|{:6.2f}".format(fps_host[c].get(), - fps_capt[c].get()) for c in cam_list], - end='', flush=True) + *["{:6.2f}|{:6.2f}".format(fps_host[c].get(), fps_capt[c].get()) for c in cam_list], + end=' ', flush=True) + if show: print() key = cv2.waitKey(1) if key == ord('q'): break + elif key == ord('/'): + show = not show + # Print empty string as FPS status new-line separator + print("" if show else "Printing camera settings: OFF") elif key == ord('c'): - capture_list = cam_list.copy() + capture_list = streams.copy() capture_time = time.strftime('%Y%m%d_%H%M%S') + elif key == ord('g') and tof: + f_mod = dai.RawToFConfig.DepthParams.TypeFMod.MAX if tofConfig.depthParams.freqModUsed == dai.RawToFConfig.DepthParams.TypeFMod.MIN else dai.RawToFConfig.DepthParams.TypeFMod.MIN + print("ToF toggling f_mod value to:", f_mod) + tofConfig.depthParams.freqModUsed = f_mod + tofCfgQueue.send(tofConfig) + elif key == ord('h') and tof: + tofConfig.depthParams.avgPhaseShuffle = not tofConfig.depthParams.avgPhaseShuffle + print("ToF toggling avgPhaseShuffle value to:", tofConfig.depthParams.avgPhaseShuffle) + tofCfgQueue.send(tofConfig) elif key == ord('t'): print("Autofocus trigger (and disable continuous)") ctrl = dai.CameraControl() @@ -414,7 +533,7 @@ def exit_cleanly(signum, frame): if floodIntensity < 0: floodIntensity = 0 device.setIrFloodLightBrightness(floodIntensity) - elif key >= 0 and chr(key) in
'34567890[]': + elif key >= 0 and chr(key) in '34567890[]p': if key == ord('3'): control = 'awb_mode' elif key == ord('4'): @@ -435,6 +554,8 @@ def exit_cleanly(signum, frame): control = 'luma_denoise' elif key == ord(']'): control = 'chroma_denoise' + elif key == ord('p'): + control = 'tof_amplitude_min' print("Selected control:", control) elif key in [ord('-'), ord('_'), ord('+'), ord('=')]: change = 0 @@ -485,4 +606,9 @@ def exit_cleanly(signum, frame): chroma_denoise = clamp(chroma_denoise + change, 0, 4) print("Chroma denoise:", chroma_denoise) ctrl.setChromaDenoise(chroma_denoise) + elif control == 'tof_amplitude_min' and tof: + amp_min = clamp(tofConfig.depthParams.minimumAmplitude + change, 0, 50) + print("Setting min amplitude(confidence) to:", amp_min) + tofConfig.depthParams.minimumAmplitude = amp_min + tofCfgQueue.send(tofConfig) controlQueue.send(ctrl) diff --git a/utilities/device_manager.py b/utilities/device_manager.py index ea2cf44e9..99814f10b 100755 --- a/utilities/device_manager.py +++ b/utilities/device_manager.py @@ -10,6 +10,7 @@ from typing import Dict import platform import os +import numpy if USE_OPENCV: # import cv2 @@ -20,10 +21,14 @@ SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__)) +PLATFORM_ICON_PATH = None if platform.system() == 'Windows': - sg.set_global_icon(f'{SCRIPT_DIR}/assets/icon.ico') + PLATFORM_ICON_PATH = f'{SCRIPT_DIR}/assets/icon.ico' else: - sg.set_global_icon(f'{SCRIPT_DIR}/assets/icon.png') + PLATFORM_ICON_PATH = f'{SCRIPT_DIR}/assets/icon.png' + +# Apply icon globally +sg.set_global_icon(PLATFORM_ICON_PATH) CONF_TEXT_POE = ['ipTypeText', 'ipText', 'maskText', 'gatewayText', 'dnsText', 'dnsAltText', 'networkTimeoutText', 'macText'] CONF_INPUT_POE = ['staticBut', 'dynamicBut', 'ip', 'mask', 'gateway', 'dns', 'dnsAlt', 'networkTimeout', 'mac'] @@ -38,27 +43,35 @@ def PrintException(): filename = f.f_code.co_filename print('Exception in {}, line {}; {}'.format(filename, lineno, exc_obj)) -def check_ip(s: str, req = True): +def Popup(msg, window): + main_window_location = window.CurrentLocation() + main_window_size = window.size + # Calculate the centered location for the new window + centered_location = (main_window_location[0] + (main_window_size[0] - 50) // 2, + main_window_location[1] + (main_window_size[1] - 50) // 2) + return sg.Popup(msg, location=centered_location) + +def check_ip(window, s: str, req = True): if s == "": return not req spl = s.split(".") if len(spl) != 4: - sg.Popup("Wrong IP format.\nValue should be similar to 255.255.255.255") + Popup("Wrong IP format.\nValue should be similar to 255.255.255.255", window=window) return False for num in spl: if 255 < int(num): - sg.Popup("Wrong IP format.\nValues can not be above 255!") + Popup("Wrong IP format.\nValues can not be above 255!", window=window) return False return True -def check_mac(s): +def check_mac(window, s): if s.count(":") != 5: - sg.Popup("Wrong MAC format.\nValue should be similar to FF:FF:FF:FF:FF:FF") + Popup("Wrong MAC format.\nValue should be similar to FF:FF:FF:FF:FF:FF", window=window) return False for i in s.split(":"): for j in i: if j > "F" or (j < "A" and not j.isdigit()) or len(i) != 2: - sg.Popup("Wrong MAC format.\nValue should be similar to FF:FF:FF:FF:FF:FF") + Popup("Wrong MAC format.\nValue should be similar to FF:FF:FF:FF:FF:FF", window=window) return False return True @@ -113,7 +126,7 @@ def wait(self): class SelectIP: - def __init__(self): + def __init__(self, window): self.ok = False layout = [ [sg.Text("Specify the custom IP of the OAK 
PoE\ncamera you want to connect to")], @@ -124,15 +137,24 @@ def __init__(self): [sg.Submit(), sg.Cancel()], ] self.window = sg.Window("Specify IP", layout, size=(300,110), modal=True, finalize=True) + + main_window_location = window.CurrentLocation() + main_window_size = window.size + new_window_size = self.window.size + # Calculate the centered location for the new window + centered_location = (main_window_location[0] + (main_window_size[0] - new_window_size[0]) // 2, + main_window_location[1] + (main_window_size[1] - new_window_size[1]) // 2) + self.window.move(*centered_location) + def wait(self): event, values = self.window.Read() self.window.close() - if str(event) == "Cancel" or values is None or not check_ip(values["ip"]): + if str(event) == "Cancel" or values is None or not check_ip(self.window, values["ip"]): return False, "" return True, values["ip"] class SearchDevice: - def __init__(self): + def __init__(self, window): self.infos = [] layout = [ [sg.Text("Select an OAK camera you would like to connect to.", font=('Arial', 10, 'bold'))], @@ -156,12 +178,22 @@ def __init__(self): [sg.Button('Search', size=(15, 2), font=('Arial', 10, 'bold'))], ] self.window = sg.Window("Select Device", layout, size=(550,375), modal=True, finalize=True) + + main_window_location = window.CurrentLocation() + main_window_size = window.size + new_window_size = self.window.size + # Calculate the centered location for the new window + centered_location = (main_window_location[0] + (main_window_size[0] - new_window_size[0]) // 2, + main_window_location[1] + (main_window_size[1] - new_window_size[1]) // 2) + self.window.move(*centered_location) + self.search_devices() def search_devices(self): self.infos = dai.XLinkConnection.getAllConnectedDevices() if not self.infos: - sg.Popup("No devices found.") + pass + # sg.Popup("No devices found.") else: rows = [] for info in self.infos: @@ -257,21 +289,20 @@ def factoryReset(device: dai.DeviceInfo, type: dai.DeviceBootloader.Type): def connectAndStartStreaming(dev): - # OpenCV - if USE_OPENCV: + with dai.Device(dev) as d: # Create pipeline pipeline = dai.Pipeline() + # OpenCV + if USE_OPENCV: + camRgb = pipeline.create(dai.node.ColorCamera) + camRgb.setIspScale(1,3) + videnc = pipeline.create(dai.node.VideoEncoder) + videnc.setDefaultProfilePreset(camRgb.getFps(), videnc.Properties.Profile.MJPEG) + xout = pipeline.create(dai.node.XLinkOut) + xout.setStreamName("mjpeg") + camRgb.video.link(videnc.input) + videnc.bitstream.link(xout.input) - camRgb = pipeline.create(dai.node.ColorCamera) - camRgb.setIspScale(1,3) - videnc = pipeline.create(dai.node.VideoEncoder) - videnc.setDefaultProfilePreset(camRgb.getFps(), videnc.Properties.Profile.MJPEG) - xout = pipeline.create(dai.node.XLinkOut) - xout.setStreamName("mjpeg") - camRgb.video.link(videnc.input) - videnc.bitstream.link(xout.input) - - with dai.Device(pipeline, dev) as d: while not d.isClosed(): mjpeg = d.getOutputQueue('mjpeg').get() frame = cv2.imdecode(mjpeg.getData(), cv2.IMREAD_UNCHANGED) @@ -279,21 +310,22 @@ def connectAndStartStreaming(dev): if cv2.waitKey(1) == ord('q'): cv2.destroyWindow('Color Camera') break - else: - # Create pipeline (no opencv) - pipeline = dai.Pipeline() - camRgb = pipeline.create(dai.node.ColorCamera) - camRgb.setIspScale(1,3) - camRgb.setPreviewSize(camRgb.getIspSize()) - camRgb.setColorOrder(camRgb.Properties.ColorOrder.RGB) - - xout = pipeline.create(dai.node.XLinkOut) - xout.input.setQueueSize(2) - xout.input.setBlocking(False) - xout.setStreamName("color") - 
camRgb.preview.link(xout.input) - - with dai.Device(pipeline, dev) as d: + else: + camRgb = pipeline.create(dai.node.ColorCamera) + camRgb.setIspScale(1,3) + firstSensor = d.getConnectedCameraFeatures()[0] + camRgb.setPreviewSize(firstSensor.width // 3, firstSensor.height // 3) + camRgb.setColorOrder(camRgb.Properties.ColorOrder.RGB) + + xout = pipeline.create(dai.node.XLinkOut) + xout.input.setQueueSize(2) + xout.input.setBlocking(False) + xout.setStreamName("color") + camRgb.preview.link(xout.input) + + # Start pipeline + d.startPipeline(pipeline) + frame = d.getOutputQueue('color', 2, False).get() width, height = frame.getWidth(), frame.getHeight() @@ -540,7 +572,7 @@ class DeviceManager: def __init__(self) -> None: self.window = sg.Window(title="Device Manager", - icon="assets/icon.png", + icon=PLATFORM_ICON_PATH, layout=layout, size=(645, 380), finalize=True # So we can do First search for devices @@ -556,7 +588,7 @@ def isPoE(self) -> bool: return self.bl.getType() == dai.DeviceBootloader.Type.NETWORK except Exception as ex: PrintException() - sg.Popup(f'{ex}') + Popup(f'{ex}', self.window) def isUsb(self) -> bool: return not self.isPoE() @@ -577,7 +609,7 @@ def run(self) -> None: device = self.device if deviceStateTxt(device.state) == "BOOTED": # device is already booted somewhere else - sg.Popup("Device is already booted somewhere else!") + Popup("Device is already booted somewhere else!", self.window) else: self.resetGui() self.bl = connectToDevice(device) @@ -588,7 +620,7 @@ def run(self) -> None: self.window.Element('progress').update("No device selected.") elif event == "Search": self.getDevices() # Re-search devices for dropdown - selDev = SearchDevice() + selDev = SearchDevice(window=self.window) di = selDev.wait() if di is not None: self.resetGui() @@ -601,7 +633,7 @@ def run(self) -> None: self.getConfigs() self.unlockConfig() elif event == "Specify IP": - select = SelectIP() + select = SelectIP(window=self.window) ok, ip = select.wait() if ok: self.resetGui() @@ -655,14 +687,14 @@ def run(self) -> None: print("Factory reset cancelled.") elif event == "Flash configuration": - self.flashConfig() - self.getConfigs() - self.resetGui() - if self.isUsb(): - self.unlockConfig() - else: - self.devices.clear() - self.window.Element('devices').update("Search for devices", values=[]) + if self.flashConfig() is not None: + self.getConfigs() + self.resetGui() + if self.isUsb(): + self.unlockConfig() + else: + self.devices.clear() + self.window.Element('devices').update("Search for devices", values=[]) elif event == "Clear configuration": self.clearConfig() self.getConfigs() @@ -677,7 +709,7 @@ def run(self) -> None: confJson = self.bl.readConfigData() sg.popup_scrolled(confJson, title='Configuration') except Exception as ex: - sg.popup(f'No existing config to view ({ex})') + Popup(f'No existing config to view ({ex})', self.window) elif event == "Flash application": file = sg.popup_get_file("Select .dap file", file_types=(('DepthAI Application Package', '*.dap'), ('All Files', '*.* *'))) @@ -685,9 +717,9 @@ def run(self) -> None: elif event == "Remove application": try: self.bl.flashClear() - sg.popup(f'Successfully removed application') + Popup(f'Successfully removed application', self.window) except Exception as ex: - sg.popup(f"Couldn't remove application ({ex})") + Popup(f"Couldn't remove application ({ex})", self.window) elif event.startswith("_unique_configBtn"): self.window['-COL1-'].update(visible=False) @@ -713,7 +745,7 @@ def run(self) -> None: elif event == "recoveryMode":
                 if recoveryMode(self.bl):
-                    sg.Popup(f'Device successfully put into USB recovery mode.')
+                    Popup(f'Device successfully put into USB recovery mode.', self.window)
                 # Device will reboot, close previous and reset GUI
                 self.closeDevice()
                 self.resetGui()
@@ -872,6 +904,12 @@ def resetGui(self):
         self.window.Element('commit').update("-version-")
         self.window.Element('devState').update("-state-")
+
+        # Move back to 'About' page
+        self.window['-COL1-'].update(visible=True)
+        self.window['-COL2-'].update(visible=False)
+        self.window['-COL3-'].update(visible=False)
+        self.window['-COL4-'].update(visible=False)
 
     def closeDevice(self):
         if self.bl is not None:
             self.bl.close()
@@ -885,7 +923,7 @@ def getDevices(self):
             deviceInfos = dai.XLinkConnection.getAllConnectedDevices()
             if not deviceInfos:
                 self.window.Element('devices').update("No devices")
-                sg.Popup("No devices found.")
+                # sg.Popup("No devices found.")
             else:
                 for deviceInfo in deviceInfos:
                     deviceTxt = deviceInfo.getMxId()
@@ -896,7 +934,7 @@
                 self.window.Element('devices').update("Select device", values=listedDevices)
         except Exception as ex:
             PrintException()
-            sg.Popup(f'{ex}')
+            Popup(f'{ex}', window=self.window)
 
     def flashConfig(self):
         values = self.values
@@ -912,12 +950,12 @@
         try:
             if self.isPoE:
                 if self.values['staticBut']:
-                    if check_ip(values['ip']) and check_ip(values['mask']) and check_ip(values['gateway'], req=False):
+                    if check_ip(self.window, values['ip']) and check_ip(self.window, values['mask']) and check_ip(self.window, values['gateway'], req=False):
                         conf.setStaticIPv4(values['ip'], values['mask'], values['gateway'])
                     else:
                         raise Exception('IP or Mask missing using static IP configuration')
                 else:
-                    if check_ip(values['ip'], req=False) and check_ip(values['mask'], req=False) and check_ip(values['gateway'], req=False):
+                    if check_ip(self.window, values['ip'], req=False) and check_ip(self.window, values['mask'], req=False) and check_ip(self.window, values['gateway'], req=False):
                         conf.setDynamicIPv4(values['ip'], values['mask'], values['gateway'])
 
             conf.setDnsIPv4(values['dns'], values['dnsAlt'])
@@ -925,9 +963,9 @@
                 if int(values['networkTimeout']) >= 0:
                     conf.setNetworkTimeout(timedelta(seconds=int(values['networkTimeout']) / 1000))
                 else:
-                    sg.Popup("Values can not be negative!")
+                    Popup("Values can not be negative!", window=self.window)
                 if values['mac'] != "":
-                    if check_mac(values['mac']):
+                    if check_mac(self.window, values['mac']):
                         conf.setMacAddress(values['mac'])
                 else:
                     conf.setMacAddress('00:00:00:00:00:00')
@@ -936,29 +974,34 @@
                 if int(values['usbTimeout']) >= 0:
                     conf.setUsbTimeout(timedelta(seconds=int(values['usbTimeout']) / 1000))
                 else:
-                    sg.Popup("Values can not be negative!")
+                    Popup("Values can not be negative!", window=self.window)
 
                 if values['usbSpeed'] != "":
                     conf.setUsbMaxSpeed(getattr(dai.UsbSpeed, values['usbSpeed']))
 
             success, error = self.bl.flashConfig(conf)
             if not success:
-                sg.Popup(f"Flashing failed: {error}")
+                Popup(f"Flashing failed: {error}", window=self.window)
+                return False
             else:
-                sg.Popup("Flashing successful.")
+                Popup("Flashing successful.", window=self.window)
+                return True
+
         except Exception as ex:
             PrintException()
-            sg.Popup(f'{ex}')
+            Popup(f'{ex}', window=self.window)
+
+        return None
 
     def clearConfig(self):
         try:
             success, error = self.bl.flashConfigClear()
             if not success:
-                sg.Popup(f"Clearing configuration failed: {error}")
+                Popup(f"Clearing configuration failed: {error}", window=self.window)
             else:
-
sg.Popup("Successfully cleared configuration.") + Popup("Successfully cleared configuration.", window=self.window) except Exception as ex: PrintException() - sg.Popup(f'{ex}') + Popup(f'{ex}', window=self.window) app = DeviceManager()