
Commit

Merge branch 'release_2.16.0.0' into main
themarpe committed Jun 6, 2022
2 parents 425d01a + 4c289a5 commit e0355bc
Showing 31 changed files with 579 additions and 137 deletions.
2 changes: 2 additions & 0 deletions .github/workflows/main.yml
@@ -177,6 +177,7 @@ jobs:
      matrix:
        python-version: [3.6, 3.7, 3.8, 3.9, '3.10']
        python-architecture: [x64, x86]
      fail-fast: false
    steps:
      - name: Cache .hunter folder
        uses: actions/cache@v2
@@ -230,6 +231,7 @@ jobs:
    strategy:
      matrix:
        python-version: [3.6, 3.7, 3.8, 3.9, '3.10']
      fail-fast: false
    steps:
      - name: Cache .hunter folder
        uses: actions/cache@v2
32 changes: 30 additions & 2 deletions CMakeLists.txt
@@ -9,6 +9,12 @@ if(NOT WIN32)
set(HUNTER_CONFIGURATION_TYPES "Release" CACHE STRING "Hunter dependencies list of build configurations")
endif()

# Specify path separator
set(SYS_PATH_SEPARATOR ";")
if(UNIX)
set(SYS_PATH_SEPARATOR ":")
endif()

# Generate combined Hunter config
file(READ depthai-core/cmake/Hunter/config.cmake depthai_core_hunter_config)
file(READ cmake/Hunter/config.cmake hunter_config)
@@ -29,7 +35,7 @@ endif()

# Pybindings project
set(TARGET_NAME depthai)
project(depthai VERSION "1") # revision of bindings [depthai-core].[rev]
project(depthai VERSION "0") # revision of bindings [depthai-core].[rev]

# Set default build type depending on context
set(default_build_type "Release")
@@ -97,6 +103,23 @@ pybind11_add_module(${TARGET_NAME}
src/log/LogBindings.cpp
)

if(WIN32)
# Copy dlls to target directory - Windows only
# TARGET_RUNTIME_DLLS generator expression available since CMake 3.21
if(CMAKE_VERSION VERSION_LESS "3.21")
file(GLOB depthai_dll_libraries "${HUNTER_INSTALL_PREFIX}/bin/*.dll")
else()
set(depthai_dll_libraries "$<TARGET_RUNTIME_DLLS:${TARGET_NAME}>")
endif()
add_custom_command(TARGET ${TARGET_NAME} POST_BUILD COMMAND
${CMAKE_COMMAND} -E copy ${depthai_dll_libraries} $<TARGET_FILE_DIR:${TARGET_NAME}>
COMMAND_EXPAND_LISTS
)

# Disable "d" postfix, so python can import the library as is
set_target_properties(${TARGET_NAME} PROPERTIES DEBUG_POSTFIX "")
endif()

# Add stubs (pyi) generation step after building bindings
execute_process(COMMAND "${PYTHON_EXECUTABLE}" "-c" "from mypy import api" RESULT_VARIABLE error OUTPUT_QUIET ERROR_QUIET)
if(error)
@@ -108,7 +131,12 @@ else()
endif()
message(STATUS "Mypy available, creating and checking stubs. Running with generate_stubs.py ${TARGET_NAME} ${bindings_directory}")
add_custom_command(TARGET ${TARGET_NAME} POST_BUILD COMMAND
${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_LIST_DIR}/generate_stubs.py" "${TARGET_NAME}" "${bindings_directory}"
${CMAKE_COMMAND} -E env
# PATH (dlls)
"PATH=${HUNTER_INSTALL_PREFIX}/bin${SYS_PATH_SEPARATOR}$ENV{PATH}"
# Python path (to find compiled module)
"PYTHONPATH=$<TARGET_FILE_DIR:${TARGET_NAME}>${SYS_PATH_SEPARATOR}$ENV{PYTHONPATH}"
${PYTHON_EXECUTABLE} "${CMAKE_CURRENT_LIST_DIR}/generate_stubs.py" "${TARGET_NAME}" "$<TARGET_FILE_DIR:${TARGET_NAME}>"
DEPENDS "${CMAKE_CURRENT_LIST_DIR}/generate_stubs.py"
WORKING_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}"
)
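
For context, the ``execute_process`` probe at the top of this hunk boils down to the following Python check - a sketch (not part of the commit) of what decides whether stub generation runs:

# Stub generation is gated on mypy being importable by the build interpreter
try:
    from mypy import api  # noqa: F401 - only the import matters here
    print("mypy available - generate_stubs.py will run after the build")
except ImportError:
    print("mypy not available - .pyi stub generation is skipped")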
6 changes: 0 additions & 6 deletions ci/Dockerfile
@@ -4,12 +4,6 @@ RUN apt-get update && apt-get install -y wget build-essential cmake pkg-config l

ADD ci/docker_dependencies.sh .
RUN ./docker_dependencies.sh
RUN wget https://github.com/libusb/libusb/releases/download/v1.0.24/libusb-1.0.24.tar.bz2
RUN tar xf libusb-1.0.24.tar.bz2
RUN cd libusb-1.0.24 && \
./configure --disable-udev && \
make -j && make install


RUN pip install -U pip && pip install --extra-index-url https://www.piwheels.org/simple/ --prefer-binary opencv-python

2 changes: 1 addition & 1 deletion depthai-core
Submodule depthai-core updated 48 files
+3 −3 .github/workflows/main.workflow.yml
+3 −3 .github/workflows/test.workflow.yml
+6 −3 CMakeLists.txt
+3 −6 README.md
+2 −2 cmake/ClangFormat.cmake
+1 −1 cmake/Depthai/DepthaiDeviceSideConfig.cmake
+19 −6 cmake/Hunter/config.cmake
+7 −1 cmake/depthaiDependencies.cmake
+12 −0 cmake/toolchain/asan.cmake
+12 −0 cmake/toolchain/ubsan.cmake
+31 −3 examples/CMakeLists.txt
+88 −0 examples/IMU/imu_firmware_update.cpp
+1 −0 examples/IMU/imu_rotation_vector.cpp
+18 −0 examples/MobileNet/rgb_mobilenet.cpp
+151 −0 examples/NeuralNetwork/detection_parser.cpp
+15 −0 examples/SpatialDetection/spatial_tiny_yolo.cpp
+1 −1 examples/bootloader/bootloader_config.cpp
+1 −1 examples/bootloader/bootloader_version.cpp
+1 −1 examples/bootloader/flash_bootloader.cpp
+10 −2 include/depthai/device/DeviceBase.hpp
+2 −2 include/depthai/openvino/OpenVINO.hpp
+9 −0 include/depthai/pipeline/datatype/StereoDepthConfig.hpp
+5 −0 include/depthai/pipeline/node/DetectionNetwork.hpp
+107 −0 include/depthai/pipeline/node/DetectionParser.hpp
+6 −0 include/depthai/pipeline/node/IMU.hpp
+9 −0 include/depthai/pipeline/node/StereoDepth.hpp
+6 −0 include/depthai/pipeline/node/VideoEncoder.hpp
+1 −0 include/depthai/pipeline/nodes.hpp
+4 −1 include/depthai/utility/Initialization.hpp
+18 −6 include/depthai/xlink/XLinkConnection.hpp
+1 −1 shared/depthai-shared
+63 −32 src/device/DeviceBase.cpp
+10 −9 src/device/DeviceBootloader.cpp
+15 −3 src/openvino/OpenVINO.cpp
+5 −0 src/pipeline/datatype/StereoDepthConfig.cpp
+15 −15 src/pipeline/node/DetectionNetwork.cpp
+94 −0 src/pipeline/node/DetectionParser.cpp
+4 −0 src/pipeline/node/IMU.cpp
+12 −12 src/pipeline/node/SpatialDetectionNetwork.cpp
+4 −0 src/pipeline/node/StereoDepth.cpp
+8 −5 src/pipeline/node/VideoEncoder.cpp
+18 −3 src/utility/Initialization.cpp
+4 −3 src/utility/Resources.cpp
+199 −165 src/xlink/XLinkConnection.cpp
+42 −12 tests/CMakeLists.txt
+33 −0 tests/integration/CMakeLists.txt
+8 −0 tests/integration/src/main.cpp
+23 −0 tests/src/openvino_blob_test.cpp
2 changes: 2 additions & 0 deletions docs/CMakeLists.txt
@@ -40,6 +40,8 @@ else()
add_custom_target(sphinx ALL
${CMAKE_COMMAND} -E env
# Environment variables
# PATH (dlls)
"PATH=${HUNTER_INSTALL_PREFIX}/bin${SYS_PATH_SEPARATOR}$ENV{PATH}"
# Python path (to find compiled module)
"PYTHONPATH=$<TARGET_FILE_DIR:${TARGET_NAME}>${SYS_PATH_SEPARATOR}$ENV{PYTHONPATH}"
# ASAN in case of sanitizers
9 changes: 6 additions & 3 deletions docs/source/components/bootloader.rst
@@ -21,20 +21,23 @@ Device Manager
``device_manager.py`` is a Python helper that interfaces with device :ref:`Bootloader` and bootloader configuration.
It can be found at `depthai-python/utilities <https://github.com/luxonis/depthai-python/tree/main/utilities>`__.

.. image:: https://user-images.githubusercontent.com/18037362/170479657-faacd06d-5f7e-4215-a821-005d58a5f379.png
.. image:: https://user-images.githubusercontent.com/18037362/171629704-0f78f31a-1778-4338-8ac0-bdfb0d2d593f.png

Device Manager Usage
--------------------

**About device tab** - Select a camera to see its metadata - like MxID, flashed bootloader version, device state etc.

* First, we need to select the device using the dropdown. You can click ``Search`` to search for all available cameras, either via USB port or on LAN (PoE OAKs).
* First, we have to select the device we want to connect (boot) to. You can select it using:

* **Dropdown**, which contains the MxIDs of found devices. The dropdown is only updated when the app starts.
* **Specify IP** button, if your OAK PoE camera isn't in the same LAN.
* **Search** feature - a new window will open with a table of all available cameras (either via USB port or on LAN - OAK PoEs), showing their MxID, name, and status. Clicking a table row will select that device and boot to it.
* ``Flash newest Bootloader`` button will flash the ``newest bootloader`` to the device. You can select AUTO, USB or NETWORK bootloader.

* **AUTO** will select the bootloader type that matches the connection the camera is currently using. If you are connected via USB (e.g. when doing a factory reset) to an OAK PoE camera, you shouldn't select AUTO, as it would flash the USB bootloader.
* **USB** bootloader will try to boot the application that is stored in flash memory. If it can't find a flashed application, it will behave as a normal USB OAK - it will wait until a host computer initializes the application.
* **NETWORK** bootloader is used by the OAK PoE cameras, and is flashed at the factory. It handles network initialization so the OAK PoE cameras can be booted through the LAN.

* ``Factory reset`` will erase the whole flash content and re-flash it with only the USB or NETWORK bootloader. Flashed application (pipeline, assets) and bootloader configurations will be lost.
* ``Boot into USB recovery mode`` will force e.g. an OAK PoE camera to be available through the USB connector, even if its boot pins are set to PoE booting. It is mostly used by our firmware developers.
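
The *About device* information and bootloader version that the Device Manager shows can also be read from a short script. The snippet below is a minimal sketch (not part of this commit) and assumes the ``dai.DeviceBootloader`` API from depthai 2.x (``getFirstAvailableDevice()``, ``getVersion()``):

import depthai as dai

# Find the first available OAK (USB or PoE) and read the bootloader version flashed on it
found, device_info = dai.DeviceBootloader.getFirstAvailableDevice()
if not found:
    raise RuntimeError("No DepthAI device found")

bl = dai.DeviceBootloader(device_info)
print("Flashed bootloader version:", bl.getVersion())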

14 changes: 14 additions & 0 deletions docs/source/components/device.rst
@@ -40,6 +40,20 @@ When you create the device in the code, firmware is uploaded together with the p
    cfg = depthai.ImageManipConfig()
    input_q.send(cfg)

Connect to specified device
###########################

If you have multiple devices and only want to connect to a specific one, or if your OAK PoE camera is outside of your
subnet, you can specify the device (either with MxID, IP, or USB port name) you want to connect to.

.. code-block:: python

    # Specify MXID, IP Address or USB path
    device_info = depthai.DeviceInfo("14442C108144F1D000") # MXID
    #device_info = depthai.DeviceInfo("192.168.1.44") # IP Address
    #device_info = depthai.DeviceInfo("3.3.3") # USB port name
    with depthai.Device(pipeline, device_info) as device:
        # ...
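
If you don't know the device's MxID or IP address in advance, you can first list everything that is currently visible. The snippet below is a minimal sketch (not part of this commit) and assumes the ``depthai.Device.getAllAvailableDevices()`` helper and ``DeviceInfo.getMxId()`` from the 2.x API:

import depthai

# Enumerate devices reachable over USB or LAN and print their identifiers
for device_info in depthai.Device.getAllAvailableDevices():
    # getMxId() returns the unique MxID string; state tells whether the device is unbooted, in bootloader, or already booted
    print(f"Found device - MxID: {device_info.getMxId()}, state: {device_info.state}")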
Multiple devices
################
6 changes: 6 additions & 0 deletions examples/CMakeLists.txt
@@ -33,6 +33,8 @@ function(add_python_example example_name python_script_path)
add_custom_target(${example_name}
${CMAKE_COMMAND} -E env
# Environment variables
# PATH (dlls)
"PATH=${HUNTER_INSTALL_PREFIX}/bin${SYS_PATH_SEPARATOR}$ENV{PATH}"
# Python path (to find compiled module)
"PYTHONPATH=$<TARGET_FILE_DIR:${TARGET_NAME}>${SYS_PATH_SEPARATOR}$ENV{PYTHONPATH}"
# ASAN in case of sanitizers
@@ -49,6 +51,8 @@ function(add_python_example example_name python_script_path)
# Adds test with 5 seconds timeout and bumps all python warnings to errors
add_test(NAME ${example_name} COMMAND
${CMAKE_COMMAND} -E env
# PATH (dlls)
"PATH=${HUNTER_INSTALL_PREFIX}/bin${SYS_PATH_SEPARATOR}$ENV{PATH}"
# Python path (to find compiled module)
"PYTHONPATH=$<TARGET_FILE_DIR:${TARGET_NAME}>${SYS_PATH_SEPARATOR}$ENV{PYTHONPATH}"
# ASAN in case of sanitizers
@@ -70,6 +74,8 @@ if(DEPTHAI_PYTHON_TEST_EXAMPLES)
# Adds install requirements test with 5 minute timeout
add_test(NAME install_requirements COMMAND
${CMAKE_COMMAND} -E env
# PATH (dlls)
"PATH=${HUNTER_INSTALL_PREFIX}/bin${SYS_PATH_SEPARATOR}$ENV{PATH}"
# Python path (to find compiled module)
"PYTHONPATH=$<TARGET_FILE_DIR:${TARGET_NAME}>${SYS_PATH_SEPARATOR}$ENV{PYTHONPATH}"
# ASAN in case of sanitizers
74 changes: 74 additions & 0 deletions examples/IMU/imu_firmware_update.py
@@ -0,0 +1,74 @@
#!/usr/bin/env python3

import cv2
import depthai as dai
import time
import math

print("Warning! Flashing IMU firmware can potentially soft brick your device and should be done with caution.")
print("Do not unplug your device while the IMU firmware is flashing.")
print("Type 'y' and press enter to proceed, otherwise exits: ")
if input() != 'y':
    print("Prompt declined, exiting...")
    exit(-1)

# Create pipeline
pipeline = dai.Pipeline()

# Define sources and outputs
imu = pipeline.create(dai.node.IMU)
xlinkOut = pipeline.create(dai.node.XLinkOut)

xlinkOut.setStreamName("imu")

# enable ACCELEROMETER_RAW at 500 hz rate
imu.enableIMUSensor(dai.IMUSensor.ACCELEROMETER_RAW, 500)
# enable GYROSCOPE_RAW at 400 hz rate
imu.enableIMUSensor(dai.IMUSensor.GYROSCOPE_RAW, 400)
# it's recommended to set both setBatchReportThreshold and setMaxBatchReports to 20 when integrating in a pipeline with a lot of input/output connections
# above this threshold packets will be sent in batch of X, if the host is not blocked and USB bandwidth is available
imu.setBatchReportThreshold(1)
# maximum number of IMU packets in a batch, if it's reached device will block sending until host can receive it
# if lower or equal to batchReportThreshold then the sending is always blocking on device
# useful to reduce device's CPU load and number of lost packets, if CPU load is high on device side due to multiple nodes
imu.setMaxBatchReports(10)

# Link plugins IMU -> XLINK
imu.out.link(xlinkOut.input)

imu.enableFirmwareUpdate(True)

# Pipeline is defined, now we can connect to the device
with dai.Device(pipeline) as device:

    def timeDeltaToMilliS(delta) -> float:
        return delta.total_seconds()*1000

    # Output queue for imu bulk packets
    imuQueue = device.getOutputQueue(name="imu", maxSize=50, blocking=False)
    baseTs = None
    while True:
        imuData = imuQueue.get() # blocking call, will wait until a new data has arrived

        imuPackets = imuData.packets
        for imuPacket in imuPackets:
            acceleroValues = imuPacket.acceleroMeter
            gyroValues = imuPacket.gyroscope

            acceleroTs = acceleroValues.timestamp.get()
            gyroTs = gyroValues.timestamp.get()
            if baseTs is None:
                baseTs = acceleroTs if acceleroTs < gyroTs else gyroTs
            acceleroTs = timeDeltaToMilliS(acceleroTs - baseTs)
            gyroTs = timeDeltaToMilliS(gyroTs - baseTs)

            imuF = "{:.06f}"
            tsF = "{:.03f}"

            print(f"Accelerometer timestamp: {tsF.format(acceleroTs)} ms")
            print(f"Accelerometer [m/s^2]: x: {imuF.format(acceleroValues.x)} y: {imuF.format(acceleroValues.y)} z: {imuF.format(acceleroValues.z)}")
            print(f"Gyroscope timestamp: {tsF.format(gyroTs)} ms")
            print(f"Gyroscope [rad/s]: x: {imuF.format(gyroValues.x)} y: {imuF.format(gyroValues.y)} z: {imuF.format(gyroValues.z)} ")

        if cv2.waitKey(1) == ord('q'):
            break
1 change: 1 addition & 0 deletions examples/IMU/imu_rotation_vector.py
@@ -16,6 +16,7 @@

# enable ROTATION_VECTOR at 400 hz rate
imu.enableIMUSensor(dai.IMUSensor.ROTATION_VECTOR, 400)
# it's recommended to set both setBatchReportThreshold and setMaxBatchReports to 20 when integrating in a pipeline with a lot of input/output connections
# above this threshold packets will be sent in batch of X, if the host is not blocked and USB bandwidth is available
imu.setBatchReportThreshold(1)
# maximum number of IMU packets in a batch, if it's reached device will block sending until host can receive it
15 changes: 15 additions & 0 deletions examples/MobileNet/rgb_mobilenet.py
@@ -29,9 +29,11 @@
nn = pipeline.create(dai.node.MobileNetDetectionNetwork)
xoutRgb = pipeline.create(dai.node.XLinkOut)
nnOut = pipeline.create(dai.node.XLinkOut)
nnNetworkOut = pipeline.create(dai.node.XLinkOut)

xoutRgb.setStreamName("rgb")
nnOut.setStreamName("nn")
nnNetworkOut.setStreamName("nnNetwork");

# Properties
camRgb.setPreviewSize(300, 300)
@@ -51,13 +53,15 @@

camRgb.preview.link(nn.input)
nn.out.link(nnOut.input)
nn.outNetwork.link(nnNetworkOut.input);

# Connect to device and start pipeline
with dai.Device(pipeline) as device:

    # Output queues will be used to get the rgb frames and nn data from the outputs defined above
    qRgb = device.getOutputQueue(name="rgb", maxSize=4, blocking=False)
    qDet = device.getOutputQueue(name="nn", maxSize=4, blocking=False)
    qNN = device.getOutputQueue(name="nnNetwork", maxSize=4, blocking=False);

    frame = None
    detections = []
@@ -81,15 +85,19 @@ def displayFrame(name, frame):
        # Show the frame
        cv2.imshow(name, frame)

    printOutputLayersOnce = True

    while True:
        if args.sync:
            # Use blocking get() call to catch frame and inference result synced
            inRgb = qRgb.get()
            inDet = qDet.get()
            inNN = qNN.get()
        else:
            # Instead of get (blocking), we use tryGet (non-blocking) which will return the available data or None otherwise
            inRgb = qRgb.tryGet()
            inDet = qDet.tryGet()
            inNN = qNN.tryGet()

        if inRgb is not None:
            frame = inRgb.getCvFrame()
@@ -100,6 +108,13 @@ def displayFrame(name, frame):
            detections = inDet.detections
            counter += 1

        if printOutputLayersOnce and inNN is not None:
            toPrint = 'Output layer names:'
            for ten in inNN.getAllLayerNames():
                toPrint = f'{toPrint} {ten},'
            print(toPrint)
            printOutputLayersOnce = False;

        # If the frame is available, draw bounding boxes on it and show the frame
        if frame is not None:
            displayFrame("rgb", frame)
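
Beyond the layer names printed above, the same ``NNData`` message exposes the raw tensors. The fragment below is a sketch (not part of this commit) that would slot into the ``while`` loop right after the layer-name printout; it assumes the 2.x ``NNData.getLayerFp16()`` accessor and reuses the ``inNN`` variable from the example:

        # Sketch: read the raw FP16 output of the first reported layer as a flat list of floats
        if inNN is not None:
            layer_names = inNN.getAllLayerNames()
            if layer_names:
                first_layer = inNN.getLayerFp16(layer_names[0])
                print(f"Layer '{layer_names[0]}' holds {len(first_layer)} values")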