From 024facf72b157d20ae572a4ad75540b6ea261ec0 Mon Sep 17 00:00:00 2001 From: quic-khrahul <131336334+quic-khrahul@users.noreply.github.com> Date: Fri, 28 Apr 2023 13:49:25 +0530 Subject: [PATCH 1/4] People Intrusion Detection LU.UM.1.1 Signed-off-by: quic-khrahul <131336334+quic-khrahul@users.noreply.github.com> --- .../People-Intrusion-Detection/README.md | 256 ++++++ .../docs/GettingStarted.md | 51 ++ .../docs/Install.md | 116 +++ .../model/coco_labels.txt | 80 ++ .../model/inputlist.txt | 2 + .../model/mle_snpeyolov5m_quant.config | 22 + .../model/mle_snpeyolov5n_quant.config | 17 + .../src/gst-plugin-mle/CMakeLists.txt | 69 ++ .../src/gst-plugin-mle/config.h.in | 34 + .../gst-plugin-mle/mle_engine/CMakeLists.txt | 39 + .../gst-plugin-mle/mle_engine/common_utils.h | 290 ++++++ .../mle_engine/ml_engine_impl.cc | 796 ++++++++++++++++ .../mle_engine/ml_engine_intf.h | 305 +++++++ .../gst-plugin-mle/mle_engine/snpe_base.cc | 421 +++++++++ .../src/gst-plugin-mle/mle_engine/snpe_base.h | 121 +++ .../mle_engine/snpe_detection.cc | 154 ++++ .../mle_engine/snpe_detection.h | 43 + .../mle_engine/snpe_yolodetection.cc | 308 +++++++ .../mle_engine/snpe_yolodetection.h | 58 ++ .../mle_gst_snpe/CMakeLists.txt | 33 + .../gst-plugin-mle/mle_gst_snpe/mle_snpe.cc | 862 ++++++++++++++++++ .../gst-plugin-mle/mle_gst_snpe/mle_snpe.h | 95 ++ .../mle_gst_snpe/mle_snpeyolov5n.config | 17 + .../include/gst/video/c2d-video-converter.h | 210 +++++ .../src/include/gst/video/gstimagepool.h | 88 ++ .../src/include/ml-meta/ml_meta.h | 365 ++++++++ 26 files changed, 4852 insertions(+) create mode 100644 RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/README.md create mode 100644 RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/docs/GettingStarted.md create mode 100644 RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/docs/Install.md create mode 100644 RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/model/coco_labels.txt create mode 100644 
RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/model/inputlist.txt create mode 100644 RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/model/mle_snpeyolov5m_quant.config create mode 100644 RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/model/mle_snpeyolov5n_quant.config create mode 100644 RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/CMakeLists.txt create mode 100644 RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/config.h.in create mode 100644 RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_engine/CMakeLists.txt create mode 100644 RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_engine/common_utils.h create mode 100644 RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_engine/ml_engine_impl.cc create mode 100644 RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_engine/ml_engine_intf.h create mode 100644 RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_engine/snpe_base.cc create mode 100644 RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_engine/snpe_base.h create mode 100644 RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_engine/snpe_detection.cc create mode 100644 RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_engine/snpe_detection.h create mode 100644 RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_engine/snpe_yolodetection.cc create mode 100644 RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_engine/snpe_yolodetection.h create mode 100644 RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_gst_snpe/CMakeLists.txt create mode 100644 
RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_gst_snpe/mle_snpe.cc create mode 100644 RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_gst_snpe/mle_snpe.h create mode 100644 RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_gst_snpe/mle_snpeyolov5n.config create mode 100644 RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/include/gst/video/c2d-video-converter.h create mode 100644 RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/include/gst/video/gstimagepool.h create mode 100644 RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/include/ml-meta/ml_meta.h diff --git a/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/README.md b/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/README.md new file mode 100644 index 0000000..deea678 --- /dev/null +++ b/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/README.md @@ -0,0 +1,256 @@ +# People Intrusion Detection using qtimlesnpe plugin and Gstreamer framework using YoloV5 Model +This project is designed to use Gstreamer pipeline for People Intrusion Detection on Qualcomm® Robotics RB5 development kit with a USB camera. It will help customer to Identify if a person appears in a restricted area by specifying a detection area, the algorithm can automatically and accurately analyze real-time video and detect if a person enters the area. Solution will reduce the manual monitoring cost. + +# Objective + +The main objective of this project is to start artificial intelligence (AI) application development for robotics using the Qualcomm Robotics RB5 development kit. This project uses Gstreamer Framework It will walk you through the following steps: + +1. Download the RB5 Ubuntu platform source code + +2. Make modifications in qtimlesnpe plugin for the post processing. + +3. Push the Gstreamer qtimlesnpe plugin libraries to the device. + +4. 
Run the Gstreamer pipeline using SNPE engine for AI inferencing on Qualcomm Robotics RB5 development kit. + +5. The device needs to be connected to an active internet connection for installation of packages. + +# Prerequisite +1. RB5 Introduction Guide: + * https://github.com/quic/sample-apps-for-robotics-platforms/tree/master/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/docs/GettingStarted.md +2. The Qualcomm Neural Processing SDK provides tools for model conversion (onnx to dlc), model quantization and execution. +Refer to the steps given in the detailed documentation in the SDK for installation. + * https://developer.qualcomm.com/software/qualcomm-neural-processing-sdk +3. ADB to access the device using command prompt and push/pull the files from device. + * https://developer.android.com/studio/command-line/adb + +# Materials Required / Parts List / Tools + +1. An Ubuntu 18.04 PC +2. Qualcomm Robotics RB5 Development kit: + * https://developer.qualcomm.com/qualcomm-robotics-rb5-kit +3. A USB camera +4. 
A display monitor + +![image](https://github.qualcomm.com/storage/user/12959/files/2d85cec3-913e-43af-9a06-e539690bb30c) + +# Environment Setup to download Yolov5 Model: + +Python Requirements: +* python 3.6.9 +* onnx==1.6.0 +* onnx-simplifier==0.2.6 +* onnxoptimizer==0.2.6 +* onnxruntime==1.1.0 +* numpy==1.16.5 +* protobuf==3.17.3 +* torch==1.10.0 +* torchvision==0.11.1 + +```console +sudo apt install python3.6 python3.6-venv build-essential make python3-dev python3.6-dev +``` +```console +git clone https://github.com/ultralytics/yolov5.git +``` +```console +cd yolov5 +``` +```console +git checkout v6.0 +``` +```console +python3.6 -m pip install --upgrade pip +``` +```console +python3.6 -m pip install -r requirements.txt +``` +```console +python3.6 -m pip install coremltools>=4.1 onnx==1.6.0 scikit-learn==0.19.2 onnxruntime==1.1.0 onnx-simplifier==0.2.6 onnxoptimizer==0.2.6 +``` + +### Export YoloV5m to ONNX: +```console +python3.6 export.py --weights yolov5m.pt --optimize --opset 11 --simplify --include onnx +``` + +## Setup SNPE +SNPE Setup is needed to convert the onnx model to quantized dlc, please follow the instructions for setting up Neural Processing SDK using the link provided. Please use same version of SNPE throughout the demo. +https://developer.qualcomm.com/docs/snpe/setup.html +```console +export SNPE_ROOT=/snpe-1.68.0.3932 +``` +Find onnx installation path from pip +```console +python3.6 -m pip show onnx +``` +Look for 'Location:' line in output of above command +```console +export ONNX_DIR=/onnx +``` +Setup onnx environment for snpe +```console +cd $SNPE_ROOT +source bin/envsetup.sh -o $ONNX_DIR +``` +SNPE currently does not support 5D operator. It requires specify output nodes before 5D Reshape in convert command. The output nodes can be checked in the https://netron.app/. + +To check the output layer nodes, Open the model in the Netron app and click on Conv layer. 
+![image](https://github.qualcomm.com/storage/user/12959/files/2aa3e4af-6518-43df-a59b-c29130644554) + +In attached yolov5m.onnx, the output nodes before 5D is onnx::443 (Conv_271), 496 (Conv_305) and 549 (Conv_339) + +![image](https://github.qualcomm.com/storage/user/12959/files/e8f4be0d-06c5-4f16-ac58-83e9795315a0) + +## This implementation does below functions: +* anchorBoxProcess: +Get raw data from out nodes before 5D (Conv_271, Conv_305, Conv_339), convert to meaning data (scores, class, bounding boxes). +* doNMS: (non-max suppression): remove overlap boxes +* ShowDetectionOverlay: Overlay detection result at output video/Image + +## Convert Onnx Model to DLC +### Convert to DLC +```console +snpe-onnx-to-dlc -i yolov5m.onnx --out_node --out_node --out_node +``` +Example corresponding to screenshot above: +```console +snpe-onnx-to-dlc -i yolov5m.onnx --out_node 443 --out_node 496 --out_node 549 +``` + +### Use below command to generate the Quantized model for AIP/DSP. Use the same SNPE SDK version which is installed in device. +Please take reference from inputlist.txt file given in model directory. Create your own inputlist.txt file as per your yolov5m.onnx model. We need to update all the output names in inputlist.txt +```console +snpe-dlc-quantize --input_dlc=yolov5m.dlc --input_list=inputlist.txt --output_dlc=yolov5m_quant.dlc --enable_hta +``` + +input.raw file is needed for model quantization. Create a sample input.raw file using below python command. +```console +import numpy as np +((np.random.random((1,3,640,640)).astype(np.float32))).tofile("input.raw") +``` + +* Please read https://developer.qualcomm.com/sites/default/files/docs/snpe/quantized_models.html to know more about model quantization. + +# Source Code / Source Examples / Application Executable: + +Below are the resources used for the demo application. It include qtimlesnpe plugin source code, YoloV5 dlc, config file and label file.. + +## 1. 
qtimlesnpe plugin source code: +Please refer to the source code for the changes done for YoloV5; a different model may need different post-processing steps. + * https://github.com/quic/sample-apps-for-robotics-platforms/tree/master/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src + +## 2. Demo dlc file used in this demo +Download the YoloV5 model from the Yolo official repository. Convert the model to dlc using snpe-onnx-to-dlc and quantize it using snpe-dlc-quantize. + +## 3. Demo config file +Config file is used to provide model information to qtimlesnpe plugin. Make changes in the config file to reflect any changes in execution. +Update the model layer information, label and dlc path in the configuration file. + * https://github.com/quic/sample-apps-for-robotics-platforms/tree/master/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/model/mle_snpeyolov5m_quant.config + +![image](https://github.qualcomm.com/storage/user/12959/files/a6c327d3-69fd-4fac-b863-0412f6b1dc0f) + + +#### To define the camera FOV, the restricted area needs to be set. It can be set by changing the x, y, width and height. These dimensions depend on the camera resolution, so they need to be set accordingly. +* x_axis = 100 +* y_axis = 100 +* width = 400 +* height = 400 +If the restricted area is not configured, the model will perform detection on the complete framebuffer. + +## 4. Label file for YoloV5 model +YoloV5 was trained on the coco dataset, so it uses coco labels. +* https://github.com/quic/sample-apps-for-robotics-platforms/tree/master/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/model/coco_labels.txt + +# Build / Assembly Instructions: + +1. Hardware set up: + +a. Connect the Qualcomm Robotics RB5 development kit to the monitor through HDMI cable. +b. Plug in a keyboard and a mouse to the development board. +c. Connect the USB camera module to the development board. 
+ +# Gstreamer Pipeline +![image](https://github.qualcomm.com/storage/user/12959/files/6e7ac490-408d-41b2-a633-711b1d01f914) + +# Detailed flow diagram +![image](https://github.qualcomm.com/storage/user/12959/files/0ffa0c7c-32cb-4c68-9dbe-0114f8029e98) + +# Steps to Run Demo RB5 Board: + +## Push required Demo files to device: + +1. Prepare Directory to run the model +```console +adb shell "mkdir /data/misc/camera" +``` + +2. Push model, config and label file to the device: +```console +adb push yolov5m_quant.dlc /data/misc/camera +``` +```console +adb push mle_snpeyolov5m_quant.config /data/misc/camera +``` +```console +adb push coco_labels.txt /data/misc/camera +``` + +3. Compile and Install qtimlesnpe libraries on the target: + + Please refer the steps given in the below document to download, compile and install qtimlesnpe gstreamer plugin libraries. + * https://github.com/quic/sample-apps-for-robotics-platforms/tree/master/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/docs/Install.md + +# Run the model +## Do step 4-7 to display the live stream on laptop + +4. Run adb forward command in command prompt +```console +adb forward tcp:8900 tcp:8900 +``` + +5. Run rtsp server +```console +adb shell +``` +```console +gst-rtsp-server -p 8900 -m /live "( udpsrc name=pay0 port=8554 caps=\"application/x-rtp,media=video,clock-rate=90000,encoding-name=H264,payload=96\" )" +``` + +6. Run Gstreamer Pipeline from different shell +```console +adb shell +``` +```console +gst-launch-1.0 qtiqmmfsrc ! video/x-raw\(memory:GBM\), format=NV12, width=1280, height=720, framerate=30/1 ! queue ! qtimlesnpe config=/data/misc/camera/mle_snpeyolov5m_quant.config postprocessing=yolov5detection ! queue ! qtioverlay bbox-color=0xFF0000FF ! queue ! omxh264enc target-bitrate=6000000 periodicity-idr=1 interval-intraframes=29 control-rate=max-bitrate ! queue ! h264parse config-interval=-1 ! rtph264pay pt=96 ! udpsink host=127.0.0.1 port=8554 +``` + +7. 
Connect the device to laptop via USB. Install VLC player and run “Open Network Stream” with rtsp://127.0.0.1:8900/live + +## Do step 8-9 to display the live stream on external monitor using hdmi cable + +8. Connect the device with the monitor using hdmi cable. Reboot the device. + +9. Steps to Run the Pipeline +```console +adb shell +``` +```console +export XDG_RUNTIME_DIR="/usr/bin/weston_socket" +``` +```console +mkdir -p $XDG_RUNTIME_DIR +``` +```console +chmod 0700 $XDG_RUNTIME_DIR +``` +```console +/usr/bin/weston --tty=1 --connector=29 & +``` +```console +gst-launch-1.0 qtiqmmfsrc ! video/x-raw\(memory:GBM\), format=NV12, width=1280, height=720, framerate=30/1 ! queue ! qtimlesnpe config=/data/misc/camera/mle_snpeyolov5m_quant.config postprocessing=yolov5detection ! queue ! qtioverlay bbox-color=0xFF0000FF ! waylandsink width=1920 height=1080 async=true sync=false enable-last-sample=false +``` +Add GST_DEBUG=qtiml*:5 in gstreamer launch command if you want to enable debug logs for qtimlesnpe plugin. + +### Demo +![image](https://github.qualcomm.com/storage/user/12959/files/d71791e7-db2c-496b-8c8c-6ec9942ce506) diff --git a/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/docs/GettingStarted.md b/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/docs/GettingStarted.md new file mode 100644 index 0000000..3ded3ae --- /dev/null +++ b/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/docs/GettingStarted.md @@ -0,0 +1,51 @@ +# Qualcomm® Robotics RB5 Development Kit + +![image](https://github.qualcomm.com/storage/user/12959/files/fe1d637f-0fb6-40a1-90e2-88f29693f0bd) + +Based on the Qualcomm® QRB5165 Robotics SoC, the Qualcomm® Robotics RB5 Development kit contains a robotics-focused development board and compliant with the 96Boards open hardware specification which supports a broad range of mezzanine-board expansions for rapid prototyping. + +* Contains advanced robotics platform Qualcomm® QRB5165 processor. 
+* Supports widely used Linux based distributions for robotics applications. +* Supports multiple SDKs and tools, including Qualcomm® Neural Processing SDK for AI, Qualcomm® Robotics Vision SDK, Qualcomm® Computer Vision SDK, Qualcomm® Hexagon™ DSP SDK, Robotics Operating System (ROS) 2, and multiple Linux distributions. +* Comprehensive set of demo applications and tutorials to accelerate development of robotics applications. +* Compliant with the 96Boards specification, with support for sensors such as multiple cameras, depth sensing solution, GMSL sensor, Ultrasonic Time-of-Flight Sensor with Extended Range, multi-mic and additional sensors like IMU, pressure sensor, magnetometer, etc. +* Multiple interfaces and I/Os which can connect multiple sensors. + +# Product Overview +https://github.qualcomm.com/storage/user/12959/files/bacf2d2c-ad46-4e19-b0e9-a96b3a31013f + +# Unboxing Video +https://github.qualcomm.com/storage/user/12959/files/bf69579a-54c0-4112-b9e3-8fab92c025dd + +# Order Here +https://www.thundercomm.com/product/qualcomm-robotics-rb5-development-kit/#overview + +# Documents +## Quick Start Guide +https://developer.qualcomm.com/qualcomm-robotics-rb5-kit/quick-start-guide + +## Depth Camera Hardware User Guide +### Data Sheet +https://dev.intelrealsense.com/docs/datasheets + +### Video Tutorial +https://www.intelrealsense.com/videos-and-tutorials/ + +### Hardware Reference Guide +https://developer.qualcomm.com/qualcomm-robotics-rb5-kit/hardware-reference-guide + +### Software Reference Manual +https://developer.qualcomm.com/qualcomm-robotics-rb5-kit/software-reference-manual + +### SDK Manager +Everything You Need to Generate System Image +RB5 SDK Manager provides an end-to-end image generation/downloading solution for developers to work with RB5 devices. 
+https://www.thundercomm.com/product/qualcomm-robotics-rb5-development-kit/#sdk-manager + +https://github.qualcomm.com/storage/user/12959/files/1825a6e3-7fe0-4bb9-bb09-3a205233ec2c + +## Learning Resources +https://developer.qualcomm.com/qualcomm-robotics-rb5-kit/learning-resources + +## Sample apps +https://github.com/quic/sample-apps-for-Qualcomm-Robotics-RB5-platform diff --git a/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/docs/Install.md b/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/docs/Install.md new file mode 100644 index 0000000..cc00297 --- /dev/null +++ b/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/docs/Install.md @@ -0,0 +1,116 @@ +## Please follow the steps to compile and deploy the qtimlesnpe plugin on target device + +#### 1. Download SNPE SDK v1.68.0 from here: + +https://developer.qualcomm.com/downloads/qualcomm-neural-processing-sdk-ai-v1680?referrer=node/34505 + +#### 2. Please push the SNPE libraries extracted from SNPE sdk into the requisite folder . +```console +cd {}\snpe-1.68.0\snpe-1.68.0.3932\ +``` +```console +adb push lib\aarch64-ubuntu-gcc7.5\. /usr/lib/ +``` +```console +adb push lib\aarch64-ubuntu-gcc7.5\libsnpe_dsp_domains_v2.so /usr/lib/rfsa/adsp/ +``` +```console +adb push lib\dsp\. /usr/lib/rfsa/adsp/ +``` +```console +adb push bin\aarch64-ubuntu-gcc7.5\snpe-net-run /usr/bin/ +``` + +Verify the snpe version: +```console +adb shell +``` +```console +chmod +x /usr/bin/snpe-net-run +``` +```console +snpe-net-run --version +``` +Check if it matches with the SDK version. If not matches, then please push the libraries again and verify. + +#### 3. Download and install fastcv sdk +##### Install Java on host +```console +apt install default-jdk +``` +##### Download and Install fastcv sdk on host (Linux). 
+* https://developer.qualcomm.com/downloads/qualcomm-computer-vision-sdk-v171-linux-embedded-linux-installer?referrer=node/7332 +* chmod +x fastcv-installer-linuxembedded-1-7-1.bin +* ./fastcv-installer-linuxembedded-1-7-1.bin + +If using Windows, please download the Windows package from the website. +https://developer.qualcomm.com/software/qualcomm-computer-vision-sdk/tools + +##### Push the fastcv package to device +```console +adb push /root/DEVELOPMENT/ +``` + +#### 4. Download and extract the SNPE SDK zip file on your target machine mentioned in step 1. + +#### 5. adb shell [Please do this only once] +```console +apt-get install cmake +``` +```console +mkdir -p ~/DEVELOPMENT/ +``` +```console +cd /root/DEVELOPMENT/ +``` +```console +git clone https://github.com/quic/sample-apps-for-robotics-platforms.git +``` +```console +cd sample-apps-for-robotics-platforms/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle +``` +```console +mkdir build +``` +```console +cd build +``` + +#### 6. Top Level structure for your reference on your RB5. +```console + /root/DEVELOPMENT/gst-plugin-mle + | + |---CMakeLists.txt + |---mle_engine + |---mle_gst_snpe + |---build +``` + +#### 7. Now you can perform the on-device compilation +```console +cmake -DCMAKE_VERBOSE_MAKEFILE=1 -DSNPE_ENABLE:BOOL=ON -DSNPE_SDK_BASE_DIR=/snpe-1.68.0.3932 -DFASTCV_SDK_DIR= .. +``` +```console +make +``` +```console +make install +``` +#### 8. Verify if the qtimlesnpe plugin loads successfully +```console +gst-inspect-1.0 qtimlesnpe + +Follow further steps in README.md to execute the application. + +``` +Try the Troubleshooting steps if it fails to load. + +### Troubleshooting +##### 1. Delete the gstreamer cache +```console +rm -fr ~/.cache/gstreamer-1.0/registry.aarch64.bin +``` + +##### 2. Try again +1. Check if you are using the same SNPE SDK for pushing and compiling the libraries. +2. 
Repeat the steps again diff --git a/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/model/coco_labels.txt b/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/model/coco_labels.txt new file mode 100644 index 0000000..0b38efe --- /dev/null +++ b/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/model/coco_labels.txt @@ -0,0 +1,80 @@ +1 person +2 bicycle +3 car +4 motorcycle +5 airplane +6 bus +7 train +8 truck +9 boat +10 traffic light +11 fire hydrant +13 stop sign +14 parking meter +15 bench +16 bird +17 cat +18 dog +19 horse +20 sheep +21 cow +22 elephant +23 bear +24 zebra +25 giraffe +27 backpack +28 umbrella +31 handbag +32 tie +33 suitcase +34 frisbee +35 skis +36 snowboard +37 sports ball +38 kite +39 baseball bat +40 baseball glove +41 skateboard +42 surfboard +43 tennis racket +44 bottle +46 wine glass +47 cup +48 fork +49 knife +50 spoon +51 bowl +52 banana +53 apple +54 sandwich +55 orange +56 broccoli +57 carrot +58 hot dog +59 pizza +60 donut +61 cake +62 chair +63 couch +64 potted plant +65 bed +67 dining table +70 toilet +72 tv +73 laptop +74 mouse +75 remote +76 keyboard +77 cell phone +78 microwave +79 oven +80 toaster +81 sink +82 refrigerator +84 book +85 clock +86 vase +87 scissors +88 teddy bear +89 hair drier +90 toothbrush \ No newline at end of file diff --git a/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/model/inputlist.txt b/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/model/inputlist.txt new file mode 100644 index 0000000..3b1227b --- /dev/null +++ b/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/model/inputlist.txt @@ -0,0 +1,2 @@ +%443 496 549 +input.raw diff --git a/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/model/mle_snpeyolov5m_quant.config b/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/model/mle_snpeyolov5m_quant.config new file mode 100644 index 0000000..a18c3eb --- /dev/null +++ 
b/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/model/mle_snpeyolov5m_quant.config @@ -0,0 +1,22 @@ +org.codeaurora.mle.snpe +input_format = 3 +BlueMean = 0.0 +GreenMean = 0.0 +RedMean = 0.0 +BlueSigma = 255.0 +GreenSigma = 255.0 +RedSigma = 255.0 +UseNorm = true +x_axis = 500 +y_axis = 100 +width = 500 +height = 500 +preprocess_type = 1 +confidence_threshold = 0.4 +nms_threshold = 0.5 +max_detection_result = 10 +output_layers = < "Conv_271", "Conv_305", "Conv_339" > +runtime = 1 +model = "/data/misc/camera/yolov5m_quant.dlc" +labels = "/data/misc/camera/coco_labels.txt" + diff --git a/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/model/mle_snpeyolov5n_quant.config b/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/model/mle_snpeyolov5n_quant.config new file mode 100644 index 0000000..adaef57 --- /dev/null +++ b/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/model/mle_snpeyolov5n_quant.config @@ -0,0 +1,17 @@ +org.codeaurora.mle.snpe +input_format = 3 +BlueMean = 0.0 +GreenMean = 0.0 +RedMean = 0.0 +BlueSigma = 255.0 +GreenSigma = 255.0 +RedSigma = 255.0 +UseNorm = true +preprocess_type = 1 +confidence_threshold = 0.4 +nms_threshold = 0.5 +max_detection_result = 10 +output_layers = < "Conv_198", "Conv_247", "Conv_296" > +runtime = 1 +model = "/data/misc/camera/yolov5n_quant.dlc" +labels = "/data/misc/camera/coco_labels.txt" diff --git a/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/CMakeLists.txt b/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/CMakeLists.txt new file mode 100644 index 0000000..9017572 --- /dev/null +++ b/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/CMakeLists.txt @@ -0,0 +1,69 @@ +cmake_minimum_required(VERSION 3.8.2) + +EXECUTE_PROCESS( COMMAND uname -m COMMAND tr -d '\n' OUTPUT_VARIABLE HOST_ARCHITECTURE ) +if (HOST_ARCHITECTURE MATCHES "(x86)|(X86)|(amd64)|(AMD64)") + message( STATUS "Building for 
aarch64 on ${HOST_ARCHITECTURE}") + + set(CMAKE_SYSTEM_NAME Linux) + set(CMAKE_SYSTEM_PROCESSOR aarch64) + + set(CMAKE_FIND_ROOT_PATH /usr/aarch64-linux-gnu) + + set(CMAKE_C_COMPILER aarch64-linux-gnu-gcc) + set(CMAKE_CXX_COMPILER aarch64-linux-gnu-g++) + + set(CMAKE_FIND_ROOT_PATH_MODE_PROGRAM NEVER) + set(CMAKE_FIND_ROOT_PATH_MODE_LIBRARY ONLY) + set(CMAKE_FIND_ROOT_PATH_MODE_INCLUDE ONLY) + set(CMAKE_FIND_ROOT_PATH_MODE_PACKAGE ONLY) +else() + set(SYSROOT_INCDIR /usr/include) + set(SYSROOT_LIBDIR /usr/lib) + set(SYSROOT_AARCH_LIBDIR /usr/lib/aarch64-linux-gnu) +endif () + +project(GST_PLUGIN_QTI_OSS_MLE + VERSION 1.0 + LANGUAGES C CXX +) + +if(NOT CMAKE_BUILD_TYPE) + set(CMAKE_BUILD_TYPE Release) +endif() + +set(CMAKE_INCLUDE_CURRENT_DIR ON) +set(SNPE_INCDIR ${SNPE_SDK_BASE_DIR}/include/zdl) +set(SNPE_LIBDIR ${SNPE_SDK_BASE_DIR}/lib/aarch64-ubuntu-gcc7.5) +set(GST_PLUGIN_INCDIR ${CMAKE_CURRENT_SOURCE_DIR}/../include) + +include_directories(${SYSROOT_INCDIR} ${FASTCV_SDK_DIR} ${SNPE_INCDIR}) +link_directories(${SYSROOT_LIBDIR} ${SYSROOT_AARCH_LIBDIR} ${SNPE_LIBDIR}) + +# Generate configuration header file. +configure_file(config.h.in config.h @ONLY) +include_directories(${CMAKE_CURRENT_BINARY_DIR}) +include_directories(${GST_PLUGIN_INCDIR}) + +# Precompiler definitions. +add_definitions(-DHAVE_CONFIG_H) +if (DELEGATE_SUPPORT) +add_definitions(-DDELEGATE_SUPPORT) +endif() + +find_package(PkgConfig) +pkg_search_module(GLIB REQUIRED glib-2.0) +pkg_check_modules(GST REQUIRED gstreamer-1.0) +pkg_check_modules(GST_ALLOC REQUIRED gstreamer-allocators-1.0) +pkg_check_modules(GST_VIDEO REQUIRED gstreamer-video-1.0) + +# Common compiler flags. 
+set(CMAKE_CXX_STANDARD 11) +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-error=maybe-uninitialized -Wno-error=unused-result -Wno-error=format= -Wformat -Wformat-security -Wall -Wextra -Werror -fPIC -MD -MT") +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Wno-terminate") +set(CMAKE_CXX_FLAGS_DEBUG "-g") +set(CMAKE_CXX_FLAGS_RELEASE "-g -O2") + +add_subdirectory(mle_engine) +add_subdirectory(mle_gst_snpe) + + diff --git a/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/config.h.in b/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/config.h.in new file mode 100644 index 0000000..c960b54 --- /dev/null +++ b/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/config.h.in @@ -0,0 +1,34 @@ +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of The Linux Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#define PACKAGE "gstreamer1.0-plugins-qti-oss-mle" +#define PACKAGE_VERSION "1.0" +#define PACKAGE_LICENSE "BSD" +#define PACKAGE_SUMMARY "QTI open-source GStreamer Plug-in for Machine Learning Engine" +#define PACKAGE_ORIGIN "https://source.codeaurora.org" diff --git a/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_engine/CMakeLists.txt b/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_engine/CMakeLists.txt new file mode 100644 index 0000000..3abe3e0 --- /dev/null +++ b/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_engine/CMakeLists.txt @@ -0,0 +1,39 @@ +set(GST_MLE_LIBRARY Engine_MLE) + +list(APPEND SOURCE_FILES "ml_engine_impl.cc") + +add_definitions(-DSNPE_ENABLE) +list(APPEND SOURCE_FILES "snpe_base.cc") +list(APPEND SOURCE_FILES "snpe_detection.cc") +list(APPEND SOURCE_FILES "snpe_yolodetection.cc") + +add_library(${GST_MLE_LIBRARY} SHARED + ${SOURCE_FILES} +) + +target_include_directories(${GST_MLE_LIBRARY} PUBLIC + ${GST_INCLUDE_DIRS} + ${SYSROOT_INCDIR}/ion_headers +) + +target_include_directories(${GST_MLE_LIBRARY} PRIVATE + ${KERNEL_BUILDDIR}/usr/include + ${SYSROOT_INCDIR}/ion_headers +) + +target_link_libraries(${GST_MLE_LIBRARY} PRIVATE +# log +# dl +# cutils + fastcvopt +) + +target_link_libraries(${GST_MLE_LIBRARY} PRIVATE SNPE) + +install( + TARGETS ${GST_MLE_LIBRARY} + LIBRARY DESTINATION 
${SYSROOT_LIBDIR} + PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ + GROUP_EXECUTE GROUP_READ + WORLD_EXECUTE WORLD_READ +) diff --git a/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_engine/common_utils.h b/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_engine/common_utils.h new file mode 100644 index 0000000..36a595d --- /dev/null +++ b/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_engine/common_utils.h @@ -0,0 +1,290 @@ +/* +* Copyright (c) 2020, The Linux Foundation. All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions are +* met: +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials provided +* with the distribution. +* * Neither the name of The Linux Foundation nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED +* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT +* ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS +* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#pragma once + +#include +#include +#include +#include +#ifdef ANDROID +#include +#endif +#ifndef GST_DISABLE_GST_DEBUG +#include +#define GST_CAT_DEFAULT ensure_debug_category() +static GstDebugCategory * +ensure_debug_category (void) +{ + static gsize category = 0; + + if (g_once_init_enter (&category)) { + gsize cat_done; + + cat_done = (gsize) _gst_debug_category_new("qtimle", 0, + "GST QTI Machine Learning Engine"); + + g_once_init_leave (&category, cat_done); + } + + return (GstDebugCategory *) category; +} + +#define MLE_LOGE(...) GST_ERROR(__VA_ARGS__) +#define MLE_LOGI(...) GST_INFO(__VA_ARGS__) +#define MLE_LOGD(...) GST_DEBUG(__VA_ARGS__) +#define MLE_LOGV(...) GST_LOG(__VA_ARGS__) +#else +#include +#define MLE_LOGE(...) ALOGE("MLE: " __VA_ARGS__) +#define MLE_LOGI(...) ALOGI("MLE: " __VA_ARGS__) +#define MLE_LOGD(...) ALOGD("MLE: " __VA_ARGS__) +#define MLE_LOGV(...) 
ALOGD("MLE: " __VA_ARGS__) +#define ensure_debug_category() /* NOOP */ +#endif /* GST_DISABLE_GST_DEBUG */ + +class Timer { + std::string str; + uint64_t begin; + +public: + + Timer (std::string s) : str(s) { + MLE_LOGV("%s: Start", str.c_str()); + begin = GetMicroSeconds(); + } + + ~Timer () { + uint64_t end = GetMicroSeconds(); + MLE_LOGD("%s: %llu us", str.c_str(), + static_cast(end - begin)); + } + + uint64_t GetMicroSeconds() { + timespec time; + + clock_gettime(CLOCK_MONOTONIC, &time); + + uint64_t microSeconds = (static_cast(time.tv_sec) * 1000000ULL) + + (static_cast(time.tv_nsec)) / 1000; + + return microSeconds; + } +}; + +#define MLE_UNUSED(var) ((void)var) + +#define DEFAULT_ALPHA 128 + +#define COLOR_TABLE_SIZE 32 + +typedef struct { + uint8_t red; + uint8_t green; + uint8_t blue; + uint8_t alpha; +} rgba; + +#ifdef ANDROID +static rgba color_table[COLOR_TABLE_SIZE] = { + { //Black(background 0) + .red = 0, + .green = 0, + .blue = 0, + .alpha = DEFAULT_ALPHA + }, + { //Maroon(aeroplane 1) + .red = 128, + .green = 0, + .blue = 0, + .alpha = DEFAULT_ALPHA + }, + { //Green(bicycle 2) + .red = 0, + .green = 128, + .blue = 0, + .alpha = DEFAULT_ALPHA + }, + { //Olive(bird 3) + .red = 128, + .green = 128, + .blue = 0, + .alpha = DEFAULT_ALPHA + }, + { //Navy(boat 4) + .red = 0, + .green = 0, + .blue = 128, + .alpha = DEFAULT_ALPHA + }, + { //Purple(bottle 5) + .red = 128, + .green = 0, + .blue = 128, + .alpha = DEFAULT_ALPHA + }, + { //Teal(bus 6) + .red = 0, + .green = 128, + .blue = 128, + .alpha = DEFAULT_ALPHA + }, + { //Silver(car 7) + .red = 192, + .green = 192, + .blue = 192, + .alpha = DEFAULT_ALPHA + }, + { //Grey(cat 8) + .red = 128, + .green = 128, + .blue = 128, + .alpha = DEFAULT_ALPHA + }, + { //Red(chair 9) + .red = 255, + .green = 0, + .blue = 0, + .alpha = DEFAULT_ALPHA + }, + { //Lime(cow 10) + .red = 0, + .green = 255, + .blue = 0, + .alpha = DEFAULT_ALPHA + }, + { //Yellow(diningtable 11) + .red = 255, + .green = 255, + .blue = 0, + 
.alpha = DEFAULT_ALPHA + }, + { //Blue(dog 12) + .red = 0, + .green = 0, + .blue = 255, + .alpha = DEFAULT_ALPHA + }, + { //Fuchsia(horse 13) + .red = 255, + .green = 0, + .blue = 255, + .alpha = DEFAULT_ALPHA + }, + { //Aqua(motorbike 14) + .red = 0, + .green = 255, + .blue = 255, + .alpha = DEFAULT_ALPHA + }, + { //White(person 15) + .red = 255, + .green = 255, + .blue = 255, + .alpha = DEFAULT_ALPHA + }, + { //Honeydew2(potted plant 16) + .red = 215, + .green = 255, + .blue = 215, + .alpha = DEFAULT_ALPHA + }, + { //Salmon1(sheep 17) + .red = 255, + .green = 135, + .blue = 95, + .alpha = DEFAULT_ALPHA + }, + { //Orange1(sofa 18) + .red = 255, + .green = 175, + .blue = 0, + .alpha = DEFAULT_ALPHA + }, + { //Gold1(train 19) + .red = 255, + .green = 215, + .blue = 0, + .alpha = DEFAULT_ALPHA + }, + { //Thistle1(tv/monitor 20) + .red = 255, + .green = 215, + .blue = 255, + .alpha = DEFAULT_ALPHA + }, + { //Cornsilk1(unknown 255) + .red = 255, + .green = 255, + .blue = 215, + .alpha = DEFAULT_ALPHA + } +}; +class Property { + public: + /** Get + * @property: property + * @default_value: default value + * + * Gets requested property value + * + * return: property value + **/ + template + static TProperty Get(std::string property, TProperty default_value) { + MLE_UNUSED(color_table); // Consider removing + TProperty value = default_value; + char prop_val[PROPERTY_VALUE_MAX]; + std::stringstream s; + s << default_value; + property_get(property.c_str(), prop_val, s.str().c_str()); + + std::stringstream output(prop_val); + output >> value; + return value; + } + + /** Set + * @property: property + * @value: value + * + * Sets requested property value + * + * return: nothing + **/ + template + static void Set(std::string property, TProperty value) { + std::stringstream s; + s << value; + std::string value_string = s.str(); + value_string.resize(PROPERTY_VALUE_MAX); + property_set(property.c_str(), value_string.c_str()); + } +}; +#endif diff --git 
a/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_engine/ml_engine_impl.cc b/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_engine/ml_engine_impl.cc new file mode 100644 index 0000000..4186304 --- /dev/null +++ b/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_engine/ml_engine_impl.cc @@ -0,0 +1,796 @@ +/* +* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions are +* met: +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials provided +* with the distribution. +* * Neither the name of The Linux Foundation nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED +* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT +* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS +* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#include +#include +#include +#include +#include +#include +#include "ml_engine_intf.h" +#include "common_utils.h" + +namespace mle { + +bool MLEngine::fastcv_mode_is_set_ = false; +bool MLEngine::use_c2d_preprocess_ = false; +std::mutex MLEngine::fastcv_process_lock_; + +MLEngine::MLEngine(MLConfig &config) : config_(config) { + std::lock_guard lock(fastcv_process_lock_); + if (!fastcv_mode_is_set_) { + PreProcessAccelerator(); + fastcv_mode_is_set_ = true; + } + + buffers_.scale_buf = nullptr; + buffers_.rgb_buf = nullptr; + gst_c2d_buf_ = nullptr; + outpool_ = nullptr; + c2d_buf_outframe_ = nullptr; +} + +void MLEngine::PreProcessAccelerator() { + fcvOperationMode mode = FASTCV_OP_CPU_PERFORMANCE; + use_c2d_preprocess_ = false; + + switch(config_.preprocess_accel) { + case kPreprocessGpu: + MLE_LOGI("%s Use C2D for postprocessing", __func__); + use_c2d_preprocess_ = true; + break; + case kPreprocessDsp: + MLE_LOGI("%s FastCV operation is PERFORMANCE", __func__); + mode = FASTCV_OP_PERFORMANCE; + break; + case kPreprocessCpu: + default: + MLE_LOGI("%s FastCV operation is CPU PERFORMANCE", __func__); + break; + } + + fcvSetOperationMode(mode); +} + +void MLEngine::DumpFrame(const uint8_t* buffer, const uint32_t& width, + const uint32_t& height, const uint32_t& size, const std::string& suffix) { + + std::string file_path("/data/misc/camera/ml_engine_"); + size_t written_len = 0; + file_path += std::to_string(width); + file_path += "x"; + file_path += std::to_string(height); + file_path += suffix; + FILE *file = fopen(file_path.c_str(), "w+"); + if (!file) { + MLE_LOGE("%s: Unable to open file(%s)", __func__, + file_path.c_str()); + goto FAIL; + } + written_len = fwrite(buffer, sizeof(uint8_t), size, file); + MLE_LOGD("%s: written_len: %zu", __func__, written_len); + if (size != written_len) { + MLE_LOGE("%s: Bad Write error (%d):(%s)", __func__, errno, + strerror(errno)); + goto FAIL; + } + MLE_LOGD("%s: Buffer Size:%zu Stored:%s", __func__, written_len, + 
file_path.c_str()); + +FAIL: + if (file != nullptr) { + fclose(file); + } +} + +int32_t MLEngine::AllocateInternalBuffers() { + + if (do_rescale_) { + GstBufferPool *pool = NULL; + GstStructure *config = NULL; + GstAllocator *allocator = NULL; + GstVideoInfo vinfo; + uint32_t size = ((scale_width_ * scale_height_ * 4)); + uint32_t size_aligned = (size + 4096-1) & ~(4096-1); + + GstCaps *caps = gst_caps_new_simple ("video/x-raw", + "format", G_TYPE_STRING, "RGBA", + "framerate", GST_TYPE_FRACTION, 25, 1, + "width", G_TYPE_INT, scale_width_, + "height", G_TYPE_INT, scale_height_, + NULL); + + pool = gst_image_buffer_pool_new (GST_IMAGE_BUFFER_POOL_TYPE_GBM); + if (pool == NULL) { + MLE_LOGE("%s: Failed create buffer image pool", __func__); + return MLE_FAIL; + } + + config = gst_buffer_pool_get_config (pool); + if (config == NULL) { + MLE_LOGE("%s: Failed set config of the pool", __func__); + gst_object_unref (pool); + return MLE_FAIL; + } + + gst_buffer_pool_config_set_params (config, caps, size_aligned, 1, 1); + + allocator = gst_fd_allocator_new (); + gst_buffer_pool_config_set_allocator (config, allocator, NULL); + + if (!gst_buffer_pool_set_config (pool, config)) { + MLE_LOGE("%s: Failed to set pool configuration", __func__); + g_object_unref (pool); + g_object_unref (allocator); + return MLE_FAIL; + } + + g_object_unref (allocator); + outpool_ = pool; + + if (!gst_buffer_pool_is_active (pool) && + !gst_buffer_pool_set_active (pool, TRUE)) { + MLE_LOGE("%s: Failed to activate output video buffer pool", __func__); + g_object_unref (outpool_); + outpool_ = nullptr; + return MLE_FAIL; + } + + if (GST_FLOW_OK != gst_buffer_pool_acquire_buffer (pool, + &gst_c2d_buf_, NULL)) { + MLE_LOGE("%s: Failed to create output buffer", __func__); + g_object_unref (outpool_); + outpool_ = nullptr; + return MLE_FAIL; + } + + c2d_buf_outframe_ = g_slice_new (GstVideoFrame); + + GstVideoFormat format = GST_VIDEO_FORMAT_RGB; + if (config_.input_format == kBgr || + 
config_.input_format == kBgrFloat) { + format = GST_VIDEO_FORMAT_BGR; + } + gst_video_info_set_format (&vinfo, format, scale_width_, scale_height_); + + if (!gst_video_frame_map (c2d_buf_outframe_, &vinfo, gst_c2d_buf_, + (GstMapFlags)(GST_MAP_READWRITE | + GST_VIDEO_FRAME_MAP_FLAG_NO_REF))) { + MLE_LOGE("Failed to map buffer"); + FreeInternalBuffers(); + return MLE_FAIL; + } + + buffers_.rgb_buf = + (uint8_t*) GST_VIDEO_FRAME_PLANE_DATA (c2d_buf_outframe_, 0); + } + + if (!use_c2d_preprocess_) { + posix_memalign(reinterpret_cast(&buffers_.scale_buf), + 128, + ((scale_width_ * + scale_height_ * 3 / 2))); + if (nullptr == buffers_.scale_buf) { + MLE_LOGE("%s: Scale buf allocation failed", __func__); + FreeInternalBuffers(); + return MLE_FAIL; + } + } + return MLE_OK; +} + +void MLEngine::FreeInternalBuffers() { + if (nullptr != c2d_buf_outframe_) { + gst_video_frame_unmap (c2d_buf_outframe_); + g_slice_free (GstVideoFrame, c2d_buf_outframe_); + c2d_buf_outframe_ = nullptr; + } + if (nullptr != outpool_ && + nullptr != gst_c2d_buf_) { + gst_buffer_pool_release_buffer (outpool_, gst_c2d_buf_); + gst_buffer_pool_set_active (outpool_, FALSE); + gst_object_unref (outpool_); + gst_c2d_buf_ = nullptr; + outpool_ = nullptr; + } + if (nullptr != buffers_.scale_buf && !use_c2d_preprocess_) { + free(buffers_.scale_buf); + buffers_.scale_buf = nullptr; + } +} + +void MLEngine::MeanSubtract(uint8_t* input_buf, + const uint32_t width, + const uint32_t height, + const uint32_t pad_width, + const uint32_t pad_height, + float* processed_buf) { + + MLE_UNUSED(pad_height); + uint8_t* src = input_buf; + float* dest = processed_buf; + + float std_blue = config_.use_norm ? config_.blue_sigma : 1; + float std_green = config_.use_norm ? config_.green_sigma : 1; + float std_red = config_.use_norm ? 
config_.red_sigma : 1; + + for (uint32_t y = 0; y < height; y++) { + for (uint32_t x = 0; x < width; x++) { + dest[((y * pad_width + x) * 3)] = + (static_cast(src[((y * width + x) * 3)]) - + config_.red_mean) / std_red; + + dest[((y * pad_width + x) * 3) + 1] = + (static_cast(src[((y * width + x) * 3) + 1]) - + config_.green_mean) / std_green; + + dest[((y * pad_width + x) * 3) + 2] = + (static_cast(src[((y * width + x) * 3) + 2]) - + config_.blue_mean) / std_blue; + } + } +} + +void MLEngine::Pad( + uint8_t* input_buf, + const uint32_t input_width, + const uint32_t input_height, + const uint32_t pad_width, + const uint32_t pad_height, + uint8_t* output_buf) +{ + MLE_UNUSED(pad_height); + // This API assume that buffer is already fill up with + // pad value and only active area is copied. + // This optimization reduce time ~10 times. + for (uint32_t y = 0; y < input_height; y++) { + for (uint32_t x = 0; x < 3 * input_width; x++) { + uint32_t index_src = y * 3 * input_width + x; + uint32_t index_dst = y * 3 * pad_width + x; + output_buf[index_dst] = input_buf[index_src]; + } + } +} + +int32_t MLEngine::PreProcessScale( + uint8_t* pSrcLuma, + uint8_t* pSrcChroma, + uint8_t* pDst, + const uint32_t srcWidth, + const uint32_t srcHeight, + const uint32_t srcStride, + const uint32_t scaleWidth, + const uint32_t scaleHeight, + MLEImageFormat format) +{ + MLE_LOGI("%s: Enter ", __func__); + MLE_LOGV("%s: format %d preprocess_mode %d", __func__, format, + (uint32_t)config_.preprocess_mode); + + int32_t rc = MLE_OK; + if ((format == mle_format_nv12) || (format == mle_format_nv21)) { + uint8_t *src_buffer_y = pSrcLuma; + uint8_t *src_buffer_uv = pSrcChroma; + uint32_t x = 0, y = 0; + uint32_t width = srcWidth; + uint32_t height = srcHeight; + uint32_t src_y_offset = 0; + uint32_t src_uv_offset = 0; + + if (config_.preprocess_mode == kKeepARCrop) { + double in_ar = 0, out_ar = 0; + in_ar = static_cast(width) / height; + out_ar = static_cast(scaleWidth) / scaleHeight; + + if 
(in_ar > out_ar) { + width = out_ar * height; + x = (srcWidth - width) / 2; + width = width & ~1; + x = x & ~1; + } else if (in_ar < out_ar) { + height = width / out_ar; + y = (srcHeight - height) / 2; + height = height & ~1; + y = y & ~1; + } + + po_.width = width; + po_.height = height; + po_.x_offset = x; + po_.y_offset = y; + + //Adjust the Y pointer. + src_y_offset = y * srcWidth + x; + //Adjust the UV pointer. + src_uv_offset = (y/2) * srcWidth + x; + + src_buffer_y = reinterpret_cast + ((intptr_t)src_buffer_y + src_y_offset); + src_buffer_uv = reinterpret_cast + ((intptr_t)src_buffer_uv + src_uv_offset); + } + + MLE_LOGV("%s: Scale Luma plane", __func__); + + fcvScaleDownMNu8(src_buffer_y, + width, + height, + srcStride, + pDst, + scaleWidth, + scaleHeight, + 0); + + MLE_LOGV("%s: Scale Croma plane", __func__); + + fcvScaleDownMNInterleaveu8(src_buffer_uv, + width/2, + height/2, + srcStride, + pDst + (scaleWidth*scaleHeight), + scaleWidth/2, + scaleHeight/2, + 0); + } else { + MLE_LOGE("Unsupported format %d", (int)format); + rc = MLE_IMG_FORMAT_NOT_SUPPORTED; + } + + MLE_LOGI("%s: Exit", __func__); + return rc; +} + +void MLEngine::PreProcessColorConvertRGB( + uint8_t* pSrcLuma, + uint8_t* pSrcChroma, + uint8_t* pDst, + const uint32_t width, + const uint32_t height, + MLEImageFormat format) +{ + if ((format == mle_format_nv12) || (format == mle_format_nv21)) { + fcvColorYCbCr420PseudoPlanarToRGB888u8(pSrcLuma, + pSrcChroma, + width, + height, + 0, + 0, + pDst, + 0); + } +} + +void MLEngine::PreProcessColorConvertBGR( + uint8_t* pSrc, + uint8_t* pDst, + const uint32_t width, + const uint32_t height) +{ + fcvColorRGB888ToBGR888u8(pSrc, + width, + height, + 0, + pDst, + 0); +} + +int32_t MLEngine::ReadLabelsFile(const std::string& file_name, + std::vector& res, + size_t& found_label_count) { + std::ifstream file(file_name); + if (!file) { + MLE_LOGE("%s: Labels file %s not found!", __func__, file_name.c_str()); + return MLE_FAIL; + } + res.clear(); + 
std::string line; + while (std::getline(file, line)) { + res.push_back(line); + } + found_label_count = res.size(); + const int padding = 16; + while (res.size() % padding) { + res.emplace_back(); + } + return MLE_OK; +} + +int32_t MLEngine::Init(const MLEInputParams* source_info) { + MLE_LOGI("%s: Enter", __func__); + int32_t res = MLE_OK; + + // Gather input configuration parameters + source_params_.width = source_info->width; + source_params_.height = source_info->height; + source_params_.format = source_info->format; + + // Load model from file + std::string file_name = config_.model_file; + res = LoadModel(file_name); + if (MLE_OK != res) { + MLE_LOGE("%s: Failed to load model from %s", + __func__, file_name.c_str()); + return res; + } + MLE_LOGI("%s: Loaded model from %s", __func__, file_name.c_str()); + + if (need_labels_) { + file_name = config_.labels_file; + res = ReadLabelsFile(file_name, labels_, label_count_); + if (MLE_OK != res) { + MLE_LOGE("%s: Failed to read labeles file %s", + __func__, file_name.c_str()); + return res; + } + } + + res = InitFramework(); + if (MLE_OK != res) { + MLE_LOGE("%s: Failed to init framework", __func__); + return res; + } + + // Calculate downscale params + if (config_.preprocess_mode == kKeepARPad) { + float ratio = (engine_input_params_.width & ~0x1) * 1.0 / + fmax(source_params_.width, source_params_.height); + scale_width_ = (uint32_t)(source_params_.width * ratio) & ~0x1; + scale_height_ = (uint32_t)(source_params_.height * ratio) & ~0x1; + } else { + scale_width_ = engine_input_params_.width; + scale_height_ = engine_input_params_.height; + + if (scale_width_ % 2 != 0 || + scale_height_ % 2 != 0) { + MLE_LOGE("Error: Odd dimensions aren't supported for preprocess mode %d", + (int)config_.preprocess_mode); + return MLE_FAIL; + } + } + + MLE_LOGI("%s scale width %d scale height %d model width %d height %d", + __func__, + scale_width_, + scale_height_, + engine_input_params_.width, + engine_input_params_.height); + + 
MLE_LOGD("%s nms_threshold %3f max_detection_result %d num_threads %d runtime %d label_count_ %zu", + __func__, + config_.nms_threshold, + config_.max_detection_result, + config_.number_of_threads, + config_.runtime, + label_count_ + ); + + // Allocate internal buffers + if (source_params_.width != engine_input_params_.width || + source_params_.height != engine_input_params_.height) { + do_rescale_ = true; + } else { + do_rescale_ = false; + } + res = AllocateInternalBuffers(); + if (MLE_OK != res) { + MLE_LOGE("%s Buffer allocation failed", __func__); + return res; + } + + if (use_c2d_preprocess_) { + GstStructure *inopts = NULL; + c2dconvert_ = gst_c2d_video_converter_new (); + if (c2dconvert_) { + GValue rects = G_VALUE_INIT, entry = G_VALUE_INIT, value = G_VALUE_INIT; + + inopts = gst_structure_new_empty ("options"); + + g_value_init (&rects, GST_TYPE_ARRAY); + g_value_init (&entry, GST_TYPE_ARRAY); + g_value_init (&value, G_TYPE_INT); + + g_value_set_int (&value, 0); + gst_value_array_append_value (&entry, &value); + g_value_set_int (&value, 0); + gst_value_array_append_value (&entry, &value); + g_value_set_int (&value, source_params_.width); + gst_value_array_append_value (&entry, &value); + g_value_set_int (&value, source_params_.height); + gst_value_array_append_value (&entry, &value); + + gst_value_array_append_value (&rects, &entry); + g_value_reset (&entry); + + gst_structure_set_value (inopts, + GST_C2D_VIDEO_CONVERTER_OPT_SRC_RECTANGLES, &rects); + g_value_reset (&rects); + + g_value_set_int (&value, 0); + gst_value_array_append_value (&entry, &value); + g_value_set_int (&value, 0); + gst_value_array_append_value (&entry, &value); + g_value_set_int (&value, scale_width_); + gst_value_array_append_value (&entry, &value); + g_value_set_int (&value, scale_height_); + gst_value_array_append_value (&entry, &value); + + gst_value_array_append_value (&rects, &entry); + + gst_structure_set_value (inopts, + GST_C2D_VIDEO_CONVERTER_OPT_DEST_RECTANGLES, &rects); 
+ + g_value_unset (&value); + g_value_unset (&entry); + g_value_unset (&rects); + + gst_c2d_video_converter_set_input_opts (c2dconvert_, 0, inopts); + } else { + MLE_LOGE("%s: Failed to create c2d converter", __func__); + FreeInternalBuffers(); + res = MLE_FAIL; + } + } + + MLE_LOGI("%s: Exit", __func__); + return res; +} + +void MLEngine::Deinit(){ + MLE_LOGI("%s: Enter", __func__); + if (use_c2d_preprocess_ && c2dconvert_) + gst_c2d_video_converter_free (c2dconvert_); + FreeInternalBuffers(); + MLE_LOGI("%s: Exit", __func__); +} + +int32_t MLEngine::Process(GstVideoFrame *frame) { + MLE_LOGI("%s: Enter", __func__); + if (!frame || !frame->buffer) { + MLE_LOGE("%s Null pointer!", __func__); + return MLE_NULLPTR; + } + int32_t res = MLE_OK; + + { + Timer t("Pre-process time"); + res = PreProcess(frame); + if (MLE_OK != res) { + MLE_LOGE(" PreProcessBuffer failed"); + return res; + } + } + + { + Timer t("Inference time"); + res = ExecuteModel(); + if (MLE_OK != res) { + MLE_LOGE(" Inference failed"); + return res; + } + } + + { + Timer t("Post-process time"); + res = PostProcess(frame->buffer); + if (MLE_OK != res) { + MLE_LOGE(" PostProcess failed"); + } + } + + MLE_LOGI("%s: Exit", __func__); + return res; +} + +int32_t MLEngine::PreProcess(GstVideoFrame *frame) { + MLE_LOGI("%s: Enter", __func__); + int32_t res = MLE_OK; + + uint8_t *frame_data_plane0 = (uint8_t*) GST_VIDEO_FRAME_PLANE_DATA (frame, 0); + uint8_t *frame_data_plane1 = (uint8_t*) GST_VIDEO_FRAME_PLANE_DATA (frame, 1); + + void* engine_input_buf = GetInputBuffer(); + if (!engine_input_buf) { + MLE_LOGE("%s: Input buffer is null", __func__); + return MLE_NULLPTR; + } + + if (use_c2d_preprocess_) { + gpointer request_id = NULL; + request_id = gst_c2d_video_converter_submit_request (c2dconvert_, + frame, 1, c2d_buf_outframe_); + gst_c2d_video_converter_wait_request (c2dconvert_, request_id); + } else { + if (do_rescale_) { + uint32_t stride = GST_VIDEO_FRAME_PLANE_STRIDE(frame, 0); + res = 
PreProcessScale(frame_data_plane0, + frame_data_plane1, + buffers_.scale_buf, + source_params_.width, + source_params_.height, + stride, + scale_width_, + scale_height_, + source_params_.format); + if (MLE_OK != res) { + MLE_LOGE("PreProcessScale failed due to unsupported image format"); + return res; + } + + PreProcessColorConvertRGB(buffers_.scale_buf, + buffers_.scale_buf + + (scale_width_ * scale_height_), + buffers_.rgb_buf, + scale_width_, + scale_height_, + source_params_.format); + } else { + PreProcessColorConvertRGB(frame_data_plane0, + frame_data_plane1, + buffers_.rgb_buf, + scale_width_, + scale_height_, + source_params_.format); + } + + if (config_.input_format == kBgr || + config_.input_format == kBgrFloat) { + PreProcessColorConvertBGR(buffers_.rgb_buf, + buffers_.rgb_buf, + scale_width_, + scale_height_); + } + } + + // MLE assumes mean subtract will be needed only if engine's input is float + if (config_.input_format == kRgbFloat || + config_.input_format == kBgrFloat) { + MeanSubtract(buffers_.rgb_buf, + scale_width_, + scale_height_, + engine_input_params_.width, + engine_input_params_.height, + (float*)engine_input_buf); + } else { + Pad(buffers_.rgb_buf, + scale_width_, + scale_height_, + engine_input_params_.width, + engine_input_params_.height, + (uint8_t*)engine_input_buf); + } + + MLE_LOGI("%s: Exit", __func__); + return res; +} + +GType +gst_mle_input_format_get_type (void) +{ + static GType gtype = 0; + + if (!gtype) { + static const GEnumValue variants[] = { + {kRgb, "RGB value as input", "RGB"}, + {kBgr, "BGR value as input", "BGR"}, + {kRgbFloat, "RGB float value as input", "RGB Float"}, + {kBgrFloat, "BGR float value as input", "BGR Float"}, + {0, NULL, NULL}, + }; + + gtype = g_enum_register_static ("InputFormat", variants); + } + + return gtype; +} + +GType +gst_mle_preprocessing_mode_get_type (void) +{ + static GType gtype = 0; + + if (!gtype) { + static const GEnumValue variants[] = { + {kKeepARCrop, "keep aspect ratio by 
cropping", "Keep AR Crop"}, + {kKeepARPad, "keep aspect ratio by adding pads", "Keep AR Pad"}, + {kDirectDownscale, "directly down scale the frame", "Direct Downscale"}, + {kMax, "use max width or height", "Max"}, + {0, NULL, NULL}, + }; + + gtype = g_enum_register_static ("PreprocessingMode", variants); + } + + return gtype; +} + +GType +gst_mle_snpe_runtime_type_get_type (void) +{ + static GType gtype = 0; + + if (!gtype) { + static const GEnumValue variants[] = { + {kSnpeCpu, "use CPU runtime", "CPU"}, + {kSnpeDsp, "use DSP runtime", "DSP"}, + {kSnpeGpu, "use GPU runtime", "GPU"}, + {kSnpeAip, "use API runtime", "AIP"}, + {0, NULL, NULL}, + }; + + gtype = g_enum_register_static ("Runtime", variants); + } + + return gtype; +} + +GType +gst_mle_preprocessing_accel_get_type (void) +{ + static GType gtype = 0; + if (!gtype) { + static const GEnumValue variants[] = { + {kPreprocessCpu, + "Execute with FastCV CPU performance option", + "CPU"}, + {kPreprocessDsp, + "Execute with FastCV performance option", + "DSP"}, + {kPreprocessGpu, + "Execute with C2D preprocess", + "GPU"}, + {0, NULL, NULL}, + }; + + gtype = g_enum_register_static ("PreprocessAccel", variants); + } + + return gtype; +} + +GType +gst_mle_tflite_delegate_type_get_type (void) +{ + static GType gtype = 0; + if (!gtype) { + static const GEnumValue variants[] = { + {kTfliteNnapi, "NNAPI delegate with DSP accelerator", "NNAPI"}, + {kTfliteNnapiNpu, "NNAPI delegate with NPU accelerator", "NPU"}, + {kTfliteHexagonNn, "HEXAGON NN delegate", "Hexagon NN"}, + {kTfliteGpu, "GPU delegate", "GPU"}, + {kTfliteXnnpack, "XNN PACK delegate", "XNN Pack"}, + {kTfliteCpu, "CPU runtime", "CPU"}, + {0, NULL, NULL}, + }; + + gtype = g_enum_register_static ("DelegateType", variants); + } + return gtype; +} + +}; // namespace mle diff --git a/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_engine/ml_engine_intf.h 
b/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_engine/ml_engine_intf.h new file mode 100644 index 0000000..adb1c92 --- /dev/null +++ b/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_engine/ml_engine_intf.h @@ -0,0 +1,305 @@ +/* +* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions are +* met: +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials provided +* with the distribution. +* * Neither the name of The Linux Foundation nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED +* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT +* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS +* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace mle { + +#define GST_TYPE_MLE_INPUT_FORMAT (mle::gst_mle_input_format_get_type()) +#define GST_TYPE_MLE_PREPROCESSING_MODE (mle::gst_mle_preprocessing_mode_get_type()) +#define GST_TYPE_MLE_SNPE_RUNTIME_TYPE (mle::gst_mle_snpe_runtime_type_get_type()) +#define GST_TYPE_MLE_PREPROCESSING_ACCEL (mle::gst_mle_preprocessing_accel_get_type()) +#define GST_TYPE_MLE_TFLITE_DELEGATE_TYPE (mle::gst_mle_tflite_delegate_type_get_type()) + +GType gst_mle_input_format_get_type (void); +GType gst_mle_preprocessing_mode_get_type (void); +GType gst_mle_snpe_runtime_type_get_type (void); +GType gst_mle_preprocessing_accel_get_type (void); +GType gst_mle_tflite_delegate_type_get_type (void); + +/* +Input Format + kRgb - RGB + kBgr - BGR + kRgbFloat RGB Float + kBgrFloat - BGR Float + */ +enum { + kRgb, + kBgr, + kRgbFloat, + kBgrFloat +}; + +/* +MLE supports three pre-processing modes + kKeepARCrop - This mode crops from the original frame to match engine's input + aspect ratio. Objects outside the crop region will not be detected + kKeepARPad - This mode keeps original frame aspect ratio. 
In order to match + engine's input requirements, padding with mean value is added + kDirectDownscale - This mode doesn't keep the aspect ratio of the original + frame and shrinks it +*/ +enum { + kKeepARCrop, + kKeepARPad, + kDirectDownscale, + kMax +}; + +/* +Runtime supported by SNPE + kSnpeCpu - CPU runtime + kSnpeDsp - DSP runtime + kSnpeGpu - GPU runtime + kSnpeAip - AIP runtime + */ +enum { + kSnpeCpu, + kSnpeDsp, + kSnpeGpu, + kSnpeAip +}; + +/* +Preprocess Options + kPrerocessCpu - FastCV CPU performance mode + kPrerocessDsp - FastCV performance mode + kPrerocessGpu - C2D preprocess + */ +enum { + kPreprocessCpu, + kPreprocessDsp, + kPreprocessGpu +}; + +/* +Delegate supported by TFLite + kTfliteNnapi - NNAPI delegate no predefine accelerator + kTfliteNnapiNpu - NNAPI delegate NPU accelerator + kTfliteHexagonNn - Hexagon NN delegate + kTfliteGpu - GPU delegate + kTfliteXnnpack - XNN Pack delegate + kTfliteCpu - CPU runtime + */ +enum { + kTfliteNnapi, + kTfliteNnapiNpu, + kTfliteHexagonNn, + kTfliteGpu, + kTfliteXnnpack, + kTfliteCpu +}; + +enum MLEImageFormat { + mle_format_invalid = 0, + mle_format_nv12, + mle_format_nv21, + mle_format_RGB24, +}; + +enum MLEErrors { + MLE_OK = 0, + MLE_FAIL, + MLE_NULLPTR, + MLE_IMG_FORMAT_NOT_SUPPORTED +}; + +enum class NetworkIO { + kUserBuffer = 0, + kITensor +}; + +struct PreprocessingOffsets { + PreprocessingOffsets(): x_offset(0), + y_offset(0), + width(0), + height(0) {}; + uint32_t x_offset; + uint32_t y_offset; + uint32_t width; + uint32_t height; +}; + +struct PreprocessingBuffers { + uint8_t* scale_buf; + uint8_t* rgb_buf; +}; + +struct MLEInputParams { + uint32_t width; + uint32_t height; + MLEImageFormat format; +}; + +struct MLConfig { + + //applicable to SNPE + NetworkIO io_type; + + //Input image format for the desired network + uint32_t input_format; + + //Aspect ratio maintenance + uint32_t preprocess_mode; + uint32_t preprocess_accel; + + // normalization + float blue_mean; + float blue_sigma; + float 
green_mean; + float green_sigma; + float red_mean; + float red_sigma; + bool use_norm; + // end normalization + + // coordinate for people intrusion detection + uint32_t x_axis; + uint32_t y_axis; + uint32_t width; + uint32_t height; + + float conf_threshold; + float nms_threshold; + uint32_t max_detection_result; + + std::string model_file; + std::string labels_file; + + //snpe specific + uint32_t runtime; + + //tflite specific + uint32_t number_of_threads; + uint32_t delegate; + + //snpe layers + std::vector output_layers; +}; + +class MLEngine { + public: + MLEngine(MLConfig &config); + virtual ~MLEngine(){}; + int32_t Init(const MLEInputParams* source_info); + virtual void Deinit(); + int32_t PreProcess(GstVideoFrame *frame); + int32_t Process(GstVideoFrame *frame); + private: + virtual int32_t LoadModel(std::string& model_path) = 0; + virtual int32_t InitFramework() = 0; + virtual int32_t ExecuteModel() = 0; + virtual void* GetInputBuffer() = 0; + virtual int32_t PostProcess(GstBuffer* buffer) = 0; + int32_t ReadLabelsFile(const std::string& file_name, + std::vector& result, + size_t& found_label_count); + void PreProcessAccelerator(); + static bool fastcv_mode_is_set_; + static std::mutex fastcv_process_lock_; + + protected: + + virtual int32_t AllocateInternalBuffers(); + virtual void FreeInternalBuffers(); + + void DumpFrame(const uint8_t* buffer, const uint32_t& width, + const uint32_t& height, const uint32_t& size, const std::string& suffix); + + void Pad( + uint8_t* input_buf, + const uint32_t input_width, + const uint32_t input_height, + const uint32_t pad_width, + const uint32_t pad_height, + uint8_t* output_buf); + + void PreProcessColorConvertRGB( + uint8_t* pSrcLuma, + uint8_t* pSrcChroma, + uint8_t* pDst, + const uint32_t width, + const uint32_t height, + MLEImageFormat format); + + void PreProcessColorConvertBGR( + uint8_t* pSrc, + uint8_t* pDst, + const uint32_t width, + const uint32_t height); + + int32_t PreProcessScale( + uint8_t* pSrcLuma, + 
uint8_t* pSrcChroma, + uint8_t* pDst, + const uint32_t srcWidth, + const uint32_t srcHeight, + const uint32_t srcStride, + const uint32_t scaleWidth, + const uint32_t scaleHeight, + MLEImageFormat format); + + void MeanSubtract(uint8_t* input_buf, + const uint32_t width, + const uint32_t height, + const uint32_t pad_width, + const uint32_t pad_height, + float* processed_buf); + + MLConfig config_; + MLEInputParams source_params_; + //params for engine's input requirements + MLEInputParams engine_input_params_; + PreprocessingBuffers buffers_; + PreprocessingOffsets po_; + uint32_t scale_width_; + uint32_t scale_height_; + bool do_rescale_; + bool need_labels_; + uint32_t batch_size_; + std::vector labels_; + size_t label_count_; + GstC2dVideoConverter *c2dconvert_; + GstBufferPool *outpool_; + static bool use_c2d_preprocess_; + GstVideoFrame *c2d_buf_outframe_; + GstBuffer *gst_c2d_buf_; +}; + +}; // namespace mle diff --git a/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_engine/snpe_base.cc b/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_engine/snpe_base.cc new file mode 100644 index 0000000..677ead0 --- /dev/null +++ b/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_engine/snpe_base.cc @@ -0,0 +1,421 @@ +/* +* Copyright (c) 2021, The Linux Foundation. All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions are +* met: +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials provided +* with the distribution. 
+* * Neither the name of The Linux Foundation nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED +* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT +* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS +* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#include +#include + +#include "snpe_base.h" +#include "common_utils.h" + +namespace mle { + +SNPEBase::SNPEBase(MLConfig &config) : MLEngine(config) { + ConfigureRuntime(config); + config_.io_type = config.io_type; + config_.output_layers = config.output_layers; + need_labels_ = true; +} + +int32_t SNPEBase::ConfigureRuntime(MLConfig &config) { + switch (config.runtime) { + case kSnpeDsp: { + if (zdl::SNPE::SNPEFactory::isRuntimeAvailable( + zdl::DlSystem::Runtime_t::DSP)) { + runtime_ = zdl::DlSystem::Runtime_t::DSP; + MLE_LOGI("DSP runtime selected"); + } else { + runtime_ = zdl::DlSystem::Runtime_t::CPU; + MLE_LOGI("CPU runtime selected, but DSP was configured"); + } + break; + } + case kSnpeGpu: { + if (zdl::SNPE::SNPEFactory::isRuntimeAvailable( + zdl::DlSystem::Runtime_t::GPU)) { + runtime_ = zdl::DlSystem::Runtime_t::GPU; + MLE_LOGI("GPU runtime selected"); + } else { + runtime_ = zdl::DlSystem::Runtime_t::CPU; + MLE_LOGI("CPU runtime selected, but GPU was configured"); + } + break; + } + case kSnpeAip: 
{ + if (zdl::SNPE::SNPEFactory::isRuntimeAvailable( + zdl::DlSystem::Runtime_t::AIP_FIXED8_TF)) { + runtime_ = zdl::DlSystem::Runtime_t::AIP_FIXED8_TF; + MLE_LOGI("AIP runtime selected"); + } else { + runtime_ = zdl::DlSystem::Runtime_t::CPU; + MLE_LOGI("CPU runtime selected, but AIP was configured"); + } + break; + } + case kSnpeCpu: { + runtime_ = zdl::DlSystem::Runtime_t::CPU; + MLE_LOGI("CPU runtime selected"); + break; + } + } + return MLE_OK; +} + +int32_t SNPEBase::ConfigureDimensions() { + zdl::DlSystem::Optional names_opt; + names_opt = snpe_params_.snpe->getInputTensorNames(); + const zdl::DlSystem::StringList& names = *names_opt; + const char * name = names.at(0); + auto uba_opt = snpe_params_.snpe->getInputOutputBufferAttributes(name); + const zdl::DlSystem::TensorShape& buffer_shape = (*uba_opt)->getDims(); + const zdl::DlSystem::Dimension* dims = buffer_shape.getDimensions(); + + batch_size_ = dims[0]; + engine_input_params_.height = dims[1]; + engine_input_params_.width = dims[2]; + + // Obtain input and result layer names + snpe_params_.input_layer = static_cast(names.at(0)); + + zdl::DlSystem::Optional out_names; + out_names = snpe_params_.snpe->getOutputTensorNames(); + const zdl::DlSystem::StringList& result_layers = *out_names; + + for (uint32_t i = 0; i < result_layers.size(); i++) { + MLE_LOGD("%s: result_layers[%d]:%s", __func__,i,result_layers.at(i)); + snpe_params_.result_layers.push_back(result_layers.at(i)); + } + return MLE_OK; +} + +std::unique_ptr SNPEBase::SetBuilderOptions() { + MLE_LOGI("%s: Enter", __func__); + std::unique_ptr snpe; + zdl::SNPE::SNPEBuilder snpeBuilder(snpe_params_.container.get()); + zdl::DlSystem::StringList output_layers; + + for (size_t i = 0; i < config_.output_layers.size(); i++) { + output_layers.append(config_.output_layers[i].c_str()); + } + + if (config_.io_type == NetworkIO::kUserBuffer) { + snpe = + snpeBuilder.setOutputLayers(output_layers).setRuntimeProcessor(runtime_) + 
.setUseUserSuppliedBuffers(true).setCPUFallbackMode(true).build(); + } else if (config_.io_type == NetworkIO::kITensor) { + snpe = + snpeBuilder.setOutputLayers(output_layers).setRuntimeProcessor(runtime_) + .setUseUserSuppliedBuffers(false).setCPUFallbackMode(true).build(); + } else { + MLE_LOGE("%s: Invalid Network IO value", __func__); + throw std::runtime_error("Invalid Network IO value"); + } + + MLE_LOGI("%s: Exit", __func__); + return snpe; +} + +int32_t SNPEBase::LoadModel(std::string& model_path) { + int32_t res = MLE_OK; + snpe_params_.container = LoadContainerFromFile(model_path); + if (nullptr == snpe_params_.container) { + PrintErrorStringAndExit(); + res = MLE_FAIL; + } + return res; +} + +std::unique_ptr SNPEBase::LoadContainerFromFile( + std::string container_path) { + std::unique_ptr container; + container = zdl::DlContainer::IDlContainer::open(container_path); + if (nullptr == container) { + MLE_LOGE("%s: Container loading failed", __func__); + return nullptr; + } + + return container; +} + +int32_t SNPEBase::PopulateMap(BufferType type) { + int32_t result = MLE_OK; + zdl::DlSystem::Optional names_opt; + + switch (type) { + case BufferType::kInput: + names_opt = snpe_params_.snpe->getInputTensorNames(); + break; + case BufferType::kOutput: + names_opt = snpe_params_.snpe->getOutputTensorNames(); + break; + default: + MLE_LOGE("Error obtaining tensor names"); + throw std::runtime_error("Error obtaining tensor names"); + } + + const zdl::DlSystem::StringList& names = *names_opt; + for (const char *name : names) { + if (config_.io_type == NetworkIO::kUserBuffer) { + result = CreateUserBuffer(type, name); + } else if (config_.io_type == NetworkIO::kITensor) { + result = CreateTensor(type, name); + } else { + MLE_LOGE("Invalid Network IO value %d", static_cast(config_.io_type)); + result = MLE_FAIL; + } + + if (MLE_OK != result) { + break; + } + } + return result; +} + +int32_t SNPEBase::CreateUserBuffer(BufferType type, const char * name) { + 
zdl::DlSystem::IUserBufferFactory& ub_factory = + zdl::SNPE::SNPEFactory::getUserBufferFactory(); + + auto uba_opt = snpe_params_.snpe->getInputOutputBufferAttributes(name); + if (!uba_opt) { + throw std::runtime_error( + std::string("Error obtaining attributes for tensor ") + name); + } + + auto m_encoding = (*uba_opt)->getEncoding(); + //auto enc_type = (*uba_opt)->getEncodingType(); + const zdl::DlSystem::TensorShape& buffer_shape = (*uba_opt)->getDims(); + + size_t elem_size = (*uba_opt)->getElementSize(); + + std::unique_ptr userBufferEncoding; + elem_size = sizeof(float); + userBufferEncoding = std::unique_ptr( + new zdl::DlSystem::UserBufferEncodingFloat()); + m_encoding = userBufferEncoding.get(); + + size_t buf_size = CalculateSizeFromDims(buffer_shape.rank(), + buffer_shape.getDimensions(), + elem_size); + + auto *heap_map = &snpe_params_.in_heap_map; + auto *ub_map = &snpe_params_.input_ub_map; + if (type == BufferType::kOutput) { + heap_map = &snpe_params_.out_heap_map; + ub_map = &snpe_params_.output_ub_map; + } + + heap_map->emplace(name, std::vector(buf_size / elem_size)); + + snpe_params_.ub_list.push_back(ub_factory.createUserBuffer( + heap_map->at(name).data(), buf_size, + GetStrides((*uba_opt)->getDims(), elem_size), m_encoding)); + ub_map->add(name, snpe_params_.ub_list.back().get()); + + return MLE_OK; +} + +int32_t SNPEBase::CreateTensor(BufferType type, const char* name) { + zdl::DlSystem::ITensorFactory& tensor_factory = + zdl::SNPE::SNPEFactory::getTensorFactory(); + + auto tensor_opt = snpe_params_.snpe->getInputOutputBufferAttributes(name); + if (!tensor_opt) { + throw std::runtime_error( + std::string("Error obtaining attributes for tensor ") + name); + } + const zdl::DlSystem::TensorShape& tensor_shape = (*tensor_opt)->getDims(); + + size_t elem_size = (*tensor_opt)->getElementSize(); + MLE_LOGI("Bufer type %d elements size in bytes: %zu", (int)type, elem_size); + + size_t buf_size = CalculateSizeFromDims(tensor_shape.rank(), + 
tensor_shape.getDimensions(), + elem_size); + auto *heap_map = &snpe_params_.in_heap_map; + auto *tensor_map = &snpe_params_.input_tensor_map; + if (type == BufferType::kOutput) { + heap_map = &snpe_params_.out_heap_map; + tensor_map = &snpe_params_.output_tensor_map; + } + + heap_map->emplace(name, std::vector(buf_size / elem_size)); + snpe_params_.tensor_list.push_back(tensor_factory.createTensor(tensor_shape)); + tensor_map->add(name, snpe_params_.tensor_list.back().get()); + + return MLE_OK; +} + +size_t SNPEBase::CalculateSizeFromDims(const size_t rank, + const zdl::DlSystem::Dimension* dims, + const size_t& element_size) { + if (0 == rank) { + return 0; + } + size_t size = element_size; + for (size_t i = 0; i < rank; i++) { + size *= dims[i]; + } + return size; +} + +std::vector SNPEBase::GetStrides(zdl::DlSystem::TensorShape dims, + const size_t& element_size) { + std::vector strides(dims.rank()); + strides[strides.size() - 1] = element_size; + size_t stride = strides[strides.size() - 1]; + + for (size_t i = dims.rank() - 1; i > 0; i--) { + stride *= dims[i]; + strides[i - 1] = stride; + } + + return strides; +} + +void* SNPEBase::GetInputBuffer() { + void* buf = + (void*)snpe_params_.in_heap_map[snpe_params_.input_layer.c_str()].data(); + + // memset buffer now to avoid it while padding/mean subtract + memset(buf, 0, snpe_params_.in_heap_map[snpe_params_.input_layer.c_str()].size()); + return buf; +} + +int32_t SNPEBase::ExecuteModel() { + MLE_LOGI("%s: Enter", __func__); + + if (config_.io_type == NetworkIO::kUserBuffer) { + if (!snpe_params_.snpe->execute(snpe_params_.input_ub_map, + snpe_params_.output_ub_map)) { + PrintErrorStringAndExit(); + return MLE_FAIL; + } + } else if (config_.io_type == NetworkIO::kITensor) { + snpe_params_.output_tensor_map.clear(); + if (!snpe_params_.snpe->execute(snpe_params_.input_tensor_map, + snpe_params_.output_tensor_map)) { + PrintErrorStringAndExit(); + return MLE_FAIL; + } + } else { + MLE_LOGE("%s: Invalid Network 
IO value", __func__); + return MLE_FAIL; + } + + MLE_LOGI("%s: Exit", __func__); + return MLE_OK; +} + +int32_t SNPEBase::PostProcess(GstBuffer* buffer) { + MLE_LOGI("%s: Enter", __func__); + + std::vector score_buf; + const zdl::DlSystem::StringList &output_buf_names = + snpe_params_.output_ub_map.getUserBufferNames(); + const zdl::DlSystem::StringList &output_tensor_names = + snpe_params_.output_tensor_map.getTensorNames(); + const zdl::DlSystem::StringList *output_names = &output_buf_names; + if (config_.io_type == NetworkIO::kITensor) { + output_names = &output_tensor_names; + } + std::for_each( + output_names->begin(), + output_names->end(), + [&](const char* name) + { + if (0 == std::strcmp(name, snpe_params_.result_layers[0].c_str())) { + if (config_.io_type == NetworkIO::kUserBuffer) { + score_buf = snpe_params_.out_heap_map.at(name); + } else if (config_.io_type == NetworkIO::kITensor) { + auto t = snpe_params_.output_tensor_map.getTensor(name); + for (auto it = t->begin(); it != t->end(); it++) { + score_buf.push_back(*it); + } + } + } + }); + + uint32_t top_score_idx = 0; + float top_score = 0.0; + + for (size_t i = 0; i < score_buf.size(); i++) { + if (score_buf[i] > top_score) { + top_score = score_buf[i]; + top_score_idx = i; + } + } + if (top_score_idx < labels_.size() && + top_score > config_.conf_threshold) { + + GstMLClassificationMeta *meta = + gst_buffer_add_classification_meta(buffer); + if (!meta) { + MLE_LOGE("Failed to create metadata"); + return MLE_NULLPTR; + } + + meta->result.confidence = top_score * 100; + uint32_t label_size = labels_.at(top_score_idx).size() + 1; + meta->result.name = (gchar *)malloc(label_size); + snprintf(meta->result.name, label_size, "%s", labels_.at(top_score_idx).c_str()); + } + + MLE_LOGI("%s: Exit", __func__); + return MLE_OK; +} + +void SNPEBase::PrintErrorStringAndExit() { + const char* const err = zdl::DlSystem::getLastErrorString(); + MLE_LOGE(" %s", err); +} + +int32_t SNPEBase::InitFramework() { + 
MLE_LOGI("%s Enter", __func__); + version_ = zdl::SNPE::SNPEFactory::getLibraryVersion(); + MLE_LOGI("SNPE version: %s", version_.toString().c_str()); + int32_t res = MLE_OK; + + snpe_params_.snpe = SetBuilderOptions(); + if (nullptr == snpe_params_.snpe) { + PrintErrorStringAndExit(); + res = MLE_FAIL; + } + + if (MLE_OK == res) { + ConfigureDimensions(); + res = PopulateMap(BufferType::kInput); + } + if (MLE_OK == res) { + res = PopulateMap(BufferType::kOutput); + } + + MLE_LOGI("%s Exit", __func__); + return res; +} + +}; // namespace mle diff --git a/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_engine/snpe_base.h b/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_engine/snpe_base.h new file mode 100644 index 0000000..b5e705e --- /dev/null +++ b/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_engine/snpe_base.h @@ -0,0 +1,121 @@ +/* +* Copyright (c) 2020, The Linux Foundation. All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions are +* met: +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials provided +* with the distribution. +* * Neither the name of The Linux Foundation nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED +* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT +* ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS +* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#pragma once + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "DlContainer/IDlContainer.hpp" +#include "SNPE/SNPE.hpp" +#include "SNPE/SNPEFactory.hpp" +#include "SNPE/SNPEBuilder.hpp" +#include "DlSystem/DlError.hpp" +#include "DlSystem/ITensorFactory.hpp" +#include "DlSystem/TensorMap.hpp" +#include "DlSystem/TensorShape.hpp" +#include "DlSystem/StringList.hpp" +#include "DlSystem/DlError.hpp" +#include "DlSystem/IUserBuffer.hpp" +#include "DlSystem/IUserBufferFactory.hpp" +#include "DlSystem/UserBufferMap.hpp" +#include "DlSystem/IBufferAttributes.hpp" +#include "ml_engine_intf.h" + +namespace mle { + +enum class BufferType { + kOutput = 0, + kInput +}; + +struct SNPEParams { + std::unique_ptr container; + std::unique_ptr snpe; + + std::vector> ub_list; + zdl::DlSystem::UserBufferMap output_ub_map; + zdl::DlSystem::UserBufferMap input_ub_map; + + std::vector> tensor_list; + zdl::DlSystem::TensorMap output_tensor_map; + zdl::DlSystem::TensorMap input_tensor_map; + + std::unordered_map> in_heap_map; + std::unordered_map> out_heap_map; + + std::string input_layer; + std::vector result_layers; +}; + +class SNPEBase : public MLEngine { + public: + SNPEBase(MLConfig &config); + virtual ~SNPEBase(){}; + + protected: + void PrintErrorStringAndExit(); + virtual int32_t PostProcess(GstBuffer* buffer); + SNPEParams snpe_params_; + 
zdl::DlSystem::Runtime_t runtime_; + zdl::DlSystem::Version_t version_; + + private: + int32_t LoadModel(std::string& model_path); + int32_t ConfigureRuntime(MLConfig &config); + int32_t ConfigureDimensions(); + int32_t InitFramework(); + int32_t ExecuteModel(); + void* GetInputBuffer(); + std::unique_ptr LoadContainerFromFile( + std::string container_path); + std::unique_ptr SetBuilderOptions(); + virtual size_t CalculateSizeFromDims(const size_t rank, + const zdl::DlSystem::Dimension* dims, + const size_t& element_size); + virtual std::vector GetStrides(zdl::DlSystem::TensorShape dims, + const size_t& element_size); + + int32_t PopulateMap(BufferType type); + int32_t CreateUserBuffer(BufferType type, const char* name); + int32_t CreateTensor(BufferType type, const char* name); +}; + +}; // namespace mle diff --git a/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_engine/snpe_detection.cc b/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_engine/snpe_detection.cc new file mode 100644 index 0000000..70ce8e1 --- /dev/null +++ b/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_engine/snpe_detection.cc @@ -0,0 +1,154 @@ +/* +* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions are +* met: +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials provided +* with the distribution. 
+* * Neither the name of The Linux Foundation nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED +* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT +* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS +* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#include +#include +#include "snpe_detection.h" +#include "common_utils.h" + +namespace mle { + +SNPEDetection::SNPEDetection(MLConfig &config) : SNPEBase(config) {} +SNPEDetection::~SNPEDetection() {} + +int32_t SNPEDetection::PostProcess(GstBuffer* buffer) { + MLE_LOGI("%s: Enter", __func__); + + std::vector score_buf; + std::vector box_buf; + std::vector class_buf; + + const zdl::DlSystem::StringList &output_buf_names = + snpe_params_.output_ub_map.getUserBufferNames(); + const zdl::DlSystem::StringList &output_tensor_names = + snpe_params_.output_tensor_map.getTensorNames(); + const zdl::DlSystem::StringList *output_names = &output_buf_names; + if (config_.io_type == NetworkIO::kITensor) { + output_names = &output_tensor_names; + } + std::for_each( + output_names->begin(), + output_names->end(), + [&](const char* name) + { + if (config_.io_type == NetworkIO::kUserBuffer) { + if (0 == std::strcmp(name, snpe_params_.result_layers[2].c_str())) { + score_buf = snpe_params_.out_heap_map.at(name); + } else if (0 
== std::strcmp( + name, snpe_params_.result_layers[0].c_str())) { + box_buf = snpe_params_.out_heap_map.at(name); + } else if (0 == std::strcmp( + name, snpe_params_.result_layers[3].c_str())) { + class_buf = snpe_params_.out_heap_map.at(name); + } + } else if (config_.io_type == NetworkIO::kITensor) { + if (0 == std::strcmp(name, snpe_params_.result_layers[0].c_str())) { + auto t = snpe_params_.output_tensor_map.getTensor(name); + for (auto it = t->begin(); it != t->end(); it++) { + score_buf.push_back(*it); + } + } else if (0 == std::strcmp( + name, snpe_params_.result_layers[1].c_str())) { + auto t = snpe_params_.output_tensor_map.getTensor(name); + for (auto it = t->begin(); it != t->end(); it++) { + box_buf.push_back(*it); + } + } else if (0 == std::strcmp( + name, snpe_params_.result_layers[2].c_str())) { + auto t = snpe_params_.output_tensor_map.getTensor(name); + for (auto it = t->begin(); it != t->end(); it++) { + class_buf.push_back(*it); + } + } + } + }); + + uint32_t width = source_params_.width; + uint32_t height = source_params_.height; + + float scale_ratio_x = (float)engine_input_params_.width / scale_width_; + float scale_ratio_y = (float)engine_input_params_.height / scale_height_; + + if (config_.preprocess_mode == kKeepARCrop) { + width = po_.width; + height = po_.height; + } + + if (score_buf.size() && box_buf.size() && class_buf.size()) { + uint32_t num_obj = 0; + for (size_t i = 0; i < score_buf.size(); i++) { + if (score_buf[i] < config_.conf_threshold) { + continue; + } + GstMLDetectionMeta *meta = gst_buffer_add_detection_meta(buffer); + if (!meta) { + MLE_LOGE("Failed to create metadata"); + return MLE_NULLPTR; + } + + GstMLClassificationResult *box_info = (GstMLClassificationResult*)malloc( + sizeof(GstMLClassificationResult)); + + uint32_t label_size = labels_.at( + static_cast(class_buf[i] + 0.5)).size() + 1; + box_info->name = (gchar *)malloc(label_size); + snprintf(box_info->name, label_size, "%s", + 
labels_.at(static_cast(class_buf[i] + 0.5)).c_str()); + box_info->confidence = score_buf[i]; + meta->box_info = g_slist_append (meta->box_info, box_info); + + meta->bounding_box.x = + std::lround(box_buf[i * 4 + 1] * width * scale_ratio_x) + po_.x_offset; + meta->bounding_box.y = + std::lround(box_buf[i * 4] * height * scale_ratio_y) + po_.y_offset; + meta->bounding_box.width = + (std::lround(box_buf[i * 4 + 3] * width * scale_ratio_x) + + po_.x_offset) - meta->bounding_box.x; + meta->bounding_box.height = + (std::lround(box_buf[i * 4 + 2] * height * scale_ratio_y) + + po_.y_offset) - meta->bounding_box.y; + + num_obj++; + + MLE_LOGD("object info: name: %s , score %f, box x %d y %d w %d h %d", + box_info->name, box_info->confidence, + meta->bounding_box.x, + meta->bounding_box.y, + meta->bounding_box.width, + meta->bounding_box.height); + } + MLE_LOGI("Inference engine detected %d objects, highest score: %f", + num_obj, score_buf[0]); + } + + MLE_LOGI("%s: Exit", __func__); + return MLE_OK; +} + +}; // namespace mle diff --git a/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_engine/snpe_detection.h b/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_engine/snpe_detection.h new file mode 100644 index 0000000..213453c --- /dev/null +++ b/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_engine/snpe_detection.h @@ -0,0 +1,43 @@ +/* +* Copyright (c) 2020, The Linux Foundation. All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions are +* met: +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. 
+* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials provided +* with the distribution. +* * Neither the name of The Linux Foundation nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED +* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT +* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS +* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#pragma once + +#include "snpe_base.h" + +namespace mle { + +class SNPEDetection : public SNPEBase { + public: + SNPEDetection(MLConfig &config); + ~SNPEDetection(); + int32_t PostProcess(GstBuffer* buffer); +}; + +}; // namespace mle diff --git a/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_engine/snpe_yolodetection.cc b/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_engine/snpe_yolodetection.cc new file mode 100644 index 0000000..74ccf62 --- /dev/null +++ b/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_engine/snpe_yolodetection.cc @@ -0,0 +1,308 @@ +/* +Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted (subject to the limitations in the +disclaimer below) provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * Neither the name of Qualcomm Innovation Center, Inc. nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE +GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT +HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED +WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER +IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#include +#include +#include "snpe_yolodetection.h" +#include "common_utils.h" + +//#include + +//#include +//#include +//#include +//#include + +namespace Algorithm { + +static float fastPow(float p) { + float offset = (p < 0) ? 1.0f : 0.0f; + float clipp = (p < -126) ? 
-126.0f : p; + int32_t w = (int32_t)clipp; + float z = clipp - w + offset; + union { + uint32_t i; + float f; + } v = {(uint32_t)((1 << 23) * + (clipp + 121.2740575f + 27.7280233f / (4.84252568f - z) - + 1.49012907f * z))}; + + return v.f; +} + +inline float fastExp(float p) { return fastPow(1.442695040f * p); } + +inline float Sigmoid(float x) +{ + return (1 / (1 + exp(-x))); +} + +inline float fastSigmoid(float x) +{ + return (1 / (1 + fastExp(-x))); +} + +} // namespace Algorithm + +namespace mle { + +const float strides[3]= {8,16,32}; +const float anchors[3][6] = {{10, 13, 16, 30, 33, 23}, {30, 61, 62, 45, 59, 119}, {116, 90, 156, 198, 373, 326}}; + +std::vector boundingBoxDecode(const float *pbox, float x_o, float y_o, float anchor_w, float anchor_h, float stride) { + float x, y, xw, yh, x1, x2, y1, y2; + x = (Algorithm::Sigmoid(pbox[0]) + x_o) * stride; + y = (Algorithm::Sigmoid(pbox[1]) + y_o) * stride; + xw = exp(pbox[2]) * anchor_w; + yh = exp(pbox[3]) * anchor_h; + x1 = x - xw / 2; if(x1<0) x1=0; + y1 = y - yh / 2; if(y1<0) y1=0; + x2 = x + xw / 2; + y2 = y + yh / 2; + return {x1, y1, x2, y2}; +} + +std::vector fastBoundingBoxDecodeV5(const float *pbox, float x_o, float y_o, float anchor_w, float anchor_h, float stride) { + float x, y, xw, yh, x1, x2, y1, y2; + x = ( Algorithm::fastSigmoid(pbox[0]) * 2 - 0.5 + x_o) * stride; + y = ( Algorithm::fastSigmoid(pbox[1]) * 2 - 0.5 + y_o) * stride; + xw = Algorithm::fastSigmoid(pbox[2]); + xw = xw * xw * anchor_w * 4; + yh = Algorithm::fastSigmoid(pbox[3]); + yh = yh * yh * anchor_h * 4; + x1 = x - xw / 2; if(x1<0) x1=0; + y1 = y - yh / 2; if(y1<0) y1=0; + x2 = x + xw / 2; + y2 = y + yh / 2; + return {x1, y1, x2, y2}; +} + +static float computeIoU(const float *box1, const float *box2) { + float box1_xmin = box1[0], box2_xmin = box2[0]; + float box1_ymin = box1[1], box2_ymin = box2[1]; + float box1_xmax = box1[2], box2_xmax = box2[2]; + float box1_ymax = box1[3], box2_ymax = box2[3]; + float ixmin = 
std::max(box1_xmin, box2_xmin); + float iymin = std::max(box1_ymin, box2_ymin); + float ixmax = std::min(box1_xmax, box2_xmax); + float iymax = std::min(box1_ymax, box2_ymax); + + // if(!((box1_xmin <= box1_xmax) && (box1_ymin <= box1_ymax) && + // (box2_xmin <= box2_xmax) && (box2_ymin <= box2_ymax))) { + // return 0; + // } + + float iou = 0.0f; + if ((ixmin < ixmax) && (iymin < iymax)) { + float intersection_area = (ixmax - ixmin) * (iymax - iymin); + // union = area1 + area2 - intersection + float union_area = (box1_xmax - box1_xmin) * (box1_ymax - box1_ymin) + + (box2_xmax - box2_xmin) * (box2_ymax - box2_ymin) - + intersection_area; + iou = (union_area > 0.0f) ? intersection_area / union_area : 0.0f; + } + return iou; +} + +void SNPEYoloDetection::doNMS(std::vector> &boxes, float thres) { + // 0,1,2,3 - vertex, 4 - confidence + using box = std::vector; + // std::cout << boxes.size() << std::endl; + std::sort(boxes.begin(), boxes.end(), + [](const box &a, const box &b) { return a[4] > b[4]; }); + + for (auto it = boxes.begin(); it != boxes.end(); it++) { + box &cand1 = *it; + + for (auto jt = it + 1; jt != boxes.end();) { + box &cand2 = *jt; + if(cand1[5] != cand2[5]){ + jt++; + continue; // nms with same class only + } + if (computeIoU(&cand1[0], &cand2[0]) >= thres) + jt = boxes.erase(jt); // Possible candidate for optimization + else + jt++; + } + } + // std::cout << "Final number of discrete bounding boxes = "<< boxes.size() << + // std::endl; +} + +SNPEYoloDetection::SNPEYoloDetection(MLConfig &config) : SNPEBase(config) {} + +SNPEYoloDetection::~SNPEYoloDetection() {} + +int32_t SNPEYoloDetection::PostProcess(GstBuffer* buffer) { + MLE_LOGI("%s: Enter.", __func__); + + uint32_t detection_size = (NUM_COORDINATES + 1 + label_count_ ); //4 + 1 + 80 + std::vector> res_vec; + for (uint32_t i = 0; i < snpe_params_.result_layers.size(); i++) { + const char* name = snpe_params_.result_layers[i].c_str(); + std::vector _raw_buf; + if (config_.io_type == 
NetworkIO::kUserBuffer) { + _raw_buf= snpe_params_.out_heap_map.at(name); + MLE_LOGD("%s: out_heap_map: %s. size=%d", __func__,name,(uint32_t)_raw_buf.size()); + } else if (config_.io_type == NetworkIO::kITensor) { + auto t = snpe_params_.output_tensor_map.getTensor(name); + for (auto it = t->begin(); it != t->end(); it++) { + _raw_buf.push_back(*it); + } + MLE_LOGD("%s: output_tensor_map: %s. size=%d", __func__,name,(uint32_t)_raw_buf.size()); + } + + uint32_t outbuf_size=_raw_buf.size(); + for (uint32_t n = 0; n < DEFAULT_ANCHOR_BOXES; n++) + { + //uint32_t grid_size= engine_input_params_.height/strides[n]; + if (batch_size_*(engine_input_params_.height/strides[n])*(engine_input_params_.width/strides[n])*DEFAULT_ANCHOR_BOXES*detection_size == outbuf_size){ + anchorBoxProcess(n,_raw_buf.data(),res_vec); + } + } + } + doNMS(res_vec, config_.nms_threshold); + ShowDetectionOverlay(buffer, res_vec); + MLE_LOGI("%s: Exit", __func__); + return MLE_OK; +} + +int32_t SNPEYoloDetection::anchorBoxProcess(uint32_t anchorBoxIdx,float *pdata, std::vector> &res_vec) { + float stride = strides[anchorBoxIdx]; + uint32_t grid_height = engine_input_params_.height/stride; + uint32_t grid_width = engine_input_params_.width/stride; + uint32_t detection_size = (NUM_COORDINATES + 1 + label_count_ ); //4 + 1 + 80 + uint32_t cnt = 0; + for (uint32_t batchIdx = 0; batchIdx < batch_size_; batchIdx++) { + for (uint32_t h = 0; h < grid_height; h++) { + for (uint32_t w = 0; w < grid_width; w++) { + for (uint32_t winIdx = 0; winIdx < 3 ; winIdx++) { + float *pbox = pdata + cnt*detection_size; + float *pclass = pbox + NUM_COORDINATES + 1; + cnt++; + float objectConfidenceScore =Algorithm::fastSigmoid(pbox[4]); + if (objectConfidenceScore <= config_.conf_threshold) { + continue; + } + uint32_t max_class_index =0; + if(label_count_ > 1){ + max_class_index = std::distance(pclass, std::max_element(pclass, pclass + label_count_)); + } + float max_objprob 
=Algorithm::fastSigmoid(pclass[max_class_index]); + if (max_objprob * objectConfidenceScore <= config_.conf_threshold) { + continue; + } + float anchor_w= anchors[anchorBoxIdx][winIdx * 2]; + float anchor_h = anchors[anchorBoxIdx][(winIdx * 2) + 1]; + std::vector box = fastBoundingBoxDecodeV5(pbox, (float)w, (float)h, anchor_w, anchor_h,stride); + res_vec.emplace_back(std::initializer_list{ + box[0], box[1], box[2],box[3], max_objprob * objectConfidenceScore,(float)max_class_index}); + } + } + } + } + MLE_LOGI("anchorBoxIdx:%d grid:%dx%d: total_boxes:%d cnt:%d ",anchorBoxIdx, grid_width,grid_height,(int)res_vec.size(),cnt); + return MLE_OK; +} + +int32_t SNPEYoloDetection::ShowDetectionOverlay(GstBuffer* buffer, std::vector> &res_vec) +{ + uint32_t width = source_params_.width; + uint32_t height = source_params_.height; + + if (config_.preprocess_mode == kKeepARCrop) { + width = po_.width; + height = po_.height; + } + uint32_t total_boxes =res_vec.size(); + MLE_LOGI("width:%d: height:%d x_offset:%d y_offset:%d total_boxes:%d ",width,height,po_.x_offset,po_.y_offset,total_boxes); + + if(total_boxes > config_.max_detection_result) total_boxes = config_.max_detection_result; + for (uint32_t i = 0; i < total_boxes; i++) + { + float confidence= res_vec[i][4]; + float class_idx= res_vec[i][5]; + if (class_idx>=label_count_) continue; + // Only interested in people detection, Discard other classes + if(class_idx!=0.0) continue; + std::string class_name= labels_.at(static_cast(class_idx)); + uint32_t label_size = class_name.size() + 1; + + uint32_t bx= std::lround(res_vec[i][0] * width /scale_width_) + po_.x_offset; + uint32_t by= std::lround(res_vec[i][1] * height /scale_height_) + po_.y_offset; + uint32_t bw= std::lround(res_vec[i][2] * width /scale_width_) + po_.x_offset - bx; + uint32_t bh= std::lround(res_vec[i][3] * height /scale_height_) + po_.y_offset - by; + + if ((config_.x_axis != 0) || (config_.y_axis != 0) || (config_.width != 0) || + (config_.height != 0)) { 
+ if (!(((bx > config_.x_axis) && (bx < config_.x_axis + config_.width)) || + ((bx + bw > config_.x_axis) && (bx + bw < config_.x_axis + config_.width)))) { + continue; + } + + if (!(((by > config_.y_axis) && (by < config_.y_axis + config_.height)) || + ((by + bh > config_.y_axis) && (by + bh < config_.y_axis + config_.height)))) { + continue; + } + } + + + MLE_LOGI("object info: name: %s , score %3f, box x %d y %d w %d h %d", + class_name.c_str(), confidence, bx, by, bw, bh); + + GstMLDetectionMeta *meta = gst_buffer_add_detection_meta(buffer); + if (!meta) { + MLE_LOGE("Failed to create metadata"); + return MLE_NULLPTR; + } + + GstMLClassificationResult *box_info = (GstMLClassificationResult*)malloc( + sizeof(GstMLClassificationResult)); + + box_info->name = (gchar *)malloc(label_size); + snprintf(box_info->name, label_size, "%s", class_name.c_str()); + box_info->confidence = confidence; + meta->box_info = g_slist_append (meta->box_info, box_info); + meta->bounding_box.x = bx; + meta->bounding_box.y = by; + meta->bounding_box.width = bw; + meta->bounding_box.height = bh; + + } + return MLE_OK; + +} + +}; // namespace mle diff --git a/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_engine/snpe_yolodetection.h b/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_engine/snpe_yolodetection.h new file mode 100644 index 0000000..fc0506d --- /dev/null +++ b/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_engine/snpe_yolodetection.h @@ -0,0 +1,58 @@ +/* +Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted (subject to the limitations in the +disclaimer below) provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+ + * Redistributions in binary form must reproduce the above + copyright notice, this list of conditions and the following + disclaimer in the documentation and/or other materials provided + with the distribution. + + * Neither the name of Qualcomm Innovation Center, Inc. nor the names of its + contributors may be used to endorse or promote products derived + from this software without specific prior written permission. + +NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE +GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT +HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED +WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. +IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE +GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER +IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR +OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#pragma once + +#include "snpe_base.h" + +// newmodel config +#define DEFAULT_ANCHOR_BOXES 3 +#define NUM_COORDINATES 4 + +namespace mle { + + class SNPEYoloDetection : public SNPEBase { + public: + SNPEYoloDetection(MLConfig &config); + ~SNPEYoloDetection(); + int32_t PostProcess(GstBuffer* buffer); + + private: + int32_t anchorBoxProcess(uint32_t anchorBoxIdx, float *pdata, std::vector> &res_vec); + void doNMS(std::vector> &boxes, float thres); + int32_t ShowDetectionOverlay(GstBuffer* buffer, std::vector> &res_vec); + + }; + +}; // namespace mle diff --git a/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_gst_snpe/CMakeLists.txt b/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_gst_snpe/CMakeLists.txt new file mode 100644 index 0000000..b297ef4 --- /dev/null +++ b/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_gst_snpe/CMakeLists.txt @@ -0,0 +1,33 @@ +# GStreamer plugin. +set(GST_MLE_SNPE gstqtimlesnpe) + +add_library(${GST_MLE_SNPE} SHARED + mle_snpe.cc +) + +target_include_directories(${GST_MLE_SNPE} PUBLIC + ${GST_INCLUDE_DIRS} +) + +target_include_directories(${GST_MLE_SNPE} PRIVATE + ${KERNEL_BUILDDIR}/usr/include + ${SYSROOT_INCDIR}/ion_headers + ${CMAKE_SOURCE_DIR} +) + +target_link_libraries(${GST_MLE_SNPE} PRIVATE + ${GST_LIBRARIES} + ${GST_ALLOC_LIBRARIES} + ${GST_VIDEO_LIBRARIES} + qtimlmeta + Engine_MLE + gstqtivideobase +) + +install( + TARGETS ${GST_MLE_SNPE} + LIBRARY DESTINATION ${SYSROOT_AARCH_LIBDIR}/gstreamer-1.0 + PERMISSIONS OWNER_EXECUTE OWNER_WRITE OWNER_READ + GROUP_EXECUTE GROUP_READ + WORLD_EXECUTE WORLD_READ +) diff --git a/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_gst_snpe/mle_snpe.cc b/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_gst_snpe/mle_snpe.cc new file mode 100644 index 0000000..3f1cff3 --- /dev/null +++ 
b/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_gst_snpe/mle_snpe.cc @@ -0,0 +1,862 @@ +/* +* Copyright (c) 2021, The Linux Foundation. All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions are +* met: +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials provided +* with the distribution. +* * Neither the name of The Linux Foundation nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED +* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT +* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS +* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#include +#include +#ifdef HAVE_CONFIG_H +#include "config.h" +#endif + +#include "mle_snpe.h" +#include "mle_engine/snpe_base.h" +#include "mle_engine/snpe_detection.h" +#include "mle_engine/snpe_yolodetection.h" + +#define GST_CAT_DEFAULT mle_snpe_debug +GST_DEBUG_CATEGORY_STATIC (mle_snpe_debug); + +#define gst_mle_snpe_parent_class parent_class +G_DEFINE_TYPE (GstMLESNPE, gst_mle_snpe, GST_TYPE_VIDEO_FILTER); + +#define GST_ML_VIDEO_FORMATS "{ NV12, NV21 }" + +#define DEFAULT_PROP_SNPE_INPUT_FORMAT mle::kBgrFloat +#define DEFAULT_PROP_MLE_MEAN_VALUE 128.0 +#define DEFAULT_PROP_MLE_SIGMA_VALUE 128.0 +#define DEFAULT_PROP_MLE_DETECTION_BOX 0 +#define DEFAULT_PROP_SNPE_RUNTIME mle::kSnpeDsp +#define DEFAULT_PROP_MLE_CONF_THRESHOLD 0.5 +#define DEFAULT_PROP_MLE_PREPROCESSING_TYPE mle::kKeepARPad +#define DEFAULT_PROP_MLE_PREPROCESS_ACCEL mle::kPreprocessGpu +#define GST_MLE_UNUSED(var) ((void)var) + +enum { + PROP_0, + PROP_MLE_PARSE_CONFIG, + PROP_MLE_FRAMEWORK_TYPE, + PROP_MLE_MODEL_FILENAME, + PROP_MLE_LABELS_FILENAME, + PROP_SNPE_INPUT_FORMAT, + PROP_MLE_POSTPROCESSING, + PROP_MLE_MEAN_VALUES, + PROP_MLE_SIGMA_VALUES, + PROP_SNPE_RUNTIME, + PROP_SNPE_OUTPUT_LAYERS, + PROP_MLE_PREPROCESSING_TYPE, + PROP_MLE_PREPROCESSING_ACCEL, + PROP_MLE_CONF_THRESHOLD, +}; + + +static GstStaticCaps gst_mle_snpe_format_caps = + GST_STATIC_CAPS (GST_VIDEO_CAPS_MAKE (GST_ML_VIDEO_FORMATS) ";" + GST_VIDEO_CAPS_MAKE_WITH_FEATURES ("ANY", GST_ML_VIDEO_FORMATS)); + +static void +gst_mle_set_property_mask(guint &mask, guint property_id) +{ + mask |= 1 << property_id; +} + +static gboolean +gst_mle_check_is_set(guint &mask, guint property_id) +{ + return (mask & 1 << property_id) ? 
true:false; +} + +static void +gst_mle_snpe_set_property(GObject *object, guint property_id, + const GValue *value, GParamSpec *pspec) +{ + GstMLESNPE *mle = GST_MLE_SNPE (object); + + GST_OBJECT_LOCK (mle); + switch (property_id) { + case PROP_MLE_PARSE_CONFIG: + gst_mle_set_property_mask(mle->property_mask, property_id); + mle->config_location = g_strdup(g_value_get_string (value)); + break; + case PROP_MLE_PREPROCESSING_TYPE: + gst_mle_set_property_mask(mle->property_mask, property_id); + mle->preprocessing_type = g_value_get_enum (value); + break; + case PROP_MLE_PREPROCESSING_ACCEL: + gst_mle_set_property_mask(mle->property_mask, property_id); + mle->preprocess_accel = g_value_get_enum (value); + break; + case PROP_MLE_MODEL_FILENAME: + gst_mle_set_property_mask(mle->property_mask, property_id); + mle->model_filename = g_strdup(g_value_get_string (value)); + break; + case PROP_MLE_LABELS_FILENAME: + gst_mle_set_property_mask(mle->property_mask, property_id); + mle->labels_filename = g_strdup(g_value_get_string (value)); + break; + case PROP_SNPE_INPUT_FORMAT: + gst_mle_set_property_mask(mle->property_mask, property_id); + mle->input_format = g_value_get_enum (value); + break; + case PROP_MLE_POSTPROCESSING: + gst_mle_set_property_mask(mle->property_mask, property_id); + mle->postprocessing = g_strdup(g_value_get_string (value)); + break; + case PROP_MLE_MEAN_VALUES: + gst_mle_set_property_mask(mle->property_mask, property_id); + mle->blue_mean = + g_value_get_double (gst_value_array_get_value (value, 0)); + mle->green_mean = + g_value_get_double (gst_value_array_get_value (value, 1)); + mle->red_mean = + g_value_get_double (gst_value_array_get_value (value, 2)); + break; + case PROP_MLE_SIGMA_VALUES: + gst_mle_set_property_mask(mle->property_mask, property_id); + mle->blue_sigma = + g_value_get_double (gst_value_array_get_value (value, 0)); + mle->green_sigma = + g_value_get_double (gst_value_array_get_value (value, 1)); + mle->red_sigma = + g_value_get_double 
(gst_value_array_get_value (value, 2)); + break; + case PROP_SNPE_RUNTIME: + gst_mle_set_property_mask(mle->property_mask, property_id); + mle->runtime = g_value_get_enum (value); + break; + case PROP_SNPE_OUTPUT_LAYERS: + gst_mle_set_property_mask(mle->property_mask, property_id); + mle->output_layers = g_strdup(g_value_get_string (value)); + break; + case PROP_MLE_CONF_THRESHOLD: + gst_mle_set_property_mask(mle->property_mask, property_id); + mle->conf_threshold = g_value_get_float (value); + break; + default: + G_OBJECT_WARN_INVALID_PROPERTY_ID (object, property_id, pspec); + break; + } + GST_OBJECT_UNLOCK (mle); +} + +static void +gst_mle_snpe_get_property(GObject *object, guint property_id, + GValue *value, GParamSpec *pspec) +{ + GstMLESNPE *mle = GST_MLE_SNPE (object); + + GST_OBJECT_LOCK (mle); + switch (property_id) { + case PROP_MLE_PARSE_CONFIG: + g_value_set_string (value, mle->config_location); + break; + case PROP_MLE_PREPROCESSING_TYPE: + g_value_set_enum (value, mle->preprocessing_type); + break; + case PROP_MLE_PREPROCESSING_ACCEL: + g_value_set_enum (value, mle->preprocess_accel); + break; + case PROP_MLE_MODEL_FILENAME: + g_value_set_string (value, mle->model_filename); + break; + case PROP_MLE_LABELS_FILENAME: + g_value_set_string (value, mle->labels_filename); + break; + case PROP_SNPE_INPUT_FORMAT: + g_value_set_enum (value, mle->input_format); + break; + case PROP_MLE_POSTPROCESSING: + g_value_set_string (value, mle->postprocessing); + break; + case PROP_MLE_MEAN_VALUES: { + GValue val = G_VALUE_INIT; + g_value_init (&val, G_TYPE_DOUBLE); + g_value_set_double (&val, mle->blue_mean); + gst_value_array_append_value (value, &val); + g_value_set_double (&val, mle->green_mean); + gst_value_array_append_value (value, &val); + g_value_set_double (&val, mle->red_mean); + gst_value_array_append_value (value, &val); + break; + } + case PROP_MLE_SIGMA_VALUES: { + GValue val = G_VALUE_INIT; + g_value_init (&val, G_TYPE_DOUBLE); + g_value_set_double 
(&val, mle->blue_sigma); + gst_value_array_append_value (value, &val); + g_value_set_double (&val, mle->green_sigma); + gst_value_array_append_value (value, &val); + g_value_set_double (&val, mle->red_sigma); + gst_value_array_append_value (value, &val); + break; + } + case PROP_SNPE_RUNTIME: + g_value_set_enum (value, mle->runtime); + break; + case PROP_SNPE_OUTPUT_LAYERS: + g_value_set_string (value, mle->output_layers); + break; + case PROP_MLE_CONF_THRESHOLD: + g_value_set_float (value, mle->conf_threshold); + break; + default: + G_OBJECT_WARN_INVALID_PROPERTY_ID (object, property_id, pspec); + break; + } + GST_OBJECT_UNLOCK (mle); +} + +static void +gst_mle_snpe_finalize(GObject * object) +{ + GstMLESNPE *mle = GST_MLE_SNPE (object); + + if (mle->engine) { + mle->engine->Deinit(); + delete (mle->engine); + mle->engine = nullptr; + } + if (mle->output_layers) { + g_free(mle->output_layers); + } + if (mle->model_filename) { + g_free(mle->model_filename); + } + if (mle->labels_filename) { + g_free(mle->labels_filename); + } + if (mle->config_location) { + g_free(mle->config_location); + } + if (mle->postprocessing) { + g_free(mle->postprocessing); + } + + G_OBJECT_CLASS(parent_class)->finalize(G_OBJECT(mle)); +} + +static GstCaps * +gst_mle_snpe_caps(void) +{ + static GstCaps *caps = NULL; + static volatile gsize inited = 0; + if (g_once_init_enter(&inited)) { + caps = gst_static_caps_get(&gst_mle_snpe_format_caps); + g_once_init_leave(&inited, 1); + } + return caps; +} + +static GstPadTemplate * +gst_mle_src_template(void) +{ + return gst_pad_template_new("src", GST_PAD_SRC, GST_PAD_ALWAYS, + gst_mle_snpe_caps()); +} + +static GstPadTemplate * +gst_mle_sink_template (void) +{ + return gst_pad_template_new("sink", GST_PAD_SINK, GST_PAD_ALWAYS, + gst_mle_snpe_caps ()); +} + +static gboolean +gst_mle_snpe_parse_config(gchar *config_location, + mle::MLConfig &configuration) { + gboolean rc = FALSE; + GstStructure *structure = NULL; + + GValue gvalue = G_VALUE_INIT; 
+ g_value_init (&gvalue, GST_TYPE_STRUCTURE); + + if (g_file_test (config_location, G_FILE_TEST_IS_REGULAR)) { + gchar *contents = NULL; + GError *error = NULL; + + if (!g_file_get_contents (config_location, &contents, NULL, &error)) { + GST_WARNING ("Failed to get config file contents, error: %s!", + GST_STR_NULL (error->message)); + g_clear_error (&error); + return FALSE; + } + + // Remove trailing space and replace new lines with a coma delimeter. + contents = g_strstrip (contents); + contents = g_strdelimit (contents, "\n", ','); + + rc = gst_value_deserialize (&gvalue, contents); + g_free (contents); + + if (!rc) { + GST_WARNING ("Failed to deserialize config file contents!"); + return rc; + } + } else if (!gst_value_deserialize (&gvalue, config_location)) { + GST_WARNING ("Failed to deserialize the config!"); + return FALSE; + } + + structure = GST_STRUCTURE (g_value_dup_boxed (&gvalue)); + g_value_unset (&gvalue); + + gint value = 0; + gdouble dvalue = 0.0; + gboolean bvalue = false; + + if (gst_structure_get_enum (structure, "input_format", + GST_TYPE_MLE_INPUT_FORMAT, &value)) + configuration.input_format = value; + + if (gst_structure_get_double (structure, "BlueMean", &dvalue)) + configuration.blue_mean = dvalue; + + if (gst_structure_get_double (structure, "BlueSigma", &dvalue)) + configuration.blue_sigma = dvalue; + + if (gst_structure_get_double (structure, "GreenMean", &dvalue)) + configuration.green_mean = dvalue; + + if (gst_structure_get_double (structure, "GreenSigma", &dvalue)) + configuration.green_sigma = dvalue; + + if (gst_structure_get_double (structure, "RedMean", &dvalue)) + configuration.red_mean = dvalue; + + if (gst_structure_get_double (structure, "RedSigma", &dvalue)) + configuration.red_sigma = dvalue; + + if (gst_structure_get_boolean (structure, "UseNorm", &bvalue)) + configuration.use_norm = dvalue; + + if (gst_structure_get_int (structure, "x_axis", &value)) + configuration.x_axis = value; + + if (gst_structure_get_int 
(structure, "y_axis", &value)) + configuration.y_axis = value; + + if (gst_structure_get_int (structure, "width", &value)) + configuration.width = value; + + if (gst_structure_get_int (structure, "height", &value)) + configuration.height = value; + + if (gst_structure_get_enum (structure, "preprocess_type", + GST_TYPE_MLE_PREPROCESSING_MODE, &value)) + configuration.preprocess_mode = value; + + if (gst_structure_get_enum (structure, "preprocess_accel", + GST_TYPE_MLE_PREPROCESSING_ACCEL, &value)) + configuration.preprocess_accel = value; + + if (gst_structure_get_double (structure, "confidence_threshold", &dvalue)) + configuration.conf_threshold = dvalue; + + if (gst_structure_get_double (structure, "nms_threshold", &dvalue)) + configuration.nms_threshold = dvalue; + + if (gst_structure_get_int (structure, "max_detection_result", &value)) + configuration.max_detection_result = value; + + if (gst_structure_get_int (structure, "num_threads", &value)) + configuration.number_of_threads = value; + + if (gst_structure_get_int (structure, "runtime", &value)) + configuration.runtime = value; + + + configuration.model_file = gst_structure_get_string (structure, "model"); + configuration.labels_file = gst_structure_get_string (structure, "labels"); + + const GValue *gtempvalue = + gst_structure_get_value (structure, "output_layers"); + if (gtempvalue != NULL && G_VALUE_HOLDS (gtempvalue, GST_TYPE_ARRAY)) { + guint num = 0; + + for (num = 0; num < gst_value_array_get_size (gtempvalue); num++) { + const GValue *val = gst_value_array_get_value (gtempvalue, num); + std::string str = g_value_get_string (val); + configuration.output_layers.push_back(str); + } + } + + gst_structure_free (structure); + + return rc; +} + +static void +gst_mle_print_config(GstMLESNPE *mle, + mle::MLConfig &configuration, + gchar *postprocessing) +{ + GST_DEBUG_OBJECT(mle, "==== Configuration Begin ===="); + GST_DEBUG_OBJECT(mle, "Model %s", configuration.model_file.c_str()); + GST_DEBUG_OBJECT(mle, 
"Labels %s", configuration.labels_file.c_str()); + GST_DEBUG_OBJECT(mle, "Pre-processing %d", + configuration.preprocess_mode); + GST_DEBUG_OBJECT(mle, "Pre-processing accelerator %d", + configuration.preprocess_accel); + GST_DEBUG_OBJECT(mle, "Mean(B,G,R): %f, %f, %f", configuration.blue_mean, + configuration.green_mean, + configuration.red_mean); + GST_DEBUG_OBJECT(mle, "Sigma(B,G,R): %f, %f, %f", configuration.blue_sigma, + configuration.green_sigma, + configuration.red_sigma); + GST_DEBUG_OBJECT(mle, "Confidence threshold %f", + configuration.conf_threshold); + GST_DEBUG_OBJECT(mle, "Input format %d", configuration.input_format); + GST_DEBUG_OBJECT(mle, "Runtime %d", configuration.runtime); + for (guint i = 0; i < configuration.output_layers.size(); i++) { + GST_DEBUG_OBJECT(mle, "Output layers[%d] %s", i, + configuration.output_layers[i].c_str()); + } + GST_DEBUG_OBJECT(mle, "Post-processing %s", postprocessing); + GST_DEBUG_OBJECT(mle, "Detection region coordinate %d, %d, %d %d", configuration.x_axis, + configuration.y_axis, + configuration.width, + configuration.height); + GST_DEBUG_OBJECT(mle, "==== Configuration End ===="); +} + +static void +gst_mle_parse_snpe_layers(gchar *src, std::vector &dst) +{ + gchar *pch; + gchar *saveptr; + + if (src) { + pch = strtok_r(src, " ,", &saveptr); + while (pch != NULL) { + dst.push_back(pch); + pch = strtok_r(NULL, " ,", &saveptr); + } + } +} + +static gboolean +gst_mle_create_engine(GstMLESNPE *mle) { + gboolean rc = TRUE; + gboolean parse = TRUE; + + // Configuration structure for MLE + // The order of priority is: default values < configuration file < property + mle::MLConfig configuration {}; + + // Set default configuration values + configuration.blue_mean = configuration.green_mean = configuration.red_mean = + DEFAULT_PROP_MLE_MEAN_VALUE; + configuration.blue_sigma = configuration.green_sigma = + configuration.red_sigma = DEFAULT_PROP_MLE_SIGMA_VALUE; + configuration.x_axis = configuration.y_axis = + 
configuration.width = configuration.height = DEFAULT_PROP_MLE_DETECTION_BOX; + configuration.input_format = mle->input_format; + configuration.use_norm = false; + configuration.runtime = mle->runtime; + configuration.preprocess_mode = mle->preprocessing_type; + configuration.preprocess_accel = mle->preprocess_accel; + configuration.conf_threshold = DEFAULT_PROP_MLE_CONF_THRESHOLD; + configuration.io_type = mle::NetworkIO::kUserBuffer; + + // Set configuration values from config file + if (mle->config_location) { + parse = gst_mle_snpe_parse_config(mle->config_location, configuration); + if (FALSE == parse) { + GST_DEBUG_OBJECT(mle, "Parsing configuration failed."); + } else { + GST_DEBUG_OBJECT(mle, "Parsing from file is successful!"); + } + } + + // Set configuration values only if property is set + if (gst_mle_check_is_set(mle->property_mask, PROP_MLE_MODEL_FILENAME)) { + configuration.model_file = mle->model_filename; + } + if (gst_mle_check_is_set(mle->property_mask, PROP_MLE_LABELS_FILENAME)) { + configuration.labels_file = mle->labels_filename; + } + if (gst_mle_check_is_set(mle->property_mask, PROP_MLE_CONF_THRESHOLD)) { + configuration.conf_threshold = mle->conf_threshold; + } + if (gst_mle_check_is_set(mle->property_mask, PROP_SNPE_INPUT_FORMAT)) { + configuration.input_format = mle->input_format; + } + if (gst_mle_check_is_set(mle->property_mask, PROP_MLE_MEAN_VALUES)) { + configuration.blue_mean = mle->blue_mean; + configuration.green_mean = mle->green_mean; + configuration.red_mean = mle->red_mean; + } + if (gst_mle_check_is_set(mle->property_mask, PROP_MLE_SIGMA_VALUES)) { + configuration.blue_sigma = mle->blue_sigma; + configuration.green_sigma = mle->green_sigma; + configuration.red_sigma = mle->red_sigma; + + //set normalization flag + configuration.use_norm = true; + } + if (gst_mle_check_is_set(mle->property_mask, PROP_SNPE_RUNTIME)) { + configuration.runtime = mle->runtime; + } + if (gst_mle_check_is_set(mle->property_mask, 
PROP_MLE_PREPROCESSING_TYPE)) { + configuration.preprocess_mode = mle->preprocessing_type; + } + + if (gst_mle_check_is_set(mle->property_mask, PROP_MLE_PREPROCESSING_ACCEL)) { + configuration.preprocess_accel = mle->preprocess_accel; + } + if (gst_mle_check_is_set(mle->property_mask, PROP_SNPE_OUTPUT_LAYERS)) { + configuration.output_layers.clear(); + } + gst_mle_parse_snpe_layers(mle->output_layers, configuration.output_layers); + + gst_mle_print_config(mle, configuration, mle->postprocessing); + + if (!g_strcmp0(mle->postprocessing, "classification")) { + mle->engine = new mle::SNPEBase(configuration); + if (nullptr == mle->engine) { + GST_ERROR_OBJECT (mle, "Failed to create SNPE instance."); + rc = FALSE; + } + } else if (!g_strcmp0(mle->postprocessing, "detection")) { + mle->engine = new mle::SNPEDetection(configuration); + if (nullptr == mle->engine) { + GST_ERROR_OBJECT (mle, "Failed to create SNPE instance."); + rc = FALSE; + } + } else if (!g_strcmp0(mle->postprocessing, "yolov3detection")) { + mle->engine = new mle::SNPEYoloDetection(configuration); + if (nullptr == mle->engine) { + GST_ERROR_OBJECT (mle, "Failed to create yolov3 SNPE instance."); + rc = FALSE; + } + } else if (!g_strcmp0(mle->postprocessing, "yolov5detection")) { + mle->engine = new mle::SNPEYoloDetection(configuration); + if (nullptr == mle->engine) { + GST_ERROR_OBJECT (mle, "Failed to create yolov5 SNPE instance."); + rc = FALSE; + } + } else { + GST_ERROR_OBJECT (mle, "Unsupported SNPE postprocessing."); + rc = FALSE; + } + + return rc; +} + +static mle::MLEImageFormat +gst_mle_get_video_format(GstVideoFormat &format) +{ + mle::MLEImageFormat mle_format = mle::MLEImageFormat::mle_format_invalid; + switch (format) { + case GST_VIDEO_FORMAT_NV12: + mle_format = mle::MLEImageFormat::mle_format_nv12; + break; + case GST_VIDEO_FORMAT_NV21: + mle_format = mle::MLEImageFormat::mle_format_nv21; + break; + default: + mle_format = mle::MLEImageFormat::mle_format_invalid; + } + return 
mle_format; +} + +static gboolean +gst_mle_snpe_set_info(GstVideoFilter *filter, GstCaps *in, + GstVideoInfo *ininfo, GstCaps *out, + GstVideoInfo *outinfo) +{ + GST_MLE_UNUSED(in); + GST_MLE_UNUSED(out); + GST_MLE_UNUSED(outinfo); + + gboolean rc = TRUE; + GstMLESNPE *mle = GST_MLE_SNPE (filter); + GstVideoFormat video_format = GST_VIDEO_INFO_FORMAT(ininfo); + + if (mle->engine && mle->is_init) { + if ((gint)mle->source_info.width != GST_VIDEO_INFO_WIDTH(ininfo) || + (gint)mle->source_info.height != GST_VIDEO_INFO_HEIGHT(ininfo) || + mle->source_info.format != gst_mle_get_video_format(video_format)) { + GST_DEBUG_OBJECT(mle, "Reinitializing due to source change."); + mle->engine->Deinit(); + delete (mle->engine); + mle->engine = nullptr; + mle->is_init = FALSE; + } else { + GST_DEBUG_OBJECT(mle, "Already initialized."); + return TRUE; + } + } + + gst_base_transform_set_passthrough (GST_BASE_TRANSFORM (filter), FALSE); + + mle->source_info.width = GST_VIDEO_INFO_WIDTH(ininfo); + mle->source_info.height = GST_VIDEO_INFO_HEIGHT(ininfo); + mle->source_info.format = gst_mle_get_video_format(video_format); + if (mle->source_info.format != mle::MLEImageFormat::mle_format_nv12 && + mle->source_info.format != mle::MLEImageFormat::mle_format_nv21) { + GST_ERROR_OBJECT (mle, "Video format not supported %d", video_format); + return FALSE; + } + + rc = gst_mle_create_engine(mle); + if (FALSE == rc) { + GST_ERROR_OBJECT (mle, "Failed to create MLE instance."); + return rc; + } + + gint ret = mle->engine->Init(&mle->source_info); + if (ret) { + GST_ERROR_OBJECT (mle, "MLE init failed."); + delete (mle->engine); + mle->engine = nullptr; + rc = FALSE; + } else { + GST_DEBUG_OBJECT (mle, "MLE instance created addr %p", mle->engine); + mle->is_init = TRUE; + } + + return rc; +} + +static GstFlowReturn gst_mle_snpe_transform_frame_ip(GstVideoFilter * filter, + GstVideoFrame * frame) +{ + GstMLESNPE *mle = GST_MLE_SNPE (filter); + gint ret = mle->engine->Process(frame); + if (ret) { + 
GST_ERROR_OBJECT (mle, "MLE Process failed."); + return GST_FLOW_ERROR; + } + + return GST_FLOW_OK; +} + +static void +gst_mle_snpe_class_init (GstMLESNPEClass * klass) +{ + GObjectClass *gobject = G_OBJECT_CLASS (klass); + GstElementClass *element = GST_ELEMENT_CLASS (klass); + GstVideoFilterClass *filter = GST_VIDEO_FILTER_CLASS (klass); + + gobject->set_property = GST_DEBUG_FUNCPTR(gst_mle_snpe_set_property); + gobject->get_property = GST_DEBUG_FUNCPTR(gst_mle_snpe_get_property); + gobject->finalize = GST_DEBUG_FUNCPTR(gst_mle_snpe_finalize); + + g_object_class_install_property( + gobject, + PROP_MLE_PARSE_CONFIG, + g_param_spec_string( + "config", + "Path to config file", + "Path to config file. Eg.: /data/misc/camera/mle_snpe.config", + NULL, + static_cast(G_PARAM_READWRITE | + G_PARAM_STATIC_STRINGS ))); + + g_object_class_install_property( + gobject, + PROP_MLE_MODEL_FILENAME, + g_param_spec_string( + "model", + "Model file", + "Path to model file. Eg.: /data/misc/camera/model.dlc", + NULL, + static_cast(G_PARAM_READWRITE | + G_PARAM_STATIC_STRINGS))); + + g_object_class_install_property( + gobject, + PROP_MLE_LABELS_FILENAME, + g_param_spec_string( + "labels", + "Labels filename", + "Path to labels file. 
Eg.: /data/misc/camera/labels.txt", + NULL, + static_cast(G_PARAM_READWRITE | + G_PARAM_STATIC_STRINGS))); + + g_object_class_install_property( + gobject, + PROP_SNPE_INPUT_FORMAT, + g_param_spec_enum( + "input-format", + "Input format", + "Select input format", + GST_TYPE_MLE_INPUT_FORMAT, + DEFAULT_PROP_SNPE_INPUT_FORMAT, + static_cast(G_PARAM_READWRITE | + G_PARAM_STATIC_STRINGS))); + + g_object_class_install_property( + gobject, + PROP_MLE_POSTPROCESSING, + g_param_spec_string( + "postprocessing", + "Postprocessing", + "Supported Postprocessing: classification; detection; singlessd;" + " segmentation", + NULL, + static_cast(G_PARAM_READWRITE | + G_PARAM_STATIC_STRINGS))); + + g_object_class_install_property (gobject, PROP_MLE_MEAN_VALUES, + gst_param_spec_array ("mean", "Mean Subtraction", + "Channel Mean Subtraction values ('')", + g_param_spec_double ("value", "Mean Value", + "One of B, G or R value.", 0, 255, + DEFAULT_PROP_MLE_MEAN_VALUE, + static_cast(G_PARAM_READWRITE | + G_PARAM_STATIC_STRINGS)), + static_cast(G_PARAM_READWRITE | + G_PARAM_STATIC_STRINGS))); + + g_object_class_install_property (gobject, PROP_MLE_SIGMA_VALUES, + gst_param_spec_array ("sigma", "Sigma values", + "Channel divisor values ('')", + g_param_spec_double ("value", "Sigma Value", + "One of B, G or R divisors value.", 0, 255, + DEFAULT_PROP_MLE_SIGMA_VALUE, + static_cast(G_PARAM_READWRITE | + G_PARAM_STATIC_STRINGS)), + static_cast(G_PARAM_READWRITE | + G_PARAM_STATIC_STRINGS))); + + g_object_class_install_property( + gobject, + PROP_SNPE_RUNTIME, + g_param_spec_enum( + "runtime", + "SNPE Runtime", + "Select runtime", + GST_TYPE_MLE_SNPE_RUNTIME_TYPE, + DEFAULT_PROP_SNPE_RUNTIME, + static_cast(G_PARAM_READWRITE | + G_PARAM_STATIC_STRINGS))); + + g_object_class_install_property( + gobject, + PROP_SNPE_OUTPUT_LAYERS, + g_param_spec_string( + "output-layers", + "SNPE output layers", + "Model output layers, comma separated." 
+ "Should be set if model have more than one output", + NULL, + static_cast<GParamFlags>(G_PARAM_READWRITE | + G_PARAM_STATIC_STRINGS))); + + g_object_class_install_property( + gobject, + PROP_MLE_PREPROCESSING_TYPE, + g_param_spec_enum( + "preprocess-type", + "Preprocess type", + "Select preprocess type", + GST_TYPE_MLE_PREPROCESSING_MODE, + DEFAULT_PROP_MLE_PREPROCESSING_TYPE, + static_cast<GParamFlags>(G_PARAM_READWRITE | + G_PARAM_STATIC_STRINGS))); + + g_object_class_install_property( + gobject, + PROP_MLE_PREPROCESSING_ACCEL, + g_param_spec_enum( + "preprocess-accel", + "Preprocessing accelerator", + "Select FastCV preprocessing accelerator", + GST_TYPE_MLE_PREPROCESSING_ACCEL, + DEFAULT_PROP_MLE_PREPROCESS_ACCEL, + static_cast<GParamFlags>(G_PARAM_READWRITE | + G_PARAM_STATIC_STRINGS))); + + g_object_class_install_property( + gobject, + PROP_MLE_CONF_THRESHOLD, + g_param_spec_float( + "confidence-threshold", + "Confidence Threshold", + "Confidence Threshold value", + 0.0, + 1.0, + DEFAULT_PROP_MLE_CONF_THRESHOLD, + static_cast<GParamFlags>(G_PARAM_READWRITE | + G_PARAM_STATIC_STRINGS))); + + gst_element_class_set_static_metadata( + element, "MLE SNPE", "Execute SNPE NN models", + "Pre-process, execute NN model, post-process", "QTI"); + + gst_element_class_add_pad_template(element, + gst_mle_sink_template()); + gst_element_class_add_pad_template(element, + gst_mle_src_template()); + + filter->set_info = GST_DEBUG_FUNCPTR (gst_mle_snpe_set_info); + filter->transform_frame_ip = + GST_DEBUG_FUNCPTR (gst_mle_snpe_transform_frame_ip); +} + +static void +gst_mle_snpe_init (GstMLESNPE * mle) +{ + mle->engine = nullptr; + mle->config_location = nullptr; + mle->is_init = FALSE; + mle->input_format = DEFAULT_PROP_SNPE_INPUT_FORMAT; + mle->blue_mean = mle->green_mean = mle->red_mean = + DEFAULT_PROP_MLE_MEAN_VALUE; + mle->blue_sigma = mle->green_sigma = mle->red_sigma = + DEFAULT_PROP_MLE_SIGMA_VALUE; + mle->output_layers = nullptr; + mle->runtime = DEFAULT_PROP_SNPE_RUNTIME; + mle->preprocessing_type = 
DEFAULT_PROP_MLE_PREPROCESSING_TYPE; + mle->preprocess_accel = DEFAULT_PROP_MLE_PREPROCESS_ACCEL; + mle->conf_threshold = DEFAULT_PROP_MLE_CONF_THRESHOLD; + + GST_DEBUG_CATEGORY_INIT (mle_snpe_debug, "qtimlesnpe", 0, + "QTI Machine Learning Engine"); +} + +static gboolean +plugin_init (GstPlugin * plugin) +{ + return gst_element_register (plugin, "qtimlesnpe", GST_RANK_PRIMARY, + GST_TYPE_MLE_SNPE); +} + +GST_PLUGIN_DEFINE ( + GST_VERSION_MAJOR, + GST_VERSION_MINOR, + qtimlesnpe, + "Machine Learning Engine SNPE", + plugin_init, + PACKAGE_VERSION, + PACKAGE_LICENSE, + PACKAGE_SUMMARY, + PACKAGE_ORIGIN +) diff --git a/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_gst_snpe/mle_snpe.h b/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_gst_snpe/mle_snpe.h new file mode 100644 index 0000000..9f730e7 --- /dev/null +++ b/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_gst_snpe/mle_snpe.h @@ -0,0 +1,95 @@ +/* +* Copyright (c) 2020, The Linux Foundation. All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions are +* met: +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials provided +* with the distribution. +* * Neither the name of The Linux Foundation nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. 
+* +* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED +* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT +* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS +* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef __GST_MLE_SNPE_H__ +#define __GST_MLE_SNPE_H__ + +#include +#include +#include +#include +#include +#include "mle_engine/ml_engine_intf.h" + +G_BEGIN_DECLS + +#define GST_TYPE_MLE_SNPE \ + (gst_mle_snpe_get_type()) +#define GST_MLE_SNPE(obj) \ + (G_TYPE_CHECK_INSTANCE_CAST((obj),GST_TYPE_MLE_SNPE,GstMLESNPE)) +#define GST_MLE_SNPE_CLASS(klass) \ + (G_TYPE_CHECK_CLASS_CAST((klass),GST_TYPE_MLE_SNPE,GstMLESNPEClass)) +#define GST_IS_MLE_SNPE(obj) \ + (G_TYPE_CHECK_INSTANCE_TYPE((obj),GST_TYPE_MLE_SNPE)) +#define GST_IS_MLE_SNPE_CLASS(klass) \ + (G_TYPE_CHECK_CLASS_TYPE((klass),GST_TYPE_MLE_SNPE)) +#define GST_MLE_SNPE_CAST(obj) ((GstMLESNPE *)(obj)) + +typedef struct _GstMLESNPE GstMLESNPE; +typedef struct _GstMLESNPEClass GstMLESNPEClass; + +struct _GstMLESNPE { + GstVideoFilter parent; + + mle::MLEInputParams source_info; + mle::MLEngine* engine; + gboolean is_init; + guint property_mask; + + gchar *config_location; + gchar *model_filename; + gchar *labels_filename; + gchar *postprocessing; + guint input_format; + gfloat blue_mean; + gfloat green_mean; + gfloat red_mean; + gfloat blue_sigma; + gfloat green_sigma; + gfloat red_sigma; + guint x_axis; + guint y_axis; + guint width; + guint height; + 
guint runtime; + gchar *output_layers; + guint preprocess_accel; + guint preprocessing_type; + gfloat conf_threshold; +}; + +struct _GstMLESNPEClass { + GstVideoFilterClass parent; +}; + +G_GNUC_INTERNAL GType gst_mle_snpe_get_type(void); + +G_END_DECLS + +#endif // __GST_MLE_SNPE_H__ diff --git a/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_gst_snpe/mle_snpeyolov5n.config b/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_gst_snpe/mle_snpeyolov5n.config new file mode 100644 index 0000000..b68e4ad --- /dev/null +++ b/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_gst_snpe/mle_snpeyolov5n.config @@ -0,0 +1,17 @@ +org.codeaurora.mle.snpe +input_format = 3 +BlueMean = 0.0 +GreenMean = 0.0 +RedMean = 0.0 +BlueSigma = 255.0 +GreenSigma = 255.0 +RedSigma = 255.0 +UseNorm = true +preprocess_type = 1 +confidence_threshold = 0.4 +nms_threshold = 0.5 +max_detection_result = 10 +output_layers = < "Conv_296", "Conv_247", "Conv_198" > +runtime = 1 +model = "/data/misc/camera/yolov5n.dlc" +labels = "/data/misc/camera/coco_labels.txt" diff --git a/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/include/gst/video/c2d-video-converter.h b/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/include/gst/video/c2d-video-converter.h new file mode 100644 index 0000000..6a63e25 --- /dev/null +++ b/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/include/gst/video/c2d-video-converter.h @@ -0,0 +1,210 @@ +/* + * Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions are + * met: + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
+ * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. + * * Neither the name of The Linux Foundation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT + * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS + * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR + * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF + * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR + * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, + * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE + * OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + * Changes from Qualcomm Innovation Center are provided under the following license: + * + * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted (subject to the limitations in the + * disclaimer below) provided that the following conditions are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * + * * Redistributions in binary form must reproduce the above + * copyright notice, this list of conditions and the following + * disclaimer in the documentation and/or other materials provided + * with the distribution. 
+ * + * * Neither the name of Qualcomm Innovation Center, Inc. nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE + * GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT + * HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED + * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. + * IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR + * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL + * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE + * GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS + * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER + * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR + * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN + * IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef __GST_C2D_VIDEO_CONVERTER_H__ +#define __GST_C2D_VIDEO_CONVERTER_H__ + +#include +#include + +G_BEGIN_DECLS + +typedef struct _GstC2dRequest GstC2dRequest; + +/** + * GST_C2D_VIDEO_CONVERTER_OPT_SRC_RECTANGLES + * + * #GST_TYPE_ARRAY: Array of source rectangles. + * Default: NULL + * + * Not applicable for output. + */ +#define GST_C2D_VIDEO_CONVERTER_OPT_SRC_RECTANGLES \ + "GstC2dVideoConverter.source-rectangles" + +/** + * GST_C2D_VIDEO_CONVERTER_OPT_DEST_RECTANGLES + * + * #GST_TYPE_ARRAY: Array of destination rectangles. + * Default: NULL + * + * Not applicable for output. 
+ */ +#define GST_C2D_VIDEO_CONVERTER_OPT_DEST_RECTANGLES \ + "GstC2dVideoConverter.destination-rectangles" + +/** + * GST_C2D_VIDEO_CONVERTER_OPT_FLIP_HORIZONTAL: + * + * #G_TYPE_BOOLEAN, flip output horizontally + * Default: FALSE + * + * Not applicable for output + */ +#define GST_C2D_VIDEO_CONVERTER_OPT_FLIP_HORIZONTAL \ + "GstC2dVideoConverter.flip-horizontal" + +/** + * GST_C2D_VIDEO_CONVERTER_OPT_FLIP_VERTICAL: + * + * #G_TYPE_BOOLEAN, flip output vertically + * Default: FALSE + * + * Not applicable for output + */ +#define GST_C2D_VIDEO_CONVERTER_OPT_FLIP_VERTICAL \ + "GstC2dVideoConverter.flip-vertical" + +/** + * GstC2dVideoRotate: + * @GST_C2D_VIDEO_ROTATE_NONE: disable rotation of the output + * @GST_C2D_VIDEO_ROTATE_90_CW: rotate output 90 degrees clockwise + * @GST_C2D_VIDEO_ROTATE_90_CCW: rotate output 90 degrees counter-clockwise + * @GST_C2D_VIDEO_ROTATE_180: rotate output 180 degrees + * + * Different output rotation modes + */ +typedef enum { + GST_C2D_VIDEO_ROTATE_NONE, + GST_C2D_VIDEO_ROTATE_90_CW, + GST_C2D_VIDEO_ROTATE_90_CCW, + GST_C2D_VIDEO_ROTATE_180, +} GstC2dVideoRotate; + +GST_VIDEO_API GType gst_c2d_video_rotation_get_type (void); +#define GST_TYPE_C2D_VIDEO_ROTATION (gst_c2d_video_rotation_get_type()) + +/** + * GST_C2D_VIDEO_CONVERTER_OPT_ROTATION: + * + * #GST_TYPE_C2D_VIDEO_ROTATION, set the output rotation flags + * Default: #GST_C2D_VIDEO_ROTATE_NONE. 
+ * + * Not applicable for output + */ +#define GST_C2D_VIDEO_CONVERTER_OPT_ROTATION \ + "GstC2dVideoConverter.rotation" + +/** + * GST_C2D_VIDEO_CONVERTER_OPT_ALPHA: + * + * #G_TYPE_DOUBLE, alpha channel occupancy + * Default: 1.0 + * + * Not applicable for output + */ +#define GST_C2D_VIDEO_CONVERTER_OPT_ALPHA \ + "GstC2dVideoConverter.alpha" + +/** + * GST_C2D_VIDEO_CONVERTER_OPT_BACKGROUND: + * + * #G_TYPE_UINT, background color + * Default: 0x00000000 + * + * Not applicable for input + */ +#define GST_C2D_VIDEO_CONVERTER_OPT_BACKGROUND \ + "GstC2dVideoConverter.background" + +/** + * GST_C2D_VIDEO_CONVERTER_OPT_UBWC_FORMAT: + * + * #G_TYPE_BOOLEAN, whether buffers have UBWC (Universal Bandwidth Compression) + * Default: FALSE + */ +#define GST_C2D_VIDEO_CONVERTER_OPT_UBWC_FORMAT \ + "GstC2dVideoConverter.ubwc-format" + +typedef struct _GstC2dVideoConverter GstC2dVideoConverter; + +GST_VIDEO_API GstC2dVideoConverter * +gst_c2d_video_converter_new (void); + +GST_VIDEO_API void +gst_c2d_video_converter_free (GstC2dVideoConverter *convert); + +GST_VIDEO_API gboolean +gst_c2d_video_converter_set_input_opts (GstC2dVideoConverter *convert, + guint index, GstStructure *opts); + +GST_VIDEO_API gboolean +gst_c2d_video_converter_set_output_opts (GstC2dVideoConverter *convert, + GstStructure *opts); + +GST_VIDEO_API gpointer +gst_c2d_video_converter_submit_request (GstC2dVideoConverter *convert, + const GstVideoFrame *inframes, + guint n_inputs, + GstVideoFrame *outframe); + +GST_VIDEO_API gboolean +gst_c2d_video_converter_wait_request (GstC2dVideoConverter *convert, + gpointer request_id); + +GST_VIDEO_API void +gst_c2d_video_converter_flush (GstC2dVideoConverter *convert); + +G_END_DECLS + +#endif /* __GST_C2D_VIDEO_CONVERTER_H__ */ diff --git a/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/include/gst/video/gstimagepool.h b/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/include/gst/video/gstimagepool.h new file mode 100644 index 
0000000..6ab2a55 --- /dev/null +++ b/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/include/gst/video/gstimagepool.h @@ -0,0 +1,88 @@ +/* +* Copyright (c) 2019-2020, The Linux Foundation. All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions are +* met: +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials provided +* with the distribution. +* * Neither the name of The Linux Foundation nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED +* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT +* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS +* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+*/ + +#ifndef __GST_IMAGE_POOL_H__ +#define __GST_IMAGE_POOL_H__ + +#include +#include + +G_BEGIN_DECLS + +#define GST_TYPE_IMAGE_BUFFER_POOL \ + (gst_image_buffer_pool_get_type ()) +#define GST_IMAGE_BUFFER_POOL(obj) \ + (G_TYPE_CHECK_INSTANCE_CAST ((obj), GST_TYPE_IMAGE_BUFFER_POOL, \ + GstImageBufferPool)) +#define GST_IMAGE_BUFFER_POOL_CLASS(klass) \ + (G_TYPE_CHECK_CLASS_CAST ((klass), GST_TYPE_IMAGE_BUFFER_POOL, \ + GstImageBufferPoolClass)) +#define GST_IS_IMAGE_BUFFER_POOL(obj) \ + (G_TYPE_CHECK_INSTANCE_TYPE ((obj), GST_TYPE_IMAGE_BUFFER_POOL)) +#define GST_IS_IMAGE_BUFFER_POOL_CLASS(klass) \ + (G_TYPE_CHECK_CLASS_TYPE ((klass), GST_TYPE_IMAGE_BUFFER_POOL)) +#define GST_IMAGE_BUFFER_POOL_CAST(obj) ((GstImageBufferPool*)(obj)) + +/** + * GST_IMAGE_BUFFER_POOL_OPTION_UBWC_MODE: + * + * An option indicating that the allocated buffer must be UBWC. + */ +#define GST_IMAGE_BUFFER_POOL_OPTION_UBWC_MODE "GstBufferPoolOptionUBWCMode" + +typedef struct _GstImageBufferPool GstImageBufferPool; +typedef struct _GstImageBufferPoolClass GstImageBufferPoolClass; +typedef struct _GstImageBufferPoolPrivate GstImageBufferPoolPrivate; + +#define GST_IMAGE_BUFFER_POOL_TYPE_ION "GstBufferPoolTypeIonMemory" +#define GST_IMAGE_BUFFER_POOL_TYPE_GBM "GstBufferPoolTypeGbmMemory" + +struct _GstImageBufferPool +{ + GstBufferPool parent; + + GstImageBufferPoolPrivate *priv; +}; + +struct _GstImageBufferPoolClass +{ + GstBufferPoolClass parent; +}; + +GType gst_image_buffer_pool_get_type (void); + +/// Creates a buffer pool for managing video frames. +GstBufferPool * gst_image_buffer_pool_new (const gchar * type); + +/// Retrieve current set video configuration. 
+const GstVideoInfo * gst_image_buffer_pool_get_info (GstBufferPool * pool); + +G_END_DECLS + +#endif /* __GST_IMAGE_POOL_H__ */ diff --git a/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/include/ml-meta/ml_meta.h b/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/include/ml-meta/ml_meta.h new file mode 100644 index 0000000..a444ae6 --- /dev/null +++ b/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/include/ml-meta/ml_meta.h @@ -0,0 +1,365 @@ +/* +* Copyright (c) 2020-2021, The Linux Foundation. All rights reserved. +* +* Redistribution and use in source and binary forms, with or without +* modification, are permitted provided that the following conditions are +* met: +* * Redistributions of source code must retain the above copyright +* notice, this list of conditions and the following disclaimer. +* * Redistributions in binary form must reproduce the above +* copyright notice, this list of conditions and the following +* disclaimer in the documentation and/or other materials provided +* with the distribution. +* * Neither the name of The Linux Foundation nor the names of its +* contributors may be used to endorse or promote products derived +* from this software without specific prior written permission. +* +* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED +* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF +* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT +* ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS +* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR +* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, +* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE +* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +*/ + +#ifndef __GST_ML_META_H__ +#define __GST_ML_META_H__ + +#include +#include + +G_BEGIN_DECLS + +typedef struct _GstMLClassificationResult GstMLClassificationResult; +typedef struct _GstMLBoundingBox GstMLBoundingBox; +typedef struct _GstMLDetectionMeta GstMLDetectionMeta; +typedef struct _GstMLSegmentationMeta GstMLSegmentationMeta; +typedef struct _GstMLClassificationMeta GstMLClassificationMeta; + +typedef struct _GstMLKeyPoint GstMLKeyPoint; +typedef struct _GstMLPose GstMLPose; +typedef struct _GstMLPoseNetMeta GstMLPoseNetMeta; + +typedef struct _GstCvpMotionVector GstCvpMotionVector; +typedef struct _GstCvpOpticalFlowMeta GstCvpOpticalFlowMeta; + +#define GST_ML_DETECTION_API_TYPE (gst_ml_detection_get_type()) +#define GST_ML_DETECTION_INFO (gst_ml_detection_get_info()) + +#define GST_ML_SEGMENTATION_API_TYPE (gst_ml_segmentation_get_type()) +#define GST_ML_SEGMENTATION_INFO (gst_ml_segmentation_get_info()) + +#define GST_ML_CLASSIFICATION_API_TYPE (gst_ml_classification_get_type()) +#define GST_ML_CLASSIFICATION_INFO (gst_ml_classification_get_info()) + +#define GST_ML_POSENET_API_TYPE (gst_ml_posenet_get_type()) +#define GST_ML_POSENET_INFO (gst_ml_posenet_get_info()) + +#define GST_CVP_OPTCLFLOW_API_TYPE (gst_cvp_optclflow_get_type()) +#define GST_CVP_OPTCLFLOW_INFO (gst_cvp_optclflow_get_info()) + + +/** + * GstMLBoundingBox: + * @x: horizontal start position + * @y: vertical start position + * @width: active window width + 
* @height: active window height + + * Bounding box properties + */ +struct _GstMLBoundingBox { + guint x; + guint y; + guint width; + guint height; +}; + +/** + * GstMLClassificationResult: + * @name: name for given object + * @confidence: confidence for given object + * + * Name and confidence handle + */ +struct _GstMLClassificationResult { + gchar *name; + gfloat confidence; +}; + +/** + * GstMLDetectionMeta: + * @parent: parent #GstMeta + * @box: bounding box coordinates + * @box_info: list of GstMLClassificationResult which handle names and confidences + * + * Machine learning SSD models properties + */ +struct _GstMLDetectionMeta { + GstMeta parent; + GstMLBoundingBox bounding_box; + GSList *box_info; +}; + +/** + * GstMLSegmentationMeta: + * @parent: parent #GstMeta + * @img_buffer: pointer to segmentation image data + * @img_width: the segmentation image width in pixels + * @img_height: the segmentation image height in pixels + * @img_size: size of image buffer in bytes + * @img_format: the segmentation image pixel format + * @img_stride: the segmentation image bytes per line + * + * Machine learning segmentation image models properties + */ +struct _GstMLSegmentationMeta { + GstMeta parent; + gpointer img_buffer; + guint img_width; + guint img_height; + guint img_size; + GstVideoFormat img_format; + guint img_stride; +}; + +/** + * GstMLClassificationMeta: + * @parent: parent #GstMeta + * @result: name and confidence + * @location: location in frame of location is CUSTOM then x/y are considered + * @x: horizontal start position if location is CUSTOM + * @y: vertical start position if location is CUSTOM + * + * Machine learning classification models properties + */ +struct _GstMLClassificationMeta { + GstMeta parent; + GstMLClassificationResult result; +}; + +/** + * GstMLKeyPoints - PoseNet key points + */ +enum GstMLKeyPointsType{ + NOSE, + LEFT_EYE, + RIGHT_EYE, + LEFT_EAR, + RIGHT_EAR, + LEFT_SHOULDER, + RIGHT_SHOULDER, + LEFT_ELBOW, + RIGHT_ELBOW, + 
LEFT_WRIST, + RIGHT_WRIST, + LEFT_HIP, + RIGHT_HIP, + LEFT_KNEE, + RIGHT_KNEE, + LEFT_ANKLE, + RIGHT_ANKLE, + KEY_POINTS_COUNT +}; + +/** + * GstMLKeyPoint: + * @x: x coordinate + * @y: y coordinate + * @score: score of given pose + * + * Machine learning PoseNet poses + */ +struct _GstMLKeyPoint { + gint x; + gint y; + gfloat score; +}; + +/** + * GstMLPoseNetMeta: + * @parent: parent #GstMeta + * @points: array of key points coordinates and score. + * Key points order corresponds to GstMLKeyPointsType. + * @score: score of all poses + * + * Machine learning PoseNet models properties + */ +struct _GstMLPoseNetMeta { + GstMeta parent; + GstMLKeyPoint points[KEY_POINTS_COUNT]; + gfloat score; +}; + +/** + * GstCvpMotionVector: + * @x : x for motion vector + * @y : y for motion vector + * @confidence : confidence of the motion vector + * @variance : variance of the motion vector + * @mean : mean of the motion vector + * @bestsad : the best SAD (sum of absolute difference) of the motion vector + * @sad : SAD of the motion vector + * + * CVP Motion vector properties for optical flow + * variance, mean, best SAD, and SAD are filled only if stats is enable + */ +struct _GstCvpMotionVector { + gint x; + gint y; + gint confidence; + guint variance; + guint mean; + guint bestsad; + guint sad; +}; + +/** + * GstCvpOpticalFlowMeta: + * @parent: parent #GstMeta + * @mvectors: motion vector for each 8x8 block + * @n_vectors: motion vector count + * + * CVP Optical flow output properties + */ +struct _GstCvpOpticalFlowMeta { + GstMeta parent; + + GstCvpMotionVector *mvectors; + gint n_vectors; +}; + + +GType gst_ml_detection_get_type (void); +const GstMetaInfo * gst_ml_detection_get_info (void); +GType gst_ml_segmentation_get_type (void); +const GstMetaInfo * gst_ml_segmentation_get_info (void); +GType gst_ml_classification_get_type (void); +const GstMetaInfo * gst_ml_classification_get_info (void); +GType gst_ml_posenet_get_type (void); +const GstMetaInfo * 
gst_ml_posenet_get_info (void); +GType gst_cvp_optclflow_get_type (void); +const GstMetaInfo * gst_cvp_optclflow_get_info (void); + +/** + * gst_buffer_add_detection_meta: + * @buffer: the buffer new metadata belongs to + * + * Creates new bounding detection entry and returns pointer to new + * entry. Metadata payload is not input parameter in order to avoid + * unnecessary copy of data. + * + */ +GST_EXPORT +GstMLDetectionMeta * gst_buffer_add_detection_meta (GstBuffer * buffer); + +/** + * gst_buffer_get_detection_meta: + * @buffer: the buffer metadata comes from + * + * Returns list of bounding detection entries. List payload should be + * considered as GstMLDetectionMeta. Caller is supposed to free the list. + * + */ +GST_EXPORT +GSList * gst_buffer_get_detection_meta (GstBuffer * buffer); + +/** + * gst_buffer_add_segmentation_meta: + * @buffer: the buffer new metadata belongs to + * + * Creates new segmentation metadata entry and returns pointer to new + * entry. Metadata payload is not input parameter in order to avoid + * unnecessary copy of data. + * + */ +GST_EXPORT +GstMLSegmentationMeta * gst_buffer_add_segmentation_meta (GstBuffer * buffer); + +/** + * gst_buffer_get_segmentation_meta: + * @buffer: the buffer metadata comes from + * + * Returns list of segmentation metadata entries. List payload should be + * considered as GstMLSegmentationMeta. Caller is supposed to free the list. + * + */ +GST_EXPORT +GSList * gst_buffer_get_segmentation_meta (GstBuffer * buffer); + +/** + * gst_buffer_add_classification_meta: + * @buffer: the buffer new metadata belongs to + * + * Creates new classification metadata entry and returns pointer to new + * entry. Metadata payload is not input parameter in order to avoid + * unnecessary copy of data. 
+ * + */ +GST_EXPORT +GstMLClassificationMeta * gst_buffer_add_classification_meta (GstBuffer * buffer); + +/** + * gst_buffer_get_classification_meta: + * @buffer: the buffer metadata comes from + * + * Returns list of classification metadata entries. List payload should be + * considered as GstMLClassificationMeta. Caller is supposed to free the list. + * + */ +GST_EXPORT +GSList * gst_buffer_get_classification_meta (GstBuffer * buffer); + +/** + * gst_buffer_add_posenet_meta: + * @buffer: the buffer new metadata belongs to + * + * Creates new posenet metadata entry and returns pointer to new + * entry. Metadata payload is not input parameter in order to avoid + * unnecessary copy of data. + * + */ +GST_EXPORT +GstMLPoseNetMeta * gst_buffer_add_posenet_meta (GstBuffer * buffer); + +/** + * gst_buffer_get_posenet_meta: + * @buffer: the buffer metadata comes from + * + * Returns list of posenet metadata entries. List payload should be + * considered as GstMLPoseNetMeta. Caller is supposed to free the list. + * + */ +GST_EXPORT +GSList * gst_buffer_get_posenet_meta (GstBuffer * buffer); + +/** + * gst_buffer_add_optclflow_meta: + * @buffer: the buffer new meta belongs to + * + * Creates new optical flow meta entry and returns pointer to new + * entry. Metadata payload is not input parameter in order to avoid + * unnecessary copy of data. + * + */ +GST_EXPORT +GstCvpOpticalFlowMeta * gst_buffer_add_optclflow_meta (GstBuffer * buffer); + +/** + * gst_buffer_get_optclflow_meta: + * @buffer: the buffer metadata comes from + * + * Returns list of optical flow metadata entries. List payload should be + * considered as GstCvpOpticalFlowMeta. Caller is supposed to free the list. 
+ * + */ +GST_EXPORT +GSList * gst_buffer_get_optclflow_meta (GstBuffer * buffer); +G_END_DECLS + +#endif /* __GST_ML_META_H__ */ From 354e00d0d0ace593499a6a570e658f88defa9bf0 Mon Sep 17 00:00:00 2001 From: quic-khrahul <131336334+quic-khrahul@users.noreply.github.com> Date: Fri, 28 Apr 2023 13:54:55 +0530 Subject: [PATCH 2/4] Create LICENSE Signed-off-by: quic-khrahul <131336334+quic-khrahul@users.noreply.github.com> --- .../People-Intrusion-Detection/LICENSE | 28 +++++++++++++++++++ 1 file changed, 28 insertions(+) create mode 100644 RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/LICENSE diff --git a/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/LICENSE b/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/LICENSE new file mode 100644 index 0000000..3183457 --- /dev/null +++ b/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/LICENSE @@ -0,0 +1,28 @@ +Copyright (c) 2023 Qualcomm Innovation Center, Inc. All Rights Reserved. + +Redistribution and use in source and binary forms, with or without modification, are permitted +provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this list of conditions + and the following disclaimer. +* Redistributions in binary form must reproduce the above copyright notice, this list of + conditions and the following disclaimer in the documentation and/or other materials + provided with the distribution. +* Neither the name of the copyright holder nor the names of its contributors may be used to + endorse or promote products derived from this software without specific prior written + permission. + +NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS +LICENSE. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS +OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN +IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Where there is uncertainty as to how, or +where, to apply marks, open an OSR to escalate to OSG for review. + +SPDX-License-Identifier: BSD-3-Clause-Clear From c810fcc0fd521a17681ddf13762d4388a598a2fd Mon Sep 17 00:00:00 2001 From: quic-khrahul <131336334+quic-khrahul@users.noreply.github.com> Date: Fri, 28 Apr 2023 14:00:21 +0530 Subject: [PATCH 3/4] Update README.md Signed-off-by: quic-khrahul <131336334+quic-khrahul@users.noreply.github.com> --- .../People-Intrusion-Detection/README.md | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/README.md b/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/README.md index deea678..96fb810 100644 --- a/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/README.md +++ b/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/README.md @@ -32,7 +32,7 @@ Refer the steps given in the detailned documentation in the SDK for installation 4. A USB camera 5. 
A display monitor -![image](https://github.qualcomm.com/storage/user/12959/files/2d85cec3-913e-43af-9a06-e539690bb30c) +![image](https://user-images.githubusercontent.com/131336334/235096089-97d1d087-c0ba-43ad-821f-470c8f43beda.png) # Environment Setup to download Yolov5 Model: @@ -96,11 +96,11 @@ source bin/envsetup.sh -o $ONNX_DIR SNPE currently does not support 5D operator. It requires specify output nodes before 5D Reshape in convert command. The output nodes can be checked in the https://netron.app/. To check the output layer nodes, Open the model in the Netron app and click on Conv layer. -![image](https://github.qualcomm.com/storage/user/12959/files/2aa3e4af-6518-43df-a59b-c29130644554) +![image](https://user-images.githubusercontent.com/131336334/235096179-a2985d4f-1002-41e9-8e4c-9e7294551f1c.png) In attached yolov5m.onnx, the output nodes before 5D is onnx::443 (Conv_271), 496 (Conv_305) and 549 (Conv_339) -![image](https://github.qualcomm.com/storage/user/12959/files/e8f4be0d-06c5-4f16-ac58-83e9795315a0) +![image](https://user-images.githubusercontent.com/131336334/235096209-91c10024-6007-4665-9d4e-6179568dfef9.png) ## This implementation does below functions: * anchorBoxProcess: @@ -148,7 +148,7 @@ Config file is used to provide model information to qtimlesnpe plugin. Make chan Update the model layer information, label and dlc path in the configuration file. * https://github.com/quic/sample-apps-for-robotics-platforms/tree/master/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/model/mle_snpeyolov5m_quant.config -![image](https://github.qualcomm.com/storage/user/12959/files/a6c327d3-69fd-4fac-b863-0412f6b1dc0f) +![image](https://user-images.githubusercontent.com/131336334/235096273-1b44f81b-b790-488f-a40b-75fbbc10f703.png) #### To define the camera FOV, there is need to set restricted area. It can be set by change the x,y, width and height. These dimensions depends on the camera resolution so it need to be set accordingly. 
@@ -171,10 +171,10 @@ b. Plugin a keyboard and a mouse to the development board. c. Connect the USB camera module to the development board. # Gstreamer Pipeline -![image](https://github.qualcomm.com/storage/user/12959/files/6e7ac490-408d-41b2-a633-711b1d01f914) +![image](https://user-images.githubusercontent.com/131336334/235096309-3524b94b-f6bd-4a5c-ae47-eff97c8c002a.png) # Detailed flow diagram -![image](https://github.qualcomm.com/storage/user/12959/files/0ffa0c7c-32cb-4c68-9dbe-0114f8029e98) +![image](https://user-images.githubusercontent.com/131336334/235096337-64bcd074-e064-4d1f-a138-ed38285e5496.png) # Steps to Run Demo RB5 Board: @@ -253,4 +253,4 @@ gst-launch-1.0 qtiqmmfsrc ! video/x-raw\(memory:GBM\), format=NV12, width=1280, Add GST_DEBUG=qtiml*:5 in gstreamer launch command if you want to enable debug logs for qtimlesnpe plugin. ### Demo -![image](https://github.qualcomm.com/storage/user/12959/files/d71791e7-db2c-496b-8c8c-6ec9942ce506) +![image](https://user-images.githubusercontent.com/131336334/235096429-856c3ce6-e6ec-446b-b768-05ac7459b0b1.png) From c7b1a1ebfa45d23bd7b90324da25a7b71f10b11f Mon Sep 17 00:00:00 2001 From: Rahul Khandelwal Date: Fri, 28 Apr 2023 14:30:54 +0530 Subject: [PATCH 4/4] Rename DIrectory --- .../LICENSE | 0 .../README.md | 0 .../docs/GettingStarted.md | 0 .../docs/Install.md | 0 .../model/coco_labels.txt | 0 .../model/inputlist.txt | 0 .../model/mle_snpeyolov5m_quant.config | 0 .../model/mle_snpeyolov5n_quant.config | 0 .../src/gst-plugin-mle/CMakeLists.txt | 0 .../src/gst-plugin-mle/config.h.in | 0 .../src/gst-plugin-mle/mle_engine/CMakeLists.txt | 0 .../src/gst-plugin-mle/mle_engine/common_utils.h | 0 .../src/gst-plugin-mle/mle_engine/ml_engine_impl.cc | 0 .../src/gst-plugin-mle/mle_engine/ml_engine_intf.h | 0 .../src/gst-plugin-mle/mle_engine/snpe_base.cc | 0 .../src/gst-plugin-mle/mle_engine/snpe_base.h | 0 .../src/gst-plugin-mle/mle_engine/snpe_detection.cc | 0 .../src/gst-plugin-mle/mle_engine/snpe_detection.h | 0 
.../src/gst-plugin-mle/mle_engine/snpe_yolodetection.cc | 0 .../src/gst-plugin-mle/mle_engine/snpe_yolodetection.h | 0 .../src/gst-plugin-mle/mle_gst_snpe/CMakeLists.txt | 0 .../src/gst-plugin-mle/mle_gst_snpe/mle_snpe.cc | 0 .../src/gst-plugin-mle/mle_gst_snpe/mle_snpe.h | 0 .../src/gst-plugin-mle/mle_gst_snpe/mle_snpeyolov5n.config | 0 .../src/include/gst/video/c2d-video-converter.h | 0 .../src/include/gst/video/gstimagepool.h | 0 .../src/include/ml-meta/ml_meta.h | 0 27 files changed, 0 insertions(+), 0 deletions(-) rename RB5/linux_kernel_4_x/AI-ML-apps/{People-Intrusion-Detection => People-Detection-YoloV5}/LICENSE (100%) rename RB5/linux_kernel_4_x/AI-ML-apps/{People-Intrusion-Detection => People-Detection-YoloV5}/README.md (100%) rename RB5/linux_kernel_4_x/AI-ML-apps/{People-Intrusion-Detection => People-Detection-YoloV5}/docs/GettingStarted.md (100%) rename RB5/linux_kernel_4_x/AI-ML-apps/{People-Intrusion-Detection => People-Detection-YoloV5}/docs/Install.md (100%) rename RB5/linux_kernel_4_x/AI-ML-apps/{People-Intrusion-Detection => People-Detection-YoloV5}/model/coco_labels.txt (100%) rename RB5/linux_kernel_4_x/AI-ML-apps/{People-Intrusion-Detection => People-Detection-YoloV5}/model/inputlist.txt (100%) rename RB5/linux_kernel_4_x/AI-ML-apps/{People-Intrusion-Detection => People-Detection-YoloV5}/model/mle_snpeyolov5m_quant.config (100%) rename RB5/linux_kernel_4_x/AI-ML-apps/{People-Intrusion-Detection => People-Detection-YoloV5}/model/mle_snpeyolov5n_quant.config (100%) rename RB5/linux_kernel_4_x/AI-ML-apps/{People-Intrusion-Detection => People-Detection-YoloV5}/src/gst-plugin-mle/CMakeLists.txt (100%) rename RB5/linux_kernel_4_x/AI-ML-apps/{People-Intrusion-Detection => People-Detection-YoloV5}/src/gst-plugin-mle/config.h.in (100%) rename RB5/linux_kernel_4_x/AI-ML-apps/{People-Intrusion-Detection => People-Detection-YoloV5}/src/gst-plugin-mle/mle_engine/CMakeLists.txt (100%) rename RB5/linux_kernel_4_x/AI-ML-apps/{People-Intrusion-Detection => 
People-Detection-YoloV5}/src/gst-plugin-mle/mle_engine/common_utils.h (100%) rename RB5/linux_kernel_4_x/AI-ML-apps/{People-Intrusion-Detection => People-Detection-YoloV5}/src/gst-plugin-mle/mle_engine/ml_engine_impl.cc (100%) rename RB5/linux_kernel_4_x/AI-ML-apps/{People-Intrusion-Detection => People-Detection-YoloV5}/src/gst-plugin-mle/mle_engine/ml_engine_intf.h (100%) rename RB5/linux_kernel_4_x/AI-ML-apps/{People-Intrusion-Detection => People-Detection-YoloV5}/src/gst-plugin-mle/mle_engine/snpe_base.cc (100%) rename RB5/linux_kernel_4_x/AI-ML-apps/{People-Intrusion-Detection => People-Detection-YoloV5}/src/gst-plugin-mle/mle_engine/snpe_base.h (100%) rename RB5/linux_kernel_4_x/AI-ML-apps/{People-Intrusion-Detection => People-Detection-YoloV5}/src/gst-plugin-mle/mle_engine/snpe_detection.cc (100%) rename RB5/linux_kernel_4_x/AI-ML-apps/{People-Intrusion-Detection => People-Detection-YoloV5}/src/gst-plugin-mle/mle_engine/snpe_detection.h (100%) rename RB5/linux_kernel_4_x/AI-ML-apps/{People-Intrusion-Detection => People-Detection-YoloV5}/src/gst-plugin-mle/mle_engine/snpe_yolodetection.cc (100%) rename RB5/linux_kernel_4_x/AI-ML-apps/{People-Intrusion-Detection => People-Detection-YoloV5}/src/gst-plugin-mle/mle_engine/snpe_yolodetection.h (100%) rename RB5/linux_kernel_4_x/AI-ML-apps/{People-Intrusion-Detection => People-Detection-YoloV5}/src/gst-plugin-mle/mle_gst_snpe/CMakeLists.txt (100%) rename RB5/linux_kernel_4_x/AI-ML-apps/{People-Intrusion-Detection => People-Detection-YoloV5}/src/gst-plugin-mle/mle_gst_snpe/mle_snpe.cc (100%) rename RB5/linux_kernel_4_x/AI-ML-apps/{People-Intrusion-Detection => People-Detection-YoloV5}/src/gst-plugin-mle/mle_gst_snpe/mle_snpe.h (100%) rename RB5/linux_kernel_4_x/AI-ML-apps/{People-Intrusion-Detection => People-Detection-YoloV5}/src/gst-plugin-mle/mle_gst_snpe/mle_snpeyolov5n.config (100%) rename RB5/linux_kernel_4_x/AI-ML-apps/{People-Intrusion-Detection => 
People-Detection-YoloV5}/src/include/gst/video/c2d-video-converter.h (100%) rename RB5/linux_kernel_4_x/AI-ML-apps/{People-Intrusion-Detection => People-Detection-YoloV5}/src/include/gst/video/gstimagepool.h (100%) rename RB5/linux_kernel_4_x/AI-ML-apps/{People-Intrusion-Detection => People-Detection-YoloV5}/src/include/ml-meta/ml_meta.h (100%) diff --git a/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/LICENSE b/RB5/linux_kernel_4_x/AI-ML-apps/People-Detection-YoloV5/LICENSE similarity index 100% rename from RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/LICENSE rename to RB5/linux_kernel_4_x/AI-ML-apps/People-Detection-YoloV5/LICENSE diff --git a/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/README.md b/RB5/linux_kernel_4_x/AI-ML-apps/People-Detection-YoloV5/README.md similarity index 100% rename from RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/README.md rename to RB5/linux_kernel_4_x/AI-ML-apps/People-Detection-YoloV5/README.md diff --git a/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/docs/GettingStarted.md b/RB5/linux_kernel_4_x/AI-ML-apps/People-Detection-YoloV5/docs/GettingStarted.md similarity index 100% rename from RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/docs/GettingStarted.md rename to RB5/linux_kernel_4_x/AI-ML-apps/People-Detection-YoloV5/docs/GettingStarted.md diff --git a/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/docs/Install.md b/RB5/linux_kernel_4_x/AI-ML-apps/People-Detection-YoloV5/docs/Install.md similarity index 100% rename from RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/docs/Install.md rename to RB5/linux_kernel_4_x/AI-ML-apps/People-Detection-YoloV5/docs/Install.md diff --git a/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/model/coco_labels.txt b/RB5/linux_kernel_4_x/AI-ML-apps/People-Detection-YoloV5/model/coco_labels.txt similarity index 100% rename from 
RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/model/coco_labels.txt rename to RB5/linux_kernel_4_x/AI-ML-apps/People-Detection-YoloV5/model/coco_labels.txt diff --git a/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/model/inputlist.txt b/RB5/linux_kernel_4_x/AI-ML-apps/People-Detection-YoloV5/model/inputlist.txt similarity index 100% rename from RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/model/inputlist.txt rename to RB5/linux_kernel_4_x/AI-ML-apps/People-Detection-YoloV5/model/inputlist.txt diff --git a/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/model/mle_snpeyolov5m_quant.config b/RB5/linux_kernel_4_x/AI-ML-apps/People-Detection-YoloV5/model/mle_snpeyolov5m_quant.config similarity index 100% rename from RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/model/mle_snpeyolov5m_quant.config rename to RB5/linux_kernel_4_x/AI-ML-apps/People-Detection-YoloV5/model/mle_snpeyolov5m_quant.config diff --git a/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/model/mle_snpeyolov5n_quant.config b/RB5/linux_kernel_4_x/AI-ML-apps/People-Detection-YoloV5/model/mle_snpeyolov5n_quant.config similarity index 100% rename from RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/model/mle_snpeyolov5n_quant.config rename to RB5/linux_kernel_4_x/AI-ML-apps/People-Detection-YoloV5/model/mle_snpeyolov5n_quant.config diff --git a/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/CMakeLists.txt b/RB5/linux_kernel_4_x/AI-ML-apps/People-Detection-YoloV5/src/gst-plugin-mle/CMakeLists.txt similarity index 100% rename from RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/CMakeLists.txt rename to RB5/linux_kernel_4_x/AI-ML-apps/People-Detection-YoloV5/src/gst-plugin-mle/CMakeLists.txt diff --git a/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/config.h.in 
b/RB5/linux_kernel_4_x/AI-ML-apps/People-Detection-YoloV5/src/gst-plugin-mle/config.h.in similarity index 100% rename from RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/config.h.in rename to RB5/linux_kernel_4_x/AI-ML-apps/People-Detection-YoloV5/src/gst-plugin-mle/config.h.in diff --git a/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_engine/CMakeLists.txt b/RB5/linux_kernel_4_x/AI-ML-apps/People-Detection-YoloV5/src/gst-plugin-mle/mle_engine/CMakeLists.txt similarity index 100% rename from RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_engine/CMakeLists.txt rename to RB5/linux_kernel_4_x/AI-ML-apps/People-Detection-YoloV5/src/gst-plugin-mle/mle_engine/CMakeLists.txt diff --git a/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_engine/common_utils.h b/RB5/linux_kernel_4_x/AI-ML-apps/People-Detection-YoloV5/src/gst-plugin-mle/mle_engine/common_utils.h similarity index 100% rename from RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_engine/common_utils.h rename to RB5/linux_kernel_4_x/AI-ML-apps/People-Detection-YoloV5/src/gst-plugin-mle/mle_engine/common_utils.h diff --git a/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_engine/ml_engine_impl.cc b/RB5/linux_kernel_4_x/AI-ML-apps/People-Detection-YoloV5/src/gst-plugin-mle/mle_engine/ml_engine_impl.cc similarity index 100% rename from RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_engine/ml_engine_impl.cc rename to RB5/linux_kernel_4_x/AI-ML-apps/People-Detection-YoloV5/src/gst-plugin-mle/mle_engine/ml_engine_impl.cc diff --git a/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_engine/ml_engine_intf.h b/RB5/linux_kernel_4_x/AI-ML-apps/People-Detection-YoloV5/src/gst-plugin-mle/mle_engine/ml_engine_intf.h similarity index 100% rename from 
RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_engine/ml_engine_intf.h rename to RB5/linux_kernel_4_x/AI-ML-apps/People-Detection-YoloV5/src/gst-plugin-mle/mle_engine/ml_engine_intf.h diff --git a/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_engine/snpe_base.cc b/RB5/linux_kernel_4_x/AI-ML-apps/People-Detection-YoloV5/src/gst-plugin-mle/mle_engine/snpe_base.cc similarity index 100% rename from RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_engine/snpe_base.cc rename to RB5/linux_kernel_4_x/AI-ML-apps/People-Detection-YoloV5/src/gst-plugin-mle/mle_engine/snpe_base.cc diff --git a/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_engine/snpe_base.h b/RB5/linux_kernel_4_x/AI-ML-apps/People-Detection-YoloV5/src/gst-plugin-mle/mle_engine/snpe_base.h similarity index 100% rename from RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_engine/snpe_base.h rename to RB5/linux_kernel_4_x/AI-ML-apps/People-Detection-YoloV5/src/gst-plugin-mle/mle_engine/snpe_base.h diff --git a/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_engine/snpe_detection.cc b/RB5/linux_kernel_4_x/AI-ML-apps/People-Detection-YoloV5/src/gst-plugin-mle/mle_engine/snpe_detection.cc similarity index 100% rename from RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_engine/snpe_detection.cc rename to RB5/linux_kernel_4_x/AI-ML-apps/People-Detection-YoloV5/src/gst-plugin-mle/mle_engine/snpe_detection.cc diff --git a/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_engine/snpe_detection.h b/RB5/linux_kernel_4_x/AI-ML-apps/People-Detection-YoloV5/src/gst-plugin-mle/mle_engine/snpe_detection.h similarity index 100% rename from RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_engine/snpe_detection.h rename to 
RB5/linux_kernel_4_x/AI-ML-apps/People-Detection-YoloV5/src/gst-plugin-mle/mle_engine/snpe_detection.h diff --git a/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_engine/snpe_yolodetection.cc b/RB5/linux_kernel_4_x/AI-ML-apps/People-Detection-YoloV5/src/gst-plugin-mle/mle_engine/snpe_yolodetection.cc similarity index 100% rename from RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_engine/snpe_yolodetection.cc rename to RB5/linux_kernel_4_x/AI-ML-apps/People-Detection-YoloV5/src/gst-plugin-mle/mle_engine/snpe_yolodetection.cc diff --git a/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_engine/snpe_yolodetection.h b/RB5/linux_kernel_4_x/AI-ML-apps/People-Detection-YoloV5/src/gst-plugin-mle/mle_engine/snpe_yolodetection.h similarity index 100% rename from RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_engine/snpe_yolodetection.h rename to RB5/linux_kernel_4_x/AI-ML-apps/People-Detection-YoloV5/src/gst-plugin-mle/mle_engine/snpe_yolodetection.h diff --git a/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_gst_snpe/CMakeLists.txt b/RB5/linux_kernel_4_x/AI-ML-apps/People-Detection-YoloV5/src/gst-plugin-mle/mle_gst_snpe/CMakeLists.txt similarity index 100% rename from RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_gst_snpe/CMakeLists.txt rename to RB5/linux_kernel_4_x/AI-ML-apps/People-Detection-YoloV5/src/gst-plugin-mle/mle_gst_snpe/CMakeLists.txt diff --git a/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_gst_snpe/mle_snpe.cc b/RB5/linux_kernel_4_x/AI-ML-apps/People-Detection-YoloV5/src/gst-plugin-mle/mle_gst_snpe/mle_snpe.cc similarity index 100% rename from RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_gst_snpe/mle_snpe.cc rename to 
RB5/linux_kernel_4_x/AI-ML-apps/People-Detection-YoloV5/src/gst-plugin-mle/mle_gst_snpe/mle_snpe.cc diff --git a/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_gst_snpe/mle_snpe.h b/RB5/linux_kernel_4_x/AI-ML-apps/People-Detection-YoloV5/src/gst-plugin-mle/mle_gst_snpe/mle_snpe.h similarity index 100% rename from RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_gst_snpe/mle_snpe.h rename to RB5/linux_kernel_4_x/AI-ML-apps/People-Detection-YoloV5/src/gst-plugin-mle/mle_gst_snpe/mle_snpe.h diff --git a/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_gst_snpe/mle_snpeyolov5n.config b/RB5/linux_kernel_4_x/AI-ML-apps/People-Detection-YoloV5/src/gst-plugin-mle/mle_gst_snpe/mle_snpeyolov5n.config similarity index 100% rename from RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/gst-plugin-mle/mle_gst_snpe/mle_snpeyolov5n.config rename to RB5/linux_kernel_4_x/AI-ML-apps/People-Detection-YoloV5/src/gst-plugin-mle/mle_gst_snpe/mle_snpeyolov5n.config diff --git a/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/include/gst/video/c2d-video-converter.h b/RB5/linux_kernel_4_x/AI-ML-apps/People-Detection-YoloV5/src/include/gst/video/c2d-video-converter.h similarity index 100% rename from RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/include/gst/video/c2d-video-converter.h rename to RB5/linux_kernel_4_x/AI-ML-apps/People-Detection-YoloV5/src/include/gst/video/c2d-video-converter.h diff --git a/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/include/gst/video/gstimagepool.h b/RB5/linux_kernel_4_x/AI-ML-apps/People-Detection-YoloV5/src/include/gst/video/gstimagepool.h similarity index 100% rename from RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/include/gst/video/gstimagepool.h rename to RB5/linux_kernel_4_x/AI-ML-apps/People-Detection-YoloV5/src/include/gst/video/gstimagepool.h diff --git 
a/RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/include/ml-meta/ml_meta.h b/RB5/linux_kernel_4_x/AI-ML-apps/People-Detection-YoloV5/src/include/ml-meta/ml_meta.h similarity index 100% rename from RB5/linux_kernel_4_x/AI-ML-apps/People-Intrusion-Detection/src/include/ml-meta/ml_meta.h rename to RB5/linux_kernel_4_x/AI-ML-apps/People-Detection-YoloV5/src/include/ml-meta/ml_meta.h